section stringlengths 2 30 | filename stringlengths 1 82 | text stringlengths 783 28M |
|---|---|---|
Test | TestWeb | import urllib.request
import pytest
try:
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.expected_conditions import staleness_of, title_is
from selenium.webdriver.support.ui import WebDriverWait
except:
pass
class WaitForPageLoad(object):
    """Context manager that, on exit, blocks until the browser has navigated
    away from the page that was current on entry."""

    def __init__(self, browser):
        self.browser = browser

    def __enter__(self):
        # Remember the current <html> element: it goes stale on navigation.
        self.old_page = self.browser.find_element_by_tag_name("html")

    def __exit__(self, *exc_info):
        # Wait up to 10s for the remembered element to become stale,
        # i.e. for the page load triggered inside the block to complete.
        WebDriverWait(self.browser, 10).until(staleness_of(self.old_page))
def getContextUrl(browser):
    """Return the URL of the browser's current JavaScript context."""
    script = "return window.location.toString()"
    return browser.execute_script(script)
def getUrl(url):
    """Fetch *url* and return the response body as text.

    Raises:
        AssertionError: if the body contains a server error message.
    """
    # urlopen().read() returns bytes in Python 3; the original code then did
    # `"server error" not in content.lower()`, which raises TypeError
    # (str membership test on bytes), and callers also do str containment
    # checks on the return value. Decode once here; also close the response.
    with urllib.request.urlopen(url) as response:
        content = response.read().decode("utf-8", errors="replace")
    assert "server error" not in content.lower(), "Got a server error! " + repr(url)
    return content
@pytest.mark.usefixtures("resetSettings")
@pytest.mark.webtest
class TestWeb:
    """Browser/HTTP integration tests: path-traversal protection and the
    security properties of the wrapper iframe and raw file serving."""

    def testFileSecurity(self, site_url):
        # Private files and every path-traversal variant must be rejected,
        # whether addressed through /media/, /raw/ or a bare site path.
        assert "Not Found" in getUrl("%s/media/sites.json" % site_url)
        assert "Forbidden" in getUrl("%s/media/./sites.json" % site_url)
        assert "Forbidden" in getUrl("%s/media/../config.py" % site_url)
        assert "Forbidden" in getUrl(
            "%s/media/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/../sites.json" % site_url
        )
        assert "Forbidden" in getUrl(
            "%s/media/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/..//sites.json" % site_url
        )
        assert "Forbidden" in getUrl(
            "%s/media/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/../../zeronet.py" % site_url
        )
        assert "Not Found" in getUrl("%s/raw/sites.json" % site_url)
        assert "Forbidden" in getUrl("%s/raw/./sites.json" % site_url)
        assert "Forbidden" in getUrl("%s/raw/../config.py" % site_url)
        assert "Forbidden" in getUrl(
            "%s/raw/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/../sites.json" % site_url
        )
        assert "Forbidden" in getUrl(
            "%s/raw/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/..//sites.json" % site_url
        )
        assert "Forbidden" in getUrl(
            "%s/raw/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/../../zeronet.py" % site_url
        )
        assert "Forbidden" in getUrl(
            "%s/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/../sites.json" % site_url
        )
        assert "Forbidden" in getUrl(
            "%s/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/..//sites.json" % site_url
        )
        assert "Forbidden" in getUrl(
            "%s/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/../../zeronet.py" % site_url
        )
        assert "Forbidden" in getUrl("%s/content.db" % site_url)
        assert "Forbidden" in getUrl("%s/./users.json" % site_url)
        assert "Forbidden" in getUrl("%s/./key-rsa.pem" % site_url)
        # Repeated "./" and "//" segments must not bypass the checks either.
        assert "Forbidden" in getUrl(
            "%s/././././././././././//////sites.json" % site_url
        )

    def testLinkSecurity(self, browser, site_url):
        browser.get(
            "%s/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/test/security.html" % site_url
        )
        WebDriverWait(browser, 10).until(title_is("ZeroHello - ZeroNet"))
        assert (
            getContextUrl(browser)
            == "%s/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/test/security.html" % site_url
        )
        # Switch to inner frame
        browser.switch_to.frame(browser.find_element_by_id("inner-iframe"))
        assert "wrapper_nonce" in getContextUrl(browser)
        assert browser.find_element_by_id("script_output").text == "Result: Works"
        browser.switch_to.default_content()
        # Clicking on links without target
        browser.switch_to.frame(browser.find_element_by_id("inner-iframe"))
        with WaitForPageLoad(browser):
            browser.find_element_by_id("link_to_current").click()
        assert "wrapper_nonce" not in getContextUrl(
            browser
        )  # The browser object back to default content
        assert "Forbidden" not in browser.page_source
        # Check if we have frame inside frame
        browser.switch_to.frame(browser.find_element_by_id("inner-iframe"))
        with pytest.raises(NoSuchElementException):
            assert not browser.find_element_by_id("inner-iframe")
        browser.switch_to.default_content()
        # Clicking on link with target=_top
        browser.switch_to.frame(browser.find_element_by_id("inner-iframe"))
        with WaitForPageLoad(browser):
            browser.find_element_by_id("link_to_top").click()
        assert "wrapper_nonce" not in getContextUrl(
            browser
        )  # The browser object back to default content
        assert "Forbidden" not in browser.page_source
        browser.switch_to.default_content()
        # Try to escape from inner_frame
        browser.switch_to.frame(browser.find_element_by_id("inner-iframe"))
        assert "wrapper_nonce" in getContextUrl(
            browser
        )  # Make sure we are inside of the inner-iframe
        with WaitForPageLoad(browser):
            browser.execute_script("window.top.location = window.location")
        assert "wrapper_nonce" in getContextUrl(
            browser
        )  # We try to use nonce-ed html without iframe
        assert "<iframe" in browser.page_source  # Only allow to use nonce once-time
        browser.switch_to.default_content()

    def testRaw(self, browser, site_url):
        # /raw/ serves the page without the wrapper iframe.
        # NOTE(review): the page reports "Result: Fail" here, presumably because
        # scripts are restricted in raw mode — confirm against security.html.
        browser.get(
            "%s/raw/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/test/security.html" % site_url
        )
        WebDriverWait(browser, 10).until(title_is("Security tests"))
        assert (
            getContextUrl(browser)
            == "%s/raw/1EU1tbG9oC1A8jz2ouVwGZyQ5asrNsE4Vr/test/security.html" % site_url
        )
        assert browser.find_element_by_id("script_output").text == "Result: Fail"
PyObjCTools | FndCategories | """
A number of useful categories on Foundation classes
"""
__all__ = ()
import objc
from Foundation import NSAffineTransform
class NSAffineTransform(objc.Category(NSAffineTransform)):
    # objc.Category: these methods are injected into the existing
    # Foundation.NSAffineTransform class rather than defining a new one.

    def rotateByDegrees_atPoint_(self, angle, point):
        """
        Rotate the coordinatespace ``angle`` degrees around
        ``point``.
        """
        self.rotateByDegrees_(angle)
        # Apply the inverse rotation to `point` to find how far it moved,
        # then translate back by that delta so `point` stays fixed.
        tf = NSAffineTransform.transform()
        tf.rotateByDegrees_(-angle)
        oldPt = tf.transformPoint_(point)
        oldPt.x -= point.x
        oldPt.y -= point.y
        self.translateXBy_yBy_(oldPt.x, oldPt.y)

    def rotateByRadians_atPoint_(self, angle, point):
        """
        Rotate the coordinatespace ``angle`` radians around
        ``point``.
        """
        self.rotateByRadians_(angle)
        # Same compensation as above, in radians.
        tf = NSAffineTransform.transform()
        tf.rotateByRadians_(-angle)
        oldPt = tf.transformPoint_(point)
        oldPt.x -= point.x
        oldPt.y -= point.y
        self.translateXBy_yBy_(oldPt.x, oldPt.y)
|
core | graph | from __future__ import print_function
import json
import logging
import os
import re
import weakref
from collections import OrderedDict, defaultdict
from contextlib import contextmanager
from enum import Enum
import meshroom
import meshroom.core
from meshroom.common import BaseObject, DictModel, Property, Signal, Slot
from meshroom.core import Version
from meshroom.core.attribute import Attribute, ListAttribute
from meshroom.core.exception import StopBranchVisit, StopGraphVisit
from meshroom.core.node import CompatibilityNode, Node, Status, nodeFactory
# Swap in a JSONEncoder subclass so that Enum members serialize as their name.
DefaultJSONEncoder = json.JSONEncoder  # keep a handle on the stock encoder


class MyJSONEncoder(DefaultJSONEncoder):
    """JSON encoder that serializes Enum members by name."""

    def default(self, obj):
        # Anything that is not an Enum is handled by the stock implementation.
        if not isinstance(obj, Enum):
            return DefaultJSONEncoder.default(self, obj)
        return obj.name


json.JSONEncoder = MyJSONEncoder  # module-wide replacement with Enum support
@contextmanager
def GraphModification(graph):
    """
    Context manager batching several modifications into a single Graph
    update. GraphModification blocks may be nested.
    """
    if not isinstance(graph, Graph):
        raise ValueError("GraphModification expects a Graph instance")
    # Remember the current policy so nested blocks restore it correctly.
    previousState = graph.updateEnabled
    # Suspend updates for the duration of the block
    # (no-op when they are already suspended).
    graph.updateEnabled = False
    try:
        yield
    finally:
        # Always restore the previous policy, whether or not the block raised.
        graph.updateEnabled = previousState
class Edge(BaseObject):
    """Directed connection between two attributes (src -> dst).

    Endpoints are held through weak references, so an Edge does not keep
    its attributes (or their nodes) alive.
    """

    def __init__(self, src, dst, parent=None):
        super(Edge, self).__init__(parent)
        self._src = weakref.ref(src)
        self._dst = weakref.ref(dst)
        self._repr = "<Edge> {} -> {}".format(self._src(), self._dst())

    @property
    def src(self):
        # Dereference; may be None if the source attribute was collected.
        return self._src()

    @property
    def dst(self):
        # Dereference; may be None if the destination attribute was collected.
        return self._dst()

    # Re-expose the endpoints as constant Properties (QML-visible).
    src = Property(Attribute, src.fget, constant=True)
    dst = Property(Attribute, dst.fget, constant=True)
# Vertex colors for depth-first search traversal:
WHITE = 0  # not yet discovered
GRAY = 1  # discovered, exploration in progress
BLACK = 2  # fully explored
class Visitor(object):
    """
    Base class for Graph Visitors that does nothing.
    Sub-classes can override any method to implement specific algorithms.

    The event-point names follow the Boost Graph Library DFS visitor
    terminology (discover/examine/tree/back/forward-or-cross/finish).
    """

    def __init__(self, reverse, dependenciesOnly):
        super(Visitor, self).__init__()
        self.reverse = reverse  # traverse edges in reverse direction
        self.dependenciesOnly = dependenciesOnly  # restrict to dependency edges

    # def initializeVertex(self, s, g):
    #     '''is invoked on every vertex of the graph before the start of the graph search.'''
    #     pass
    # def startVertex(self, s, g):
    #     '''is invoked on the source vertex once before the start of the search.'''
    #     pass

    def discoverVertex(self, u, g):
        """Is invoked when a vertex is encountered for the first time."""
        pass

    def examineEdge(self, e, g):
        """Is invoked on every out-edge of each vertex after it is discovered."""
        pass

    def treeEdge(self, e, g):
        """Is invoked on each edge as it becomes a member of the edges that form the search tree.
        If you wish to record predecessors, do so at this event point."""
        pass

    def backEdge(self, e, g):
        """Is invoked on the back edges in the graph."""
        pass

    def forwardOrCrossEdge(self, e, g):
        """Is invoked on forward or cross edges in the graph.
        In an undirected graph this method is never called."""
        pass

    def finishEdge(self, e, g):
        """Is invoked on the non-tree edges in the graph
        as well as on each tree edge after its target vertex is finished."""
        pass

    def finishVertex(self, u, g):
        """Is invoked on a vertex after all of its out edges have been added to the search tree and all of the
        adjacent vertices have been discovered (but before their out-edges have been examined).
        """
        pass
def changeTopology(func):
    """
    Decorator for Graph methods modifying the graph topology (add/remove
    edges or nodes): after the wrapped method runs, it marks the graph
    topology dirty and requests a graph update.

    Args:
        func: the Graph method to wrap.

    Returns:
        the wrapped method, with the original metadata preserved.
    """
    from functools import wraps

    # Preserve the wrapped method's __name__/__doc__: the original wrapper
    # reported itself as "decorator" in tracebacks and introspection.
    @wraps(func)
    def decorator(self, *args, **kwargs):
        assert isinstance(self, Graph)
        # call method
        result = func(self, *args, **kwargs)
        # mark graph dirty
        self.dirtyTopology = True
        # request graph update
        self.update()
        return result

    return decorator
class Graph(BaseObject):
"""
_________________ _________________ _________________
| | | | | |
| Node A | | Node B | | Node C |
| | edge | | edge | |
|input output|>---->|input output|>---->|input output|
|_______________| |_______________| |_______________|
Data structures:
nodes = {'A': <nodeA>, 'B': <nodeB>, 'C': <nodeC>}
edges = {B.input: A.output, C.input: B.output,}
"""
    # Class-level default for the cache directory (instances set their own
    # cacheDir in __init__).
    _cacheDir = ""

    class IO(object):
        """Centralize Graph file keys and IO version."""

        # Current graph file format version.
        __version__ = "1.1"

        class Keys(object):
            """File Keys."""

            # Doesn't inherit enum to simplify usage (Graph.IO.Keys.XX, without .value)
            Header = "header"
            NodesVersions = "nodesVersions"
            ReleaseVersion = "releaseVersion"
            FileVersion = "fileVersion"
            Graph = "graph"

        class Features(Enum):
            """File Features."""

            Graph = "graph"
            Header = "header"
            NodesVersions = "nodesVersions"
            PrecomputedOutputs = "precomputedOutputs"
            NodesPositions = "nodesPositions"

        @staticmethod
        def getFeaturesForVersion(fileVersion):
            """Return the list of supported features based on a file version.

            Args:
                fileVersion (str, Version): the file version

            Returns:
                tuple of Graph.IO.Features: the list of supported features
            """
            if isinstance(fileVersion, str):
                fileVersion = Version(fileVersion)
            # Every version supports the bare serialized graph.
            features = [Graph.IO.Features.Graph]
            if fileVersion >= Version("1.0"):
                features += [
                    Graph.IO.Features.Header,
                    Graph.IO.Features.NodesVersions,
                    Graph.IO.Features.PrecomputedOutputs,
                ]
            if fileVersion >= Version("1.1"):
                features += [Graph.IO.Features.NodesPositions]
            return tuple(features)
    def __init__(self, name, parent=None):
        """
        Args:
            name (str): the name of the graph
            parent: optional parent object
        """
        super(Graph, self).__init__(parent)
        self.name = name
        self._updateEnabled = True  # when False, updates are deferred (see updateEnabled setter)
        self._updateRequested = False  # an update was requested while updates were disabled
        self.dirtyTopology = False  # set by @changeTopology-decorated methods
        self._nodesMinMaxDepths = {}
        self._computationBlocked = {}
        self._canComputeLeaves = True
        self._nodes = DictModel(keyAttrName="name", parent=self)
        self._edges = DictModel(
            keyAttrName="dst", parent=self
        )  # use dst attribute as unique key since it can only have one input connection
        self._importedNodes = DictModel(keyAttrName="name", parent=self)
        self._compatibilityNodes = DictModel(keyAttrName="name", parent=self)
        self.cacheDir = meshroom.core.defaultCacheFolder
        self._filepath = ""
        self.header = {}  # graph file header (versions, template flag, ...)
def clear(self):
self.header.clear()
self._compatibilityNodes.clear()
self._edges.clear()
# Tell QML nodes are going to be deleted
for node in self._nodes:
node.alive = False
self._importedNodes.clear()
self._nodes.clear()
@property
def fileFeatures(self):
"""Get loaded file supported features based on its version."""
return Graph.IO.getFeaturesForVersion(
self.header.get(Graph.IO.Keys.FileVersion, "0.0")
)
    @Slot(str)
    def load(
        self, filepath, setupProjectFile=True, importProject=False, publishOutputs=False
    ):
        """
        Load a Meshroom graph ".mg" file.

        Args:
            filepath: project filepath to load
            setupProjectFile: Store the reference to the project file and setup the cache directory.
                If false, it only loads the graph of the project file as a template.
            importProject: True if the project that is loaded will be imported in the current graph, instead
                of opened.
            publishOutputs: True if "Publish" nodes from templates should not be ignored.

        Returns:
            True once the file has been loaded.

        Raises:
            RuntimeError: if the file's graph or one of its nodes is not a dict.
        """
        if not importProject:
            # Opening (not importing): start from an empty graph.
            self.clear()
        with open(filepath) as jsonFile:
            fileData = json.load(jsonFile)
        # older versions of Meshroom files only contained the serialized nodes
        graphData = fileData.get(Graph.IO.Keys.Graph, fileData)
        if importProject:
            self._importedNodes.clear()
            # Rename clashing nodes and rewrite their links accordingly.
            graphData = self.updateImportedProject(graphData)
        if not isinstance(graphData, dict):
            raise RuntimeError(
                "loadGraph error: Graph is not a dict. File: {}".format(filepath)
            )
        self.header = fileData.get(Graph.IO.Keys.Header, {})
        nodesVersions = self.header.get(Graph.IO.Keys.NodesVersions, {})
        # check whether the file was saved as a template in minimal mode
        isTemplate = self.header.get("template", False)
        with GraphModification(self):
            # iterate over nodes sorted by suffix index in their names
            for nodeName, nodeData in sorted(
                graphData.items(), key=lambda x: self.getNodeIndexFromName(x[0])
            ):
                if not isinstance(nodeData, dict):
                    raise RuntimeError(
                        "loadGraph error: Node is not a dict. File: {}".format(filepath)
                    )
                # retrieve version from
                # 1. nodeData: node saved from a CompatibilityNode
                # 2. nodesVersion in file header: node saved from a Node
                # 3. fallback to no version "0.0": retro-compatibility
                if "version" not in nodeData:
                    nodeData["version"] = nodesVersions.get(nodeData["nodeType"], "0.0")
                # if the node is a "Publish" node and comes from a template file, it should be ignored
                # unless publishOutputs is True
                if (
                    isTemplate
                    and not publishOutputs
                    and nodeData["nodeType"] == "Publish"
                ):
                    continue
                n = nodeFactory(nodeData, nodeName, template=isTemplate)
                # Add node to the graph with raw attributes values
                self._addNode(n, nodeName)
                if importProject:
                    self._importedNodes.add(n)
            # Create graph edges by resolving attributes expressions
            self._applyExpr()
            if setupProjectFile:
                # Update filepath related members
                # Note: needs to be done at the end as it will trigger an updateInternals.
                self._setFilepath(filepath)
            elif not isTemplate:
                # If no filepath is being set but the graph is not a template, trigger an updateInternals either way.
                self.updateInternals()
        # By this point, the graph has been fully loaded and an updateInternals has been triggered, so all the nodes'
        # links have been resolved and their UID computations are all complete.
        # It is now possible to check whether the UIDs stored in the graph file for each node correspond to the ones
        # that were computed.
        if not isTemplate:  # UIDs are not stored in templates
            self._evaluateUidConflicts(graphData)
            self._applyExpr()
        return True
def _evaluateUidConflicts(self, data):
"""
Compare the UIDs of all the nodes in the graph with the UID that is expected in the graph file. If there
are mismatches, the nodes with the unexpected UID are replaced with "UidConflict" compatibility nodes.
Already existing nodes are removed and re-added to the graph identically to preserve all the edges,
which may otherwise be invalidated when a node with output edges but a UID conflict is re-generated as a
compatibility node.
Args:
data (dict): the dictionary containing all the nodes to import and their data
"""
for nodeName, nodeData in sorted(
data.items(), key=lambda x: self.getNodeIndexFromName(x[0])
):
node = self.node(nodeName)
savedUid = nodeData.get("uids", "").get(
"0", ""
) # Node's UID from the graph file
graphUid = node._uids.get(0) # Node's UID from the graph itself
if savedUid != graphUid and graphUid is not None:
# Different UIDs, remove the existing node from the graph and replace it with a CompatibilityNode
logging.debug("UID conflict detected for {}".format(nodeName))
self.removeNode(nodeName)
n = nodeFactory(nodeData, nodeName, template=False, uidConflict=True)
self._addNode(n, nodeName)
else:
# f connecting nodes have UID conflicts and are removed/re-added to the graph, some edges may be lost:
# the links will be erroneously updated, and any further resolution will fail.
# Recreating the entire graph as it was ensures that all edges will be correctly preserved.
self.removeNode(nodeName)
n = nodeFactory(nodeData, nodeName, template=False, uidConflict=False)
self._addNode(n, nodeName)
    def updateImportedProject(self, data):
        """
        Update the names and links of the project to import so that it can fit
        correctly in the existing graph.

        Parse all the nodes from the project that is going to be imported.
        If their name already exists in the graph, replace them with new names,
        then parse all the nodes' inputs/outputs to replace the old names with
        the new ones in the links.

        Args:
            data (dict): the dictionary containing all the nodes to import and their data

        Returns:
            updatedData (dict): the dictionary containing all the nodes to import with their updated names and data
        """
        nameCorrespondences = {}  # maps the old node name to its updated one
        updatedData = {}  # input data with updated node names and links

        def createUniqueNodeName(nodeNames, inputName):
            """
            Create a unique name that does not already exist in the current graph or in the list
            of nodes that will be imported.
            """
            i = 1
            while i:
                newName = "{name}_{index}".format(name=inputName, index=i)
                if newName not in nodeNames and newName not in updatedData.keys():
                    return newName
                i += 1

        # First pass to get all the names that already exist in the graph, update them, and keep track of the changes
        for nodeName, nodeData in sorted(
            data.items(), key=lambda x: self.getNodeIndexFromName(x[0])
        ):
            if not isinstance(nodeData, dict):
                raise RuntimeError("updateImportedProject error: Node is not a dict.")
            if nodeName in self._nodes.keys() or nodeName in updatedData.keys():
                # Name clash: rename the imported node after its node type.
                newName = createUniqueNodeName(self._nodes.keys(), nodeData["nodeType"])
                updatedData[newName] = nodeData
                nameCorrespondences[nodeName] = newName
            else:
                updatedData[nodeName] = nodeData

        newNames = [
            nodeName for nodeName in updatedData
        ]  # names of all the nodes that will be added

        # Second pass to update all the links in the input/output attributes for every node with the new names
        for nodeName, nodeData in updatedData.items():
            nodeType = nodeData.get("nodeType", None)
            nodeDesc = meshroom.core.nodesDesc[nodeType]
            inputs = nodeData.get("inputs", {})
            outputs = nodeData.get("outputs", {})
            if inputs:
                inputs = self.updateLinks(inputs, nameCorrespondences)
                # Links to nodes outside the imported set are reset to defaults.
                inputs = self.resetExternalLinks(inputs, nodeDesc.inputs, newNames)
                updatedData[nodeName]["inputs"] = inputs
            if outputs:
                outputs = self.updateLinks(outputs, nameCorrespondences)
                outputs = self.resetExternalLinks(outputs, nodeDesc.outputs, newNames)
                updatedData[nodeName]["outputs"] = outputs
        return updatedData
@staticmethod
def updateLinks(attributes, nameCorrespondences):
"""
Update all the links that refer to nodes that are going to be imported and whose
names have to be updated.
Args:
attributes (dict): attributes whose links need to be updated
nameCorrespondences (dict): node names to replace in the links with the name to replace them with
Returns:
attributes (dict): the attributes with all the updated links
"""
for key, val in attributes.items():
for corr in nameCorrespondences.keys():
if isinstance(val, str) and corr in val:
attributes[key] = val.replace(corr, nameCorrespondences[corr])
elif isinstance(val, list):
for v in val:
if isinstance(v, str):
if corr in v:
val[val.index(v)] = v.replace(
corr, nameCorrespondences[corr]
)
else: # the list does not contain strings, so there cannot be links to update
break
attributes[key] = val
return attributes
    @staticmethod
    def resetExternalLinks(attributes, nodeDesc, newNames):
        """
        Reset all links to nodes that are not part of the nodes which are going to be imported:
        if there are links to nodes that are not in the list, then it means that the references
        are made to external nodes, and we want to get rid of those.

        Args:
            attributes (dict): attributes whose links might need to be reset
            nodeDesc (list): list with all the attributes' description (including their default value)
            newNames (list): names of the nodes that are going to be imported; no node name should be referenced
                in the links except those contained in this list

        Returns:
            attributes (dict): the attributes with all the links referencing nodes outside those which will be imported
                reset to their default values
        """
        for key, val in attributes.items():
            # Look up this attribute's default value in its description.
            defaultValue = None
            for desc in nodeDesc:
                if desc.name == key:
                    defaultValue = desc.value
                    break
            if isinstance(val, str):
                # A link expression that references none of the imported nodes
                # points at an external node: reset it.
                if Attribute.isLinkExpression(val) and not any(
                    name in val for name in newNames
                ):
                    if (
                        defaultValue is not None
                    ):  # prevents from not entering condition if defaultValue = ''
                        attributes[key] = defaultValue
            elif isinstance(val, list):
                removedCnt = len(
                    val
                )  # counter to know whether all the list entries will be deemed invalid
                tmpVal = list(
                    val
                )  # shallow copy so the full list is iterated even while elements are removed from val
                for v in tmpVal:
                    if (
                        isinstance(v, str)
                        and Attribute.isLinkExpression(v)
                        and not any(name in v for name in newNames)
                    ):
                        # External link inside the list: drop the entry.
                        val.remove(v)
                        removedCnt -= 1
                if (
                    removedCnt == 0 and defaultValue is not None
                ):  # if all links were wrong, reset the attribute
                    attributes[key] = defaultValue
        return attributes
    @property
    def updateEnabled(self):
        """Whether graph updates are applied immediately (see GraphModification)."""
        return self._updateEnabled

    @updateEnabled.setter
    def updateEnabled(self, enabled):
        self._updateEnabled = enabled
        if enabled and self._updateRequested:
            # Trigger an update if requested while disabled
            self.update()
            self._updateRequested = False
    @changeTopology
    def _addNode(self, node, uniqueName):
        """
        Internal method to add the given node to this Graph, with the given name (must be unique).
        Attribute expressions are not resolved.

        Args:
            node: the node to add
            uniqueName (str): the node name; must not already exist in this graph

        Raises:
            RuntimeError: if the node already belongs to another graph.
        """
        if node.graph is not None and node.graph != self:
            raise RuntimeError(
                'Node "{}" cannot be part of the Graph "{}", as it is already part of the other graph "{}".'.format(
                    node.nodeType, self.name, node.graph.name
                )
            )
        assert uniqueName not in self._nodes.keys()
        node._name = uniqueName
        node.graph = self
        self._nodes.add(node)
def addNode(self, node, uniqueName=None):
"""
Add the given node to this Graph with an optional unique name,
and resolve attributes expressions.
"""
self._addNode(
node,
uniqueName if uniqueName else self._createUniqueNodeName(node.nodeType),
)
# Resolve attribute expressions
with GraphModification(self):
node._applyExpr()
return node
    def copyNode(self, srcNode, withEdges=False):
        """
        Get a copy instance of a node outside the graph.

        Args:
            srcNode (Node): the node to copy
            withEdges (bool): whether to copy edges

        Returns:
            Node, dict: the created node instance,
            a dictionary of linked attributes with their original value (empty if withEdges is True)
        """
        with GraphModification(self):
            # create a new node of the same type and with the same attributes values
            # keep links as-is so that CompatibilityNodes attributes can be created with correct automatic description
            # (File params for link expressions)
            node = nodeFactory(srcNode.toDict(), srcNode.nodeType)  # use nodeType as name
            # skip edges: filter out attributes which are links by resetting default values
            skippedEdges = {}
            if not withEdges:
                for n, attr in node.attributes.items():
                    # find top-level links
                    if Attribute.isLinkExpression(attr.value):
                        skippedEdges[attr] = attr.value
                        attr.resetValue()
                    # find links in ListAttribute children
                    elif isinstance(attr, ListAttribute):
                        for child in attr.value:
                            if Attribute.isLinkExpression(child.value):
                                skippedEdges[child] = child.value
                                child.resetValue()
        return node, skippedEdges
    def duplicateNodes(self, srcNodes):
        """Duplicate nodes in the graph with their connections.

        Args:
            srcNodes: the nodes to duplicate

        Returns:
            OrderedDict[Node, Node]: the source->duplicate map
        """
        # use OrderedDict to keep duplicated nodes creation order
        duplicates = OrderedDict()

        with GraphModification(self):
            duplicateEdges = {}
            # first, duplicate all nodes without edges and keep a 'source=>duplicate' map
            # keeps tracks of non-created edges for later remap
            for srcNode in srcNodes:
                node, edges = self.copyNode(srcNode, withEdges=False)
                duplicate = self.addNode(node)
                duplicateEdges.update(edges)
                duplicates.setdefault(srcNode, []).append(duplicate)

            # re-create edges taking into account what has been duplicated
            for attr, linkExpression in duplicateEdges.items():
                link = linkExpression[1:-1]  # remove starting '{' and trailing '}'
                # get source node and attribute name
                edgeSrcNodeName, edgeSrcAttrName = link.split(".", 1)
                edgeSrcNode = self.node(edgeSrcNodeName)
                # if the edge's source node has been duplicated (the key exists in the dictionary),
                # use the duplicate; otherwise use the original node
                if edgeSrcNode in duplicates:
                    edgeSrcNode = duplicates.get(edgeSrcNode)[0]
                self.addEdge(edgeSrcNode.attribute(edgeSrcAttrName), attr)

        return duplicates
def pasteNodes(self, data, position):
"""
Paste node(s) in the graph with their connections. The connections can only be between
the pasted nodes and not with the rest of the graph.
Args:
data (dict): the dictionary containing the information about the nodes to paste, with their names and
links already updated to be added to the graph
position (list): the list of positions for each node to paste
Returns:
list: the list of Node objects that were pasted and added to the graph
"""
nodes = []
with GraphModification(self):
positionCnt = 0 # always valid because we know the data is sorted the same way as the position list
for key in sorted(data):
nodeType = data[key].get("nodeType", None)
if not nodeType: # this case should never occur, as the data should have been prefiltered first
pass
attributes = {}
attributes.update(data[key].get("inputs", {}))
attributes.update(data[key].get("outputs", {}))
node = Node(nodeType, position=position[positionCnt], **attributes)
self._addNode(node, key)
nodes.append(node)
positionCnt += 1
self._applyExpr()
return nodes
def outEdges(self, attribute):
"""Return the list of edges starting from the given attribute"""
# type: (Attribute,) -> [Edge]
return [edge for edge in self.edges if edge.src == attribute]
def nodeInEdges(self, node):
# type: (Node) -> [Edge]
"""Return the list of edges arriving to this node"""
return [edge for edge in self.edges if edge.dst.node == node]
def nodeOutEdges(self, node):
# type: (Node) -> [Edge]
"""Return the list of edges starting from this node"""
return [edge for edge in self.edges if edge.src.node == node]
    @changeTopology
    def removeNode(self, nodeName):
        """
        Remove the node identified by 'nodeName' from the graph
        and return in and out edges removed by this operation in two dicts {dstAttr.getFullNameToNode(), srcAttr.getFullNameToNode()}

        Args:
            nodeName (str): the name of the node to remove

        Returns:
            tuple(dict, dict): the removed (inEdges, outEdges)
        """
        node = self.node(nodeName)
        inEdges = {}
        outEdges = {}
        # Remove all edges arriving to and starting from this node
        with GraphModification(self):
            for edge in self.nodeOutEdges(node):
                self.removeEdge(edge.dst)
                outEdges[edge.dst.getFullNameToNode()] = edge.src.getFullNameToNode()
            for edge in self.nodeInEdges(node):
                self.removeEdge(edge.dst)
                inEdges[edge.dst.getFullNameToNode()] = edge.src.getFullNameToNode()
            # Tell QML this node is going away before removing it from the model.
            node.alive = False
            self._nodes.remove(node)
            if node in self._importedNodes:
                self._importedNodes.remove(node)
            self.update()
        return inEdges, outEdges
def addNewNode(self, nodeType, name=None, position=None, **kwargs):
"""
Create and add a new node to the graph.
Args:
nodeType (str): the node type name.
name (str): if specified, the desired name for this node. If not unique, will be prefixed (_N).
position (Position): (optional) the position of the node
**kwargs: keyword arguments to initialize node's attributes
Returns:
The newly created node.
"""
if name and name in self._nodes.keys():
name = self._createUniqueNodeName(name)
n = self.addNode(Node(nodeType, position=position, **kwargs), uniqueName=name)
n.updateInternals()
return n
def _createUniqueNodeName(self, inputName):
i = 1
while i:
newName = "{name}_{index}".format(name=inputName, index=i)
if newName not in self._nodes.objects:
return newName
i += 1
def node(self, nodeName):
return self._nodes.get(nodeName)
    def upgradeNode(self, nodeName):
        """
        Upgrade the CompatibilityNode identified as 'nodeName'

        Args:
            nodeName (str): the name of the CompatibilityNode to upgrade

        Returns:
            the upgraded node, and the dicts of deleted input/output edges

        Raises:
            ValueError: if the named node is not a CompatibilityNode.
        """
        node = self.node(nodeName)
        if not isinstance(node, CompatibilityNode):
            raise ValueError(
                "Upgrade is only available on CompatibilityNode instances."
            )
        upgradedNode = node.upgrade()
        with GraphModification(self):
            inEdges, outEdges = self.removeNode(nodeName)
            self.addNode(upgradedNode, nodeName)
            # Restore output edges; those that no longer resolve on the
            # upgraded node are dropped with a warning.
            for dst, src in outEdges.items():
                try:
                    self.addEdge(self.attribute(src), self.attribute(dst))
                except (KeyError, ValueError) as e:
                    logging.warning(
                        "Failed to restore edge {} -> {}: {}".format(src, dst, str(e))
                    )
        return upgradedNode, inEdges, outEdges
def upgradeAllNodes(self):
"""Upgrade all upgradable CompatibilityNode instances in the graph."""
nodeNames = [
name for name, n in self._compatibilityNodes.items() if n.canUpgrade
]
with GraphModification(self):
for nodeName in nodeNames:
self.upgradeNode(nodeName)
@Slot(str, result=Attribute)
def attribute(self, fullName):
# type: (str) -> Attribute
"""
Return the attribute identified by the unique name 'fullName'.
If it does not exist, return None.
"""
node, attribute = fullName.split(".", 1)
if self.node(node).hasAttribute(attribute):
return self.node(node).attribute(attribute)
return None
@Slot(str, result=Attribute)
def internalAttribute(self, fullName):
# type: (str) -> Attribute
"""
Return the internal attribute identified by the unique name 'fullName'.
If it does not exist, return None.
"""
node, attribute = fullName.split(".", 1)
if self.node(node).hasInternalAttribute(attribute):
return self.node(node).internalAttribute(attribute)
return None
@staticmethod
def getNodeIndexFromName(name):
"""Nodes are created with a suffix index; returns this index by parsing node name.
Args:
name (str): the node name
Returns:
int: the index retrieved from node name (-1 if not found)
"""
try:
return int(name.split("_")[-1])
except:
return -1
@staticmethod
def sortNodesByIndex(nodes):
"""
Sort the given list of Nodes using the suffix index in their names.
[NodeName_1, NodeName_0] => [NodeName_0, NodeName_1]
Args:
nodes (list[Node]): the list of Nodes to sort
Returns:
list[Node]: the sorted list of Nodes based on their index
"""
return sorted(nodes, key=lambda x: Graph.getNodeIndexFromName(x.name))
def nodesOfType(self, nodeType, sortedByIndex=True):
"""
Returns all Nodes of the given nodeType.
Args:
nodeType (str): the node type name to consider.
sortedByIndex (bool): whether to sort the nodes by their index (see Graph.sortNodesByIndex)
Returns:
list[Node]: the list of nodes matching the given nodeType.
"""
nodes = [n for n in self._nodes.values() if n.nodeType == nodeType]
return self.sortNodesByIndex(nodes) if sortedByIndex else nodes
def findInitNodes(self):
"""
Returns:
list[Node]: the list of Init nodes (nodes inheriting from InitNode)
"""
nodes = [
n
for n in self._nodes.values()
if isinstance(n.nodeDesc, meshroom.core.desc.InitNode)
]
return nodes
def findNodeCandidates(self, nodeNameExpr):
pattern = re.compile(nodeNameExpr)
return [v for k, v in self._nodes.objects.items() if pattern.match(k)]
def findNode(self, nodeExpr):
    """Return the single node whose name starts with 'nodeExpr'.

    When several names share the prefix, an exact name match wins.

    Raises:
        KeyError: if nothing matches, or if the match is ambiguous.
    """
    candidates = self.findNodeCandidates("^" + nodeExpr)
    if not candidates:
        raise KeyError('No node candidate for "{}"'.format(nodeExpr))
    if len(candidates) == 1:
        return candidates[0]
    # ambiguous prefix: only an exact name resolves it
    for candidate in candidates:
        if candidate.name == nodeExpr:
            return candidate
    raise KeyError(
        'Multiple node candidates for "{}": {}'.format(
            nodeExpr, str([c.name for c in candidates])
        )
    )
def findNodes(self, nodesExpr):
    """Resolve a single expression or a list of expressions into nodes (see findNode)."""
    expressions = nodesExpr if isinstance(nodesExpr, list) else [nodesExpr]
    return [self.findNode(expression) for expression in expressions]
def edge(self, dstAttributeName):
    """Return the Edge ending at the given destination attribute name, or None."""
    return self._edges.get(dstAttributeName)
def getLeafNodes(self, dependenciesOnly):
    """Return the nodes that feed no edge (graph leaves)."""
    sourcesOfSomeEdge = {edge.src.node for edge in self.getEdges(dependenciesOnly)}
    return set(self._nodes) - sourcesOfSomeEdge
def getRootNodes(self, dependenciesOnly):
    """Return the nodes that receive no edge (graph roots)."""
    targetsOfSomeEdge = {edge.dst.node for edge in self.getEdges(dependenciesOnly)}
    return set(self._nodes) - targetsOfSomeEdge
@changeTopology
def addEdge(self, srcAttr, dstAttr):
    """Connect srcAttr to dstAttr and return the created Edge.

    Args:
        srcAttr (Attribute): the source attribute.
        dstAttr (Attribute): the destination attribute.

    Raises:
        RuntimeError: if the attributes belong to another graph, or if
            dstAttr is already connected.
    """
    assert isinstance(srcAttr, Attribute)
    assert isinstance(dstAttr, Attribute)
    if srcAttr.node.graph != self or dstAttr.node.graph != self:
        raise RuntimeError(
            "The attributes of the edge should be part of a common graph."
        )
    if dstAttr in self.edges.keys():
        raise RuntimeError(
            'Destination attribute "{}" is already connected.'.format(
                dstAttr.getFullNameToNode()
            )
        )
    edge = Edge(srcAttr, dstAttr)
    self.edges.add(edge)
    # the destination node's results now depend on srcAttr: invalidate it
    self.markNodesDirty(dstAttr.node)
    # notify observers of the new link on both endpoints
    dstAttr.valueChanged.emit()
    dstAttr.isLinkChanged.emit()
    srcAttr.hasOutputConnectionsChanged.emit()
    return edge
def addEdges(self, *edges):
    """Add several (srcAttr, dstAttr) pairs within a single graph modification."""
    with GraphModification(self):
        for pair in edges:
            self.addEdge(*pair)
@changeTopology
def removeEdge(self, dstAttr):
    """Disconnect the edge ending at dstAttr.

    Raises:
        RuntimeError: if dstAttr has no incoming connection.
    """
    if dstAttr not in self.edges.keys():
        raise RuntimeError(
            'Attribute "{}" is not connected'.format(dstAttr.getFullNameToNode())
        )
    edge = self.edges.pop(dstAttr)
    # downstream results may change once the link is gone: invalidate
    self.markNodesDirty(dstAttr.node)
    dstAttr.valueChanged.emit()
    dstAttr.isLinkChanged.emit()
    edge.src.hasOutputConnectionsChanged.emit()
def getDepth(self, node, minimal=False):
    """Return node's depth in this Graph.

    By default, returns the maximal depth of the node unless minimal is set to True.

    Args:
        node (Node): the node to consider.
        minimal (bool): whether to return the minimal depth instead of the maximal one (default).
    Returns:
        int: the node's depth in this Graph.
    """
    assert node.graph == self
    assert not self.dirtyTopology
    depths = self._nodesMinMaxDepths[node]
    return depths[0] if minimal else depths[1]
def getInputEdges(self, node, dependenciesOnly):
    """Return the set of edges whose destination is the given node."""
    return {
        edge
        for edge in self.getEdges(dependenciesOnly=dependenciesOnly)
        if edge.dst.node is node
    }
def _getInputEdgesPerNode(self, dependenciesOnly):
    """Map each node to the set of its direct source (input) nodes."""
    inputsPerNode = defaultdict(set)
    for edge in self.getEdges(dependenciesOnly=dependenciesOnly):
        inputsPerNode[edge.dst.node].add(edge.src.node)
    return inputsPerNode
def _getOutputEdgesPerNode(self, dependenciesOnly):
    """Map each node to the set of its direct destination (output) nodes."""
    outputsPerNode = defaultdict(set)
    for edge in self.getEdges(dependenciesOnly=dependenciesOnly):
        outputsPerNode[edge.src.node].add(edge.dst.node)
    return outputsPerNode
def dfs(self, visitor, startNodes=None, longestPathFirst=False):
    """Depth-first traversal of the graph, driven by the given visitor.

    Default direction (visitor.reverse=False): from node to root.
    Reverse direction (visitor.reverse=True): from node to leaves.
    """
    # pick the adjacency map matching the traversal direction
    nodeChildren = (
        self._getOutputEdgesPerNode(visitor.dependenciesOnly)
        if visitor.reverse
        else self._getInputEdgesPerNode(visitor.dependenciesOnly)
    )
    # Initialize color map: every node starts unvisited (WHITE)
    colors = {}
    for u in self._nodes:
        colors[u] = WHITE

    if longestPathFirst and visitor.reverse:
        # Because we have no knowledge of the node's count between a node and its leaves,
        # it is not possible to handle this case at the moment
        raise NotImplementedError(
            "Graph.dfs(): longestPathFirst=True and visitor.reverse=True are not compatible yet."
        )

    # default start set: roots when walking towards leaves, leaves otherwise
    nodes = startNodes or (
        self.getRootNodes(visitor.dependenciesOnly)
        if visitor.reverse
        else self.getLeafNodes(visitor.dependenciesOnly)
    )

    if longestPathFirst:
        # Graph topology must be known and node depths up-to-date
        assert not self.dirtyTopology
        nodes = sorted(nodes, key=lambda item: item.depth)

    try:
        for node in nodes:
            self.dfsVisit(node, visitor, colors, nodeChildren, longestPathFirst)
    except StopGraphVisit:
        # the visitor requested a full stop of the traversal
        pass
def dfsVisit(self, u, visitor, colors, nodeChildren, longestPathFirst):
    """Visit one start node; StopBranchVisit only prunes this branch, not the whole DFS."""
    try:
        self._dfsVisit(u, visitor, colors, nodeChildren, longestPathFirst)
    except StopBranchVisit:
        pass
def _dfsVisit(self, u, visitor, colors, nodeChildren, longestPathFirst):
    """Recursive DFS visit of node 'u' using WHITE/GRAY/BLACK coloring."""
    colors[u] = GRAY
    visitor.discoverVertex(u, self)
    # d_time[u] = time = time + 1
    children = nodeChildren[u]
    if longestPathFirst:
        # depths must be valid to order children by decreasing maximal depth
        assert not self.dirtyTopology
        children = sorted(
            children,
            reverse=True,
            key=lambda item: self._nodesMinMaxDepths[item][1],
        )
    for v in children:
        visitor.examineEdge((u, v), self)
        if colors[v] == WHITE:
            visitor.treeEdge((u, v), self)
            # (u,v) is a tree edge
            self.dfsVisit(
                v, visitor, colors, nodeChildren, longestPathFirst
            )  # TODO: avoid recursion
        elif colors[v] == GRAY:
            # (u,v) is a back edge
            visitor.backEdge((u, v), self)
        elif colors[v] == BLACK:
            # (u,v) is a cross or forward edge
            visitor.forwardOrCrossEdge((u, v), self)
        visitor.finishEdge((u, v), self)
    colors[u] = BLACK
    visitor.finishVertex(u, self)
def dfsOnFinish(
    self,
    startNodes=None,
    longestPathFirst=False,
    reverse=False,
    dependenciesOnly=False,
):
    """
    Return the node chain from startNodes to the graph roots/leaves.
    Order is defined by the visit and finishVertex event.

    Args:
        startNodes (Node list): the nodes to start the visit from.
        longestPathFirst (bool): (optional) if multiple paths, nodes belonging to
                                 the longest one will be visited first.
        reverse (bool): (optional) direction of visit.
                        True is for getting nodes depending on the startNodes (to leaves).
                        False is for getting nodes required for the startNodes (to roots).
    Returns:
        The list of nodes and edges, from startNodes to the graph roots/leaves following edges.
    """
    visitedNodes = []
    visitedEdges = []
    visitor = Visitor(reverse=reverse, dependenciesOnly=dependenciesOnly)
    # record vertices/edges when their visit finishes
    visitor.finishVertex = lambda vertex, graph: visitedNodes.append(vertex)
    visitor.finishEdge = lambda edge, graph: visitedEdges.append(edge)
    self.dfs(
        visitor=visitor, startNodes=startNodes, longestPathFirst=longestPathFirst
    )
    return visitedNodes, visitedEdges
def dfsOnDiscover(
    self,
    startNodes=None,
    filterTypes=None,
    longestPathFirst=False,
    reverse=False,
    dependenciesOnly=False,
):
    """
    Return the node chain from startNodes to the graph roots/leaves.
    Order is defined by the visit and discoverVertex event.

    Args:
        startNodes (Node list): the nodes to start the visit from.
        filterTypes (str list): (optional) only return the nodes of the given types
                                (does not stop the visit, this is a post-process only)
        longestPathFirst (bool): (optional) if multiple paths, nodes belonging to
                                 the longest one will be visited first.
        reverse (bool): (optional) direction of visit.
                        True is for getting nodes depending on the startNodes (to leaves).
                        False is for getting nodes required for the startNodes (to roots).
    Returns:
        The list of nodes and edges, from startNodes to the graph roots/leaves following edges.
    """
    discoveredNodes = []
    examinedEdges = []
    visitor = Visitor(reverse=reverse, dependenciesOnly=dependenciesOnly)

    def onDiscover(vertex, graph):
        # filterTypes is a post-filter only: it never prunes the traversal
        if not filterTypes or vertex.nodeType in filterTypes:
            discoveredNodes.append(vertex)

    visitor.discoverVertex = onDiscover
    visitor.examineEdge = lambda edge, graph: examinedEdges.append(edge)
    self.dfs(
        visitor=visitor, startNodes=startNodes, longestPathFirst=longestPathFirst
    )
    return discoveredNodes, examinedEdges
def dfsToProcess(self, startNodes=None):
    """
    Return the full list of predecessor nodes to process in order to compute the given nodes.

    Args:
        startNodes: list of starting nodes. Use all leaves if empty.
    Returns:
        visited nodes and edges that are not already computed (node.status != SUCCESS).
        The order is defined by the visit and finishVertex event.
    """
    nodes = []
    edges = []
    visitor = Visitor(reverse=False, dependenciesOnly=True)

    def discoverVertex(vertex, graph):
        if vertex.hasStatus(Status.SUCCESS):
            # stop branch visit if discovering a node already computed
            raise StopBranchVisit()

    def finishVertex(vertex, graph):
        # collect the node if at least one of its chunks still needs computing
        chunksToProcess = []
        for chunk in vertex.chunks:
            if chunk.status.status is not Status.SUCCESS:
                chunksToProcess.append(chunk)
        if chunksToProcess:
            nodes.append(vertex)  # We could collect specific chunks

    def finishEdge(edge, graph):
        # drop edges touching an already-computed endpoint
        if edge[0].hasStatus(Status.SUCCESS) or edge[1].hasStatus(Status.SUCCESS):
            return
        edges.append(edge)

    visitor.finishVertex = finishVertex
    visitor.finishEdge = finishEdge
    visitor.discoverVertex = discoverVertex
    self.dfs(visitor=visitor, startNodes=startNodes)
    return nodes, edges
@Slot(Node, result=bool)
def canCompute(self, node):
    """
    Return the computability of a node based on itself and its dependency chain.

    Computation can't happen for:
     - CompatibilityNodes
     - nodes having a non-computed CompatibilityNode in its dependency chain

    Args:
        node (Node): the node to evaluate
    Returns:
        bool: whether the node can be computed
    """
    if isinstance(node, CompatibilityNode):
        return False
    # _computationBlocked is filled by updateNodesTopologicalData()
    return not self._computationBlocked[node]
def updateNodesTopologicalData(self):
    """
    Compute and cache nodes topological data:
        - min and max depth
        - computability
    """
    self._nodesMinMaxDepths.clear()
    self._computationBlocked.clear()

    compatNodes = []
    visitor = Visitor(reverse=False, dependenciesOnly=True)

    def discoverVertex(vertex, graph):
        # initialize depths
        self._nodesMinMaxDepths[vertex] = (0, 0)
        # initialize computability
        self._computationBlocked[vertex] = False
        if isinstance(vertex, CompatibilityNode):
            compatNodes.append(vertex)
            # a not computed CompatibilityNode blocks computation
            if not vertex.hasStatus(Status.SUCCESS):
                self._computationBlocked[vertex] = True

    def finishEdge(edge, graph):
        currentVertex, inputVertex = edge
        # update depths
        currentDepths = self._nodesMinMaxDepths[currentVertex]
        inputDepths = self._nodesMinMaxDepths[inputVertex]
        if currentDepths[0] == 0:
            # if not initialized, set the depth of the first child
            depthMin = inputDepths[0] + 1
        else:
            depthMin = min(currentDepths[0], inputDepths[0] + 1)
        self._nodesMinMaxDepths[currentVertex] = (
            depthMin,
            max(currentDepths[1], inputDepths[1] + 1),
        )
        # update computability
        if currentVertex.hasStatus(Status.SUCCESS):
            # output is already computed and available,
            # does not depend on input connections computability
            return
        # propagate inputVertex computability
        self._computationBlocked[currentVertex] |= self._computationBlocked[
            inputVertex
        ]

    leaves = self.getLeafNodes(visitor.dependenciesOnly)
    visitor.finishEdge = finishEdge
    visitor.discoverVertex = discoverVertex
    self.dfs(visitor=visitor, startNodes=leaves)

    # update graph computability status
    canComputeLeaves = all([self.canCompute(node) for node in leaves])
    if self._canComputeLeaves != canComputeLeaves:
        self._canComputeLeaves = canComputeLeaves
        self.canComputeLeavesChanged.emit()

    # update compatibilityNodes model
    if len(self._compatibilityNodes) != len(compatNodes):
        self._compatibilityNodes.reset(compatNodes)

# read-only model of CompatibilityNode instances, refreshed above
compatibilityNodes = Property(
    BaseObject, lambda self: self._compatibilityNodes, constant=True
)
def dfsMaxEdgeLength(self, startNodes=None, dependenciesOnly=True):
    """
    Score node pairs by the maximal distance separating them along any DFS path.

    :param startNodes: list of starting nodes. Use all leaves if empty.
    :return: dict {(nodeA, nodeB): score} where score is the maximal number of
        steps observed between the two nodes on the DFS stack
    """
    nodesStack = []
    edgesScore = defaultdict(lambda: 0)
    visitor = Visitor(reverse=False, dependenciesOnly=dependenciesOnly)

    def finishEdge(edge, graph):
        u, v = edge
        # score (n, v) for every ancestor n currently on the DFS stack:
        # the score is n's distance from the top of the stack, keeping the
        # maximum seen across all traversed paths
        for i, n in enumerate(reversed(nodesStack)):
            index = i + 1
            if index > edgesScore[(n, v)]:
                edgesScore[(n, v)] = index

    def finishVertex(vertex, graph):
        v = nodesStack.pop()
        assert v == vertex

    visitor.discoverVertex = lambda vertex, graph: nodesStack.append(vertex)
    visitor.finishVertex = finishVertex
    visitor.finishEdge = finishEdge
    self.dfs(visitor=visitor, startNodes=startNodes, longestPathFirst=True)
    return edgesScore
def flowEdges(self, startNodes=None, dependenciesOnly=True):
    """
    Return as few edges as possible, such that if there is a directed path from one vertex to another in the
    original graph, there is also such a path in the reduction.

    :param startNodes:
    :return: the remaining edges after a transitive reduction of the graph.
    """
    edgesScore = self.dfsMaxEdgeLength(startNodes, dependenciesOnly)
    # every scored pair must have been reached at least once
    assert all(score != 0 for score in edgesScore.values())
    # keep only direct links (distance 1): longer paths are transitive
    return [link for link, score in edgesScore.items() if score == 1]
def getEdges(self, dependenciesOnly=False):
    """Return graph edges; with dependenciesOnly, re-target link sources to their real output."""
    if not dependenciesOnly:
        return self.edges
    outEdges = []
    for edge in self.edges:
        srcAttr = edge.src
        if srcAttr.isLink:
            srcAttr = srcAttr.getLinkParam(recursive=True)
        if not srcAttr.isOutput:
            # skip sources that do not resolve to an output attribute
            continue
        outEdges.append(Edge(srcAttr, edge.dst))
    return outEdges
def getInputNodes(self, node, recursive, dependenciesOnly):
    """Return either the first level input nodes of a node or the whole chain."""
    if not recursive:
        return {
            edge.src.node
            for edge in self.getEdges(dependenciesOnly)
            if edge.dst.node is node
        }
    discovered, _ = self.dfsOnDiscover(
        startNodes=[node], filterTypes=None, reverse=False
    )
    return discovered[1:]  # exclude the start node itself
def getOutputNodes(self, node, recursive, dependenciesOnly):
    """Return either the first level output nodes of a node or the whole chain."""
    if not recursive:
        return {
            edge.dst.node
            for edge in self.getEdges(dependenciesOnly)
            if edge.src.node is node
        }
    discovered, _ = self.dfsOnDiscover(
        startNodes=[node], filterTypes=None, reverse=True
    )
    return discovered[1:]  # exclude the start node itself
@Slot(Node, result=int)
def canSubmitOrCompute(self, startNode):
    """
    Check if a node can be submitted/computed.

    Returns:
        int: 0 = cannot be submitted or computed /
             1 = can be computed /
             2 = can be submitted /
             3 = can be submitted and computed
    """
    if startNode.isAlreadySubmittedOrFinished():
        return 0

    class SCVisitor(Visitor):
        def __init__(self, reverse, dependenciesOnly):
            super(SCVisitor, self).__init__(reverse, dependenciesOnly)

        # flags cleared while walking the dependency chain
        canCompute = True
        canSubmit = True

        def discoverVertex(self, vertex, graph):
            if vertex.isAlreadySubmitted():
                self.canSubmit = False
                if vertex.isExtern():
                    self.canCompute = False

    visitor = SCVisitor(reverse=False, dependenciesOnly=True)
    self.dfs(visitor=visitor, startNodes=[startNode])
    # encode both flags into the documented 0..3 result
    return visitor.canCompute + (2 * visitor.canSubmit)
def _applyExpr(self):
    """Apply attribute expressions on every node, within one graph modification."""
    with GraphModification(self):
        for node in self._nodes:
            node._applyExpr()
def toDict(self):
    """Serialize every node into a plain dict keyed by node name."""
    return {name: node.toDict() for name, node in self._nodes.objects.items()}
@Slot(result=str)
def asString(self):
    """Return the string representation of the serialized graph (see toDict)."""
    return str(self.toDict())
def save(self, filepath=None, setupProjectFile=True, template=False):
    """Serialize the graph as JSON to 'filepath' (or the current project file).

    Args:
        filepath (str): target file; defaults to the graph's current filepath.
        setupProjectFile (bool): when saving to a new path, adopt it as the
            project file (see _setFilepath).
        template (bool): save as a template (only non-default input attributes).

    Raises:
        ValueError: if no filepath is given and the graph has none yet.
    """
    path = filepath or self._filepath
    if not path:
        raise ValueError("filepath must be specified for unsaved files.")
    self.header[Graph.IO.Keys.ReleaseVersion] = meshroom.__version__
    self.header[Graph.IO.Keys.FileVersion] = Graph.IO.__version__
    # store versions of node types present in the graph (excluding CompatibilityNode instances)
    usedNodeTypes = set(
        [n.nodeDesc.__class__ for n in self._nodes if isinstance(n, Node)]
    )
    self.header[Graph.IO.Keys.NodesVersions] = {
        "{}".format(p.__name__): meshroom.core.nodeVersion(p, "0.0")
        for p in usedNodeTypes
    }
    self.header["template"] = template

    data = {}
    if template:
        # templates only keep non-default inputs (see getNonDefaultInputAttributes)
        data = {
            Graph.IO.Keys.Header: self.header,
            Graph.IO.Keys.Graph: self.getNonDefaultInputAttributes(),
        }
    else:
        data = {
            Graph.IO.Keys.Header: self.header,
            Graph.IO.Keys.Graph: self.toDict(),
        }

    with open(path, "w") as jsonFile:
        json.dump(data, jsonFile, indent=4)

    if path != self._filepath and setupProjectFile:
        self._setFilepath(path)
def getNonDefaultInputAttributes(self):
    """
    Instead of getting all the inputs and internal attribute keys, only get the keys of
    the attributes whose value is not the default one.
    The output attributes, UIDs, parallelization parameters and internal folder are
    not relevant for templates, so they are explicitly removed from the returned dictionary.

    Returns:
        dict: self.toDict() with the output attributes, UIDs, parallelization parameters, internal folder
        and input/internal attributes with default values removed
    """
    graph = self.toDict()
    for nodeName in graph.keys():
        node = self.node(nodeName)
        inputKeys = list(graph[nodeName]["inputs"].keys())

        internalInputKeys = []
        internalInputs = graph[nodeName].get("internalInputs", None)
        if internalInputs:
            internalInputKeys = list(internalInputs.keys())

        for attrName in inputKeys:
            attribute = node.attribute(attrName)
            # check that attribute is not a link for choice attributes
            if attribute.isDefault and not attribute.isLink:
                del graph[nodeName]["inputs"][attrName]

        for attrName in internalInputKeys:
            attribute = node.internalAttribute(attrName)
            # check that internal attribute is not a link for choice attributes
            if attribute.isDefault and not attribute.isLink:
                del graph[nodeName]["internalInputs"][attrName]

        # If all the internal attributes are set to their default values, remove the entry
        # NOTE(review): this indexes "internalInputs" unconditionally — presumably
        # toDict() always emits that key; verify, else this raises KeyError.
        if len(graph[nodeName]["internalInputs"]) == 0:
            del graph[nodeName]["internalInputs"]

        del graph[nodeName]["outputs"]
        del graph[nodeName]["uids"]
        del graph[nodeName]["internalFolder"]
        del graph[nodeName]["parallelization"]

    return graph
def _setFilepath(self, filepath):
    """
    Set the internal filepath of this Graph.
    This method should not be used directly from outside, use save/load instead.

    Args:
        filepath: the graph file path
    """
    # a non-existing file unsets the project instead of adopting a bad path
    if not os.path.isfile(filepath):
        self._unsetFilepath()
        return

    if self._filepath == filepath:
        return
    self._filepath = filepath
    # For now:
    #  * cache folder is located next to the graph file
    #  * graph name if the basename of the graph file
    self.name = os.path.splitext(os.path.basename(filepath))[0]
    self.cacheDir = os.path.join(
        os.path.abspath(os.path.dirname(filepath)), meshroom.core.cacheFolderName
    )
    self.filepathChanged.emit()
def _unsetFilepath(self):
    """Clear the project file association and fall back to the default cache folder."""
    self._filepath = ""
    self.name = ""
    self.cacheDir = meshroom.core.defaultCacheFolder
    self.filepathChanged.emit()
def updateInternals(self, startNodes=None, force=False):
    """Re-evaluate internals of dirty nodes (of every visited node if force)."""
    visitedNodes, _ = self.dfsOnFinish(startNodes=startNodes)
    for node in visitedNodes:
        if force or node.dirty:
            node.updateInternals()
def updateStatusFromCache(self, force=False):
    """Reload the status of dirty nodes (of every node if force) from the cache."""
    for node in self._nodes:
        if force or node.dirty:
            node.updateStatusFromCache()
def updateStatisticsFromCache(self):
    """Reload execution statistics of every node from the cache."""
    for node in self._nodes:
        node.updateStatisticsFromCache()
def updateNodesPerUid(self):
    """Update the duplicate nodes (sharing same uid) list of each node."""
    # First step is to construct a map uid -> list of nodes.
    # Using defaultdict instead of the previous try/except AttributeError:
    # the old pattern relied on .get() returning None and would also have
    # masked an unrelated AttributeError raised while appending.
    nodesPerUid = defaultdict(list)
    for node in self.nodes:
        uid = node._uids.get(0)
        nodesPerUid[uid].append(node)
    # Now, update each individual node (pass a plain dict so lookups in
    # updateDuplicates cannot silently grow the mapping)
    mapping = dict(nodesPerUid)
    for node in self.nodes:
        node.updateDuplicates(mapping)
def update(self):
    """Run a full graph update: internals, cached status, duplicates, topology data."""
    if not self._updateEnabled:
        # To do the update once for multiple changes
        self._updateRequested = True
        return

    self.updateInternals()
    # only read statuses if the cache folder actually exists
    if os.path.exists(self._cacheDir):
        self.updateStatusFromCache()
    for node in self.nodes:
        node.dirty = False
    self.updateNodesPerUid()

    # Graph topology has changed
    if self.dirtyTopology:
        # update nodes topological data cache
        self.updateNodesTopologicalData()
        self.dirtyTopology = False

    self.updated.emit()
def markNodesDirty(self, fromNode):
    """
    Mark all nodes following 'fromNode' as dirty.
    All nodes marked as dirty will get their outputs to be re-evaluated
    during the next graph update.

    Args:
        fromNode (Node): the node to start the invalidation from

    See Also:
        Graph.update, Graph.updateInternals, Graph.updateStatusFromCache
    """
    downstream, _ = self.dfsOnDiscover(startNodes=[fromNode], reverse=True)
    for node in downstream:
        node.dirty = True
def stopExecution(self):
    """Request graph execution to be stopped by terminating running chunks"""
    for chunk in self.iterChunksByStatus(Status.RUNNING):
        if chunk.isExtern():
            # externally-managed chunks are left alone
            continue
        chunk.stopProcess()
@Slot()
def forceUnlockNodes(self):
    """Force to unlock all the nodes."""
    for lockedNode in self.nodes:
        lockedNode.setLocked(False)
@Slot()
def clearSubmittedNodes(self):
    """Reset the status of already submitted nodes to Status.NONE"""
    for submittedNode in self.nodes:
        submittedNode.clearSubmittedChunks()
def clearLocallySubmittedNodes(self):
    """Reset the status of already locally submitted nodes to Status.NONE"""
    for submittedNode in self.nodes:
        submittedNode.clearLocallySubmittedChunks()
def iterChunksByStatus(self, status):
    """Iterate over NodeChunks with the given status"""
    for node in self.nodes:
        yield from (
            chunk for chunk in node.chunks if chunk.status.status == status
        )
def getChunksByStatus(self, status):
    """Return the list of NodeChunks with the given status"""
    return [
        chunk
        for node in self.nodes
        for chunk in node.chunks
        if chunk.status.status == status
    ]
def getChunks(self, nodes=None):
    """Returns the list of NodeChunks for the given list of nodes (for all nodes if nodes is None)"""
    return [chunk for node in (nodes or self.nodes) for chunk in node.chunks]
def getOrderedChunks(self):
    """Get chunks as visited by dfsOnFinish.

    Returns:
        list of NodeChunks: the ordered list of NodeChunks
    """
    orderedNodes, _ = self.dfsOnFinish()
    return self.getChunks(orderedNodes)
@property
def nodes(self):
    """The model holding all nodes of the graph."""
    return self._nodes

@property
def edges(self):
    """The model holding all edges, keyed by destination attribute."""
    return self._edges

@property
def importedNodes(self):
    """Return the list of nodes that were added to the graph with the latest 'Import Project' action."""
    return self._importedNodes

@property
def cacheDir(self):
    """str: the computation cache directory (stored with unix-style separators)."""
    return self._cacheDir

@cacheDir.setter
def cacheDir(self, value):
    # no-op when unchanged, to avoid needless re-evaluation below
    if self._cacheDir == value:
        return
    # use unix-style paths for cache directory
    self._cacheDir = value.replace(os.path.sep, "/")
    # the cache moved: force internals and statuses to refresh
    self.updateInternals(force=True)
    self.updateStatusFromCache(force=True)
    self.cacheDirChanged.emit()
def setVerbose(self, v):
    """Best-effort: set the 'verbose' attribute value on every node that has one.

    Args:
        v: the value to assign to each node's 'verbose' attribute.
    """
    with GraphModification(self):
        for node in self._nodes:
            if not node.hasAttribute("verbose"):
                continue
            try:
                node.verbose.value = v
            except Exception:
                # deliberately best-effort: keep going if a node rejects the
                # value. (Was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit.)
                pass
# Property/Signal declarations exposing graph state to the UI layer
# (presumably Qt/QML bindings — see the Property/Signal imports; TODO confirm).
# These wrap the Python properties defined above via .fget/.fset.
nodes = Property(BaseObject, nodes.fget, constant=True)
edges = Property(BaseObject, edges.fget, constant=True)
filepathChanged = Signal()
filepath = Property(str, lambda self: self._filepath, notify=filepathChanged)
fileReleaseVersion = Property(
    str,
    lambda self: self.header.get(Graph.IO.Keys.ReleaseVersion, "0.0"),
    notify=filepathChanged,
)
cacheDirChanged = Signal()
cacheDir = Property(str, cacheDir.fget, cacheDir.fset, notify=cacheDirChanged)
updated = Signal()
canComputeLeavesChanged = Signal()
canComputeLeaves = Property(
    bool, lambda self: self._canComputeLeaves, notify=canComputeLeavesChanged
)
def loadGraph(filepath):
    """Load a Graph from the given file, run an initial update, and return it."""
    graph = Graph("")
    graph.load(filepath)
    graph.update()
    return graph
def getAlreadySubmittedChunks(nodes):
    """Return every chunk of the given nodes that reports itself as already submitted."""
    return [
        chunk
        for node in nodes
        for chunk in node.chunks
        if chunk.isAlreadySubmitted()
    ]
def executeGraph(graph, toNodes=None, forceCompute=False, forceStatus=False):
    """Compute the graph (or the chain leading to 'toNodes') locally, chunk by chunk.

    Args:
        graph (Graph): the graph to execute.
        toNodes: optional target nodes; all leaves if None.
        forceCompute (bool): process the full chain, including already-successful nodes.
        forceStatus (bool): only warn (instead of raising) when some chunks
            are already submitted elsewhere.

    Raises:
        RuntimeError: if conflicting submitted chunks exist and forceStatus is False.
    """
    if forceCompute:
        nodes, edges = graph.dfsOnFinish(startNodes=toNodes)
    else:
        nodes, edges = graph.dfsToProcess(startNodes=toNodes)

    chunksInConflict = getAlreadySubmittedChunks(nodes)

    if chunksInConflict:
        chunksStatus = set([chunk.status.status.name for chunk in chunksInConflict])
        chunksName = [node.name for node in chunksInConflict]
        msg = "WARNING: Some nodes are already submitted with status: {}\nNodes: {}".format(
            ", ".join(chunksStatus), ", ".join(chunksName)
        )
        if forceStatus:
            print(msg)
        else:
            raise RuntimeError(msg)

    print("Nodes to execute: ", str([n.name for n in nodes]))

    for node in nodes:
        node.beginSequence(forceCompute)
    for n, node in enumerate(nodes):
        try:
            multiChunks = len(node.chunks) > 1
            for c, chunk in enumerate(node.chunks):
                # progress banner: include chunk index only for multi-chunk nodes
                if multiChunks:
                    print(
                        "\n[{node}/{nbNodes}]({chunk}/{nbChunks}) {nodeName}".format(
                            node=n + 1,
                            nbNodes=len(nodes),
                            chunk=c + 1,
                            nbChunks=len(node.chunks),
                            nodeName=node.nodeType,
                        )
                    )
                else:
                    print(
                        "\n[{node}/{nbNodes}] {nodeName}".format(
                            node=n + 1, nbNodes=len(nodes), nodeName=node.nodeType
                        )
                    )
                chunk.process(forceCompute)
        except Exception as e:
            logging.error("Error on node computation: {}".format(e))
            # roll back submitted statuses before propagating the error
            graph.clearSubmittedNodes()
            raise
    for node in nodes:
        node.endSequence()
def submitGraph(graph, submitter, toNodes=None, submitLabel="{projectName}"):
    """Submit the computable part of the graph to an external submitter.

    Args:
        graph (Graph): the graph to submit.
        submitter (str): name of the submitter to use; may be falsy when
            exactly one submitter is registered.
        toNodes: optional target nodes (all leaves if None).
        submitLabel (str): label template forwarded to the submitter.

    Raises:
        RuntimeError: if the requested submitter is unknown.
    """
    nodesToProcess, edgesToProcess = graph.dfsToProcess(startNodes=toNodes)
    flowEdges = graph.flowEdges(startNodes=toNodes)
    edgesToProcess = set(edgesToProcess).intersection(flowEdges)

    if not nodesToProcess:
        logging.warning("Nothing to compute")
        return

    # BUGFIX: this log line used to print edgesToProcess instead of the nodes
    logging.info("Nodes to process: {}".format(nodesToProcess))
    logging.info("Edges to process: {}".format(edgesToProcess))

    sub = None
    if submitter:
        sub = meshroom.core.submitters.get(submitter, None)
    elif len(meshroom.core.submitters) == 1:
        # if only one submitter available use it
        # BUGFIX: dict views are not indexable in Python 3
        # (was `meshroom.core.submitters.values()[0]`, a TypeError)
        sub = next(iter(meshroom.core.submitters.values()))
    if sub is None:
        raise RuntimeError(
            "Unknown Submitter: '{submitter}'. Available submitters are: '{allSubmitters}'.".format(
                submitter=submitter, allSubmitters=str(meshroom.core.submitters.keys())
            )
        )

    try:
        res = sub.submit(
            nodesToProcess, edgesToProcess, graph.filepath, submitLabel=submitLabel
        )
        if res:
            for node in nodesToProcess:
                node.submit()  # update node status
    except Exception as e:
        logging.error("Error on submit : {}".format(e))
def submit(graphFile, submitter, toNode=None, submitLabel="{projectName}"):
    """
    Submit the given graph via the given submitter.
    """
    graph = loadGraph(graphFile)
    targetNodes = graph.findNodes(toNode) if toNode else None
    submitGraph(graph, submitter, targetNodes, submitLabel=submitLabel)
|
portfolios | views_pages | #
# Quru Image Server
#
# Document: views_pages.py
# Date started: 09 Mar 2018
# By: Matt Fozard
# Purpose: Portfolio viewing and management pages
# Requires: Flask
# Copyright: Quru Ltd (www.quru.com)
# Licence:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/
#
# Last Changed: $Date$ $Rev$ by $Author$
#
# Notable modifications:
# Date By Details
# ========= ==== ============================================================
#
from flask import render_template
from imageserver.api_util import create_api_error_dict
from imageserver.errors import DoesNotExistError
from imageserver.flask_app import app, data_engine, logger, permissions_engine
from imageserver.models import FolderPermission, FolioPermission
from imageserver.portfolios import blueprint
from imageserver.session_manager import get_session_user
from imageserver.views_util import login_required, safe_error_str, url_for_image_attrs
from .util import get_portfolio_image_attrs
# Portfolios listing/home page
@blueprint.route("/")
@login_required
def portfolios_index():
    # Not sure yet whether login is required or what this page should show
    error_info = {
        "title": "Not yet implemented",
        "message": "Sorry, this page is not yet implemented.",
    }
    return render_template("error.html", error=error_info), 404
# Portfolio edit page
@blueprint.route("/<string:human_id>/edit/")
@login_required
def portfolio_edit(human_id):
    # placeholder page until portfolio editing is implemented
    error_info = {
        "title": "Not yet implemented",
        "message": "Sorry, this page is not yet implemented.",
    }
    return render_template("error.html", error=error_info), 404
# Portfolio export/publish page
@blueprint.route("/<string:human_id>/publish/")
@login_required
def portfolio_export(human_id):
    # placeholder page until portfolio publishing is implemented
    error_info = {
        "title": "Not yet implemented",
        "message": "Sorry, this page is not yet implemented.",
    }
    return render_template("error.html", error=error_info), 404
# Portfolio view page
@blueprint.route("/<string:human_id>/")
def portfolio_view(human_id):
    """Render the viewing page for the portfolio identified by 'human_id'."""
    try:
        # Find the portfolio
        folio = data_engine.get_portfolio(human_id=human_id, load_images=True)
        if not folio:
            raise DoesNotExistError("Portfolio '%s' does not exist" % human_id)
        # Ensure that the user has permission to view the portfolio
        user = get_session_user()
        permissions_engine.ensure_portfolio_permitted(
            folio, FolioPermission.ACCESS_VIEW, user
        )
        # Filter out images that the user can't view
        # so that we don't get broken images in the UI
        checked_folders = {}  # cache folders already checked
        folio_images_1 = folio.images
        folio_images_2 = []
        for fol_img in folio_images_1:
            folder_path = fol_img.image.folder.path
            if folder_path in checked_folders:
                folio_images_2.append(fol_img)
            elif permissions_engine.is_folder_permitted(
                folder_path,
                FolderPermission.ACCESS_VIEW,
                user,
                folder_must_exist=False,  # though it should exist!
            ):
                checked_folders[folder_path] = True
                folio_images_2.append(fol_img)
        # Replace the original image list with the filtered one
        folio.images = folio_images_2
        # Generate the image viewing URLs, including any portfolio-specific changes
        web_view_params = {"format": "jpg", "colorspace": "srgb"}
        sizing_view_params = {"width": 800, "height": 800, "size_fit": True}
        # images that already carry an explicit width/height in their parameters
        pre_sized_images = [
            fol_img
            for fol_img in folio.images
            if fol_img.parameters
            and (
                ("width" in fol_img.parameters and fol_img.parameters["width"]["value"])
                or (
                    "height" in fol_img.parameters
                    and fol_img.parameters["height"]["value"]
                )
            )
        ]
        for fol_img in folio.images:
            image_attrs = get_portfolio_image_attrs(fol_img, False, False, False)
            image_attrs.apply_dict(web_view_params, True, False, False)
            # only apply the default 800x800 sizing when no image is pre-sized
            if len(pre_sized_images) == 0:
                image_attrs.apply_dict(sizing_view_params, True, False, False)
            # Here we normalise the attrs only after everything has been applied
            image_attrs.normalise_values()
            fol_img.url = url_for_image_attrs(image_attrs)
        return render_template(
            "portfolio_view.html",
            title=folio.name,
            folio=folio,
            removed_count=(len(folio_images_1) - len(folio_images_2)),
        )
    except Exception as e:
        # Although this isn't a JSON API, we're still using it like a viewing API,
        # so get the correct HTTP status code to return. create_api_error_dict() also
        # logs security errors so we don't need to do that separately here.
        error_dict = create_api_error_dict(e, logger)
        if app.config["DEBUG"]:
            raise
        return render_template(
            "portfolio_view.html",
            title="Portfolio",
            err_msg="This portfolio cannot be viewed: " + safe_error_str(e),
        ), error_dict["status"]
|
example-ipermissionlabels | plugin | # encoding: utf-8
from __future__ import annotations
from typing import Any
from ckan import plugins
from ckan.lib.plugins import DefaultPermissionLabels
from ckan.plugins.toolkit import get_action
class ExampleIPermissionLabelsPlugin(plugins.SingletonPlugin, DefaultPermissionLabels):
    """
    Example permission labels plugin that makes datasets whose
    notes field starts with "Proposed:" visible only to their
    creator and Admin users in the organization assigned to the
    dataset.
    """

    plugins.implements(plugins.IPermissionLabels)

    def get_dataset_labels(self, dataset_obj: Any) -> list[str]:
        """
        Use creator-*, admin-* labels for proposed datasets
        """
        if not dataset_obj.notes.startswith("Proposed:"):
            # not a proposed dataset: fall back to the default labels
            return super().get_dataset_labels(dataset_obj)
        labels = ["creator-%s" % dataset_obj.creator_user_id]
        if dataset_obj.owner_org:
            labels.append("admin-%s" % dataset_obj.owner_org)
        return labels

    def get_user_dataset_labels(self, user_obj: Any) -> list[str]:
        """
        Include admin-* labels for users in addition to default labels
        creator-*, member-* and public
        """
        labels = super().get_user_dataset_labels(user_obj)
        if not user_obj:
            return labels
        admin_orgs = get_action("organization_list_for_user")(
            {"user": user_obj.id}, {"permission": "admin"}
        )
        labels.extend("admin-%s" % org["id"] for org in admin_orgs)
        return labels
|
lib | auth_basic | # This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
__doc__ = """This module provides a CherryPy 3.x tool which implements
the server-side of HTTP Basic Access Authentication, as described in
:rfc:`2617`.
Example usage, using the built-in checkpassword_dict function which uses a dict
as the credentials store::
userpassdict = {'bird' : 'bebop', 'ornette' : 'wayout'}
checkpassword = cherrypy.lib.auth_basic.checkpassword_dict(userpassdict)
basic_auth = {'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'earth',
'tools.auth_basic.checkpassword': checkpassword,
}
app_config = { '/' : basic_auth }
"""
__author__ = "visteya"
__date__ = "April 2009"
import binascii
import cherrypy
from cherrypy._cpcompat import base64_decode
def checkpassword_dict(user_password_dict):
    """Build a checkpassword callable backed by a {username: password} dict.

    The returned function is suitable as the ``checkpassword`` argument to
    basic_auth() when a simple dictionary-based credential store is enough.
    """
    def checkpassword(realm, user, password):
        stored = user_password_dict.get(user)
        # Unknown user (or empty stored password) fails authentication.
        return bool(stored) and stored == password
    return checkpassword
def basic_auth(realm, checkpassword, debug=False):
    """A CherryPy before_handler tool implementing HTTP Basic Access
    Authentication (:rfc:`2617`).

    realm
        The authentication realm presented to the client.  Must not
        contain a double-quote character.

    checkpassword
        Callable ``checkpassword(realm, username, password)`` returning
        True when the supplied credentials are valid, False otherwise.

    On a valid 'Basic' Authorization header the request is marked as
    logged in and processing continues.  A missing header, a non-Basic
    scheme, or failed credentials produce a 401 challenge; a malformed
    header produces a 400.
    """
    if '"' in realm:
        raise ValueError('Realm cannot contain the " (quote) character.')
    request = cherrypy.serving.request

    auth_header = request.headers.get("authorization")
    if auth_header is not None:
        try:
            scheme, params = auth_header.split(" ", 1)
            if scheme.lower() == "basic":
                credentials = base64_decode(params)
                username, password = credentials.split(":", 1)
                if checkpassword(realm, username, password):
                    if debug:
                        cherrypy.log("Auth succeeded", "TOOLS.AUTH_BASIC")
                    request.login = username
                    # Successful authentication: let the handler run.
                    return
        # Raised by a bad split() or invalid base64 payload.
        except (ValueError, binascii.Error):
            raise cherrypy.HTTPError(400, "Bad Request")

    # No usable credentials: challenge the client with the realm.
    cherrypy.serving.response.headers["www-authenticate"] = 'Basic realm="%s"' % realm
    raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
|
neubot | api_results | # neubot/api_results.py
#
# Copyright (c) 2012-2013
# Nexa Center for Internet & Society, Politecnico di Torino (DAUIN)
# and Simone Basso <bassosimone@gmail.com>
#
# This file is part of Neubot <http://www.neubot.org/>.
#
# Neubot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Neubot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Neubot. If not, see <http://www.gnu.org/licenses/>.
#
""" API to populate results.html page """
import cgi
import os
from neubot import utils, utils_hier, utils_path
from neubot.compat import json
from neubot.config import CONFIG
from neubot.http.message import Message
from neubot.utils_api import NotImplementedTest
# Directory that contains the description of each test, which consists of
# two files per test: a JSON file and an HTML file.
TESTDIR = utils_path.join(utils_hier.WWWDIR, "test")
# Config variables to be copied to output: they allow ordinary users to
# configure the appearance of results.html.
COPY_CONFIG_VARIABLES = (
"www_no_description",
"www_no_legend",
"www_no_plot",
"www_no_split_by_ip",
"www_no_table",
"www_no_title",
)
def api_results(stream, request, query):
    """Populates www/results.html page.

    :param stream: HTTP stream on which the response is sent
    :param request: the incoming HTTP request object
    :param query: raw query string, e.g. 'test=speedtest&debug=1'
    """
    dictionary = cgi.parse_qs(query)
    # The 'test' query variable overrides the configured default test.
    test = CONFIG["www_default_test_to_show"]
    if "test" in dictionary:
        test = str(dictionary["test"][0])
    # Read the directory each time, so you don't need to restart the daemon
    # after you have changed the description of a test.
    available_tests = {}
    for filename in os.listdir(TESTDIR):
        if filename.endswith(".json"):
            # endswith() guarantees rfind() succeeds; the -1 check is a
            # defensive internal-consistency guard.
            index = filename.rfind(".json")
            if index == -1:
                raise RuntimeError("api_results: internal error")
            name = filename[:index]
            available_tests[name] = filename
    if not test in available_tests:
        raise NotImplementedTest("Test not implemented")
    # Allow power users to customize results.html heavily, by creating JSON
    # descriptions with local modifications.
    filepath = utils_path.append(TESTDIR, available_tests[test], False)
    if not filepath:
        raise RuntimeError("api_results: append() path failed")
    # A '<test>.json.local' file, when present, overrides the stock one.
    localfilepath = filepath + ".local"
    if os.path.isfile(localfilepath):
        filep = open(localfilepath, "rb")
    else:
        filep = open(filepath, "rb")
    response_body = json.loads(filep.read())
    filep.close()
    # Add extra information needed to populate results.html selection that
    # allows to select which test results must be shown.
    response_body["available_tests"] = available_tests.keys()
    response_body["selected_test"] = test
    # The matching '<test>.html' file, when present, supplies the test's
    # human-readable description.
    descrpath = filepath.replace(".json", ".html")
    if os.path.isfile(descrpath):
        filep = open(descrpath, "rb")
        response_body["description"] = filep.read()
        filep.close()
    # Provide the web user interface some settings it needs, but only if they
    # were not already provided by the `.local` file.
    for variable in COPY_CONFIG_VARIABLES:
        if not variable in response_body:
            response_body[variable] = CONFIG[variable]
    # Note: DO NOT sort keys here: order MUST be preserved
    # 'debug=1' pretty-prints the JSON as text/plain for easy inspection.
    indent, mimetype = None, "application/json"
    if "debug" in dictionary and utils.intify(dictionary["debug"][0]):
        indent, mimetype = 4, "text/plain"
    response = Message()
    body = json.dumps(response_body, indent=indent)
    response.compose(code="200", reason="Ok", body=body, mimetype=mimetype)
    stream.send_response(request, response)
|
mylar | helpers | # This file is part of Mylar.
# -*- coding: utf-8 -*-
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
import calendar
import ctypes
import datetime
import errno
import gzip
import hashlib
import itertools
import json
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import time
from datetime import date, timedelta
from operator import itemgetter
import logger
import mylar
import Queue
import requests
from apscheduler.triggers.interval import IntervalTrigger
from mylar import db, getcomics, nzbget, process, sabnzbd
from StringIO import StringIO
def multikeysort(items, columns):
    """Sort a list of dicts by multiple columns; a '-' prefix on a column
    name sorts that column descending.

    Bug fix: the old comparer returned 0 as soon as the FIRST key compared
    equal, so secondary sort keys were never consulted.  Ties on earlier
    keys now correctly fall through to later keys.
    """
    comparers = [((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]
    def comparer(left, right):
        for fn, mult in comparers:
            result = cmp(fn(left), fn(right))
            if result:
                return mult * result
        # every key compared equal
        return 0
    return sorted(items, cmp=comparer)
def checked(variable):
    """Return 'Checked' for a truthy value, otherwise an empty string."""
    return 'Checked' if variable else ''
def radio(variable, pos):
    """Return 'Checked' when variable equals pos, otherwise an empty string."""
    return 'Checked' if variable == pos else ''
def latinToAscii(unicrap):
    """
    From couch potato.

    Fold Latin-1/Unicode text down to plain ASCII using a translation
    table; unmapped characters >= 0x80 are dropped.

    Fixes: dict.has_key() (deprecated, removed in Python 3) replaced with
    the 'in' operator, and the quadratic string '+=' loop replaced with a
    list accumulator joined once at the end.  Both work identically on
    Python 2.
    """
    xlate = {0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A',
             0xc6: 'Ae', 0xc7: 'C',
             0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E', 0x86: 'e',
             0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I',
             0xd0: 'Th', 0xd1: 'N',
             0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O',
             0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U',
             0xdd: 'Y', 0xde: 'th', 0xdf: 'ss',
             0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a',
             0xe6: 'ae', 0xe7: 'c',
             0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e', 0x0259: 'e',
             0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i',
             0xf0: 'th', 0xf1: 'n',
             0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o',
             0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u',
             0xfd: 'y', 0xfe: 'th', 0xff: 'y',
             0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',
             0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',
             0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}',
             0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',
             0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4: "'",
             0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',
             0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>',
             0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',
             0xd7: '*', 0xf7: '/'
             }
    out = []
    for i in unicrap:
        code = ord(i)
        if code in xlate:
            out.append(xlate[code])
        elif code >= 0x80:
            # non-ASCII character with no mapping: drop it entirely
            pass
        else:
            out.append(str(i))
    return ''.join(out)
def convert_milliseconds(ms):
    """Format a millisecond count as MM:SS, or HH:MM:SS past one hour."""
    total_seconds = ms /1000
    broken_down = time.gmtime(total_seconds)
    fmt = "%H:%M:%S" if total_seconds > 3600 else "%M:%S"
    return time.strftime(fmt, broken_down)
def convert_seconds(s):
    """Format a second count as MM:SS, or HH:MM:SS past one hour."""
    broken_down = time.gmtime(s)
    fmt = "%H:%M:%S" if s > 3600 else "%M:%S"
    return time.strftime(fmt, broken_down)
def today():
    """Return today's date as an ISO-8601 string (YYYY-MM-DD)."""
    return datetime.date.today().isoformat()
def now():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    current = datetime.datetime.now()
    return current.strftime("%Y-%m-%d %H:%M:%S")
def utctimestamp():
    """Return the current POSIX timestamp (seconds since the epoch)."""
    stamp = time.time()
    return stamp
def bytes_to_mb(bytes):
    """Render a byte count (int or numeric string) as a 'N.N MB' string."""
    megs = int(bytes) /1048576
    return '%.1f MB' % megs
def human_size(size_bytes):
    """
    Format a size in bytes into a 'human' file size, e.g. bytes, KB, MB, GB, TB, PB.

    bytes/KB are reported as whole numbers while MB and above carry extra
    precision, e.g. 1 byte, 43 bytes, 443 KB, 4.3 MB, 4.43 GB.  None is
    treated as zero.
    """
    if size_bytes == 1:
        # singular form, to avoid the unnecessary plural '1 bytes'
        return "1 byte"
    suffixes_table = [('bytes', 0), ('KB', 0), ('MB', 1), ('GB', 2), ('TB', 2), ('PB', 2)]
    amount = float(0 if size_bytes is None else size_bytes)
    for suffix, precision in suffixes_table:
        if amount < 1024.0:
            break
        amount /= 1024.0
    if precision == 0:
        rendered = "%d" % amount
    else:
        rendered = str(round(amount, ndigits=precision))
    return "%s %s" % (rendered, suffix)
def human2bytes(s):
    """
    Convert a size string such as '1M' or '2.5G' into a byte count.

    >>> human2bytes('1M')
    1048576
    >>> human2bytes('1G')
    1073741824
    """
    symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    letter = s[-1:].strip().upper()
    digits = re.sub(',', '', s[:-1])
    if digits == '0':
        return 0
    # sizes may carry decimal places, hence float() rather than isdigit()
    assert float(digits) and letter in symbols
    # B -> 2**0, K -> 2**10, M -> 2**20, ...
    prefix = {sym: 1 << (idx * 10) for idx, sym in enumerate(symbols)}
    return int(float(digits) * prefix[letter])
def replace_all(text, dic):
    """Apply every key -> value substitution in dic to text, then strip
    trailing whitespace.

    Fix: dict.iteritems() is Python 2-only (removed in Python 3);
    dict.items() behaves identically here and works on both.
    """
    for target, replacement in dic.items():
        text = text.replace(target, replacement)
    return text.rstrip()
def cleanName(string):
    """ASCII-fold and lowercase a name, replacing filename-hostile
    punctuation with spaces; returns a UTF-8 encoded result."""
    folded = latinToAscii(string).lower()
    out_string = re.sub('[\/\@\#\$\%\^\*\+\"\[\]\{\}\<\>\=\_]', ' ', folded).encode('utf-8')
    return out_string
def cleanTitle(title):
    """Normalise separator characters to spaces, collapse runs of
    whitespace and return the result in Title Case."""
    normalised = re.sub('[\.\-\/\_]', ' ', title).lower()
    # Strip out extra whitespace, then title-case.
    return ' '.join(normalised.split()).title()
def extract_logline(s):
    """Parse one default-format log line into a
    (timestamp, level, thread, message) tuple, or None when it doesn't match."""
    # Default log format
    pattern = re.compile(r'(?P<timestamp>.*?)\s\-\s(?P<level>.*?)\s*\:\:\s(?P<thread>.*?)\s\:\s(?P<message>.*)', re.VERBOSE)
    match = pattern.match(s)
    if not match:
        return None
    return match.group("timestamp", "level", "thread", "message")
def is_number(s):
    """Return True when s is convertible to a float, False otherwise."""
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        return False
def decimal_issue(iss):
    """
    Convert an issue-number string into an integer sort key
    (whole issue * 1000 + scaled decimal part).

    Returns (deciss, dec_except) where dec_except is 'AU' for the special
    alphanumeric 'AU' issues and None otherwise.
    """
    dec_except = None
    dot_at = iss.find('.')
    if dot_at == -1:
        # No decimal portion -- scale the whole number.  Special 'AU'
        # issues keep only the digits before the marker.
        if 'au' in iss.lower():
            dec_except = 'AU'
            marker = iss.lower().find('au')
            return int(iss[:marker]) * 1000, dec_except
        return int(iss) * 1000, dec_except
    whole = iss[:dot_at]
    frac = iss[dot_at +1:]
    if int(frac) == 0:
        # '.0' / '.00' contribute nothing
        frac_val = int(frac)
    elif len(frac) == 1:
        frac_val = int(frac) * 10
    else:
        frac_val = int(frac.rstrip('0')) * 10
    return (int(whole) * 1000) + frac_val, dec_except
def rename_param(comicid, comicname, issue, ofilename, comicyear=None, issueid=None, annualize=None, arc=False):
    """Build the renamed filename and destination path for an issue file.

    Looks the issue up in the database (issues / annuals / storyarcs),
    normalises the issue number according to the zero-suppression and
    annual settings, applies the configured FILE_FORMAT template and
    returns a dict with keys 'destination_dir', 'nfilename', 'issueid'
    and 'comicid' -- or None when the issue cannot be located.

    Fixes: missing '%' operator on the ANNUALS-OFF/ANNUAL-IN-SERIES debug
    line (it called the string with a tuple, raising TypeError); the
    Python 2-only 'except ValueError, e' syntax replaced with a plain
    'except ValueError' (the bound exception was unused).
    """
    #import db
    myDB = db.DBConnection()
    comicid = str(comicid) # it's coming in unicoded...
    logger.fdebug(type(comicid))
    logger.fdebug(type(issueid))
    logger.fdebug('comicid: %s' % comicid)
    logger.fdebug('issue# as per cv: %s' % issue)
    # the issue here is a non-decimalized version, we need to see if it's got a decimal and if not, add '.00'
#    iss_find = issue.find('.')
#    if iss_find < 0:
#        # no decimal in issue number
#        iss = str(int(issue)) + ".00"
#    else:
#        iss_b4dec = issue[:iss_find]
#        iss_decval = issue[iss_find+1:]
#        if len(str(int(iss_decval))) == 1:
#            iss = str(int(iss_b4dec)) + "." + str(int(iss_decval)*10)
#        else:
#            if issue.endswith(".00"):
#                iss = issue
#            else:
#                iss = str(int(iss_b4dec)) + "." + iss_decval
#        issue = iss
#    print ("converted issue#: " + str(issue))
    logger.fdebug('issueid:' + str(issueid))
    # Resolve the IssueID from the issue number when it wasn't supplied.
    if issueid is None:
        logger.fdebug('annualize is ' + str(annualize))
        if arc:
            #this has to be adjusted to be able to include story arc issues that span multiple arcs
            chkissue = myDB.selectone("SELECT * from storyarcs WHERE ComicID=? AND Issue_Number=?", [comicid, issue]).fetchone()
        else:
            chkissue = myDB.selectone("SELECT * from issues WHERE ComicID=? AND Issue_Number=?", [comicid, issue]).fetchone()
        if all([chkissue is None, annualize is None, not mylar.CONFIG.ANNUALS_ON]):
            chkissue = myDB.selectone("SELECT * from annuals WHERE ComicID=? AND Issue_Number=?", [comicid, issue]).fetchone()
        if chkissue is None:
            #rechk chkissue against int value of issue #
            if arc:
                chkissue = myDB.selectone("SELECT * from storyarcs WHERE ComicID=? AND Int_IssueNumber=?", [comicid, issuedigits(issue)]).fetchone()
            else:
                chkissue = myDB.selectone("SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?", [comicid, issuedigits(issue)]).fetchone()
            if all([chkissue is None, annualize == 'yes', mylar.CONFIG.ANNUALS_ON]):
                chkissue = myDB.selectone("SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?", [comicid, issuedigits(issue)]).fetchone()
            if chkissue is None:
                logger.error('Invalid Issue_Number - please validate.')
                return
            else:
                logger.info('Int Issue_number compare found. continuing...')
                issueid = chkissue['IssueID']
        else:
            issueid = chkissue['IssueID']
    #use issueid to get publisher, series, year, issue number
    logger.fdebug('issueid is now : ' + str(issueid))
    if arc:
        issuenzb = myDB.selectone("SELECT * from storyarcs WHERE ComicID=? AND IssueID=? AND StoryArc=?", [comicid, issueid, arc]).fetchone()
    else:
        issuenzb = myDB.selectone("SELECT * from issues WHERE ComicID=? AND IssueID=?", [comicid, issueid]).fetchone()
    if issuenzb is None:
        logger.fdebug('not an issue, checking against annuals')
        issuenzb = myDB.selectone("SELECT * from annuals WHERE ComicID=? AND IssueID=?", [comicid, issueid]).fetchone()
        if issuenzb is None:
            logger.fdebug('Unable to rename - cannot locate issue id within db')
            return
        else:
            annualize = True
    if issuenzb is None:
        logger.fdebug('Unable to rename - cannot locate issue id within db')
        return
    #remap the variables to a common factor.
    if arc:
        issuenum = issuenzb['IssueNumber']
        issuedate = issuenzb['IssueDate']
        publisher = issuenzb['IssuePublisher']
        series = issuenzb['ComicName']
        seriesfilename = series #Alternate FileNaming is not available with story arcs.
        seriesyear = issuenzb['SeriesYear']
        arcdir = filesafe(issuenzb['StoryArc'])
        if mylar.CONFIG.REPLACE_SPACES:
            arcdir = arcdir.replace(' ', mylar.CONFIG.REPLACE_CHAR)
        if mylar.CONFIG.STORYARCDIR:
            storyarcd = os.path.join(mylar.CONFIG.DESTINATION_DIR, "StoryArcs", arcdir)
            logger.fdebug('Story Arc Directory set to : ' + storyarcd)
        else:
            logger.fdebug('Story Arc Directory set to : ' + mylar.CONFIG.GRABBAG_DIR)
            storyarcd = os.path.join(mylar.CONFIG.DESTINATION_DIR, mylar.CONFIG.GRABBAG_DIR)
        comlocation = storyarcd
        comversion = None #need to populate this.
    else:
        issuenum = issuenzb['Issue_Number']
        issuedate = issuenzb['IssueDate']
        comicnzb= myDB.selectone("SELECT * from comics WHERE comicid=?", [comicid]).fetchone()
        publisher = comicnzb['ComicPublisher']
        series = comicnzb['ComicName']
        if comicnzb['AlternateFileName'] is None or comicnzb['AlternateFileName'] == 'None':
            seriesfilename = series
        else:
            seriesfilename = comicnzb['AlternateFileName']
            logger.fdebug('Alternate File Naming has been enabled for this series. Will rename series title to : ' + seriesfilename)
        seriesyear = comicnzb['ComicYear']
        comlocation = comicnzb['ComicLocation']
        comversion = comicnzb['ComicVersion']
    # Substitute unicode fraction/infinity glyphs with sortable numerics.
    unicodeissue = issuenum
    if type(issuenum) == unicode:
        vals = {u'\xbd':'.5',u'\xbc':'.25',u'\xbe':'.75',u'\u221e':'9999999999',u'\xe2':'9999999999'}
    else:
        vals = {'\xbd':'.5','\xbc':'.25','\xbe':'.75','\u221e':'9999999999','\xe2':'9999999999'}
    x = [vals[key] for key in vals if key in issuenum]
    if x:
        issuenum = x[0]
        logger.fdebug('issue number formatted: %s' % issuenum)
    #comicid = issuenzb['ComicID']
    #issueno = str(issuenum).split('.')[0]
    # Strip known alphanumeric suffixes (AU, NOW, ...) and remember them
    # so they can be re-appended after zero-padding.
    issue_except = 'None'
    issue_exceptions = ['AU',
                        'INH',
                        'NOW',
                        'AI',
                        'MU',
                        'HU',
                        'A',
                        'B',
                        'C',
                        'X',
                        'O']
    valid_spaces = ('.', '-')
    for issexcept in issue_exceptions:
        if issexcept.lower() in issuenum.lower():
            logger.fdebug('ALPHANUMERIC EXCEPTION : [' + issexcept + ']')
            v_chk = [v for v in valid_spaces if v in issuenum]
            if v_chk:
                iss_space = v_chk[0]
                logger.fdebug('character space denoted as : ' + iss_space)
            else:
                logger.fdebug('character space not denoted.')
                iss_space = ''
#            if issexcept == 'INH':
#               issue_except = '.INH'
            if issexcept == 'NOW':
                if '!' in issuenum: issuenum = re.sub('\!', '', issuenum)
#                issue_except = '.NOW'
            issue_except = iss_space + issexcept
            logger.fdebug('issue_except denoted as : ' + issue_except)
            issuenum = re.sub("[^0-9]", "", issuenum)
            break
#    if 'au' in issuenum.lower() and issuenum[:1].isdigit():
#        issue_except = ' AU'
#    elif 'ai' in issuenum.lower() and issuenum[:1].isdigit():
#        issuenum = re.sub("[^0-9]", "", issuenum)
#        issue_except = ' AI'
#    elif 'inh' in issuenum.lower() and issuenum[:1].isdigit():
#        issuenum = re.sub("[^0-9]", "", issuenum)
#        issue_except = '.INH'
#    elif 'now' in issuenum.lower() and issuenum[:1].isdigit():
#        if '!' in issuenum: issuenum = re.sub('\!', '', issuenum)
#        issuenum = re.sub("[^0-9]", "", issuenum)
#        issue_except = '.NOW'
    # Split off any decimal portion of the issue number.
    if '.' in issuenum:
        iss_find = issuenum.find('.')
        iss_b4dec = issuenum[:iss_find]
        if iss_find == 0:
            iss_b4dec = '0'
        iss_decval = issuenum[iss_find +1:]
        if iss_decval.endswith('.'):
            iss_decval = iss_decval[:-1]
        if int(iss_decval) == 0:
            iss = iss_b4dec
            issdec = int(iss_decval)
            issueno = iss
        else:
            if len(iss_decval) == 1:
                iss = iss_b4dec + "." + iss_decval
                issdec = int(iss_decval) * 10
            else:
                iss = iss_b4dec + "." + iss_decval.rstrip('0')
                issdec = int(iss_decval.rstrip('0')) * 10
            issueno = iss_b4dec
    else:
        iss = issuenum
        issueno = iss
    # issue zero-suppression here
    if mylar.CONFIG.ZERO_LEVEL == "0":
        zeroadd = ""
    else:
        if mylar.CONFIG.ZERO_LEVEL_N == "none": zeroadd = ""
        elif mylar.CONFIG.ZERO_LEVEL_N == "0x": zeroadd = "0"
        elif mylar.CONFIG.ZERO_LEVEL_N == "00x": zeroadd = "00"
    logger.fdebug('Zero Suppression set to : ' + str(mylar.CONFIG.ZERO_LEVEL_N))
    prettycomiss = None
    if issueno.isalpha():
        logger.fdebug('issue detected as an alpha.')
        prettycomiss = str(issueno)
    else:
        try:
            x = float(issuenum)
            #validity check
            if x < 0:
                logger.info('I\'ve encountered a negative issue #: %s. Trying to accomodate.' % issueno)
                prettycomiss = '-' + str(zeroadd) + str(issueno[1:])
            elif x == 9999999999:
                logger.fdebug('Infinity issue found.')
                issuenum = 'infinity'
            elif x >= 0:
                pass
            else:
                raise ValueError
        except ValueError:
            logger.warn('Unable to properly determine issue number [ %s] - you should probably log this on github for help.' % issueno)
            return
    # Zero-pad according to the issue's magnitude and config.
    if prettycomiss is None and len(str(issueno)) > 0:
        #if int(issueno) < 0:
        #    self._log("issue detected is a negative")
        #    prettycomiss = '-' + str(zeroadd) + str(abs(issueno))
        if int(issueno) < 10:
            logger.fdebug('issue detected less than 10')
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
                    prettycomiss = str(zeroadd) + str(iss)
                else:
                    prettycomiss = str(zeroadd) + str(int(issueno))
            else:
                prettycomiss = str(zeroadd) + str(iss)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            logger.fdebug('Zero level supplement set to ' + str(mylar.CONFIG.ZERO_LEVEL_N) + '. Issue will be set as : ' + str(prettycomiss))
        elif int(issueno) >= 10 and int(issueno) < 100:
            logger.fdebug('issue detected greater than 10, but less than 100')
            if mylar.CONFIG.ZERO_LEVEL_N == "none":
                zeroadd = ""
            else:
                zeroadd = "0"
            if '.' in iss:
                if int(iss_decval) > 0:
                    issueno = str(iss)
                    prettycomiss = str(zeroadd) + str(iss)
                else:
                    prettycomiss = str(zeroadd) + str(int(issueno))
            else:
                prettycomiss = str(zeroadd) + str(iss)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            logger.fdebug('Zero level supplement set to ' + str(mylar.CONFIG.ZERO_LEVEL_N) + '.Issue will be set as : ' + str(prettycomiss))
        else:
            logger.fdebug('issue detected greater than 100')
            if issuenum == 'infinity':
                prettycomiss = 'infinity'
            else:
                if '.' in iss:
                    if int(iss_decval) > 0:
                        issueno = str(iss)
                prettycomiss = str(issueno)
            if issue_except != 'None':
                prettycomiss = str(prettycomiss) + issue_except
            logger.fdebug('Zero level supplement set to ' + str(mylar.CONFIG.ZERO_LEVEL_N) + '. Issue will be set as : ' + str(prettycomiss))
    elif len(str(issueno)) == 0:
        prettycomiss = str(issueno)
        logger.fdebug('issue length error - cannot determine length. Defaulting to None: ' + str(prettycomiss))
    logger.fdebug('Pretty Comic Issue is : ' + str(prettycomiss))
    if mylar.CONFIG.UNICODE_ISSUENUMBER:
        logger.fdebug('Setting this to Unicode format as requested: %s' % prettycomiss)
        prettycomiss = unicodeissue
    issueyear = issuedate[:4]
    month = issuedate[5:7].replace('-', '').strip()
    month_name = fullmonth(month)
    if month_name is None:
        month_name = 'None'
    logger.fdebug('Issue Year : ' + str(issueyear))
    logger.fdebug('Publisher: ' + publisher)
    logger.fdebug('Series: ' + series)
    logger.fdebug('Year: ' + str(seriesyear))
    logger.fdebug('Comic Location: ' + comlocation)
    if comversion is None:
        comversion = 'None'
    #if comversion is None, remove it so it doesn't populate with 'None'
    if comversion == 'None':
        chunk_f_f = re.sub('\$VolumeN', '', mylar.CONFIG.FILE_FORMAT)
        chunk_f = re.compile(r'\s+')
        chunk_file_format = chunk_f.sub(' ', chunk_f_f)
        logger.fdebug('No version # found for series, removing from filename')
        logger.fdebug("new format: " + str(chunk_file_format))
    else:
        chunk_file_format = mylar.CONFIG.FILE_FORMAT
    if annualize is None:
        chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
        chunk_f = re.compile(r'\s+')
        chunk_file_format = chunk_f.sub(' ', chunk_f_f)
        logger.fdebug('not an annual - removing from filename paramaters')
        logger.fdebug('new format: ' + str(chunk_file_format))
    else:
        logger.fdebug('chunk_file_format is: ' + str(chunk_file_format))
        if mylar.CONFIG.ANNUALS_ON:
            if 'annual' in series.lower():
                if '$Annual' not in chunk_file_format: # and 'annual' not in ofilename.lower():
                    #if it's an annual, but $annual isn't specified in file_format, we need to
                    #force it in there, by default in the format of $Annual $Issue
                    #prettycomiss = "Annual " + str(prettycomiss)
                    logger.fdebug('[%s][ANNUALS-ON][ANNUAL IN SERIES][NO ANNUAL FORMAT] prettycomiss: %s' % (series, prettycomiss))
                else:
                    #because it exists within title, strip it then use formatting tag for placement of wording.
                    chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
                    chunk_f = re.compile(r'\s+')
                    chunk_file_format = chunk_f.sub(' ', chunk_f_f)
                    logger.fdebug('[%s][ANNUALS-ON][ANNUAL IN SERIES][ANNUAL FORMAT] prettycomiss: %s' % (series, prettycomiss))
            else:
                if '$Annual' not in chunk_file_format: # and 'annual' not in ofilename.lower():
                    #if it's an annual, but $annual isn't specified in file_format, we need to
                    #force it in there, by default in the format of $Annual $Issue
                    prettycomiss = "Annual %s" % prettycomiss
                    logger.fdebug('[%s][ANNUALS-ON][ANNUAL NOT IN SERIES][NO ANNUAL FORMAT] prettycomiss: %s' % (series, prettycomiss))
                else:
                    logger.fdebug('[%s][ANNUALS-ON][ANNUAL NOT IN SERIES][ANNUAL FORMAT] prettycomiss: %s' % (series, prettycomiss))
        else:
            #if annuals aren't enabled, then annuals are being tracked as independent series.
            #annualize will be true since it's an annual in the seriesname.
            if 'annual' in series.lower():
                if '$Annual' not in chunk_file_format: # and 'annual' not in ofilename.lower():
                    #if it's an annual, but $annual isn't specified in file_format, we need to
                    #force it in there, by default in the format of $Annual $Issue
                    #prettycomiss = "Annual " + str(prettycomiss)
                    # FIX: the '%' operator was missing here, which made this a
                    # call on the string literal and raised TypeError.
                    logger.fdebug('[%s][ANNUALS-OFF][ANNUAL IN SERIES][NO ANNUAL FORMAT] prettycomiss: %s' % (series, prettycomiss))
                else:
                    #because it exists within title, strip it then use formatting tag for placement of wording.
                    chunk_f_f = re.sub('\$Annual', '', chunk_file_format)
                    chunk_f = re.compile(r'\s+')
                    chunk_file_format = chunk_f.sub(' ', chunk_f_f)
                    logger.fdebug('[%s][ANNUALS-OFF][ANNUAL IN SERIES][ANNUAL FORMAT] prettycomiss: %s' % (series, prettycomiss))
            else:
                if '$Annual' not in chunk_file_format: # and 'annual' not in ofilename.lower():
                    #if it's an annual, but $annual isn't specified in file_format, we need to
                    #force it in there, by default in the format of $Annual $Issue
                    prettycomiss = "Annual %s" % prettycomiss
                    logger.fdebug('[%s][ANNUALS-OFF][ANNUAL NOT IN SERIES][NO ANNUAL FORMAT] prettycomiss: %s' % (series, prettycomiss))
                else:
                    logger.fdebug('[%s][ANNUALS-OFF][ANNUAL NOT IN SERIES][ANNUAL FORMAT] prettycomiss: %s' % (series, prettycomiss))
            logger.fdebug('Annual detected within series title of ' + series + '. Not auto-correcting issue #')
    # Strip characters that are illegal / troublesome in filenames.
    seriesfilename = seriesfilename.encode('ascii', 'ignore').strip()
    filebad = [':', ',', '/', '?', '!', '\'', '\"', '\*'] #in u_comicname or '/' in u_comicname or ',' in u_comicname or '?' in u_comicname:
    for dbd in filebad:
        if dbd in seriesfilename:
            if any([dbd == '/', dbd == '*']):
                repthechar = '-'
            else:
                repthechar = ''
            seriesfilename = seriesfilename.replace(dbd, repthechar)
            logger.fdebug('Altering series name due to filenaming restrictions: ' + seriesfilename)
    publisher = re.sub('!', '', publisher)
    # Values substituted into the FILE_FORMAT template.
    file_values = {'$Series': seriesfilename,
                   '$Issue': prettycomiss,
                   '$Year': issueyear,
                   '$series': series.lower(),
                   '$Publisher': publisher,
                   '$publisher': publisher.lower(),
                   '$VolumeY': 'V' + str(seriesyear),
                   '$VolumeN': comversion,
                   '$monthname': month_name,
                   '$month': month,
                   '$Annual': 'Annual'
                   }
    extensions = ('.cbr', '.cbz', '.cb7')
    if ofilename.lower().endswith(extensions):
        path, ext = os.path.splitext(ofilename)
    if mylar.CONFIG.FILE_FORMAT == '':
        logger.fdebug('Rename Files is not enabled - keeping original filename.')
        #check if extension is in nzb_name - will screw up otherwise
        if ofilename.lower().endswith(extensions):
            nfilename = ofilename[:-4]
        else:
            nfilename = ofilename
    else:
        nfilename = replace_all(chunk_file_format, file_values)
        if mylar.CONFIG.REPLACE_SPACES:
            #mylar.CONFIG.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
            nfilename = nfilename.replace(' ', mylar.CONFIG.REPLACE_CHAR)
    nfilename = re.sub('[\,\:]', '', nfilename) + ext.lower()
    logger.fdebug('New Filename: ' + nfilename)
    if mylar.CONFIG.LOWERCASE_FILENAMES:
        nfilename = nfilename.lower()
        dst = os.path.join(comlocation, nfilename)
    else:
        dst = os.path.join(comlocation, nfilename)
    logger.fdebug('Source: ' + ofilename)
    logger.fdebug('Destination: ' + dst)
    rename_this = {"destination_dir": dst,
                   "nfilename": nfilename,
                   "issueid": issueid,
                   "comicid": comicid}
    return rename_this
def apiremove(apistring, type):
    """Mask API keys embedded in a URL/query string before it is logged.

    type == 'nzb' masks everything after 'apikey=' to the end of string;
    otherwise 'type' is the delimiter terminating the value ('$' for end
    of string, '&' for the next query variable).
    """
    if type == 'nzb':
        pattern = re.compile("(?<=apikey=)(?P<value>.*?)(?=$)")
        return pattern.sub("xUDONTNEEDTOKNOWTHISx", apistring)
    #type = $ to denote end of string
    #type = & to denote up until next api variable
    first_pass = re.compile("(?<=%26i=1%26r=)(?P<value>.*?)(?=" + str(type) +")").sub("xUDONTNEEDTOKNOWTHISx", apistring)
    apiremoved = re.compile("(?<=apikey=)(?P<value>.*?)(?=" + str(type) +")").sub("xUDONTNEEDTOKNOWTHISx", first_pass)
    #need to remove the urlencoded-portions as well in future
    return apiremoved
def remove_apikey(payd, key):
    """Redact an API key in a payload dict before it is logged.

    :param payd: dictionary with payload values (mutated in place)
    :param key: the key to replace with 'REDACTED' (normally 'apikey')

    Fix: the old loop assigned payd[key] once per existing entry -- a
    pointless O(n) rewrite of one assignment that also mutated the dict
    while iterating it (a RuntimeError on Python 3 when the key is new).
    Behavior is preserved: a non-empty dict gets the key set, an empty
    dict is returned unchanged.
    """
    if payd:
        payd[key] = 'REDACTED'
    return payd
def ComicSort(comicorder=None, sequence=None, imported=None):
    """Build or refresh the watchlist sort order.

    sequence == 'startup': read the comics table and return a fresh
    comicorder dict (read once into memory to avoid record-locking).
    sequence == 'update': same read, but the result is stored into the
    mylar.COMICSORT global instead of being returned.
    sequence falsy: append the newly `imported` ComicID to the in-memory
    order at an artificially high position, without touching the db.
    """
    if sequence:
        # if it's on startup, load the sql into a tuple for use to avoid record-locking
        i = 0
        #import db
        myDB = db.DBConnection()
        comicsort = myDB.select("SELECT * FROM comics ORDER BY ComicSortName COLLATE NOCASE")
        comicorderlist = []
        comicorder = {}
        comicidlist = []
        if sequence == 'update':
            mylar.COMICSORT['SortOrder'] = None
            mylar.COMICSORT['LastOrderNo'] = None
            mylar.COMICSORT['LastOrderID'] = None
        for csort in comicsort:
            # NOTE(review): 'pass' here does NOT skip NULL ComicIDs -- the
            # row still falls through to the duplicate check below.
            if csort['ComicID'] is None: pass
            if not csort['ComicID'] in comicidlist:
                if sequence == 'startup':
                    comicorderlist.append({
                        'ComicID': csort['ComicID'],
                        'ComicOrder': i
                        })
                elif sequence == 'update':
                    comicorderlist.append({
#                    mylar.COMICSORT['SortOrder'].append({
                        'ComicID': csort['ComicID'],
                        'ComicOrder': i
                        })
                comicidlist.append(csort['ComicID'])
                i+=1
        if sequence == 'startup':
            if i == 0:
                # Empty watchlist: seed with a sentinel entry.
                comicorder['SortOrder'] = ({'ComicID': '99999', 'ComicOrder': 1})
                comicorder['LastOrderNo'] = 1
                comicorder['LastOrderID'] = 99999
            else:
                comicorder['SortOrder'] = comicorderlist
                comicorder['LastOrderNo'] = i -1
                comicorder['LastOrderID'] = comicorder['SortOrder'][i -1]['ComicID']
            # NOTE(review): 'i == 0' is a comparison, not an assignment, so
            # this statement has no effect (and i never goes negative here).
            if i < 0: i == 0
            logger.info('Sucessfully ordered ' + str(i -1) + ' series in your watchlist.')
            return comicorder
        elif sequence == 'update':
            mylar.COMICSORT['SortOrder'] = comicorderlist
            #print ("i:" + str(i))
            if i == 0:
                placemnt = 1
            else:
                placemnt = int(i -1)
            mylar.COMICSORT['LastOrderNo'] = placemnt
            mylar.COMICSORT['LastOrderID'] = mylar.COMICSORT['SortOrder'][placemnt]['ComicID']
            return
    else:
        # for new series adds, we already know the comicid, so we set the sortorder to an abnormally high #
        # we DO NOT write to the db to avoid record-locking.
        # if we get 2 999's we're in trouble though.
        sortedapp = []
        # NOTE(review): LastOrderNo appears to be stored as an int elsewhere,
        # so this string comparison against '999' is presumably never true
        # and lastorderval is always 999 -- TODO confirm against callers.
        if comicorder['LastOrderNo'] == '999':
            lastorderval = int(comicorder['LastOrderNo']) + 1
        else:
            lastorderval = 999
        sortedapp.append({
            'ComicID': imported,
            'ComicOrder': lastorderval
            })
        mylar.COMICSORT['SortOrder'] = sortedapp
        mylar.COMICSORT['LastOrderNo'] = lastorderval
        mylar.COMICSORT['LastOrderID'] = imported
        return
def fullmonth(monthno):
    """Translate a numeric month (int or string, 1..12) to its English
    name; returns None for out-of-range values."""
    #simple numerical to worded month conversion....
    basmonths = {'1': 'January', '2': 'February', '3': 'March', '4': 'April', '5': 'May', '6': 'June', '7': 'July', '8': 'August', '9': 'September', '10': 'October', '11': 'November', '12': 'December'}
    monthconv = None
    # normalise '07' / 7 / '7' to the bare-digit dict key
    key = str(int(monthno))
    if key in basmonths:
        monthconv = basmonths[key]
    return monthconv
def updateComicLocation():
    """One-time mass update of every series' ComicLocation in the comics table.

    Driven by config: LOCMOVE=1 arms the run for the next startup and
    NEWCOM_DIR supplies the new base directory.  When FFTONEWCOM_DIR is
    enabled the configured FOLDER_FORMAT is re-applied to build each new
    path; otherwise only the DESTINATION_DIR prefix of the stored path is
    swapped for NEWCOM_DIR.  Resets LOCMOVE to 0 when finished so the
    migration does not repeat.
    """
    #in order for this to work, the ComicLocation MUST be left at the original location.
    #in the config.ini - set LOCMOVE = 1 (to enable this to run on the NEXT startup)
    #                  - set NEWCOMDIR = new ComicLocation
    #after running, set ComicLocation to new location in Configuration GUI
    #import db
    myDB = db.DBConnection()
    if mylar.CONFIG.NEWCOM_DIR is not None:
        logger.info('Performing a one-time mass update to Comic Location')
        #create the root dir if it doesn't exist
        checkdirectory = mylar.filechecker.validateAndCreateDirectory(mylar.CONFIG.NEWCOM_DIR, create=True)
        if not checkdirectory:
            logger.warn('Error trying to validate/create directory. Aborting this process at this time.')
            return
        dirlist = myDB.select("SELECT * FROM comics")
        comloc = []
        if dirlist is not None:
            for dl in dirlist:
                u_comicnm = dl['ComicName']
                # let's remove the non-standard characters here that will break filenaming / searching.
                comicname_folder = filesafe(u_comicnm)
                publisher = re.sub('!', '', dl['ComicPublisher']) # thanks Boom!
                year = dl['ComicYear']
                # a manual Corrected_Type overrides the stored Type for the $Type token
                if dl['Corrected_Type'] is not None:
                    booktype = dl['Corrected_Type']
                else:
                    booktype = dl['Type']
                if booktype == 'Print' or all([booktype != 'Print', mylar.CONFIG.FORMAT_BOOKTYPE is False]):
                    # plain print editions drop the $Type token, then collapse
                    # the leftover doubled whitespace.
                    chunk_fb = re.sub('\$Type', '', mylar.CONFIG.FOLDER_FORMAT)
                    chunk_b = re.compile(r'\s+')
                    chunk_folder_format = chunk_b.sub(' ', chunk_fb)
                else:
                    chunk_folder_format = mylar.CONFIG.FOLDER_FORMAT
                comversion = dl['ComicVersion']
                if comversion is None:
                    comversion = 'None'
                #if comversion is None, remove it so it doesn't populate with 'None'
                if comversion == 'None':
                    chunk_f_f = re.sub('\$VolumeN', '', mylar.CONFIG.FOLDER_FORMAT)
                    chunk_f = re.compile(r'\s+')
                    folderformat = chunk_f.sub(' ', chunk_f_f)
                else:
                    folderformat = mylar.CONFIG.FOLDER_FORMAT
                #do work to generate folder path
                values = {'$Series': comicname_folder,
                          '$Publisher': publisher,
                          '$Year': year,
                          '$series': comicname_folder.lower(),
                          '$publisher': publisher.lower(),
                          '$VolumeY': 'V' + str(year),
                          '$VolumeN': comversion,
                          '$Annual': 'Annual',
                          '$Type': booktype
                          }
                #set the paths here with the seperator removed allowing for cross-platform altering.
                ccdir = re.sub(r'[\\|/]', '%&', mylar.CONFIG.NEWCOM_DIR)
                ddir = re.sub(r'[\\|/]', '%&', mylar.CONFIG.DESTINATION_DIR)
                dlc = re.sub(r'[\\|/]', '%&', dl['ComicLocation'])
                if mylar.CONFIG.FFTONEWCOM_DIR:
                    #if this is enabled (1) it will apply the Folder_Format to all the new dirs
                    if mylar.CONFIG.FOLDER_FORMAT == '':
                        comlocation = re.sub(ddir, ccdir, dlc).strip()
                    else:
                        first = replace_all(folderformat, values)
                        if mylar.CONFIG.REPLACE_SPACES:
                            #mylar.CONFIG.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
                            first = first.replace(' ', mylar.CONFIG.REPLACE_CHAR)
                        comlocation = os.path.join(mylar.CONFIG.NEWCOM_DIR, first).strip()
                else:
                    #DESTINATION_DIR = /mnt/mediavg/Comics
                    #NEWCOM_DIR = /mnt/mediavg/Comics/Comics-1
                    #dl['ComicLocation'] = /mnt/mediavg/Comics/Batman-(2011)
                    comlocation = re.sub(ddir, ccdir, dlc).strip()
                #regenerate the new path location so that it's os.dependent now.
                # NOTE(review): os.sep.encode('unicode-escape') is Python 2
                # behavior - it doubles a backslash separator so it survives
                # as a re.sub replacement string; under Python 3 .encode
                # returns bytes and this line would fail. TODO confirm.
                com_done = re.sub('%&', os.sep.encode('unicode-escape'), comlocation).strip()
                comloc.append({"comlocation": com_done,
                               "origlocation": dl['ComicLocation'],
                               "comicid": dl['ComicID']})
            if len(comloc) > 0:
                #give the information about what we're doing.
                if mylar.CONFIG.FFTONEWCOM_DIR:
                    logger.info('FFTONEWCOM_DIR is enabled. Applying the existing folder format to ALL directories regardless of existing location paths')
                else:
                    logger.info('FFTONEWCOM_DIR is not enabled. I will keep existing subdirectory paths, and will only change the actual Comic Location in the path.')
                    logger.fdebug(' (ie. /mnt/Comics/Marvel/Hush-(2012) to /mnt/mynewLocation/Marvel/Hush-(2012) ')
                #do the deed.
                for cl in comloc:
                    ctrlVal = {"ComicID": cl['comicid']}
                    newVal = {"ComicLocation": cl['comlocation']}
                    myDB.upsert("Comics", newVal, ctrlVal)
                    logger.fdebug('Updated : ' + cl['origlocation'] + ' .: TO :. ' + cl['comlocation'])
                logger.info('Updated ' + str(len(comloc)) + ' series to a new Comic Location as specified in the config.ini')
            else:
                logger.fdebug('Failed in updating the Comic Locations. Check Folder Format string and/or log the issue.')
        else:
            logger.info('There are no series in your watchlist to Update the locations. Not updating anything at this time.')
        #set the value to 0 here so we don't keep on doing this...
        mylar.CONFIG.LOCMOVE = 0
        #mylar.config_write()
    else:
        logger.info('No new ComicLocation path specified - not updating. Set NEWCOMD_DIR in config.ini')
        #raise cherrypy.HTTPRedirect("config")
    return
def cleanhtml(raw_html):
    """Strip unwanted HTML tags from raw_html via BeautifulSoup and return
    the rendered document contents.

    NOTE(review): the loop iterates only over <p> tags, and 'p' is listed
    in VALID_TAGS, so the replaceWith branch can never fire as written -
    presumably other tag names were intended; confirm before relying on
    any stripping here.  (Python 2 code: print statement below.)
    """
    #cleanr = re.compile('<.*?>')
    #cleantext = re.sub(cleanr, '', raw_html)
    #return cleantext
    from bs4 import BeautifulSoup
    VALID_TAGS = ['div', 'p']
    soup = BeautifulSoup(raw_html, "html.parser")
    for tag in soup.findAll('p'):
        if tag.name not in VALID_TAGS:
            tag.replaceWith(tag.renderContents())
    flipflop = soup.renderContents()
    print flipflop
    return flipflop
def issuedigits(issnum):
    """Normalize an issue number into a sortable integer.

    Whole issues are multiplied by 1000 (#5 -> 5000) so decimals can fill
    the gap (5.5 -> 5500); known alpha suffixes (AU, AI, INH, MU, HU, or a
    trailing letter) are folded in via ord() sums; unicode fractions
    (1/2, 1/4, 3/4) map to .5/.25/.75; negative issues get -1.  Values
    that cannot be parsed return the sentinels 9999999999 or
    999999999999999, which sort last.

    NOTE(review): Python 2 code - relies on str.decode and the `unicode`
    type below.
    """
    #import db
    int_issnum = None
    try:
        tst = issnum.isdigit()
    except:
        # non-string input (e.g. int/float) - coerce to str or bail out
        try:
            isstest = str(issnum)
            tst = isstest.isdigit()
        except:
            return 9999999999
        else:
            issnum = str(issnum)
    if issnum.isdigit():
        int_issnum = int(issnum) * 1000
    else:
        #count = 0
        #for char in issnum:
        #    if char.isalpha():
        #        count += 1
        #if count > 5:
        #    logger.error('This is not an issue number - not enough numerics to parse')
        #    int_issnum = 999999999999999
        #    return int_issnum
        try:
            if 'au' in issnum.lower() and issnum[:1].isdigit():
                int_issnum = (int(issnum[:-2]) * 1000) + ord('a') + ord('u')
            elif 'ai' in issnum.lower() and issnum[:1].isdigit():
                int_issnum = (int(issnum[:-2]) * 1000) + ord('a') + ord('i')
            elif 'inh' in issnum.lower() or 'now' in issnum.lower():
                # NOTE(review): because this branch also matches 'now', the
                # dedicated 'now' elif below is unreachable and NOW issues
                # receive the INH ord() arithmetic (and skip the '!'
                # stripping) - confirm whether that was intended.
                remdec = issnum.find('.') #find the decimal position.
                if remdec == -1:
                    #if no decimal, it's all one string
                    #remove the last 3 characters from the issue # (INH)
                    int_issnum = (int(issnum[:-3]) * 1000) + ord('i') + ord('n') + ord('h')
                else:
                    int_issnum = (int(issnum[:-4]) * 1000) + ord('i') + ord('n') + ord('h')
            elif 'now' in issnum.lower():
                if '!' in issnum: issnum = re.sub('\!', '', issnum)
                remdec = issnum.find('.') #find the decimal position.
                if remdec == -1:
                    #if no decimal, it's all one string
                    #remove the last 3 characters from the issue # (NOW)
                    int_issnum = (int(issnum[:-3]) * 1000) + ord('n') + ord('o') + ord('w')
                else:
                    int_issnum = (int(issnum[:-4]) * 1000) + ord('n') + ord('o') + ord('w')
            elif 'mu' in issnum.lower():
                remdec = issnum.find('.')
                if remdec == -1:
                    int_issnum = (int(issnum[:-2]) * 1000) + ord('m') + ord('u')
                else:
                    int_issnum = (int(issnum[:-3]) * 1000) + ord('m') + ord('u')
            elif 'hu' in issnum.lower():
                remdec = issnum.find('.') #find the decimal position.
                if remdec == -1:
                    int_issnum = (int(issnum[:-2]) * 1000) + ord('h') + ord('u')
                else:
                    int_issnum = (int(issnum[:-3]) * 1000) + ord('h') + ord('u')
        except ValueError as e:
            logger.error('[' + issnum + '] Unable to properly determine the issue number. Error: %s', e)
            return 9999999999
        if int_issnum is not None:
            return int_issnum
        #try:
        #    issnum.decode('ascii')
        #    logger.fdebug('ascii character.')
        #except:
        #    logger.fdebug('Unicode character detected: ' + issnum)
        #else: issnum.decode(mylar.SYS_ENCODING).decode('utf-8')
        # Python 2: promote byte strings to unicode before fraction lookup
        if type(issnum) == str:
            try:
                issnum = issnum.decode('utf-8')
            except:
                issnum = issnum.decode('windows-1252')
        # vals maps the unicode 1/2, 1/4, 3/4 glyphs and the infinity /
        # a-circumflex glyphs to their numeric stand-ins
        if type(issnum) == unicode:
            vals = {u'\xbd':.5,u'\xbc':.25,u'\xbe':.75,u'\u221e':9999999999,u'\xe2':9999999999}
        else:
            vals = {'\xbd':.5,'\xbc':.25,'\xbe':.75,'\u221e':9999999999,'\xe2':9999999999}
        x = [vals[key] for key in vals if key in issnum]
        if x:
            chk = re.sub('[^0-9]', '', issnum).strip()
            if len(chk) == 0:
                int_issnum = x[0] * 1000
            else:
                int_issnum = (int(re.sub('[^0-9]', '', issnum).strip()) + x[0]) * 1000
            #logger.fdebug('int_issnum: ' + str(int_issnum))
        else:
            if any(['.' in issnum, ',' in issnum]):
                #logger.fdebug('decimal detected.')
                if ',' in issnum: issnum = re.sub(',', '.', issnum)
                issst = str(issnum).find('.')
                if issst == 0:
                    issb4dec = 0
                else:
                    issb4dec = str(issnum)[:issst]
                decis = str(issnum)[issst +1:]
                # scale the fractional part into the x10 slot of the
                # thousand-based encoding (".5" -> 50, ".25" -> 25)
                if len(decis) == 1:
                    decisval = int(decis) * 10
                    issaftdec = str(decisval)
                elif len(decis) == 2:
                    decisval = int(decis)
                    issaftdec = str(decisval)
                else:
                    decisval = decis
                    issaftdec = str(decisval)
                #if there's a trailing decimal (ie. 1.50.) and it's either intentional or not, blow it away.
                if issaftdec[-1:] == '.':
                    issaftdec = issaftdec[:-1]
                try:
                    int_issnum = (int(issb4dec) * 1000) + (int(issaftdec) * 10)
                except ValueError:
                    #logger.fdebug('This has no issue # for me to get - Either a Graphic Novel or one-shot.')
                    int_issnum = 999999999999999
            else:
                try:
                    x = float(issnum)
                    #logger.info(x)
                    #validity check
                    if x < 0:
                        #logger.info("I've encountered a negative issue #: " + str(issnum) + ". Trying to accomodate.")
                        int_issnum = (int(x) *1000) - 1
                    elif bool(x):
                        logger.fdebug('Infinity issue found.')
                        int_issnum = 9999999999 * 1000
                    else: raise ValueError
                except ValueError, e:
                    #this will account for any alpha in a issue#, so long as it doesn't have decimals.
                    x = 0
                    tstord = None
                    issno = None
                    invchk = "false"
                    if issnum.lower() != 'preview':
                        # scan for the first alpha char: everything before it
                        # is the numeric part, everything from it on is the
                        # suffix to fold in via ord()
                        while (x < len(issnum)):
                            if issnum[x].isalpha():
                                #take first occurance of alpha in string and carry it through
                                tstord = issnum[x:].rstrip()
                                tstord = re.sub('[\-\,\.\+]', '', tstord).rstrip()
                                issno = issnum[:x].rstrip()
                                issno = re.sub('[\-\,\.\+]', '', issno).rstrip()
                                try:
                                    isschk = float(issno)
                                except ValueError, e:
                                    if len(issnum) == 1 and issnum.isalpha():
                                        break
                                    logger.fdebug('[' + issno + '] Invalid numeric for issue - cannot be found. Ignoring.')
                                    issno = None
                                    tstord = None
                                    invchk = "true"
                                    break
                            x+=1
                    if tstord is not None and issno is not None:
                        a = 0
                        ordtot = 0
                        if len(issnum) == 1 and issnum.isalpha():
                            int_issnum = ord(tstord.lower())
                        else:
                            while (a < len(tstord)):
                                ordtot += ord(tstord[a].lower()) #lower-case the letters for simplicty
                                a+=1
                            int_issnum = (int(issno) * 1000) + ordtot
                    elif invchk == "true":
                        # seasonal "issue numbers" become a plain ord() sum
                        if any([issnum.lower() == 'fall', issnum.lower() == 'spring', issnum.lower() == 'summer', issnum.lower() == 'winter']):
                            inu = 0
                            ordtot = 0
                            while (inu < len(issnum)):
                                ordtot += ord(issnum[inu].lower()) #lower-case the letters for simplicty
                                inu+=1
                            int_issnum = ordtot
                        else:
                            logger.fdebug('this does not have an issue # that I can parse properly.')
                            return 999999999999999
                    else:
                        # hard-coded corrections for known-bad entries
                        if issnum == '9-5':
                            issnum = u'9\xbd'
                            logger.fdebug('issue: 9-5 is an invalid entry. Correcting to : ' + issnum)
                            int_issnum = (9 * 1000) + (.5 * 1000)
                        elif issnum == '112/113':
                            int_issnum = (112 * 1000) + (.5 * 1000)
                        elif issnum == '14-16':
                            int_issnum = (15 * 1000) + (.5 * 1000)
                        elif issnum.lower() == 'preview':
                            inu = 0
                            ordtot = 0
                            while (inu < len(issnum)):
                                ordtot += ord(issnum[inu].lower()) #lower-case the letters for simplicty
                                inu+=1
                            int_issnum = ordtot
                        else:
                            logger.error(issnum + ' this has an alpha-numeric in the issue # which I cannot account for.')
                            return 999999999999999
    return int_issnum
def checkthepub(ComicID):
    """Return the publisher-based wait interval for a series.

    Major publishers (Marvel / DC / Dark Horse) get the BIGGIE_PUB value;
    everything else - including an unknown series - gets INDIE_PUB
    (unknown falls back to BIGGIE_PUB with a debug note).
    """
    #import db
    myDB = db.DBConnection()
    majors = ('marvel', 'dc', 'darkhorse')
    series = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [ComicID]).fetchone()
    if series is None:
        logger.fdebug('No publisher information found to aid in determining series..defaulting to base check of 55 days.')
        return mylar.CONFIG.BIGGIE_PUB
    pubname = series['ComicPublisher'].lower()
    if any(major in pubname for major in majors):
        #logger.fdebug('Biggie publisher detected - ' + series['ComicPublisher'])
        return mylar.CONFIG.BIGGIE_PUB
    #logger.fdebug('Indie publisher detected - ' + series['ComicPublisher'])
    return mylar.CONFIG.INDIE_PUB
def annual_update():
    """Populate the ComicName column of every annuals row from its parent
    series entry in the comics table."""
    #import db
    myDB = db.DBConnection()
    annuallist = myDB.select('SELECT * FROM annuals')
    if annuallist is None:
        logger.info('no annuals to update.')
        return
    # First pass: read-only gathering of the parent series names.
    updates = []
    for annual in annuallist:
        series = myDB.selectone('SELECT * FROM comics WHERE ComicID=?', [annual['ComicID']]).fetchone()
        updates.append({'ComicID': annual['ComicID'],
                        'ComicName': series['ComicName']})
    # Second pass: write the gathered names back (kept separate to avoid db locks).
    count = 0
    for entry in updates:
        myDB.upsert("annuals", {"ComicName": entry['ComicName']}, {"ComicID": entry['ComicID']})
        count += 1
    logger.info(str(count) + ' series have been updated in the annuals table.')
    return
def replacetheslash(data):
    """Normalize Windows backslash path separators to forward slashes.

    os.path.join produces '\\' separators on Windows which do not resolve
    when served through cherrypy (IE/FF), so swap them; on any other
    platform the string is returned untouched.
    """
    if platform.system() != "Windows":
        return data
    return data.replace('\\', '/')
def urlretrieve(urlfile, fpath):
    """Stream the contents of an open url/file-like object to fpath.

    urlfile: object with a .read(size) method returning bytes (e.g. a
             urllib response).
    fpath:   destination path; overwritten if it already exists.
    """
    chunk = 4096
    # Binary mode + context manager: the original opened in text mode and
    # never closed the handle, which leaks the descriptor and corrupts
    # binary payloads on Windows via newline translation.
    with open(fpath, "wb") as f:
        while 1:
            data = urlfile.read(chunk)
            if not data:
                print("done.")
                break
            f.write(data)
            print("Read %s bytes" % len(data))
def renamefile_readingorder(readorder):
    """Zero-pad a reading-order number to three digits for filename sorting
    (7 -> '007', 42 -> '042', 99 -> '099', 100 -> '100')."""
    logger.fdebug('readingorder#: ' + str(readorder))
    if int(readorder) < 10:
        readord = "00" + str(readorder)
    elif 10 <= int(readorder) <= 99:
        # BUGFIX: the upper bound was `< 99`, which left 99 padded to only
        # two digits ('99') and sorting after '100' lexicographically.
        readord = "0" + str(readorder)
    else:
        readord = str(readorder)
    return readord
def latestdate_fix():
    """Repair malformed LatestDate values (and backfill missing
    ComicName_Filesafe) for every series in the comics table.

    Dates missing the day component get '-01' appended, handling both
    mm-yyyy and yyyy-mm layouts; anything that raises during inspection is
    reset to '0000-00-00'.  Fixes are collected first and written in a
    second pass to avoid db locks.
    """
    #import db
    datefix = []
    cnupdate = []
    myDB = db.DBConnection()
    comiclist = myDB.select('SELECT * FROM comics')
    if comiclist is None:
        logger.fdebug('No Series in watchlist to correct latest date')
        return
    for cl in comiclist:
        if cl['ComicName_Filesafe'] is None:
            cnupdate.append({"comicid": cl['ComicID'],
                             "comicname_filesafe": filesafe(cl['ComicName'])})
        latestdate = cl['LatestDate']
        #logger.fdebug("latestdate: " + str(latestdate))
        try:
            # a full yyyy-mm-dd date has content at index 8+; empty means
            # the day component is missing
            if latestdate[8:] == '':
                #logger.fdebug("invalid date " + str(latestdate) + " appending 01 for day to avoid errors")
                if len(latestdate) <= 7:
                    finddash = latestdate.find('-')
                    #logger.info('dash found at position ' + str(finddash))
                    if finddash != 4: #format of mm-yyyy
                        lat_month = latestdate[:finddash]
                        lat_year = latestdate[finddash +1:]
                    else: #format of yyyy-mm
                        lat_month = latestdate[finddash +1:]
                        lat_year = latestdate[:finddash]
                    latestdate = (lat_year) + '-' + str(lat_month) + '-01'
                    datefix.append({"comicid": cl['ComicID'],
                                    "latestdate": latestdate})
                    #logger.info('latest date: ' + str(latestdate))
        except:
            # anything unsliceable / unparseable gets a sentinel date
            datefix.append({"comicid": cl['ComicID'],
                            "latestdate": '0000-00-00'})
    #now we fix.
    if len(datefix) > 0:
        logger.info('Preparing to correct/fix ' + str(len(datefix)) + ' series that have incorrect values given for the Latest Date field.')
        for df in datefix:
            newCtrl = {"ComicID": df['comicid']}
            newVal = {"LatestDate": df['latestdate']}
            myDB.upsert("comics", newVal, newCtrl)
    if len(cnupdate) > 0:
        logger.info('Preparing to update ' + str(len(cnupdate)) + ' series on your watchlist for use with non-ascii characters')
        for cn in cnupdate:
            newCtrl = {"ComicID": cn['comicid']}
            newVal = {"ComicName_Filesafe": cn['comicname_filesafe']}
            myDB.upsert("comics", newVal, newCtrl)
    return
def upgrade_dynamic():
    """Backfill the DynamicComicName column for every row in the comics and
    storyarcs tables, then set DYNAMIC_UPDATE to 4 and persist the config.

    The dynamic name is the series name run through FileChecker's
    dynamic_replace, lowercased, with pipes and whitespace stripped.
    """
    #import db
    dynamic_comiclist = []
    myDB = db.DBConnection()
    #update the comicdb to include the Dynamic Names (and any futher changes as required)
    clist = myDB.select('SELECT * FROM Comics')
    for cl in clist:
        cl_d = mylar.filechecker.FileChecker(watchcomic=cl['ComicName'])
        cl_dyninfo = cl_d.dynamic_replace(cl['ComicName'])
        dynamic_comiclist.append({'DynamicComicName': re.sub('[\|\s]','', cl_dyninfo['mod_seriesname'].lower()).strip(),
                                  'ComicID': cl['ComicID']})
    if len(dynamic_comiclist) > 0:
        for dl in dynamic_comiclist:
            CtrlVal = {"ComicID": dl['ComicID']}
            newVal = {"DynamicComicName": dl['DynamicComicName']}
            myDB.upsert("Comics", newVal, CtrlVal)
    #update the storyarcsdb to include the Dynamic Names (and any futher changes as required)
    dynamic_storylist = []
    rlist = myDB.select('SELECT * FROM storyarcs WHERE StoryArcID is not NULL')
    for rl in rlist:
        rl_d = mylar.filechecker.FileChecker(watchcomic=rl['ComicName'])
        # BUGFIX: this previously called cl_d.dynamic_replace - the
        # FileChecker left over from the comics loop above - leaving the
        # per-row rl_d unused and raising NameError when the comics table
        # was empty. Use this row's own checker.
        rl_dyninfo = rl_d.dynamic_replace(rl['ComicName'])
        dynamic_storylist.append({'DynamicComicName': re.sub('[\|\s]','', rl_dyninfo['mod_seriesname'].lower()).strip(),
                                  'IssueArcID': rl['IssueArcID']})
    if len(dynamic_storylist) > 0:
        for ds in dynamic_storylist:
            CtrlVal = {"IssueArcID": ds['IssueArcID']}
            newVal = {"DynamicComicName": ds['DynamicComicName']}
            myDB.upsert("storyarcs", newVal, CtrlVal)
    logger.info('Finished updating ' + str(len(dynamic_comiclist)) + ' / ' + str(len(dynamic_storylist)) + ' entries within the db.')
    mylar.CONFIG.DYNAMIC_UPDATE = 4
    mylar.CONFIG.writeconfig()
    return
def checkFolder(folderpath=None):
    """Run post-processing over a folder of snatched downloads.

    folderpath: explicit folder to process directly; when None, the
    configured CHECK_FOLDER is scanned for newly snatched files instead.
    """
    from mylar import PostProcessor
    queue = Queue.Queue()
    #monitor a selected folder for 'snatched' files that haven't been processed
    if folderpath is not None:
        logger.info('Submitted folder ' + folderpath + ' for direct folder post-processing')
        path = folderpath
    else:
        logger.info('Checking folder ' + mylar.CONFIG.CHECK_FOLDER + ' for newly snatched downloads')
        path = mylar.CONFIG.CHECK_FOLDER
    processor = PostProcessor.PostProcessor('Manual Run', path, queue=queue)
    processor.Process()
    return
def LoadAlternateSearchNames(seriesname_alt, comicid):
    """Parse a series' '##'-delimited AlternateSearch string into a dict:
    {'AlternateName': [{'AlternateName': name}, ...],
     'ComicID': comicid, 'Count': n}.

    Returns the literal string "no results" when no alternates are stored.

    NOTE(review): `chkthealt == 0` compares a list against an int and is
    always False (str.split never returns 0) - presumably
    `len(chkthealt) == 0` was intended; the loop below happens to cover
    that case anyway since split always yields at least one element.
    The .encode('ascii', 'ignore') call is Python 2 semantics (returns
    str); under Python 3 it would yield bytes and break the re.sub.
    """
    #seriesname_alt = db.comics['AlternateSearch']
    AS_Alt = []
    Alternate_Names = {}
    alt_count = 0
    #logger.fdebug('seriesname_alt:' + str(seriesname_alt))
    if seriesname_alt is None or seriesname_alt == 'None':
        return "no results"
    else:
        chkthealt = seriesname_alt.split('##')
        if chkthealt == 0:
            AS_Alternate = seriesname_alt
            AS_Alt.append(seriesname_alt)
        for calt in chkthealt:
            AS_Alter = re.sub('##', '', calt)
            u_altsearchcomic = AS_Alter.encode('ascii', 'ignore').strip()
            AS_formatrem_seriesname = re.sub('\s+', ' ', u_altsearchcomic)
            if AS_formatrem_seriesname[:1] == ' ': AS_formatrem_seriesname = AS_formatrem_seriesname[1:]
            AS_Alt.append({"AlternateName": AS_formatrem_seriesname})
            alt_count+=1
        Alternate_Names['AlternateName'] = AS_Alt
        Alternate_Names['ComicID'] = comicid
        Alternate_Names['Count'] = alt_count
        logger.info('AlternateNames returned:' + str(Alternate_Names))
        return Alternate_Names
def havetotals(refreshit=None):
    """Build the per-series summary rows used by the watchlist view.

    refreshit: when given a ComicID, only that series' have/total counts
    are checked - returns True when Have > Total (forcing a recheck by the
    updater) and False otherwise.  When None, returns a list of dicts, one
    per series, with counts, percent-complete, and a derived recentstatus
    (Loading / Unknown / Continuing / Ended).
    """
    #import db
    comics = []
    myDB = db.DBConnection()
    if refreshit is None:
        if mylar.CONFIG.ANNUALS_ON:
            comiclist = myDB.select('SELECT comics.*, COUNT(totalAnnuals.IssueID) AS TotalAnnuals FROM comics LEFT JOIN annuals as totalAnnuals on totalAnnuals.ComicID = comics.ComicID GROUP BY comics.ComicID order by comics.ComicSortName COLLATE NOCASE')
        else:
            comiclist = myDB.select('SELECT * FROM comics GROUP BY ComicID order by ComicSortName COLLATE NOCASE')
    else:
        comiclist = []
        comicref = myDB.selectone('SELECT comics.ComicID AS ComicID, comics.Have AS Have, comics.Total as Total, COUNT(totalAnnuals.IssueID) AS TotalAnnuals FROM comics LEFT JOIN annuals as totalAnnuals on totalAnnuals.ComicID = comics.ComicID WHERE comics.ComicID=? GROUP BY comics.ComicID', [refreshit]).fetchone()
        #refreshit is the ComicID passed from the Refresh Series to force/check numerical have totals
        comiclist.append({"ComicID": comicref['ComicID'],
                          "Have": comicref['Have'],
                          "Total": comicref['Total'],
                          "TotalAnnuals": comicref['TotalAnnuals']})
    for comic in comiclist:
        #--not sure about this part
        #if comic['Total'] is None:
        #    if refreshit is not None:
        #        logger.fdebug(str(comic['ComicID']) + ' has no issuedata available. Forcing complete Refresh/Rescan')
        #        return True
        #    else:
        #        continue
        try:
            totalissues = comic['Total']
            # if mylar.CONFIG.ANNUALS_ON:
            #     totalissues += comic['TotalAnnuals']
            haveissues = comic['Have']
        except TypeError:
            logger.warning('[Warning] ComicID: ' + str(comic['ComicID']) + ' is incomplete - Removing from DB. You should try to re-add the series.')
            myDB.action("DELETE from COMICS WHERE ComicID=? AND ComicName LIKE 'Comic ID%'", [comic['ComicID']])
            myDB.action("DELETE from ISSUES WHERE ComicID=? AND ComicName LIKE 'Comic ID%'", [comic['ComicID']])
            continue
        if not haveissues:
            # NOTE(review): `havetracks` is never read again - presumably
            # `haveissues = 0` was intended; as written, a None haveissues
            # falls through to the TypeError handler of the percent
            # calculation below. TODO confirm.
            havetracks = 0
        if refreshit is not None:
            if haveissues > totalissues:
                return True # if it's 5/4, send back to updater and don't restore previous status'
            else:
                return False # if it's 5/5 or 4/5, send back to updater and restore previous status'
        try:
            percent = (haveissues *100.0) /totalissues
            if percent > 100:
                percent = 101
        except (ZeroDivisionError, TypeError):
            percent = 0
            totalissues = '?'
        if comic['LatestDate'] is None:
            logger.warn(comic['ComicName'] + ' has not finished loading. Nulling some values so things display properly until they can populate.')
            recentstatus = 'Loading'
        elif comic['ComicPublished'] is None or comic['ComicPublished'] == '' or comic['LatestDate'] is None:
            recentstatus = 'Unknown'
        elif comic['ForceContinuing'] == 1:
            recentstatus = 'Continuing'
        elif 'present' in comic['ComicPublished'].lower() or (today()[:4] in comic['LatestDate']):
            latestdate = comic['LatestDate']
            #pull-list f'd up the date by putting '15' instead of '2015' causing 500 server errors
            if '-' in latestdate[:3]:
                st_date = latestdate.find('-')
                st_remainder = latestdate[st_date+1:]
                st_year = latestdate[:st_date]
                year = '20' + st_year
                latestdate = str(year) + '-' + str(st_remainder)
                #logger.fdebug('year set to: ' + latestdate)
            c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), 1)
            n_date = datetime.date.today()
            recentchk = (n_date - c_date).days
            if comic['NewPublish'] is True:
                recentstatus = 'Continuing'
            else:
                #do this just incase and as an extra measure of accuracy hopefully.
                if recentchk < 55:
                    recentstatus = 'Continuing'
                else:
                    recentstatus = 'Ended'
        else:
            recentstatus = 'Ended'
        if recentstatus == 'Loading':
            cpub = comic['ComicPublished']
        else:
            try:
                cpub = re.sub('(N)', '', comic['ComicPublished']).strip()
            except Exception as e:
                logger.warn('[Error: %s] No Publisher found for %s - you probably want to Refresh the series when you get a chance.' % (e, comic['ComicName']))
                cpub = None
        comics.append({"ComicID": comic['ComicID'],
                       "ComicName": comic['ComicName'],
                       "ComicSortName": comic['ComicSortName'],
                       "ComicPublisher": comic['ComicPublisher'],
                       "ComicYear": comic['ComicYear'],
                       "ComicImage": comic['ComicImage'],
                       "LatestIssue": comic['LatestIssue'],
                       "LatestDate": comic['LatestDate'],
                       "ComicPublished": cpub,
                       "Status": comic['Status'],
                       "recentstatus": recentstatus,
                       "percent": percent,
                       "totalissues": totalissues,
                       "haveissues": haveissues,
                       "DateAdded": comic['LastUpdated'],
                       "Type": comic['Type'],
                       "Corrected_Type": comic['Corrected_Type']})
    return comics
def filesafe(comic):
    """Return a filesystem-safe version of a series name: em-dashes become
    ' - ', the name is NFKD-normalized to ASCII (non-ASCII dropped), and
    characters that break filenames / searching are stripped (: ' " , ? !
    backslash) or replaced with '-' (/ *).

    NOTE(review): Python 2 semantics - .encode('ASCII', 'ignore') yields a
    str there; under Python 3 it yields bytes and the re.sub calls below
    would raise TypeError.
    """
    import unicodedata
    if u'\u2014' in comic:
        comic = re.sub(u'\u2014', ' - ', comic)
    try:
        u_comic = unicodedata.normalize('NFKD', comic).encode('ASCII', 'ignore').strip()
    except TypeError:
        # already a byte string on Python 2 - normalize() refuses it
        u_comic = comic.encode('ASCII', 'ignore').strip()
    comicname_filesafe = re.sub('[\:\'\"\,\?\!\\\]', '', u_comic)
    comicname_filesafe = re.sub('[\/\*]', '-', comicname_filesafe)
    return comicname_filesafe
def IssueDetails(filelocation, IssueID=None, justinfo=False):
    """Extract metadata (and optionally the cover image) from a .cbz file.

    filelocation: path to the .cbz archive.
    justinfo:     when False, the archive is copied into the cache dir and
                  a cover image is extracted to cache/temp.jpg; when True,
                  only the metadata is read.
    Metadata comes from ComicInfo.xml when present (issuetag 'xml'),
    otherwise from a ComicBookInfo blob stored in the zip comment field
    (issuetag 'comment').  Returns a single-element list of dicts with the
    parsed fields, or None on any failure.
    """
    import zipfile
    from xml.dom.minidom import parseString
    issuedetails = []
    issuetag = None
    if justinfo is False:
        dstlocation = os.path.join(mylar.CONFIG.CACHE_DIR, 'temp.zip')
        if filelocation.endswith('.cbz'):
            logger.fdebug('CBZ file detected. Checking for .xml within file')
            shutil.copy(filelocation, dstlocation)
        else:
            logger.fdebug('filename is not a cbz : ' + filelocation)
            return
        cover = "notfound"
        pic_extensions = ('.jpg','.png','.webp')
        modtime = os.path.getmtime(dstlocation)
        # tracks the lowest-numbered image as a cover fallback
        low_infile = 999999
        try:
            with zipfile.ZipFile(dstlocation, 'r') as inzipfile:
                for infile in sorted(inzipfile.namelist()):
                    tmp_infile = re.sub("[^0-9]","", infile).strip()
                    if tmp_infile == '':
                        pass
                    elif int(tmp_infile) < int(low_infile):
                        low_infile = tmp_infile
                        low_infile_name = infile
                    if infile == 'ComicInfo.xml':
                        logger.fdebug('Extracting ComicInfo.xml to display.')
                        dst = os.path.join(mylar.CONFIG.CACHE_DIR, 'ComicInfo.xml')
                        data = inzipfile.read(infile)
                        #print str(data)
                        issuetag = 'xml'
                    #looks for the first page and assumes it's the cover. (Alternate covers handled later on)
                    elif any(['000.' in infile, '00.' in infile]) and infile.endswith(pic_extensions) and cover == "notfound":
                        logger.fdebug('Extracting primary image ' + infile + ' as coverfile for display.')
                        local_file = open(os.path.join(mylar.CONFIG.CACHE_DIR, 'temp.jpg'), "wb")
                        local_file.write(inzipfile.read(infile))
                        # NOTE(review): `local_file.close` (here and in the
                        # branches below) references the method without
                        # calling it, so the handle is never explicitly
                        # closed - TODO confirm and add parentheses.
                        local_file.close
                        cover = "found"
                    elif any(['00a' in infile, '00b' in infile, '00c' in infile, '00d' in infile, '00e' in infile]) and infile.endswith(pic_extensions) and cover == "notfound":
                        logger.fdebug('Found Alternate cover - ' + infile + ' . Extracting.')
                        altlist = ('00a', '00b', '00c', '00d', '00e')
                        for alt in altlist:
                            if alt in infile:
                                local_file = open(os.path.join(mylar.CONFIG.CACHE_DIR, 'temp.jpg'), "wb")
                                local_file.write(inzipfile.read(infile))
                                local_file.close
                                cover = "found"
                                break
                    elif (any(['001.jpg' in infile, '001.png' in infile, '001.webp' in infile, '01.jpg' in infile, '01.png' in infile, '01.webp' in infile]) or all(['0001' in infile, infile.endswith(pic_extensions)]) or all(['01' in infile, infile.endswith(pic_extensions)])) and cover == "notfound":
                        logger.fdebug('Extracting primary image ' + infile + ' as coverfile for display.')
                        local_file = open(os.path.join(mylar.CONFIG.CACHE_DIR, 'temp.jpg'), "wb")
                        local_file.write(inzipfile.read(infile))
                        local_file.close
                        cover = "found"
                # fallback: no conventional cover name matched, use the
                # lowest-numbered file found during the scan
                if cover != "found":
                    logger.fdebug('Invalid naming sequence for jpgs discovered. Attempting to find the lowest sequence and will use as cover (it might not work). Currently : ' + str(low_infile))
                    local_file = open(os.path.join(mylar.CONFIG.CACHE_DIR, 'temp.jpg'), "wb")
                    logger.fdebug('infile_name used for displaying: %s' % low_infile_name)
                    local_file.write(inzipfile.read(low_infile_name))
                    local_file.close
                    cover = "found"
        except:
            logger.info('ERROR. Unable to properly retrieve the cover for displaying. It\'s probably best to re-tag this file.')
            return
        # modtime query-string busts the browser cache for the temp image
        ComicImage = os.path.join('cache', 'temp.jpg?' +str(modtime))
        IssueImage = replacetheslash(ComicImage)
    else:
        IssueImage = "None"
        try:
            with zipfile.ZipFile(filelocation, 'r') as inzipfile:
                for infile in sorted(inzipfile.namelist()):
                    if infile == 'ComicInfo.xml':
                        logger.fdebug('Found ComicInfo.xml - now retrieving information.')
                        data = inzipfile.read(infile)
                        issuetag = 'xml'
                        break
        except:
            logger.info('ERROR. Unable to properly retrieve the cover for displaying. It\'s probably best to re-tag this file.')
            return
    # no ComicInfo.xml found - fall back to the zip comment field
    if issuetag is None:
        data = None
        try:
            dz = zipfile.ZipFile(filelocation, 'r')
            data = dz.comment
        except:
            logger.warn('Unable to extract comment field from zipfile.')
            return
        else:
            if data:
                issuetag = 'comment'
            else:
                logger.warn('No metadata available in zipfile comment field.')
                return
    logger.info('Tag returned as being: ' + str(issuetag))
    #logger.info('data:' + str(data))
    if issuetag == 'xml':
        #import easy to use xml parser called minidom:
        dom = parseString(data)
        results = dom.getElementsByTagName('ComicInfo')
        # every field defaults to "None" (or 0 for pagecount) when the
        # element is absent or empty
        for result in results:
            try:
                issue_title = result.getElementsByTagName('Title')[0].firstChild.wholeText
            except:
                issue_title = "None"
            try:
                series_title = result.getElementsByTagName('Series')[0].firstChild.wholeText
            except:
                series_title = "None"
            try:
                series_volume = result.getElementsByTagName('Volume')[0].firstChild.wholeText
            except:
                series_volume = "None"
            try:
                issue_number = result.getElementsByTagName('Number')[0].firstChild.wholeText
            except:
                issue_number = "None"
            try:
                summary = result.getElementsByTagName('Summary')[0].firstChild.wholeText
            except:
                summary = "None"
            if '*List' in summary:
                summary_cut = summary.find('*List')
                summary = summary[:summary_cut]
            #check here to see if Covers exist as they will probably be misnamed when trying to determine the actual cover
            # (ie. 00a.jpg / 00d.jpg - when there's a Cover A or a Cover D listed)
            try:
                notes = result.getElementsByTagName('Notes')[0].firstChild.wholeText #IssueID is in here
            except:
                notes = "None"
            try:
                year = result.getElementsByTagName('Year')[0].firstChild.wholeText
            except:
                year = "None"
            try:
                month = result.getElementsByTagName('Month')[0].firstChild.wholeText
            except:
                month = "None"
            try:
                day = result.getElementsByTagName('Day')[0].firstChild.wholeText
            except:
                day = "None"
            try:
                writer = result.getElementsByTagName('Writer')[0].firstChild.wholeText
            except:
                writer = "None"
            try:
                penciller = result.getElementsByTagName('Penciller')[0].firstChild.wholeText
            except:
                penciller = "None"
            try:
                inker = result.getElementsByTagName('Inker')[0].firstChild.wholeText
            except:
                inker = "None"
            try:
                colorist = result.getElementsByTagName('Colorist')[0].firstChild.wholeText
            except:
                colorist = "None"
            try:
                letterer = result.getElementsByTagName('Letterer')[0].firstChild.wholeText
            except:
                letterer = "None"
            try:
                cover_artist = result.getElementsByTagName('CoverArtist')[0].firstChild.wholeText
            except:
                cover_artist = "None"
            try:
                editor = result.getElementsByTagName('Editor')[0].firstChild.wholeText
            except:
                editor = "None"
            try:
                publisher = result.getElementsByTagName('Publisher')[0].firstChild.wholeText
            except:
                publisher = "None"
            try:
                webpage = result.getElementsByTagName('Web')[0].firstChild.wholeText
            except:
                webpage = "None"
            try:
                pagecount = result.getElementsByTagName('PageCount')[0].firstChild.wholeText
            except:
                pagecount = 0
            #not used atm.
            #to validate a front cover if it's tagged as one within the zip (some do this)
            #i = 0
            #try:
            #    pageinfo = result.getElementsByTagName('Page')[0].attributes
            #    if pageinfo: pageinfo_test == True
            #except:
            #    pageinfo_test = False
            #if pageinfo_test:
            #    while (i < int(pagecount)):
            #        pageinfo = result.getElementsByTagName('Page')[i].attributes
            #        attrib = pageinfo.getNamedItem('Image')
            #        #logger.fdebug('Frontcover validated as being image #: ' + str(attrib.value))
            #        att = pageinfo.getNamedItem('Type')
            #        #logger.fdebug('pageinfo: ' + str(pageinfo))
            #        if att.value == 'FrontCover':
            #            #logger.fdebug('FrontCover detected. Extracting.')
            #            break
            #        i+=1
    elif issuetag == 'comment':
        logger.info('CBL Tagging.')
        # the comment may be prefixed with 'Archive: <path>' - strip it
        stripline = 'Archive: ' + filelocation
        data = re.sub(stripline, '', data.encode("utf-8")).strip()
        if data is None or data == '':
            return
        # the remaining comment is a python-literal ComicBookInfo payload
        import ast
        ast_data = ast.literal_eval(str(data))
        lastmodified = ast_data['lastModified']
        dt = ast_data['ComicBookInfo/1.0']
        try:
            publisher = dt['publisher']
        except:
            publisher = None
        try:
            year = dt['publicationYear']
        except:
            year = None
        try:
            month = dt['publicationMonth']
        except:
            month = None
        try:
            day = dt['publicationDay']
        except:
            day = None
        try:
            issue_title = dt['title']
        except:
            issue_title = None
        try:
            series_title = dt['series']
        except:
            series_title = None
        try:
            issue_number = dt['issue']
        except:
            issue_number = None
        try:
            summary = dt['comments']
        except:
            summary = "None"
        editor = "None"
        colorist = "None"
        artist = "None"
        writer = "None"
        letterer = "None"
        cover_artist = "None"
        penciller = "None"
        inker = "None"
        try:
            series_volume = dt['volume']
        except:
            series_volume = None
        try:
            t = dt['credits']
        except:
            editor = None
            colorist = None
            artist = None
            writer = None
            letterer = None
            cover_artist = None
            penciller = None
            inker = None
        else:
            # credits is a list of {'role': ..., 'person': ...} dicts;
            # multiple people in the same role are comma-joined
            for cl in dt['credits']:
                if cl['role'] == 'Editor':
                    if editor == "None": editor = cl['person']
                    else: editor += ', ' + cl['person']
                elif cl['role'] == 'Colorist':
                    if colorist == "None": colorist = cl['person']
                    else: colorist += ', ' + cl['person']
                elif cl['role'] == 'Artist':
                    if artist == "None": artist = cl['person']
                    else: artist += ', ' + cl['person']
                elif cl['role'] == 'Writer':
                    if writer == "None": writer = cl['person']
                    else: writer += ', ' + cl['person']
                elif cl['role'] == 'Letterer':
                    if letterer == "None": letterer = cl['person']
                    else: letterer += ', ' + cl['person']
                elif cl['role'] == 'Cover':
                    if cover_artist == "None": cover_artist = cl['person']
                    else: cover_artist += ', ' + cl['person']
                elif cl['role'] == 'Penciller':
                    if penciller == "None": penciller = cl['person']
                    else: penciller += ', ' + cl['person']
                elif cl['role'] == 'Inker':
                    if inker == "None": inker = cl['person']
                    else: inker += ', ' + cl['person']
        try:
            notes = dt['notes']
        except:
            notes = "None"
        try:
            webpage = dt['web']
        except:
            webpage = "None"
        try:
            pagecount = dt['pagecount']
        except:
            pagecount = "None"
    else:
        logger.warn('Unable to locate any metadata within cbz file. Tag this file and try again if necessary.')
        return
    issuedetails.append({"title": issue_title,
                         "series": series_title,
                         "volume": series_volume,
                         "issue_number": issue_number,
                         "summary": summary,
                         "notes": notes,
                         "year": year,
                         "month": month,
                         "day": day,
                         "writer": writer,
                         "penciller": penciller,
                         "inker": inker,
                         "colorist": colorist,
                         "letterer": letterer,
                         "cover_artist": cover_artist,
                         "editor": editor,
                         "publisher": publisher,
                         "webpage": webpage,
                         "pagecount": pagecount,
                         "IssueImage": IssueImage})
    return issuedetails
def get_issue_title(IssueID=None, ComicID=None, IssueNumber=None, IssueArcID=None):
    """Look up the title (IssueName) of an issue.

    Lookup order:
      * by IssueID against issues, then annuals;
      * otherwise by ComicID + IssueNumber against issues, then annuals;
      * finally by IssueArcID against the readlist.
    Returns the IssueName string, or None when no matching row is found.
    """
    #import db
    myDB = db.DBConnection()
    if IssueID:
        issue = myDB.selectone('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
        if issue is None:
            issue = myDB.selectone('SELECT * FROM annuals WHERE IssueID=?', [IssueID]).fetchone()
            if issue is None:
                logger.fdebug('Unable to locate given IssueID within the db. Assuming Issue Title is None.')
                return None
    else:
        issue = myDB.selectone('SELECT * FROM issues WHERE ComicID=? AND Int_IssueNumber=?', [ComicID, issuedigits(IssueNumber)]).fetchone()
        if issue is None:
            # BUGFIX: this fallback previously queried annuals WHERE
            # IssueID=? using IssueID, which is always None on this code
            # path, so the annuals lookup could never match. Query by
            # series + normalized issue number, mirroring the issues query.
            issue = myDB.selectone('SELECT * FROM annuals WHERE ComicID=? AND Int_IssueNumber=?', [ComicID, issuedigits(IssueNumber)]).fetchone()
            if issue is None:
                if IssueArcID:
                    issue = myDB.selectone('SELECT * FROM readlist WHERE IssueArcID=?', [IssueArcID]).fetchone()
                    if issue is None:
                        logger.fdebug('Unable to locate given IssueID within the db. Assuming Issue Title is None.')
                        return None
                else:
                    logger.fdebug('Unable to locate given IssueID within the db. Assuming Issue Title is None.')
                    return None
    return issue['IssueName']
def int_num(s):
    """Return *s* parsed as an int when possible, otherwise as a float."""
    try:
        value = int(s)
    except ValueError:
        value = float(s)
    return value
def listPull(weeknumber, year):
    """Return {ComicID: ComicID} for every series on the given week's pullist."""
    myDB = db.DBConnection()
    rows = myDB.select("SELECT ComicID FROM Weekly WHERE weeknumber=? AND year=?", [weeknumber, year])
    return {row['ComicID']: row['ComicID'] for row in rows}
def listLibrary(comicid=None):
    """Build a dict keyed by ComicID (and, when annuals are enabled, by any
    annual ReleaseComicID) mapping to {'comicid', 'status'} - for the whole
    library, or for a single series when *comicid* is given."""
    library = {}
    myDB = db.DBConnection()
    if comicid is None:
        if mylar.CONFIG.ANNUALS_ON is True:
            rows = myDB.select("SELECT a.comicid, b.releasecomicid, a.status FROM Comics AS a LEFT JOIN annuals AS b on a.comicid=b.comicid group by a.comicid")
        else:
            rows = myDB.select("SELECT comicid, status FROM Comics group by comicid")
    else:
        cid = re.sub('4050-', '', comicid).strip()
        if mylar.CONFIG.ANNUALS_ON is True:
            rows = myDB.select("SELECT a.comicid, b.releasecomicid, a.status FROM Comics AS a LEFT JOIN annuals AS b on a.comicid=b.comicid WHERE a.comicid=? group by a.comicid", [cid])
        else:
            rows = myDB.select("SELECT comicid, status FROM Comics WHERE comicid=? group by comicid", [cid])
    for row in rows:
        entry = {'comicid': row['ComicID'],
                 'status': row['Status']}
        library[row['ComicID']] = entry
        try:
            # annual rows also index the library by their ReleaseComicID
            if row['ReleaseComicID'] is not None:
                library[row['ReleaseComicID']] = dict(entry)
        except:
            # ReleaseComicID column absent when annuals support is off
            pass
    return library
def listStoryArcs():
    """Return {CV_ArcID: {'comicid': CV_ArcID}} for every distinct arc
    stored in the storyarcs table."""
    myDB = db.DBConnection()
    arcs = myDB.select("SELECT DISTINCT(CV_ArcID) FROM storyarcs")
    return {row['CV_ArcID']: {'comicid': row['CV_ArcID']} for row in arcs}
def listoneoffs(weeknumber, year):
    """Return the one-off issues from the given week's pullist that have
    already been Downloaded or Snatched.

    Bugfix: the WHERE clause previously read
    `weeknumber=? and year=? AND Status='Downloaded' OR Status='Snatched'`;
    since AND binds tighter than OR in SQL, Snatched rows from *any* week
    and year were returned. The OR is now parenthesized.
    """
    library = []
    myDB = db.DBConnection()
    # Get Distinct one-off issues from the pullist that have already been downloaded / snatched
    rows = myDB.select("SELECT DISTINCT(IssueID), Status, ComicID, ComicName, Status, IssueNumber FROM oneoffhistory WHERE weeknumber=? and year=? AND (Status='Downloaded' OR Status='Snatched')", [weeknumber, year])
    for row in rows:
        library.append({'IssueID': row['IssueID'],
                        'ComicID': row['ComicID'],
                        'ComicName': row['ComicName'],
                        'IssueNumber': row['IssueNumber'],
                        'Status': row['Status'],
                        'weeknumber': weeknumber,
                        'year': year})
    return library
def manualArc(issueid, reading_order, storyarcid):
    """Manually insert one issue (by ComicVine issue id) into an existing
    story arc, then resequence the arc's reading order so it stays gapless.

    issueid       -- ComicVine issue id, optionally prefixed with '4000-'
    reading_order -- desired slot in the arc; None drops it in the last spot
    storyarcid    -- StoryArcID / CV_ArcID of the arc being modified
    """
    #import db
    if issueid.startswith('4000-'):
        issueid = issueid[5:]
    myDB = db.DBConnection()
    # current, non-deleted members of the arc
    arc_chk = myDB.select("SELECT * FROM storyarcs WHERE StoryArcID=? AND NOT Manual is 'deleted'", [storyarcid])
    storyarcname = arc_chk[0]['StoryArc']
    storyarcissues = arc_chk[0]['TotalIssues']
    iss_arcids = []
    for issarc in arc_chk:
        iss_arcids.append({"IssueArcID": issarc['IssueArcID'],
                           "IssueID": issarc['IssueID'],
                           "Manual": issarc['Manual'],
                           "ReadingOrder": issarc['ReadingOrder']})
    # pull the issue's details from ComicVine ('M' prefix flags a manual add)
    arc_results = mylar.cv.getComic(comicid=None, type='issue', issueid=None, arcid=storyarcid, arclist='M' + str(issueid))
    arcval = arc_results['issuechoice'][0]
    comicname = arcval['ComicName']
    st_d = mylar.filechecker.FileChecker(watchcomic=comicname)
    st_dyninfo = st_d.dynamic_replace(comicname)
    dynamic_name = re.sub('[\|\s]','', st_dyninfo['mod_seriesname'].lower()).strip()
    issname = arcval['Issue_Name']
    issid = str(arcval['IssueID'])
    comicid = str(arcval['ComicID'])
    cidlist = str(comicid)
    st_issueid = None
    manual_mod = 'added'
    # Walk the existing members: detect a pre-existing entry for this issue
    # (reuse its IssueArcID/Manual flag) and bump every reading order at or
    # beyond the requested slot up by one to make room.
    new_readorder = []
    for aid in iss_arcids:
        if aid['IssueID'] == issid:
            logger.info('Issue already exists for storyarc [IssueArcID:' + aid['IssueArcID'] + '][Manual:' + aid['Manual'])
            st_issueid = aid['IssueArcID']
            manual_mod = aid['Manual']
        if reading_order is None:
            #if no reading order is given, drop in the last spot.
            reading_order = len(iss_arcids) + 1
        if int(aid['ReadingOrder']) >= int(reading_order):
            reading_seq = int(aid['ReadingOrder']) + 1
        else:
            reading_seq = int(aid['ReadingOrder'])
        new_readorder.append({'IssueArcID': aid['IssueArcID'],
                              'IssueID': aid['IssueID'],
                              'ReadingOrder': reading_seq})
    import random
    # brand-new entries get a synthetic IssueArcID of <arcid>_<random 4 digits>
    if st_issueid is None:
        st_issueid = str(storyarcid) + "_" + str(random.randint(1000,9999))
    issnum = arcval['Issue_Number']
    issdate = str(arcval['Issue_Date'])
    storedate = str(arcval['Store_Date'])
    int_issnum = issuedigits(issnum)
    # series-level details (year / publisher / volume) for the issue's series
    comicid_results = mylar.cv.getComic(comicid=None, type='comicyears', comicidlist=cidlist)
    seriesYear = 'None'
    issuePublisher = 'None'
    seriesVolume = 'None'
    if issname is None:
        IssueName = 'None'
    else:
        # issue titles are truncated to 70 characters
        IssueName = issname[:70]
    for cid in comicid_results:
        if cid['ComicID'] == comicid:
            seriesYear = cid['SeriesYear']
            issuePublisher = cid['Publisher']
            seriesVolume = cid['Volume']
            #assume that the arc is the same
            # NOTE(review): if no cid matches, storyarcpublisher is never
            # assigned and the newVals dict below would raise NameError -
            # presumably CV always returns the series; confirm.
            storyarcpublisher = issuePublisher
            break
    newCtrl = {"IssueID": issid,
               "StoryArcID": storyarcid}
    newVals = {"ComicID": comicid,
               "IssueArcID": st_issueid,
               "StoryArc": storyarcname,
               "ComicName": comicname,
               "Volume": seriesVolume,
               "DynamicComicName": dynamic_name,
               "IssueName": IssueName,
               "IssueNumber": issnum,
               "Publisher": storyarcpublisher,
               "TotalIssues": str(int(storyarcissues) +1),
               "ReadingOrder": int(reading_order), #arbitrarily set it to the last reading order sequence # just to see if it works.
               "IssueDate": issdate,
               "ReleaseDate": storedate,
               "SeriesYear": seriesYear,
               "IssuePublisher": issuePublisher,
               "CV_ArcID": storyarcid,
               "Int_IssueNumber": int_issnum,
               "Manual": manual_mod}
    myDB.upsert("storyarcs", newVals, newCtrl)
    #now we resequence the reading-order to accomdate the change.
    logger.info('Adding the new issue into the reading order & resequencing the order to make sure there are no sequence drops...')
    new_readorder.append({'IssueArcID': st_issueid,
                          'IssueID': issid,
                          'ReadingOrder': int(reading_order)})
    # renumber sequentially, closing any gaps left by the insertion
    newrl = 0
    for rl in sorted(new_readorder, key=itemgetter('ReadingOrder'), reverse=False):
        if rl['ReadingOrder'] - 1 != newrl:
            rorder = newrl + 1
            logger.fdebug(rl['IssueID'] + ' - changing reading order seq to : ' + str(rorder))
        else:
            rorder = rl['ReadingOrder']
            logger.fdebug(rl['IssueID'] + ' - setting reading order seq to : ' + str(rorder))
        rl_ctrl = {"IssueID": rl['IssueID'],
                   "IssueArcID": rl['IssueArcID'],
                   "StoryArcID": storyarcid}
        r1_new = {"ReadingOrder": rorder}
        newrl = rorder
        myDB.upsert("storyarcs", r1_new, rl_ctrl)
    #check to see if the issue exists already so we can set the status right away.
    iss_chk = myDB.selectone('SELECT * FROM issues where issueid = ?', [issueid]).fetchone()
    if iss_chk is None:
        logger.info('Issue is not currently in your watchlist. Setting status to Skipped')
        status_change = 'Skipped'
    else:
        status_change = iss_chk['Status']
        logger.info('Issue currently exists in your watchlist. Setting status to ' + status_change)
    myDB.upsert("storyarcs", {'Status': status_change}, newCtrl)
    return
def listIssues(weeknumber, year):
    """Return the watchlisted issues (and annuals, when ANNUALS_ON) that
    appear on the given week's pullist.

    Each entry carries ComicID / Status / IssueID / ComicName / Publisher /
    Issue_Number / IssueYear, where IssueYear is the store (release) date,
    falling back to the cover date when no release date exists.

    The previously-duplicated row-conversion loop for issues vs annuals is
    factored into a single local helper.
    """
    library = []
    myDB = db.DBConnection()

    def _collect(rows):
        # Shared row -> dict conversion; prefer the store date when present.
        for row in rows:
            if row['ReleaseDate'] is None:
                tmpdate = row['IssueDate']
            else:
                tmpdate = row['ReleaseDate']
            library.append({'ComicID': row['ComicID'],
                            'Status': row['Status'],
                            'IssueID': row['IssueID'],
                            'ComicName': row['ComicName'],
                            'Publisher': row['publisher'],
                            'Issue_Number': row['Issue_Number'],
                            'IssueYear': tmpdate})

    # Get individual issues
    _collect(myDB.select("SELECT issues.Status, issues.ComicID, issues.IssueID, issues.ComicName, issues.IssueDate, issues.ReleaseDate, weekly.publisher, issues.Issue_Number from weekly, issues where weekly.IssueID = issues.IssueID and weeknumber = ? and year = ?", [int(weeknumber), year]))
    # Add the annuals
    if mylar.CONFIG.ANNUALS_ON:
        _collect(myDB.select("SELECT annuals.Status, annuals.ComicID, annuals.ReleaseComicID, annuals.IssueID, annuals.ComicName, annuals.ReleaseDate, annuals.IssueDate, weekly.publisher, annuals.Issue_Number from weekly, annuals where weekly.IssueID = annuals.IssueID and weeknumber = ? and year = ?", [int(weeknumber), year]))
    return library
def incr_snatched(ComicID):
    """Increment the Have counter of the given series by one."""
    myDB = db.DBConnection()
    current = myDB.selectone("SELECT Have FROM Comics WHERE ComicID=?", [ComicID]).fetchone()
    updated = current['Have'] + 1
    logger.fdebug('Incrementing HAVE count total to : ' + str(updated))
    myDB.upsert("comics", {"Have": updated}, {"ComicID": ComicID})
    return
def duplicate_filecheck(filename, ComicID=None, IssueID=None, StoryArcID=None, rtnval=None):
    """Decide whether *filename* duplicates an issue already on disk.

    Returns a dict with an 'action' key:
      'write'     - no usable duplicate found; write the new file
      'dupe_file' - keep the existing file ('to_dupe' names the loser)
      'dupe_src'  - keep the new file ('to_dupe' names the existing loser)
      'dont_dupe' - Archived with no db location; skip post-processing
    or None when the file is missing or the issue is unknown to the db.
    """
    #filename = the filename in question that's being checked against
    #comicid = the comicid of the series that's being checked for duplication
    #issueid = the issueid of the issue that's being checked for duplication
    #storyarcid = the storyarcid of the issue that's being checked for duplication.
    #rtnval = the return value of a previous duplicate_filecheck that's re-running against new values
    #
    #import db
    myDB = db.DBConnection()
    logger.info('[DUPECHECK] Duplicate check for ' + filename)
    try:
        filesz = os.path.getsize(filename)
    except OSError as e:
        logger.warn('[DUPECHECK] File cannot be located in location specified. Something has moved or altered the name.')
        logger.warn('[DUPECHECK] Make sure if you are using ComicRN, you do not have Completed Download Handling enabled (or vice-versa). Aborting')
        return
    if IssueID:
        # locate the issue record - regular issues first, annuals second
        dupchk = myDB.selectone("SELECT * FROM issues WHERE IssueID=?", [IssueID]).fetchone()
        if dupchk is None:
            dupchk = myDB.selectone("SELECT * FROM annuals WHERE IssueID=?", [IssueID]).fetchone()
            if dupchk is None:
                logger.info('[DUPECHECK] Unable to find corresponding Issue within the DB. Do you still have the series on your watchlist?')
                return
    series = myDB.selectone("SELECT * FROM comics WHERE ComicID=?", [dupchk['ComicID']]).fetchone()
    #if it's a retry and the file was already snatched, the status is Snatched and won't hit the dupecheck.
    #rtnval will be one of 3:
    #'write' - write new file
    #'dupe_file' - do not write new file as existing file is better quality
    #'dupe_src' - write new file, as existing file is a lesser quality (dupe)
    if dupchk['Status'] == 'Downloaded' or dupchk['Status'] == 'Archived':
        try:
            dupsize = dupchk['ComicSize']
        except:
            # NOTE(review): if this except ever fires, dupsize is left
            # unbound and the `if dupsize is None` below would raise
            # NameError - presumably sqlite Rows never raise here; confirm.
            logger.info('[DUPECHECK] Duplication detection returned no hits as this is a new Snatch. This is not a duplicate.')
            rtnval = {'action': "write"}
        logger.info('[DUPECHECK] Existing Status already set to ' + dupchk['Status'])
        cid = []
        if dupsize is None:
            # db has no size for the existing copy - probably archived, or
            # the series totals are stale ("unrefreshed series syndrome")
            logger.info('[DUPECHECK] Existing filesize is 0 bytes as I cannot locate the orginal entry - it is probably archived.')
            logger.fdebug('[DUPECHECK] Checking series for unrefreshed series syndrome (USS).')
            havechk = myDB.selectone('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
            if havechk:
                if havechk['Have'] > havechk['Total']:
                    # totals are inconsistent - refresh the series, then
                    # recurse to re-run the dupe check with fresh data
                    logger.info('[DUPECHECK] Series has invalid issue totals [' + str(havechk['Have']) + '/' + str(havechk['Total']) + '] Attempting to Refresh & continue post-processing this issue.')
                    cid.append(ComicID)
                    logger.fdebug('[DUPECHECK] ComicID: ' + str(ComicID))
                    mylar.updater.dbUpdate(ComicIDList=cid, calledfrom='dupechk')
                    return duplicate_filecheck(filename, ComicID, IssueID, StoryArcID)
                else:
                    if rtnval is not None:
                        if rtnval['action'] == 'dont_dupe':
                            # second pass still can't resolve it - give up
                            logger.fdebug('[DUPECHECK] File is Archived but no file can be located within the db at the specified location. Assuming this was a manual archival and will not post-process this issue.')
                            return rtnval
                    else:
                        rtnval = {'action': "dont_dupe"}
                    #file is Archived, but no entry exists in the db for the location. Assume Archived, and don't post-process.
                    #quick rescan of files in dir, then rerun the dup check again...
                    mylar.updater.forceRescan(ComicID)
                    chk1 = duplicate_filecheck(filename, ComicID, IssueID, StoryArcID, rtnval)
                    rtnval = chk1
            else:
                rtnval = {'action': "dupe_file",
                          'to_dupe': os.path.join(series['ComicLocation'], dupchk['Location'])}
        else:
            logger.info('[DUPECHECK] Existing file within db :' + dupchk['Location'] + ' has a filesize of : ' + str(dupsize) + ' bytes.')
            #keywords to force keep / delete
            #this will be eventually user-controlled via the GUI once the options are enabled.
            if int(dupsize) == 0:
                logger.info('[DUPECHECK] Existing filesize is 0 as I cannot locate the original entry.')
                if dupchk['Status'] == 'Archived':
                    logger.info('[DUPECHECK] Assuming issue is Archived.')
                    rtnval = {'action': "dupe_file",
                              'to_dupe': filename}
                    return rtnval
                else:
                    logger.info('[DUPECHECK] Assuming 0-byte file - this one is gonna get hammered.')
            logger.fdebug('[DUPECHECK] Based on duplication preferences I will retain based on : ' + mylar.CONFIG.DUPECONSTRAINT)
            # tmp_dupeconstraint downgrades a cbr/cbz preference to a plain
            # filesize comparison when both files share the same extension
            tmp_dupeconstraint = mylar.CONFIG.DUPECONSTRAINT
            if any(['cbr' in mylar.CONFIG.DUPECONSTRAINT, 'cbz' in mylar.CONFIG.DUPECONSTRAINT]):
                if 'cbr' in mylar.CONFIG.DUPECONSTRAINT:
                    if filename.endswith('.cbr'):
                        #this has to be configured in config - either retain cbr or cbz.
                        if dupchk['Location'].endswith('.cbr'):
                            logger.info('[DUPECHECK-CBR PRIORITY] [#' + dupchk['Issue_Number'] + '] BOTH files are in cbr format. Retaining the larger filesize of the two.')
                            tmp_dupeconstraint = 'filesize'
                        else:
                            #keep filename
                            logger.info('[DUPECHECK-CBR PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining newly scanned in file : ' + filename)
                            rtnval = {'action': "dupe_src",
                                      'to_dupe': os.path.join(series['ComicLocation'], dupchk['Location'])}
                    else:
                        if dupchk['Location'].endswith('.cbz'):
                            logger.info('[DUPECHECK-CBR PRIORITY] [#' + dupchk['Issue_Number'] + '] BOTH files are in cbz format. Retaining the larger filesize of the two.')
                            tmp_dupeconstraint = 'filesize'
                        else:
                            #keep filename
                            logger.info('[DUPECHECK-CBR PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining newly scanned in file : ' + dupchk['Location'])
                            rtnval = {'action': "dupe_file",
                                      'to_dupe': filename}
                elif 'cbz' in mylar.CONFIG.DUPECONSTRAINT:
                    if filename.endswith('.cbr'):
                        if dupchk['Location'].endswith('.cbr'):
                            logger.info('[DUPECHECK-CBZ PRIORITY] [#' + dupchk['Issue_Number'] + '] BOTH files are in cbr format. Retaining the larger filesize of the two.')
                            tmp_dupeconstraint = 'filesize'
                        else:
                            #keep filename
                            logger.info('[DUPECHECK-CBZ PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining currently scanned in filename : ' + dupchk['Location'])
                            rtnval = {'action': "dupe_file",
                                      'to_dupe': filename}
                    else:
                        if dupchk['Location'].endswith('.cbz'):
                            logger.info('[DUPECHECK-CBZ PRIORITY] [#' + dupchk['Issue_Number'] + '] BOTH files are in cbz format. Retaining the larger filesize of the two.')
                            tmp_dupeconstraint = 'filesize'
                        else:
                            #keep filename
                            logger.info('[DUPECHECK-CBZ PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining newly scanned in filename : ' + filename)
                            rtnval = {'action': "dupe_src",
                                      'to_dupe': os.path.join(series['ComicLocation'], dupchk['Location'])}
            if mylar.CONFIG.DUPECONSTRAINT == 'filesize' or tmp_dupeconstraint == 'filesize':
                # larger file wins; a 0-byte existing entry never wins
                if filesz <= int(dupsize) and int(dupsize) != 0:
                    logger.info('[DUPECHECK-FILESIZE PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining currently scanned in filename : ' + dupchk['Location'])
                    rtnval = {'action': "dupe_file",
                              'to_dupe': filename}
                else:
                    logger.info('[DUPECHECK-FILESIZE PRIORITY] [#' + dupchk['Issue_Number'] + '] Retaining newly scanned in filename : ' + filename)
                    rtnval = {'action': "dupe_src",
                              'to_dupe': os.path.join(series['ComicLocation'], dupchk['Location'])}
    else:
        logger.info('[DUPECHECK] Duplication detection returned no hits. This is not a duplicate of anything that I have scanned in as of yet.')
        rtnval = {'action': "write"}
    return rtnval
def create_https_certificates(ssl_cert, ssl_key):
    """
    Create a pair of self-signed HTTPS certificates and store them in
    'ssl_cert' and 'ssl_key'. Method assumes pyOpenSSL is installed.
    This code is stolen from SickBeard (http://github.com/midgetspy/Sick-Beard).
    """
    from certgen import (TYPE_RSA, createCertificate, createCertRequest,
                         createKeyPair, serial)
    from OpenSSL import crypto
    ten_years = (0, 60 * 60 * 24 * 365 * 10)
    # CA keypair and self-signed CA certificate
    cakey = createKeyPair(TYPE_RSA, 2048)
    careq = createCertRequest(cakey, CN="Certificate Authority")
    cacert = createCertificate(careq, (careq, cakey), serial, ten_years)
    # server keypair and certificate signed by the CA above
    pkey = createKeyPair(TYPE_RSA, 2048)
    req = createCertRequest(pkey, CN="Mylar")
    cert = createCertificate(req, (cacert, cakey), serial, ten_years)
    # Save the key and certificate to disk
    try:
        with open(ssl_key, "w") as fp:
            fp.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
        with open(ssl_cert, "w") as fp:
            fp.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
    except IOError as e:
        logger.error("Error creating SSL key and certificate: %s", e)
        return False
    return True
def torrent_create(site, linkid, alt=None):
    """Build the torrent download URL for the given site and link id.

    Returns None for sites ('32P', 'TOR') that have no direct URL scheme.
    Bugfix: previously `url` was never assigned in those branches, so the
    final `return url` raised UnboundLocalError.
    """
    url = None
    if any([site == '32P', site == 'TOR']):
        pass
    elif site == 'DEM':
        url = mylar.DEMURL + 'files/download/' + str(linkid) + '/'
    elif site == 'WWT':
        url = mylar.WWTURL + 'download.php'
    return url
def parse_32pfeed(rssfeedline):
    """Extract the user / auth / authkey values from a 32P RSS feed URL and
    return them (plus the configured passkey) as a dict; empty dict when 32P
    is disabled or the line is too short."""
    KEYS_32P = {}
    if mylar.CONFIG.ENABLE_32P and len(rssfeedline) > 1:

        def _value_after(marker, offset):
            # value runs from just past '<marker>=' to the next '&' (or EOL)
            start = rssfeedline.find(marker)
            end = rssfeedline.find('&', start + 1)
            if end == -1:
                return rssfeedline[start + offset:]
            return rssfeedline[start + offset:end]

        KEYS_32P = {"user": _value_after('&user', 6),
                    "auth": _value_after('&auth', 6),
                    "authkey": _value_after('&authkey', 9),
                    "passkey": mylar.CONFIG.PASSKEY_32P}
    return KEYS_32P
def humanize_time(amount, units = 'seconds'):
    """Render a duration as readable English, e.g.
    humanize_time(3661) -> '1 hour, 1 minute and 1 second'.

    amount -- integer quantity of *units*
    units  -- plural unit name ('seconds', 'minutes', ...); default 'seconds'
    """
    def process_time(amount, units):
        # Interval sizes in seconds; months/years/centuries use the
        # 4-week-month approximation the original author chose.
        INTERVALS = [1, 60,
                     60*60,
                     60*60*24,
                     60*60*24*7,
                     60*60*24*7*4,
                     60*60*24*7*4*12,
                     60*60*24*7*4*12*100,
                     60*60*24*7*4*12*100*10]
        NAMES = [('second', 'seconds'),
                 ('minute', 'minutes'),
                 ('hour', 'hours'),
                 ('day', 'days'),
                 ('week', 'weeks'),
                 ('month', 'months'),
                 ('year', 'years'),
                 ('century', 'centuries'),
                 ('millennium', 'millennia')]
        result = []
        # bugfix: map() returns an iterator on Python 3 and has no .index();
        # build an actual list of the plural names instead.
        unit = [name[1] for name in NAMES].index(units)
        # Convert to seconds
        amount = amount * INTERVALS[unit]
        # greedily peel off the largest units first
        for i in range(len(NAMES)-1, -1, -1):
            a = amount // INTERVALS[i]
            if a > 0:
                # NAMES[i][1 % a]: index 0 (singular) only when a == 1
                result.append( (a, NAMES[i][1 % a]) )
                amount -= a * INTERVALS[i]
        return result
    rd = process_time(int(amount), units)
    # cont = components still to be written; picks ', ' vs ' and ' separators
    cont = 0
    for u in rd:
        if u[0] > 0:
            cont += 1
    buf = ''
    i = 0
    for u in rd:
        if u[0] > 0:
            buf += "%d %s" % (u[0], u[1])
            cont -= 1
        if i < (len(rd)-1):
            if cont > 1:
                buf += ", "
            else:
                buf += " and "
        i += 1
    return buf
def issue_status(IssueID):
    """Return True when the issue (regular, annual, or story-arc entry) is
    already Downloaded or Snatched; False otherwise or when it cannot be
    located in the db at all."""
    myDB = db.DBConnection()
    IssueID = str(IssueID)
    logger.fdebug('[ISSUE-STATUS] Issue Status Check for %s' % IssueID)
    # probe issues, then annuals, then storyarcs (by IssueArcID)
    isschk = myDB.selectone("SELECT * FROM issues WHERE IssueID=?", [IssueID]).fetchone()
    if isschk is None:
        isschk = myDB.selectone("SELECT * FROM annuals WHERE IssueID=?", [IssueID]).fetchone()
    if isschk is None:
        isschk = myDB.selectone("SELECT * FROM storyarcs WHERE IssueArcID=?", [IssueID]).fetchone()
    if isschk is None:
        logger.warn('Unable to retrieve IssueID from db. This is a problem. Aborting.')
        return False
    return isschk['Status'] in ('Downloaded', 'Snatched')
def crc(filename):
    """Return an md5 hex digest of the *filename* string itself (not the
    file's contents) - a cheap stand-in for a real per-file checksum."""
    # Content hashing (zlib.crc32 line-by-line or whole-file) was abandoned
    # for speed; the encoded name alone serves as the identifier here.
    encoded = filename.encode(mylar.SYS_ENCODING)
    return hashlib.md5(encoded).hexdigest()
def issue_find_ids(ComicName, ComicID, pack, IssueNumber):
    """Map the issues advertised in a pack description onto IssueIDs of the
    given series.

    ComicName   -- series name (used for logging only)
    ComicID     -- series whose issues table is consulted
    pack        -- pack description, e.g. '1-12', '1,3,5' or 'Annual 1-4'
    IssueNumber -- the wanted issue; sets 'valid' when it's inside the pack

    Returns {'issues': [...], 'issue_range': [...], 'valid': bool}.
    """
    #import db
    myDB = db.DBConnection()
    issuelist = myDB.select("SELECT * FROM issues WHERE ComicID=?", [ComicID])
    if 'Annual' not in pack:
        # expand comma-separated entries and N-M ranges into a flat int list
        packlist = [x.strip() for x in pack.split(',')]
        plist = []
        pack_issues = []
        for pl in packlist:
            if '-' in pl:
                # NOTE(review): relies on py2 range() returning a list; the
                # `type(pi) == list` check below depends on that - confirm
                # before running under py3.
                plist.append(range(int(pl[:pl.find('-')]),int(pl[pl.find('-')+1:])+1))
            else:
                plist.append(int(pl))
        for pi in plist:
            if type(pi) == list:
                for x in pi:
                    pack_issues.append(x)
            else:
                pack_issues.append(pi)
        pack_issues.sort()
        annualize = False
    else:
        #remove the annuals wording
        tmp_annuals = pack[pack.find('Annual'):]
        # NOTE(review): '[annual/annuals/+]' is a character class (strips
        # those individual characters), not the alternation it resembles -
        # confirm this is intentional.
        tmp_ann = re.sub('[annual/annuals/+]', '', tmp_annuals.lower()).strip()
        tmp_pack = re.sub('[annual/annuals/+]', '', pack.lower()).strip()
        pack_issues_numbers = re.findall(r'\d+', tmp_pack)
        pack_issues = range(int(pack_issues_numbers[0]),int(pack_issues_numbers[1])+1)
        annualize = True
    issues = {}
    issueinfo = []
    Int_IssueNumber = issuedigits(IssueNumber)
    valid = False
    # match each pack issue against the db by normalized issue number,
    # skipping issues already marked Downloaded
    for iss in pack_issues:
        int_iss = issuedigits(iss)
        for xb in issuelist:
            if xb['Status'] != 'Downloaded':
                if xb['Int_IssueNumber'] == int_iss:
                    issueinfo.append({'issueid': xb['IssueID'],
                                      'int_iss': int_iss,
                                      'issuenumber': xb['Issue_Number']})
                    break
    # the pack is only useful if the wanted issue is actually inside it
    for x in issueinfo:
        if Int_IssueNumber == x['int_iss']:
            valid = True
            break
    issues['issues'] = issueinfo
    if len(issues['issues']) == len(pack_issues):
        logger.info('Complete issue count of ' + str(len(pack_issues)) + ' issues are available within this pack for ' + ComicName)
    else:
        logger.info('Issue counts are not complete (not a COMPLETE pack) for ' + ComicName)
    issues['issue_range'] = pack_issues
    issues['valid'] = valid
    return issues
def conversion(value):
    """Decode a byte-string to unicode, trying utf-8 first and falling back
    to windows-1252; any non-str value passes through untouched."""
    if type(value) == str:
        try:
            return value.decode('utf-8')
        except:
            return value.decode('windows-1252')
    return value
def clean_url(url):
    """Trim leading and trailing space characters (only spaces, not other
    whitespace) from *url*."""
    return url.strip(' ')
def chunker(seq, size):
    """Split *seq* into consecutive slices of at most *size* elements
    (ie. for group in chunker(seq, 3)).

    Uses range() instead of the py2-only xrange() so the helper also runs
    under Python 3; the returned list of slices is unchanged.
    """
    return [seq[pos:pos + size] for pos in range(0, len(seq), size)]
def cleanHost(host, protocol = True, ssl = False, username = None, password = None):
    """ Return a cleaned up host with given url options set
    taken verbatim from CouchPotato
    Changes protocol to https if ssl is set to True and http if ssl is set to false.
    >>> cleanHost("localhost:80", ssl=True)
    'https://localhost:80/'
    >>> cleanHost("localhost:80", ssl=False)
    'http://localhost:80/'
    Username and password is managed with the username and password variables
    >>> cleanHost("localhost:80", username="user", password="passwd")
    'http://user:passwd@localhost:80/'
    Output without scheme (protocol) can be forced with protocol=False
    >>> cleanHost("localhost:80", protocol=False)
    'localhost:80'
    """
    if not '://' in host and protocol:
        host = ('https://' if ssl else 'http://') + host
    if not protocol:
        host = host.split('://', 1)[-1]
    if protocol and username and password:
        try:
            auth = re.findall('^(?:.+?//)(.+?):(.+?)@(?:.+)$', host)
            if auth:
                # bugfix: this line used the undefined name 'log' (a
                # CouchPotato leftover, error hidden by the bare except);
                # use this module's logger like the rest of the file.
                logger.error('Cleanhost error: auth already defined in url: %s, please remove BasicAuth from url.', host)
            else:
                host = host.replace('://', '://%s:%s@' % (username, password), 1)
        except:
            pass
    host = host.rstrip('/ ')
    if protocol:
        host += '/'
    return host
def checkthe_id(comicid=None, up_vals=None):
    """Without up_vals: fetch the cached 32P reference for *comicid*,
    returning {'id', 'series'} or None when it is missing, never verified,
    or stale (checked more than 24h ago). With up_vals: store/refresh the
    reference row instead (returns None)."""
    myDB = db.DBConnection()
    if up_vals:
        # update mode - persist the fresh series/id pair with a timestamp
        myDB.upsert("ref32p",
                    {'Series': up_vals[0]['series'],
                     'ID': up_vals[0]['id'],
                     'Updated': now()},
                    {'ComicID': comicid})
        return
    chk = myDB.selectone("SELECT * from ref32p WHERE ComicID=?", [comicid]).fetchone()
    if chk is None:
        return None
    #if updated time hasn't been set or it's > 24 hrs, blank the entry so we can make sure we pull an updated groupid from 32p
    if chk['Updated'] is None:
        logger.fdebug('Reference found for 32p - but the id has never been verified after populating. Verifying it is still the right id before proceeding.')
        return None
    c_obj_date = datetime.datetime.strptime(chk['Updated'], "%Y-%m-%d %H:%M:%S")
    absdiff = abs(datetime.datetime.now() - c_obj_date)
    hours = (absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 3600.0
    if hours >= 24:
        logger.fdebug('Reference found for 32p - but older than 24hours since last checked. Verifying it is still the right id before proceeding.')
        return None
    return {'id': chk['ID'],
            'series': chk['Series']}
def updatearc_locs(storyarcid, issues):
    """Sync the file Location of each Downloaded issue belonging to a story
    arc, copying/linking files into the arc directory when one is configured.

    storyarcid -- the arc being updated
    issues     -- list of dicts carrying at least IssueID, StoryArc,
                  StoryArcID, ComicID, ComicName, IssueNumber, ReadingOrder
                  and Publisher/IssuePublisher fields
    """
    myDB = db.DBConnection()
    issuelist = [x['IssueID'] for x in issues]
    tmpsql = "SELECT a.comicid, a.comiclocation, b.comicid, b.status, b.issueid, b.location FROM comics as a INNER JOIN issues as b ON a.comicid = b.comicid WHERE b.issueid in ({seq})".format(seq=','.join(['?'] *(len(issuelist))))
    chkthis = myDB.select(tmpsql, issuelist)
    update_iss = []
    if chkthis is None:
        return
    for chk in chkthis:
        if chk['Status'] != 'Downloaded':
            continue
        pathsrc = os.path.join(chk['ComicLocation'], chk['Location'])
        if not os.path.exists(pathsrc):
            # file missing from the series dir - try the alternate dest dirs
            try:
                if all([mylar.CONFIG.MULTIPLE_DEST_DIRS is not None, mylar.CONFIG.MULTIPLE_DEST_DIRS != 'None', os.path.join(mylar.CONFIG.MULTIPLE_DEST_DIRS, os.path.basename(chk['ComicLocation'])) != chk['ComicLocation'], os.path.exists(os.path.join(mylar.CONFIG.MULTIPLE_DEST_DIRS, os.path.basename(chk['ComicLocation'])))]):
                    pathsrc = os.path.join(mylar.CONFIG.MULTIPLE_DEST_DIRS, os.path.basename(chk['ComicLocation']), chk['Location'])
                else:
                    # bugfix: this log line previously referenced the
                    # undefined names 'module' and 'pathdir'; the NameError
                    # it raised was silently eaten by the bare except below,
                    # so the skip was never logged.
                    logger.fdebug('[ARC-UPDATER] file does not exist in location: ' + pathsrc + '. Cannot valid location - some options will not be available for this item.')
                    continue
            except:
                continue
        # match the db row back to the caller-supplied arc issue info
        arcinfo = None
        for la in issues:
            if la['IssueID'] == chk['IssueID']:
                arcinfo = la
                break
        if arcinfo is None:
            continue
        if arcinfo['Publisher'] is None:
            arcpub = arcinfo['IssuePublisher']
        else:
            arcpub = arcinfo['Publisher']
        grdst = arcformat(arcinfo['StoryArc'], spantheyears(arcinfo['StoryArcID']), arcpub)
        if grdst is None:
            continue
        logger.info('grdst:' + grdst)
        #send to renamer here if valid.
        dfilename = chk['Location']
        if mylar.CONFIG.RENAME_FILES:
            renamed_file = rename_param(arcinfo['ComicID'], arcinfo['ComicName'], arcinfo['IssueNumber'], chk['Location'], issueid=arcinfo['IssueID'], arc=arcinfo['StoryArc'])
            if renamed_file:
                dfilename = renamed_file['nfilename']
        if mylar.CONFIG.READ2FILENAME:
            # prefix the filename with its zero-padded reading-order number
            readord = renamefile_readingorder(arcinfo['ReadingOrder'])
            dfilename = str(readord) + "-" + dfilename
        pathdst = os.path.join(grdst, dfilename)
        logger.fdebug('Destination Path : ' + pathdst)
        logger.fdebug('Source Path : ' + pathsrc)
        if not os.path.isdir(grdst):
            logger.fdebug('[ARC-DIRECTORY] Arc directory doesn\'t exist. Creating: %s' % grdst)
            mylar.filechecker.validateAndCreateDirectory(grdst, create=True)
        if not os.path.isfile(pathdst):
            logger.info('[' + mylar.CONFIG.ARC_FILEOPS.upper() + '] ' + pathsrc + ' into directory : ' + pathdst)
            try:
                #need to ensure that src is pointing to the series in order to do a soft/hard-link properly
                fileoperation = file_ops(pathsrc, pathdst, arc=True)
                if not fileoperation:
                    raise OSError
            except (OSError, IOError):
                logger.fdebug('[' + mylar.CONFIG.ARC_FILEOPS.upper() + '] Failure ' + pathsrc + ' - check directories and manually re-run.')
                continue
            updateloc = pathdst
        else:
            updateloc = pathsrc
        update_iss.append({'IssueID': chk['IssueID'],
                           'Location': updateloc})
    for ui in update_iss:
        logger.info(ui['IssueID'] + ' to update location to: ' + ui['Location'])
        myDB.upsert("storyarcs", {'Location': ui['Location']}, {'IssueID': ui['IssueID'], 'StoryArcID': storyarcid})
def spantheyears(storyarcid):
    """Return the publication-year span ('YYYY' or 'YYYY - YYYY') covered by
    the issues of a story arc. Falls back to the last row's SeriesYear when
    no usable IssueDate exists, or to '0000' for an arc with no issues.

    Bugfix: an empty result set previously fell through to the summary block
    with `la` and `spanyears` unbound, raising NameError.
    """
    myDB = db.DBConnection()
    totalcnt = myDB.select("SELECT * FROM storyarcs WHERE StoryArcID=?", [storyarcid])
    if not totalcnt:
        return '0000'
    lowyear = 9999
    maxyear = 0
    for la in totalcnt:
        # skip rows without a real cover date
        if la['IssueDate'] is None or la['IssueDate'] == '0000-00-00':
            continue
        year = int(la['IssueDate'][:4])
        if year > maxyear:
            maxyear = year
        if year < lowyear:
            lowyear = year
    if maxyear == 0:
        # no parsable dates at all - fall back to the last row's series year
        spanyears = la['SeriesYear']
    elif lowyear == maxyear:
        spanyears = str(maxyear)
    else:
        spanyears = str(lowyear) + ' - ' + str(maxyear)
    return spanyears
def arcformat(arc, spanyears, publisher):
    """Build the destination directory path for a story arc from the
    configured ARC_FOLDERFORMAT ($arc / $spanyears / $publisher tokens).

    Returns None when neither a story-arc dir nor the grab-bag dir applies.
    """
    arcdir = filesafe(arc)
    if publisher is None:
        publisher = 'None'
    values = {'$arc': arcdir,
              '$spanyears': spanyears,
              '$publisher': publisher}
    tmp_folderformat = mylar.CONFIG.ARC_FOLDERFORMAT
    if publisher == 'None':
        # drop the $publisher token and collapse leftover whitespace
        chunk_f_f = re.sub('\$publisher', '', tmp_folderformat)
        chunk_f = re.compile(r'\s+')
        tmp_folderformat = chunk_f.sub(' ', chunk_f_f)
    if any([tmp_folderformat == '', tmp_folderformat is None]):
        arcpath = arcdir
    else:
        arcpath = replace_all(tmp_folderformat, values)
    if mylar.CONFIG.REPLACE_SPACES:
        arcpath = arcpath.replace(' ', mylar.CONFIG.REPLACE_CHAR)
    # bugfix: check the double-slash prefix first - previously the
    # startswith('/') branch always matched, so '//' was never fully
    # stripped (the elif was unreachable).
    if arcpath.startswith('//'):
        arcpath = arcpath[2:]
    elif arcpath.startswith('/'):
        arcpath = arcpath[1:]
    if mylar.CONFIG.STORYARCDIR is True:
        dstloc = os.path.join(mylar.CONFIG.DESTINATION_DIR, 'StoryArcs', arcpath)
    elif mylar.CONFIG.COPY2ARCDIR is True:
        logger.warn('Story arc directory is not configured. Defaulting to grabbag directory: ' + mylar.CONFIG.GRABBAG_DIR)
        dstloc = os.path.join(mylar.CONFIG.GRABBAG_DIR, arcpath)
    else:
        dstloc = None
    return dstloc
def torrentinfo(issueid=None, torrent_hash=None, download=False, monitor=False):
    """Query the active torrent client (rTorrent or Deluge) for a snatched torrent
    and optionally act on it.

    Parameters:
        issueid      -- look the hash up in the snatched table for this issue.
        torrent_hash -- use this hash directly (32-char base32 or 40-char hex).
        download     -- when True and the torrent is complete, run the configured
                        AUTO_SNATCH_SCRIPT to retrieve the payload.
        monitor      -- when True and the torrent is incomplete, pause it, copy the
                        file aside for meta-tagging, then resume it (Deluge path).

    Returns the client's torrent_info dict with 'snatch_status' added.
    NOTE(review): several paths can leave snatch_status/torrent_info unbound before
    the final lines (e.g. invalid hash length) -- confirm against callers.
    """
    #import db
    from base64 import b16encode, b32decode

    #check the status of the issueid to make sure it's in Snatched status and was grabbed via torrent.
    if issueid:
        myDB = db.DBConnection()
        cinfo = myDB.selectone('SELECT a.Issue_Number, a.ComicName, a.Status, b.Hash from issues as a inner join snatched as b ON a.IssueID=b.IssueID WHERE a.IssueID=?', [issueid]).fetchone()
        if cinfo is None:
            # NOTE(review): execution continues after this branch and dereferences
            # cinfo below -- would raise TypeError when the lookup fails. Confirm.
            logger.warn('Unable to locate IssueID of : ' + issueid)
            snatch_status = 'ERROR'
        if cinfo['Status'] != 'Snatched' or cinfo['Hash'] is None:
            logger.warn(cinfo['ComicName'] + ' #' + cinfo['Issue_Number'] + ' is currently in a ' + cinfo['Status'] + ' Status.')
            snatch_status = 'ERROR'
        torrent_hash = cinfo['Hash']

    logger.fdebug("Working on torrent: " + torrent_hash)
    # 32-char hashes are base32; normalize to the 40-char hex form clients expect.
    if len(torrent_hash) == 32:
        torrent_hash = b16encode(b32decode(torrent_hash))

    if not len(torrent_hash) == 40:
        logger.error("Torrent hash is missing, or an invalid hash value has been passed")
        snatch_status = 'ERROR'
    else:
        if mylar.USE_RTORRENT:
            import test
            rp = test.RTorrent()
            torrent_info = rp.main(torrent_hash, check=True)
        elif mylar.USE_DELUGE:
            #need to set the connect here as well....
            import torrent.clients.deluge as delu
            dp = delu.TorrentClient()
            if not dp.connect(mylar.CONFIG.DELUGE_HOST, mylar.CONFIG.DELUGE_USERNAME, mylar.CONFIG.DELUGE_PASSWORD):
                logger.warn('Not connected to Deluge!')
            torrent_info = dp.get_torrent(torrent_hash)
        else:
            # No supported client enabled; bail out (returns None to the caller).
            snatch_status = 'ERROR'
            return

        logger.info('torrent_info: %s' % torrent_info)
        if torrent_info is False or len(torrent_info) == 0:
            logger.warn('torrent returned no information. Check logs - aborting auto-snatch at this time.')
            snatch_status = 'ERROR'
        else:
            # Normalize the per-client payloads onto a common set of keys.
            if mylar.USE_DELUGE:
                torrent_status = torrent_info['is_finished']
                torrent_files = torrent_info['num_files']
                torrent_folder = torrent_info['save_path']
                torrent_info['total_filesize'] = torrent_info['total_size']
                torrent_info['upload_total'] = torrent_info['total_uploaded']
                torrent_info['download_total'] = torrent_info['total_payload_download']
                torrent_info['time_started'] = torrent_info['time_added']
            elif mylar.USE_RTORRENT:
                torrent_status = torrent_info['completed']
                torrent_files = len(torrent_info['files'])
                torrent_folder = torrent_info['folder']

            if all([torrent_status is True, download is True]):
                if not issueid:
                    torrent_info['snatch_status'] = 'STARTING...'
                    #yield torrent_info
                import shlex
                import subprocess
                logger.info('Torrent is completed and status is currently Snatched. Attempting to auto-retrieve.')
                # Peek at the shebang so .sh scripts run under their declared shell.
                with open(mylar.CONFIG.AUTO_SNATCH_SCRIPT, 'r') as f:
                    first_line = f.readline()
                if mylar.CONFIG.AUTO_SNATCH_SCRIPT.endswith('.sh'):
                    shell_cmd = re.sub('#!', '', first_line)
                    if shell_cmd == '' or shell_cmd is None:
                        shell_cmd = '/bin/bash'
                else:
                    shell_cmd = sys.executable
                curScriptName = shell_cmd + ' ' + str(mylar.CONFIG.AUTO_SNATCH_SCRIPT).decode("string_escape")
                if torrent_files > 1:
                    downlocation = torrent_folder.encode('utf-8')
                else:
                    if mylar.USE_DELUGE:
                        downlocation = os.path.join(torrent_folder.encode('utf-8'), torrent_info['files'][0]['path'])
                    else:
                        downlocation = torrent_info['files'][0].encode('utf-8')
                # Variables are handed to the script via environment, not argv.
                autosnatch_env = os.environ.copy()
                autosnatch_env['downlocation'] = re.sub("'", "\\'",downlocation)
                #these are pulled from the config and are the ssh values to use to retrieve the data
                autosnatch_env['host'] = mylar.CONFIG.PP_SSHHOST
                autosnatch_env['port'] = mylar.CONFIG.PP_SSHPORT
                autosnatch_env['user'] = mylar.CONFIG.PP_SSHUSER
                autosnatch_env['localcd'] = mylar.CONFIG.PP_SSHLOCALCD
                #bash won't accept None, so send check and send empty strings for the 2 possible None values if needed
                if mylar.CONFIG.PP_SSHKEYFILE is not None:
                    autosnatch_env['keyfile'] = mylar.CONFIG.PP_SSHKEYFILE
                else:
                    autosnatch_env['keyfile'] = ''
                if mylar.CONFIG.PP_SSHPASSWD is not None:
                    autosnatch_env['passwd'] = mylar.CONFIG.PP_SSHPASSWD
                else:
                    autosnatch_env['passwd'] = ''
                #downlocation = re.sub("\'", "\\'", downlocation)
                #downlocation = re.sub("&", "\&", downlocation)
                script_cmd = shlex.split(curScriptName, posix=False) # + [downlocation]
                logger.fdebug(u"Executing command " +str(script_cmd))
                try:
                    p = subprocess.Popen(script_cmd, env=dict(autosnatch_env), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=mylar.PROG_DIR)
                    out, err = p.communicate()
                    logger.fdebug(u"Script result: " + out)
                except OSError, e:
                    logger.warn(u"Unable to run extra_script: " + e)
                    snatch_status = 'ERROR'
                else:
                    if 'Access failed: No such file' in out:
                        logger.fdebug('Not located in location it is supposed to be in - probably has been moved by some script and I got the wrong location due to timing. Trying again...')
                        snatch_status = 'IN PROGRESS'
                    else:
                        snatch_status = 'COMPLETED'
                        torrent_info['completed'] = torrent_status
                        torrent_info['files'] = torrent_files
                        torrent_info['folder'] = torrent_folder
            else:
                if download is True:
                    snatch_status = 'IN PROGRESS'
                elif monitor is True:
                    #pause the torrent, copy it to the cache folder, unpause the torrent and return the complete path to the cache location
                    if mylar.USE_DELUGE:
                        pauseit = dp.stop_torrent(torrent_hash)
                        if pauseit is False:
                            logger.warn('Unable to pause torrent - cannot run post-process on item at this time.')
                            snatch_status = 'MONITOR FAIL'
                        else:
                            try:
                                # NOTE(review): torrent_path is not defined anywhere in this
                                # function -- this branch would raise NameError. Confirm the
                                # intended source (likely the torrent's file path).
                                new_filepath = os.path.join(torrent_path, '.copy')
                                logger.fdebug('New_Filepath: %s' % new_filepath)
                                shutil.copy(torrent_path, new_filepath)
                                torrent_info['copied_filepath'] = new_filepath
                            except:
                                logger.warn('Unexpected Error: %s' % sys.exc_info()[0])
                                logger.warn('Unable to create temporary directory to perform meta-tagging. Processing cannot continue with given item at this time.')
                                torrent_info['copied_filepath'] = torrent_path
                                # NOTE(review): SNATCH_STATUS (upper-case) is never read;
                                # snatch_status stays unbound on this path. Likely a typo.
                                SNATCH_STATUS = 'MONITOR FAIL'
                            else:
                                startit = dp.start_torrent(torrent_hash)
                                SNATCH_STATUS = 'MONITOR COMPLETE'
                else:
                    snatch_status = 'NOT SNATCHED'

    torrent_info['snatch_status'] = snatch_status
    return torrent_info
def weekly_info(week=None, year=None, current=None):
    """Build the weekly-pullist metadata dict for a given week (or the current one).

    Parameters:
        week / year -- explicit week number & year to view (prev/next navigation).
        current     -- optional 'weeknumber-year' string used as the reference
                       point for the year-boundary monkey patches below.

    Returns a dict with the week's start/mid/end dates, prev/next navigation
    values, the last pullist refresh time, and the computed 'week_folder' path.
    """
    #find the current week and save it as a reference point.
    todaydate = datetime.datetime.today()
    current_weeknumber = todaydate.strftime("%U")

    if current is not None:
        c_weeknumber = int(current[:current.find('-')])
        c_weekyear = int(current[current.find('-')+1:])
    else:
        c_weeknumber = week
        c_weekyear = year

    if week:
        weeknumber = int(week)
        year = int(year)
        #monkey patch for 2018/2019 - week 52/week 0
        if all([weeknumber == 52, c_weeknumber == 51, c_weekyear == 2018]):
            weeknumber = 0
            year = 2019
        elif all([weeknumber == 52, c_weeknumber == 0, c_weekyear == 2019]):
            weeknumber = 51
            year = 2018
        #monkey patch for 2019/2020 - week 52/week 0
        if all([weeknumber == 52, c_weeknumber == 51, c_weekyear == 2019]):
            weeknumber = 0
            year = 2020
        elif all([weeknumber == 52, c_weeknumber == 0, c_weekyear == 2020]):
            weeknumber = 51
            year = 2019
        #view specific week (prev_week, next_week)
        startofyear = date(year,1,1)
        week0 = startofyear - timedelta(days=startofyear.isoweekday())
        stweek = datetime.datetime.strptime(week0.strftime('%Y-%m-%d'), '%Y-%m-%d')
        startweek = stweek + timedelta(weeks = weeknumber)
        midweek = startweek + timedelta(days = 3)
        endweek = startweek + timedelta(days = 6)
    else:
        #find the given week number for the current day
        # NOTE(review): strftime returns strings here, so weeknumber/year are str --
        # the int comparisons in the patches below can never match (only the
        # explicit string comparison ['52','2019'] can). Confirm intent.
        weeknumber = current_weeknumber
        year = todaydate.strftime("%Y")
        #monkey patch for 2018/2019 - week 52/week 0
        if all([weeknumber == 52, c_weeknumber == 51, c_weekyear == 2018]):
            weeknumber = 0
            year = 2019
        elif all([weeknumber == 52, c_weeknumber == 0, c_weekyear == 2019]):
            weeknumber = 51
            year = 2018
        #monkey patch for 2019/2020 - week 52/week 0
        if all([weeknumber == 52, c_weeknumber == 51, c_weekyear == 2019]) or all([weeknumber == '52', year == '2019']):
            weeknumber = 0
            year = 2020
        elif all([weeknumber == 52, c_weeknumber == 0, c_weekyear == 2020]):
            weeknumber = 51
            year = 2019
        stweek = datetime.datetime.strptime(todaydate.strftime('%Y-%m-%d'), '%Y-%m-%d')
        startweek = stweek - timedelta(days = (stweek.weekday() + 1) % 7)
        midweek = startweek + timedelta(days = 3)
        endweek = startweek + timedelta(days = 6)

    # prev/next navigation values, wrapping at year boundaries.
    prev_week = int(weeknumber) - 1
    prev_year = year
    if prev_week < 0:
        prev_week = 52
        prev_year = int(year) - 1

    next_week = int(weeknumber) + 1
    next_year = year
    if next_week > 52:
        next_year = int(year) + 1
        next_week = datetime.date(int(next_year),1,1).strftime("%U")

    date_fmt = "%B %d, %Y"
    try:
        con_startweek = u"" + startweek.strftime(date_fmt).decode('utf-8')
        con_endweek = u"" + endweek.strftime(date_fmt).decode('utf-8')
    except:
        # locale月-name strings may not be utf-8 on some platforms; retry cp1252.
        con_startweek = u"" + startweek.strftime(date_fmt).decode('cp1252')
        con_endweek = u"" + endweek.strftime(date_fmt).decode('cp1252')

    if mylar.CONFIG.WEEKFOLDER_LOC is not None:
        weekdst = mylar.CONFIG.WEEKFOLDER_LOC
    else:
        weekdst = mylar.CONFIG.DESTINATION_DIR

    if mylar.SCHED_WEEKLY_LAST is not None:
        weekly_stamp = datetime.datetime.fromtimestamp(mylar.SCHED_WEEKLY_LAST)
        weekly_last = weekly_stamp.replace(microsecond=0)
    else:
        weekly_last = 'None'

    weekinfo = {'weeknumber': weeknumber,
                'startweek': con_startweek,
                'midweek': midweek.strftime('%Y-%m-%d'),
                'endweek': con_endweek,
                'year': year,
                'prev_weeknumber': prev_week,
                'prev_year': prev_year,
                'next_weeknumber': next_week,
                'next_year': next_year,
                'current_weeknumber': current_weeknumber,
                'last_update': weekly_last}

    if weekdst is not None:
        if mylar.CONFIG.WEEKFOLDER_FORMAT == 0:
            # folder named YYYY-WW (zero-padded week).
            weekn = weeknumber
            if len(str(weekn)) == 1:
                weekn = '%s%s' % ('0', str(weekn))
            weekfold = os.path.join(weekdst, '%s-%s' % (weekinfo['year'], weekn))
        else:
            # folder named after the week's Wednesday (midweek) date.
            weekfold = os.path.join(weekdst, str( str(weekinfo['midweek']) ))
    else:
        weekfold = None

    weekinfo['week_folder'] = weekfold
    return weekinfo
def latestdate_update():
    """Force-refresh stale series metadata.

    Finds comics whose cached LatestDate is older than the newest issue's
    ReleaseDate (or recorded as 'Unknown') and rewrites LatestDate/LatestIssue
    from the issue table.
    """
    #import db
    myDB = db.DBConnection()
    ccheck = myDB.select('SELECT a.ComicID, b.IssueID, a.LatestDate, b.ReleaseDate, b.Issue_Number from comics as a left join issues as b on a.comicid=b.comicid where a.LatestDate < b.ReleaseDate or a.LatestDate like "%Unknown%" group by a.ComicID')
    if ccheck is None or len(ccheck) == 0:
        return

    logger.info('Now preparing to update ' + str(len(ccheck)) + ' series that have out-of-date latest date information.')
    # Collect the corrected values first, then apply them.
    fixes = [{'ComicID': row['ComicID'],
              'LatestDate': row['ReleaseDate'],
              'LatestIssue': row['Issue_Number']} for row in ccheck]
    #forcibly set the latest date and issue number to the most recent.
    for fix in fixes:
        logger.info(fix)
        logger.info('updating latest date for : ' + fix['ComicID'] + ' to ' + fix['LatestDate'] + ' #' + fix['LatestIssue'])
        myDB.upsert("comics",
                    {'LatestDate': fix['LatestDate'], 'LatestIssue': fix['LatestIssue']},
                    {'ComicID': fix['ComicID']})
def ddl_downloader(queue):
    """Background worker that services the direct-download (DDL) queue.

    Loops forever: sleeps while the global DDL_LOCK is held or the queue is
    empty, otherwise pulls an item, records its progress in the ddl_info table,
    runs the getcomics downloader, and hands successful downloads to the
    post-processing queue. A literal 'exit' item shuts the worker down.
    """
    myDB = db.DBConnection()
    while True:
        if mylar.DDL_LOCK is True:
            time.sleep(5)
        elif mylar.DDL_LOCK is False and queue.qsize() >= 1:
            item = queue.get(True)
            if item == 'exit':
                logger.info('Cleaning up workers for shutdown')
                break
            logger.info('Now loading request from DDL queue: %s' % item['series'])
            #write this to the table so we have a record of what's going on.
            ctrlval = {'id': item['id']}
            val = {'status': 'Downloading',
                   'updated_date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M')}
            myDB.upsert('ddl_info', val, ctrlval)

            ddz = getcomics.GC()
            ddzstat = ddz.downloadit(item['id'], item['link'], item['mainlink'], item['resume'])

            if ddzstat['success'] is True:
                tdnow = datetime.datetime.now()
                nval = {'status': 'Completed',
                        'updated_date': tdnow.strftime('%Y-%m-%d %H:%M')}
                myDB.upsert('ddl_info', nval, ctrlval)

            if all([ddzstat['success'] is True, mylar.CONFIG.POST_PROCESSING is True]):
                try:
                    # filename is None when the download produced a directory (pack);
                    # submit the folder itself in that case.
                    if ddzstat['filename'] is None:
                        logger.info('%s successfully downloaded - now initiating post-processing.' % (os.path.basename(ddzstat['path'])))
                        mylar.PP_QUEUE.put({'nzb_name': os.path.basename(ddzstat['path']),
                                            'nzb_folder': ddzstat['path'],
                                            'failed': False,
                                            'issueid': None,
                                            'comicid': item['comicid'],
                                            'apicall': True,
                                            'ddl': True})
                    else:
                        logger.info('%s successfully downloaded - now initiating post-processing.' % (ddzstat['filename']))
                        mylar.PP_QUEUE.put({'nzb_name': ddzstat['filename'],
                                            'nzb_folder': ddzstat['path'],
                                            'failed': False,
                                            'issueid': item['issueid'],
                                            'comicid': item['comicid'],
                                            'apicall': True,
                                            'ddl': True})
                except Exception as e:
                    logger.info('process error: %s [%s]' %(e, ddzstat))
            elif all([ddzstat['success'] is True, mylar.CONFIG.POST_PROCESSING is False]):
                logger.info('File successfully downloaded. Post Processing is not enabled - item retained here: %s' % os.path.join(ddzstat['path'],ddzstat['filename']))
            else:
                logger.info('[Status: %s] Failed to download: %s ' % (ddzstat['success'], ddzstat))
                nval = {'status': 'Failed',
                        'updated_date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M')}
                myDB.upsert('ddl_info', nval, ctrlval)
        else:
            time.sleep(5)
def postprocess_main(queue):
    """Background worker that drains the post-processing queue.

    Sleeps while the global APILOCK is held; otherwise pulls an item and runs
    process.Process on it. A literal 'exit' item shuts the worker down.
    """
    while True:
        if mylar.APILOCK is True:
            time.sleep(5)
        elif mylar.APILOCK is False and queue.qsize() >= 1: #len(queue) > 1:
            pp = None
            item = queue.get(True)
            logger.info('Now loading from post-processing queue: %s' % item)
            if item == 'exit':
                logger.info('Cleaning up workers for shutdown')
                break
            if mylar.APILOCK is False:
                try:
                    # Newer queue items carry a 'ddl' flag; fall back to the
                    # shorter constructor for items queued without it.
                    pprocess = process.Process(item['nzb_name'], item['nzb_folder'], item['failed'], item['issueid'], item['comicid'], item['apicall'], item['ddl'])
                except:
                    pprocess = process.Process(item['nzb_name'], item['nzb_folder'], item['failed'], item['issueid'], item['comicid'], item['apicall'])
                pp = pprocess.post_process()
                time.sleep(5) #arbitrary sleep to let the process attempt to finish pp'ing
            if pp is not None:
                if pp['mode'] == 'stop':
                    #reset the lock so any subsequent items can pp and not keep the queue locked up.
                    mylar.APILOCK = False
            if mylar.APILOCK is True:
                logger.info('Another item is post-processing still...')
                time.sleep(15)
                #mylar.PP_QUEUE.put(item)
        else:
            time.sleep(5)
def search_queue(queue):
    """Background worker that services the issue-search queue.

    Sleeps while the global SEARCHLOCK is held or the queue is empty; a
    literal 'exit' item shuts the worker down. SEARCHLOCK is re-checked
    around each search so concurrent searches back off.
    """
    while True:
        if mylar.SEARCHLOCK is True:
            # another search is running - back off briefly before re-checking
            time.sleep(5)
            continue
        if not (mylar.SEARCHLOCK is False and queue.qsize() >= 1):
            # nothing to do (or lock in an indeterminate state) - idle
            time.sleep(5)
            continue
        queued = queue.get(True)
        if queued == 'exit':
            logger.info('[SEARCH-QUEUE] Cleaning up workers for shutdown')
            break
        logger.info('[SEARCH-QUEUE] Now loading item from search queue: %s' % queued)
        if mylar.SEARCHLOCK is False:
            ss_queue = mylar.search.searchforissue(queued['issueid'])
            # arbitrary pause to let the search wind down before the next item
            time.sleep(5)
        if mylar.SEARCHLOCK is True:
            logger.fdebug('[SEARCH-QUEUE] Another item is currently being searched....')
            time.sleep(15)
def worker_main(queue):
    """Background worker that polls the snatched-torrent queue.

    Each item is a torrent hash; torrentinfo() is asked to retrieve the
    completed download. Items still in progress are re-queued after a delay.
    A literal 'exit' item shuts the worker down.
    """
    while True:
        if queue.qsize() >= 1:
            item = queue.get(True)
            logger.info('Now loading from queue: ' + item)
            if item == 'exit':
                logger.info('Cleaning up workers for shutdown')
                break
            snstat = torrentinfo(torrent_hash=item, download=True)
            if snstat['snatch_status'] == 'IN PROGRESS':
                logger.info('Still downloading in client....let us try again momentarily.')
                time.sleep(30)
                # put the hash back so it is re-checked on a later pass.
                mylar.SNATCHED_QUEUE.put(item)
            elif any([snstat['snatch_status'] == 'MONITOR FAIL', snstat['snatch_status'] == 'MONITOR COMPLETE']):
                logger.info('File copied for post-processing - submitting as a direct pp.')
                # NOTE(review): 'self' is undefined in this module-level function --
                # this branch would raise NameError if reached. Confirm the intended
                # checkFolder owner (likely a folder-monitor object).
                threading.Thread(target=self.checkFolder, args=[os.path.abspath(os.path.join(snstat['copied_filepath'], os.pardir))]).start()
        else:
            time.sleep(15)
def nzb_monitor(queue):
    """Background worker for completed-download handling of NZB snatches.

    Pulls items off the NZB queue and asks the enabled downloader (SABnzbd or
    NZBGet) about them; finished items are forwarded to the post-processing
    queue, unknown items are re-queued. Exits when no handler is enabled or a
    literal 'exit' item arrives.
    """
    while True:
        if queue.qsize() >= 1:
            item = queue.get(True)
            if item == 'exit':
                logger.info('Cleaning up workers for shutdown')
                break
            logger.info('Now loading from queue: %s' % item)
            if all([mylar.USE_SABNZBD is True, mylar.CONFIG.SAB_CLIENT_POST_PROCESSING is True]):
                nz = sabnzbd.SABnzbd(item)
                nzstat = nz.processor()
            elif all([mylar.USE_NZBGET is True, mylar.CONFIG.NZBGET_CLIENT_POST_PROCESSING is True]):
                nz = nzbget.NZBGet()
                nzstat = nz.processor(item)
            else:
                logger.warn('There are no NZB Completed Download handlers enabled. Not sending item to completed download handling...')
                break
            if any([nzstat['status'] == 'file not found', nzstat['status'] == 'double-pp']):
                logger.warn('Unable to complete post-processing call due to not finding file in the location provided. [%s]' % item)
            elif nzstat['status'] is False:
                # downloader has no record (yet) - requeue and retry later.
                logger.info('Could not find NZBID %s in the downloader\'s queue. I will requeue this item for post-processing...' % item['NZBID'])
                time.sleep(5)
                mylar.NZB_QUEUE.put(item)
            elif nzstat['status'] is True:
                if nzstat['failed'] is False:
                    logger.info('File successfully downloaded - now initiating completed downloading handling.')
                else:
                    logger.info('File failed either due to being corrupt or incomplete - now initiating completed failed downloading handling.')
                try:
                    mylar.PP_QUEUE.put({'nzb_name': nzstat['name'],
                                        'nzb_folder': nzstat['location'],
                                        'failed': nzstat['failed'],
                                        'issueid': nzstat['issueid'],
                                        'comicid': nzstat['comicid'],
                                        'apicall': nzstat['apicall'],
                                        'ddl': False})
                    #cc = process.Process(nzstat['name'], nzstat['location'], failed=nzstat['failed'])
                    #nzpp = cc.post_process()
                except Exception as e:
                    logger.info('process error: %s' % e)
        else:
            time.sleep(5)
def script_env(mode, vars):
    """Run a user-configured hook script with release details in its environment.

    Parameters:
        mode -- which hook to run: 'on-snatch', 'post-process', or 'pre-process'
                (the comment below mentions pre/post-postprocess; the code keys
                on the strings shown here).
        vars -- dict of values to export as mylar_* environment variables.
                NOTE: shadows the 'vars' builtin (pre-existing naming).

    Returns True when the script was launched, False when launching raised
    OSError. Snatch details are passed via environment, not argv.
    """
    #mode = on-snatch, pre-postprocess, post-postprocess
    #var = dictionary containing variables to pass
    mylar_env = os.environ.copy()
    if mode == 'on-snatch':
        runscript = mylar.CONFIG.SNATCH_SCRIPT
        if 'torrentinfo' in vars:
            # export whichever torrent fields the client supplied.
            if 'hash' in vars['torrentinfo']:
                mylar_env['mylar_release_hash'] = vars['torrentinfo']['hash']
            if 'torrent_filename' in vars['torrentinfo']:
                mylar_env['mylar_torrent_filename'] = vars['torrentinfo']['torrent_filename']
            if 'name' in vars['torrentinfo']:
                mylar_env['mylar_release_name'] = vars['torrentinfo']['name']
            if 'folder' in vars['torrentinfo']:
                mylar_env['mylar_release_folder'] = vars['torrentinfo']['folder']
            if 'label' in vars['torrentinfo']:
                mylar_env['mylar_release_label'] = vars['torrentinfo']['label']
            if 'total_filesize' in vars['torrentinfo']:
                mylar_env['mylar_release_filesize'] = str(vars['torrentinfo']['total_filesize'])
            if 'time_started' in vars['torrentinfo']:
                mylar_env['mylar_release_start'] = str(vars['torrentinfo']['time_started'])
            if 'filepath' in vars['torrentinfo']:
                mylar_env['mylar_torrent_file'] = str(vars['torrentinfo']['filepath'])
            else:
                try:
                    mylar_env['mylar_release_files'] = '|'.join(vars['torrentinfo']['files'])
                except TypeError:
                    # files may be non-string entries; serialize then join.
                    mylar_env['mylar_release_files'] = '|'.join(json.dumps(vars['torrentinfo']['files']))
        elif 'nzbinfo' in vars:
            mylar_env['mylar_release_id'] = vars['nzbinfo']['id']
            if 'client_id' in vars['nzbinfo']:
                mylar_env['mylar_client_id'] = vars['nzbinfo']['client_id']
            mylar_env['mylar_release_nzbname'] = vars['nzbinfo']['nzbname']
            mylar_env['mylar_release_link'] = vars['nzbinfo']['link']
            mylar_env['mylar_release_nzbpath'] = vars['nzbinfo']['nzbpath']
            if 'blackhole' in vars['nzbinfo']:
                mylar_env['mylar_release_blackhole'] = vars['nzbinfo']['blackhole']
        mylar_env['mylar_release_provider'] = vars['provider']
        if 'comicinfo' in vars:
            # comicid/issueid may be absent or None for one-offs; export 'None'
            # explicitly because the environment cannot carry null values.
            try:
                if vars['comicinfo']['comicid'] is not None:
                    mylar_env['mylar_comicid'] = vars['comicinfo']['comicid'] #comicid/issueid are unknown for one-offs (should be fixable tho)
                else:
                    mylar_env['mylar_comicid'] = 'None'
            except:
                pass
            try:
                if vars['comicinfo']['issueid'] is not None:
                    mylar_env['mylar_issueid'] = vars['comicinfo']['issueid']
                else:
                    mylar_env['mylar_issueid'] = 'None'
            except:
                pass
            try:
                if vars['comicinfo']['issuearcid'] is not None:
                    mylar_env['mylar_issuearcid'] = vars['comicinfo']['issuearcid']
                else:
                    mylar_env['mylar_issuearcid'] = 'None'
            except:
                pass
            mylar_env['mylar_comicname'] = vars['comicinfo']['comicname']
            mylar_env['mylar_issuenumber'] = str(vars['comicinfo']['issuenumber'])
            try:
                mylar_env['mylar_comicvolume'] = str(vars['comicinfo']['volume'])
            except:
                pass
            try:
                mylar_env['mylar_seriesyear'] = str(vars['comicinfo']['seriesyear'])
            except:
                pass
            try:
                mylar_env['mylar_issuedate'] = str(vars['comicinfo']['issuedate'])
            except:
                pass
        mylar_env['mylar_release_pack'] = str(vars['pack'])
        if vars['pack'] is True:
            if vars['pack_numbers'] is not None:
                mylar_env['mylar_release_pack_numbers'] = vars['pack_numbers']
            if vars['pack_issuelist'] is not None:
                mylar_env['mylar_release_pack_issuelist'] = vars['pack_issuelist']
        mylar_env['mylar_method'] = vars['method']
        mylar_env['mylar_client'] = vars['clientmode']
    elif mode == 'post-process':
        #to-do
        runscript = mylar.CONFIG.EXTRA_SCRIPTS
    elif mode == 'pre-process':
        #to-do
        runscript = mylar.CONFIG.PRE_SCRIPTS

    logger.fdebug('Initiating ' + mode + ' script detection.')
    # Peek at the shebang so .sh scripts run under their declared interpreter.
    with open(runscript, 'r') as f:
        first_line = f.readline()
    if runscript.endswith('.sh'):
        shell_cmd = re.sub('#!', '', first_line)
        if shell_cmd == '' or shell_cmd is None:
            shell_cmd = '/bin/bash'
    else:
        shell_cmd = sys.executable
    curScriptName = shell_cmd + ' ' + runscript.decode("string_escape")
    logger.fdebug("snatch script detected...enabling: " + str(curScriptName))
    script_cmd = shlex.split(curScriptName)
    logger.fdebug(u"Executing command " +str(script_cmd))
    try:
        subprocess.call(script_cmd, env=dict(mylar_env))
    except OSError, e:
        logger.warn(u"Unable to run extra_script: " + str(script_cmd))
        return False
    else:
        return True
def get_the_hash(filepath):
    """Compute the info-hash of a .torrent file.

    Decodes the bencoded metadata at *filepath* and SHA1-hashes the re-encoded
    'info' dictionary -- the value trackers and clients use as the torrent's
    identity -- returning it as an upper-cased hex digest.

    Returns:
        dict with a single key 'hash'.
    Raises:
        KeyError if the file has no 'info' dictionary; whatever bencode raises
        on malformed input.
    """
    import bencode

    # BUGFIX: the original opened the file and never closed it; a context
    # manager guarantees the handle is released even if decoding fails.
    with open(filepath, "rb") as torrent_file:
        metainfo = bencode.decode(torrent_file.read())
    info = metainfo['info']
    thehash = hashlib.sha1(bencode.encode(info)).hexdigest().upper()
    logger.info('Hash of file : ' + thehash)
    return {'hash': thehash}
def disable_provider(site, newznab=False):
    """Switch off a search provider in the live config after it stopped responding.

    For newznab providers the enabled flag (index 5 of the config tuple) is
    zeroed; the built-in providers are toggled off directly.
    """
    logger.info('Temporarily disabling %s due to not responding' % site)
    if newznab is True:
        rebuilt = []
        for entry in mylar.CONFIG.EXTRA_NEWZNABS:
            fields = list(entry)
            if fields[0] == site:
                fields[5] = '0'  # position 5 holds the enabled flag
            rebuilt.append(tuple(fields))
        mylar.CONFIG.EXTRA_NEWZNABS = rebuilt
    elif site == 'nzbsu':
        mylar.CONFIG.NZBSU = False
    elif site == 'dognzb':
        mylar.CONFIG.DOGNZB = False
    elif site == 'experimental':
        mylar.CONFIG.EXPERIMENTAL = False
    elif site == '32P':
        mylar.CONFIG.ENABLE_32P = False
def date_conversion(originaldate):
    """Return the age of *originaldate* in hours, as a float.

    *originaldate* is a local-time string in '%Y-%m-%d %H:%M:%S' form; the
    result is the absolute difference from now (microseconds ignored).
    """
    then = datetime.datetime.strptime(originaldate, "%Y-%m-%d %H:%M:%S")
    delta = abs(datetime.datetime.now() - then)
    # days*86400 + seconds mirrors the original arithmetic (drops microseconds).
    return (delta.days * 86400 + delta.seconds) / 3600.0
def job_management(write=False, job=None, last_run_completed=None, current_run=None, status=None):
    """Synchronize APScheduler job state with the jobhistory table and globals.

    Parameters:
        write              -- False: just report; True: persist to jobhistory.
        job                -- None to refresh every scheduled job, or a specific
                              job name ('DB Updater', 'Auto-Search', 'RSS Feeds',
                              'Weekly Pullist', 'Check Version', 'Folder Monitor').
        last_run_completed -- timestamp of a finished run; triggers rescheduling.
        current_run        -- timestamp of a run that just started.
        status             -- status string to record for the job.

    Returns (when write is False) either the dict of last-run monitors or the
    list of per-job result dicts. NOTE(review): calling with write=False and a
    specific job would hit an unbound 'monitors' -- confirm callers never do that.
    """
    jobresults = []
    #import db
    myDB = db.DBConnection()
    if job is None:
        # Full refresh: seed defaults, fold in jobhistory rows, then walk the
        # live scheduler to build jobresults.
        dbupdate_newstatus = 'Waiting'
        dbupdate_nextrun = None
        if mylar.CONFIG.ENABLE_RSS is True:
            rss_newstatus = 'Waiting'
        else:
            rss_newstatus = 'Paused'
        rss_nextrun = None
        weekly_newstatus = 'Waiting'
        weekly_nextrun = None
        search_newstatus = 'Waiting'
        search_nextrun = None
        version_newstatus = 'Waiting'
        version_nextrun = None
        if mylar.CONFIG.ENABLE_CHECK_FOLDER is True:
            monitor_newstatus = 'Waiting'
        else:
            monitor_newstatus = 'Paused'
        monitor_nextrun = None

        job_info = myDB.select('SELECT DISTINCT * FROM jobhistory')
        #set default values if nothing has been ran yet
        for ji in job_info:
            if 'update' in ji['JobName'].lower():
                if mylar.SCHED_DBUPDATE_LAST is None:
                    mylar.SCHED_DBUPDATE_LAST = ji['prev_run_timestamp']
                dbupdate_newstatus = ji['status']
                mylar.UPDATER_STATUS = dbupdate_newstatus
                dbupdate_nextrun = ji['next_run_timestamp']
            elif 'search' in ji['JobName'].lower():
                if mylar.SCHED_SEARCH_LAST is None:
                    mylar.SCHED_SEARCH_LAST = ji['prev_run_timestamp']
                search_newstatus = ji['status']
                mylar.SEARCH_STATUS = search_newstatus
                search_nextrun = ji['next_run_timestamp']
            elif 'rss' in ji['JobName'].lower():
                if mylar.SCHED_RSS_LAST is None:
                    mylar.SCHED_RSS_LAST = ji['prev_run_timestamp']
                rss_newstatus = ji['status']
                mylar.RSS_STATUS = rss_newstatus
                rss_nextrun = ji['next_run_timestamp']
            elif 'weekly' in ji['JobName'].lower():
                if mylar.SCHED_WEEKLY_LAST is None:
                    mylar.SCHED_WEEKLY_LAST = ji['prev_run_timestamp']
                weekly_newstatus = ji['status']
                mylar.WEEKLY_STATUS = weekly_newstatus
                weekly_nextrun = ji['next_run_timestamp']
            elif 'version' in ji['JobName'].lower():
                if mylar.SCHED_VERSION_LAST is None:
                    mylar.SCHED_VERSION_LAST = ji['prev_run_timestamp']
                version_newstatus = ji['status']
                mylar.VERSION_STATUS = version_newstatus
                version_nextrun = ji['next_run_timestamp']
            elif 'monitor' in ji['JobName'].lower():
                if mylar.SCHED_MONITOR_LAST is None:
                    mylar.SCHED_MONITOR_LAST = ji['prev_run_timestamp']
                monitor_newstatus = ji['status']
                mylar.MONITOR_STATUS = monitor_newstatus
                monitor_nextrun = ji['next_run_timestamp']

        monitors = {'weekly': mylar.SCHED_WEEKLY_LAST,
                    'monitor': mylar.SCHED_MONITOR_LAST,
                    'search': mylar.SCHED_SEARCH_LAST,
                    'dbupdater': mylar.SCHED_DBUPDATE_LAST,
                    'version': mylar.SCHED_VERSION_LAST,
                    'rss': mylar.SCHED_RSS_LAST}

        #this is for initial startup
        for jb in mylar.SCHED.get_jobs():
            #logger.fdebug('jb: %s' % jb)
            jobinfo = str(jb)
            # NOTE(review): 'Status Updater' contains capitals but jobinfo has been
            # lower-cased -- this membership test can never be True. Confirm intent.
            if 'Status Updater' in jobinfo.lower():
                continue
            elif 'update' in jobinfo.lower():
                prev_run_timestamp = mylar.SCHED_DBUPDATE_LAST
                newstatus = dbupdate_newstatus
                mylar.UPDATER_STATUS = newstatus
            elif 'search' in jobinfo.lower():
                prev_run_timestamp = mylar.SCHED_SEARCH_LAST
                newstatus = search_newstatus
                mylar.SEARCH_STATUS = newstatus
            elif 'rss' in jobinfo.lower():
                prev_run_timestamp = mylar.SCHED_RSS_LAST
                newstatus = rss_newstatus
                mylar.RSS_STATUS = newstatus
            elif 'weekly' in jobinfo.lower():
                prev_run_timestamp = mylar.SCHED_WEEKLY_LAST
                newstatus = weekly_newstatus
                mylar.WEEKLY_STATUS = newstatus
            elif 'version' in jobinfo.lower():
                prev_run_timestamp = mylar.SCHED_VERSION_LAST
                newstatus = version_newstatus
                mylar.VERSION_STATUS = newstatus
            elif 'monitor' in jobinfo.lower():
                prev_run_timestamp = mylar.SCHED_MONITOR_LAST
                newstatus = monitor_newstatus
                mylar.MONITOR_STATUS = newstatus

            # Scrape the job name and next-run time out of the job's repr().
            jobname = jobinfo[:jobinfo.find('(')-1].strip()
            #logger.fdebug('jobinfo: %s' % jobinfo)
            try:
                jobtimetmp = jobinfo.split('at: ')[1].split('.')[0].strip()
            except:
                continue
            #logger.fdebug('jobtimetmp: %s' % jobtimetmp)
            jobtime = float(calendar.timegm(datetime.datetime.strptime(jobtimetmp[:-1], '%Y-%m-%d %H:%M:%S %Z').timetuple()))
            #logger.fdebug('jobtime: %s' % jobtime)

            if prev_run_timestamp is not None:
                prev_run_time_utc = datetime.datetime.utcfromtimestamp(float(prev_run_timestamp))
                prev_run_time_utc = prev_run_time_utc.replace(microsecond=0)
            else:
                prev_run_time_utc = None
            #logger.fdebug('prev_run_time: %s' % prev_run_timestamp)
            #logger.fdebug('prev_run_time type: %s' % type(prev_run_timestamp))
            jobresults.append({'jobname': jobname,
                               'next_run_datetime': datetime.datetime.utcfromtimestamp(jobtime),
                               'prev_run_datetime': prev_run_time_utc,
                               'next_run_timestamp': jobtime,
                               'prev_run_timestamp': prev_run_timestamp,
                               'status': newstatus})

    if not write:
        if len(jobresults) == 0:
            return monitors
        else:
            return jobresults
    else:
        if job is None:
            # Persist the freshly collected state for every job.
            for x in jobresults:
                updateCtrl = {'JobName': x['jobname']}
                updateVals = {'next_run_timestamp': x['next_run_timestamp'],
                              'prev_run_timestamp': x['prev_run_timestamp'],
                              'next_run_datetime': x['next_run_datetime'],
                              'prev_run_datetime': x['prev_run_datetime'],
                              'status': x['status']}
                myDB.upsert('jobhistory', updateVals, updateCtrl)
        else:
            #logger.fdebug('Updating info - job: %s' % job)
            #logger.fdebug('Updating info - last run: %s' % last_run_completed)
            #logger.fdebug('Updating info - status: %s' % status)
            updateCtrl = {'JobName': job}
            if current_run is not None:
                # Job just started: stamp the start time and status.
                pr_datetime = datetime.datetime.utcfromtimestamp(current_run)
                pr_datetime = pr_datetime.replace(microsecond=0)
                updateVals = {'prev_run_timestamp': current_run,
                              'prev_run_datetime': pr_datetime,
                              'status': status}
                #logger.info('updateVals: %s' % updateVals)
            elif last_run_completed is not None:
                # Job finished: compute its next run from the per-job interval
                # and push the new schedule into APScheduler.
                if any([job == 'DB Updater', job == 'Auto-Search', job == 'RSS Feeds', job == 'Weekly Pullist', job == 'Check Version', job == 'Folder Monitor']):
                    jobstore = None
                    for jbst in mylar.SCHED.get_jobs():
                        jb = str(jbst)
                        # NOTE(review): same always-False capitals-in-lowered-string
                        # test as above.
                        if 'Status Updater' in jb.lower():
                            continue
                        elif job == 'DB Updater' and 'update' in jb.lower():
                            nextrun_stamp = utctimestamp() + (int(mylar.DBUPDATE_INTERVAL) * 60)
                            jobstore = jbst
                            break
                        elif job == 'Auto-Search' and 'search' in jb.lower():
                            nextrun_stamp = utctimestamp() + (mylar.CONFIG.SEARCH_INTERVAL * 60)
                            jobstore = jbst
                            break
                        elif job == 'RSS Feeds' and 'rss' in jb.lower():
                            nextrun_stamp = utctimestamp() + (int(mylar.CONFIG.RSS_CHECKINTERVAL) * 60)
                            mylar.SCHED_RSS_LAST = last_run_completed
                            jobstore = jbst
                            break
                        elif job == 'Weekly Pullist' and 'weekly' in jb.lower():
                            if mylar.CONFIG.ALT_PULL == 2:
                                wkt = 4
                            else:
                                wkt = 24
                            nextrun_stamp = utctimestamp() + (wkt * 60 * 60)
                            mylar.SCHED_WEEKLY_LAST = last_run_completed
                            jobstore = jbst
                            break
                        elif job == 'Check Version' and 'version' in jb.lower():
                            nextrun_stamp = utctimestamp() + (mylar.CONFIG.CHECK_GITHUB_INTERVAL * 60)
                            jobstore = jbst
                            break
                        elif job == 'Folder Monitor' and 'monitor' in jb.lower():
                            nextrun_stamp = utctimestamp() + (int(mylar.CONFIG.DOWNLOAD_SCAN_INTERVAL) * 60)
                            jobstore = jbst
                            break
                    if jobstore is not None:
                        nextrun_date = datetime.datetime.utcfromtimestamp(nextrun_stamp)
                        jobstore.modify(next_run_time=nextrun_date)
                        nextrun_date = nextrun_date.replace(microsecond=0)
                    else:
                        # if the rss is enabled after startup, we have to re-set it up...
                        nextrun_stamp = utctimestamp() + (int(mylar.CONFIG.RSS_CHECKINTERVAL) * 60)
                        nextrun_date = datetime.datetime.utcfromtimestamp(nextrun_stamp)
                        mylar.SCHED_RSS_LAST = last_run_completed
                logger.fdebug('ReScheduled job: %s to %s' % (job, nextrun_date))
                lastrun_comp = datetime.datetime.utcfromtimestamp(last_run_completed)
                lastrun_comp = lastrun_comp.replace(microsecond=0)
                #if it's completed, then update the last run time to the ending time of the job
                updateVals = {'prev_run_timestamp': last_run_completed,
                              'prev_run_datetime': lastrun_comp,
                              'last_run_completed': 'True',
                              'next_run_timestamp': nextrun_stamp,
                              'next_run_datetime': nextrun_date,
                              'status': status}
            #logger.fdebug('Job update for %s: %s' % (updateCtrl, updateVals))
            myDB.upsert('jobhistory', updateVals, updateCtrl)
def stupidchk():
    """Refresh the cached global comic counts from the database.

    COUNT_COMICS holds the number of Active series; EN_OOMICS holds those
    still Loading or Paused.
    """
    #import db
    myDB = db.DBConnection()
    active = myDB.select("SELECT COUNT(*) FROM comics WHERE Status='Active'")
    pending = myDB.select("SELECT COUNT(*) FROM comics WHERE Status='Loading' OR Status='Paused'")
    mylar.COUNT_COMICS = active[0][0]
    mylar.EN_OOMICS = pending[0][0]
def newznab_test(name, host, ssl, apikey):
    """Fire a one-shot 't=search' request at a newznab provider to validate it.

    Parameters:
        name   -- provider display name (unused in the request itself).
        host   -- base URL of the provider; '/api' is appended.
        ssl    -- certificate-verification toggle.
                  NOTE(review): passed through bool(); a string '0' would still
                  be truthy -- confirm the config's type.
        apikey -- provider API key.

    Returns True on a clean response (HTTP 200, no newznab <error> element),
    False on a reported error or bad status, None when the connection fails.
    """
    from xml.dom.minidom import parseString

    params = {'t': 'search',
              'apikey': apikey,
              'o': 'xml'}
    # BUGFIX: the original compared host[:-1] (everything BUT the last char)
    # against '/', so the trailing-slash case was mis-detected. Test the actual
    # trailing character so 'http://x/' -> 'http://x/api' (not 'http://x//api').
    if host.endswith('/'):
        host = host + 'api'
    else:
        host = host + '/api'
    headers = {'User-Agent': str(mylar.USER_AGENT)}
    logger.info('host: %s' % host)
    try:
        r = requests.get(host, params=params, headers=headers, verify=bool(ssl))
    except Exception as e:
        logger.warn('Unable to connect: %s' % e)
        return

    # BUGFIX: on a parse failure the original fell through and read the unbound
    # 'data' variable; the resulting NameError was swallowed by the inner
    # except. Make that flow explicit: unparseable body -> status-code verdict.
    data = None
    try:
        data = parseString(r.content)
    except Exception as e:
        logger.warn('[WARNING] Error attempting to test: %s' % e)

    if data is not None:
        try:
            err_node = data.getElementsByTagName('error')[0]
            code = err_node.attributes['code'].value
            description = err_node.attributes['description'].value
        except Exception:
            pass  # no <error> element: fall through to the status-code verdict
        else:
            logger.info('[ERROR:%s] - %s' % (code, description))
            return False

    logger.info('Connected - Status code returned: %s' % r.status_code)
    if r.status_code == 200:
        return True
    logger.warn('Received response - Status code returned: %s' % r.status_code)
    return False
def torznab_test(name, host, ssl, apikey):
    """Fire a one-shot 't=search' request at a torznab provider to validate it.

    Parameters mirror newznab_test; the host is used as-is (minus a trailing
    slash) since torznab endpoints are configured with their full API path.

    Returns True on a clean response (HTTP 200, no <error> element), False on
    a reported error or bad status, None when the connection itself fails.
    """
    from xml.dom.minidom import parseString

    params = {'t': 'search',
              'apikey': apikey,
              'o': 'xml'}
    if host[-1:] == '/':
        host = host[:-1]
    headers = {'User-Agent': str(mylar.USER_AGENT)}
    logger.info('host: %s' % host)
    try:
        r = requests.get(host, params=params, headers=headers, verify=bool(ssl))
    except Exception as e:
        logger.warn('Unable to connect: %s' % e)
        return

    # BUGFIX: as in newznab_test, the original read the unbound 'data' after a
    # parse failure (NameError silently caught by the inner except). Make the
    # unparseable-body path explicit: fall through to the status-code verdict.
    data = None
    try:
        data = parseString(r.content)
    except Exception as e:
        logger.warn('[WARNING] Error attempting to test: %s' % e)

    if data is not None:
        try:
            err_node = data.getElementsByTagName('error')[0]
            code = err_node.attributes['code'].value
            description = err_node.attributes['description'].value
        except Exception:
            pass  # no <error> element: fall through to the status-code verdict
        else:
            logger.info('[ERROR:%s] - %s' % (code, description))
            return False

    logger.info('Connected - Status code returned: %s' % r.status_code)
    if r.status_code == 200:
        return True
    logger.warn('Received response - Status code returned: %s' % r.status_code)
    return False
def get_free_space(folder):
    """Check that *folder* has at least ~100 MB of free disk space.

    Uses GetDiskFreeSpaceExW on Windows and os.statvfs elsewhere. Logs the
    available amount; returns True when above the threshold, False otherwise.
    """
    min_threshold = 100000000 #threshold for minimum amount of freespace available (#100mb)
    if platform.system() == "Windows":
        avail = ctypes.c_ulonglong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(avail))
        dst_freesize = avail.value
    else:
        stats = os.statvfs(folder)
        dst_freesize = stats.f_bavail * stats.f_frsize
    logger.fdebug('[FREESPACE-CHECK] %s has %s free' % (folder, sizeof_fmt(dst_freesize)))
    if dst_freesize < min_threshold:
        logger.warn('[FREESPACE-CHECK] There is only %s space left on %s' % (dst_freesize, folder))
        return False
    return True
def sizeof_fmt(num, suffix='B'):
    """Render a byte count as a human-readable string using binary prefixes.

    e.g. 2048 -> '2.0KiB'. Values of a yobibyte or more fall through to 'Yi'.
    """
    units = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi')
    value = num
    for prefix in units:
        if abs(value) < 1024.0:
            return "%3.1f%s%s" % (value, prefix, suffix)
        value /= 1024.0
    return "%.1f%s%s" % (value, 'Yi', suffix)
def getImage(comicid, url, issueid=None):
    """Download a series cover image from ComicVine into the cache directory.

    The file is stored as '<comicid>.jpg' under CACHE_DIR. Returns the string
    'retry' when the download failed or looks too small to be a real cover
    (under 10000 bytes), so the caller can try an alternate image; returns
    None implicitly on success.
    """
    if os.path.exists(mylar.CONFIG.CACHE_DIR):
        pass
    else:
        #let's make the dir.
        try:
            os.makedirs(str(mylar.CONFIG.CACHE_DIR))
            logger.info('Cache Directory successfully created at: %s' % mylar.CONFIG.CACHE_DIR)
        except OSError:
            logger.error('Could not create cache dir. Check permissions of cache dir: %s' % mylar.CONFIG.CACHE_DIR)

    coverfile = os.path.join(mylar.CONFIG.CACHE_DIR, str(comicid) + '.jpg')

    #if cover has '+' in url it's malformed, we need to replace '+' with '%20' to retreive properly.

    #new CV API restriction - one api request / second.(probably unecessary here, but it doesn't hurt)
    if mylar.CONFIG.CVAPI_RATE is None or mylar.CONFIG.CVAPI_RATE < 2:
        time.sleep(2)
    else:
        time.sleep(mylar.CONFIG.CVAPI_RATE)

    logger.info('Attempting to retrieve the comic image for series')
    try:
        r = requests.get(url, params=None, stream=True, verify=mylar.CONFIG.CV_VERIFY, headers=mylar.CV_HEADERS)
    except Exception as e:
        logger.warn('[ERROR: %s] Unable to download image from CV URL link: %s' % (e, url))
        coversize = 0
        statuscode = '400'
    else:
        statuscode = str(r.status_code)
        logger.fdebug('comic image retrieval status code: %s' % statuscode)

        if statuscode != '200':
            logger.warn('Unable to download image from CV URL link: %s [Status Code returned: %s]' % (url, statuscode))
            coversize = 0
        else:
            if r.headers.get('Content-Encoding') == 'gzip':
                # NOTE(review): the GzipFile bound to f here is immediately
                # discarded by the rebinding of f in the with-block below, so the
                # decompressed stream is never used -- raw chunks are written
                # either way. Looks like dead/broken gzip handling; confirm.
                buf = StringIO(r.content)
                f = gzip.GzipFile(fileobj=buf)

            with open(coverfile, 'wb') as f:
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk: # filter out keep-alive new chunks
                        f.write(chunk)
                        f.flush()
            statinfo = os.stat(coverfile)
            coversize = statinfo.st_size

    # A tiny file is almost certainly an error page, not a cover image.
    if any([int(coversize) < 10000, statuscode != '200']):
        try:
            if statuscode != '200':
                logger.info('Trying to grab an alternate cover due to problems trying to retrieve the main cover image.')
            else:
                logger.info('Image size invalid [%s bytes] - trying to get alternate cover image.' % coversize)
        except Exception as e:
            logger.info('Image size invalid [%s bytes] - trying to get alternate cover image.' % coversize)

        logger.fdebug('invalid image link is here: %s' % url)

        if os.path.exists(coverfile):
            os.remove(coverfile)
        return 'retry'
def publisherImages(publisher):
    """Return display metadata for a publisher's logo.

    publisher : publisher name as stored on the series.

    Returns a dict with keys 'publisher_image' (relative path or None),
    'publisher_image_alt' (alt text), 'publisher_imageH' and
    'publisher_imageW' (display size as strings). Unknown publishers get a
    None image with alt text 'Nope' and zero dimensions.
    """
    # (logo filename, alt text, height, width) per known publisher name.
    # Replaces a 180-line if/elif chain that repeated the same dict shape.
    logos = {
        'DC Comics': ('logo-dccomics.png', 'DC', '50', '50'),
        'Marvel': ('logo-marvel.jpg', 'Marvel', '50', '100'),
        'Image': ('logo-imagecomics.png', 'Image', '100', '50'),
        'Dark Horse Comics': ('logo-darkhorse.png', 'DarkHorse', '100', '75'),
        'Dark Horse': ('logo-darkhorse.png', 'DarkHorse', '100', '75'),
        'IDW Publishing': ('logo-idwpublish.png', 'IDW', '50', '100'),
        'Icon': ('logo-iconcomics.png', 'Icon', '50', '100'),
        'Red5': ('logo-red5comics.png', 'Red5', '50', '100'),
        'Vertigo': ('logo-vertigo.png', 'Vertigo', '50', '100'),
        'Shadowline': ('logo-shadowline.png', 'Shadowline', '50', '150'),
        'Archie Comics': ('logo-archiecomics.jpg', 'Archie', '75', '75'),
        'Oni Press': ('logo-onipress.jpg', 'Oni Press', '50', '100'),
        'Tokyopop': ('logo-tokyopop.jpg', 'Tokyopop', '100', '50'),
        'Midtown Comics': ('logo-midtowncomics.jpg', 'Midtown', '50', '100'),
        'Boom! Studios': ('logo-boom.jpg', 'Boom!', '50', '100'),
        'Skybound': ('logo-skybound.jpg', 'Skybound', '50', '100'),
        'Dynamite Entertainment': ('logo-dynamite.png', 'Dynamite', '50', '125'),
        'Top Cow': ('logo-topcow.gif', 'Top Cow', '75', '100'),
        'Cartoon Books': ('logo-cartoonbooks.jpg', 'Cartoon Books', '75', '90'),
        'Valiant': ('logo-valiant.png', 'Valiant', '100', '100'),
        'Action Lab': ('logo-actionlabs.png', 'Action Lab', '100', '100'),
        'Zenescope Entertainment': ('logo-zenescope.png', 'Zenescope', '125', '125'),
        '2000 ad': ('logo-2000ad.jpg', '2000 AD', '75', '50'),
        'Aardvark': ('logo-aardvark.png', 'Aardvark', '100', '100'),
        'Abstract Studio': ('logo-abstract.jpg', 'Abstract Studio', '75', '50'),
        'Aftershock Comics': ('logo-aftershock.jpg', 'Aftershock', '100', '75'),
        'Avatar Press': ('logo-avatarpress.jpg', 'Avatar Press', '100', '75'),
        'Benitez Productions': ('logo-benitez.png', 'Benitez', '75', '125'),
        'Boundless Comics': ('logo-boundless.png', 'Boundless', '75', '75'),
        'Darby Pop': ('logo-darbypop.png', 'Darby Pop', '75', '125'),
        'Devil\'s Due': ('logo-devilsdue.png', 'Devil\'s Due', '75', '75'),
        'Joe Books': ('logo-joebooks.png', 'Joe Books', '100', '100'),
        'Titan Comics': ('logo-titan.png', 'Titan', '75', '75'),
        'Viz': ('logo-viz.png', 'Viz', '50', '50'),
        'Warp Graphics': ('logo-warpgraphics.png', 'Warp Graphics', '125', '75'),
        'Wildstorm': ('logo-wildstorm.png', 'Wildstorm', '50', '100'),
    }
    entry = logos.get(publisher)
    if entry is None:
        return {'publisher_image': None,
                'publisher_image_alt': 'Nope',
                'publisher_imageH': '0',
                'publisher_imageW': '0'}
    filename, alt, height, width = entry
    return {'publisher_image': 'interfaces/default/images/publisherlogos/' + filename,
            'publisher_image_alt': alt,
            'publisher_imageH': height,
            'publisher_imageW': width}
def lookupthebitches(filelist, folder, nzbname, nzbid, prov, hash, pulldate):
    """Match files from a 0-day pack against that week's pull list.

    Each successfully parsed file is looked up in the weekly table for the
    week of *pulldate*; any hits are logged to the nzblog and marked found.
    """
    conn = db.DBConnection()
    watchlist = listLibrary()
    # derive the week number / year the pull date falls in
    pull_dt = datetime.datetime.strptime(pulldate, '%Y-%m-%d')
    weeknumber = pull_dt.strftime("%U")
    year = pull_dt.strftime("%Y")

    matches = []
    for entry in filelist:
        filename = re.sub(folder, '', entry).strip()
        parser = mylar.filechecker.FileChecker(justparse=True, file=filename)
        parsedinfo = parser.listFiles()
        if parsedinfo['parse_status'] != 'success':
            continue
        # normalize the dynamic name the same way the weekly table stores it
        dyncheck = re.sub('[\|\s]', '', parsedinfo['dynamic_name'].lower()).strip()
        row = conn.selectone('SELECT * FROM weekly WHERE DynamicName=? AND weeknumber=? AND year=? AND STATUS<>"Downloaded"', [dyncheck, weeknumber, year]).fetchone()
        if row is None:
            logger.fdebug('[%s] unable to match to the pull: %s' % (filename, parsedinfo))
            continue
        logger.fdebug('[%s] found match: %s #%s' % (filename, row['COMIC'], row['ISSUE']))
        matches.append({'comicname': row['COMIC'],
                        'issue': row['ISSUE'],
                        'comicid': row['ComicID'],
                        'issueid': row['IssueID'],
                        'dynamicname': row['DynamicName']})

    for x in matches:
        on_watchlist = x['comicid'] in watchlist
        if not on_watchlist and mylar.CONFIG.PACK_0DAY_WATCHLIST_ONLY is True:
            # configured to ignore pack contents not on the watchlist
            continue
        elif not on_watchlist and mylar.CONFIG.PACK_0DAY_WATCHLIST_ONLY is False:
            oneoff = True
            mode = 'pullwant'
        else:
            oneoff = False
            mode = 'want'
        mylar.updater.nzblog(x['issueid'], nzbname, x['comicname'], id=nzbid, prov=prov, oneoff=oneoff)
        mylar.updater.foundsearch(x['comicid'], x['issueid'], mode=mode, provider=prov, hash=hash)
def DateAddedFix():
    """Backfill a missing DateAdded (set to today) on all Wanted issues and annuals."""
    conn = db.DBConnection()
    today = datetime.datetime.today().strftime('%Y-%m-%d')
    for row in conn.select("SELECT IssueID FROM issues WHERE Status='Wanted' and DateAdded is NULL"):
        conn.upsert("issues", {'DateAdded': today}, {'IssueID': row[0]})
    for row in conn.select("SELECT IssueID FROM annuals WHERE Status='Wanted' and DateAdded is NULL"):
        conn.upsert("annuals", {'DateAdded': today}, {'IssueID': row[0]})
def file_ops(path, dst, arc=False, one_off=False):
    """Copy/move/link *path* to *dst* using the configured file operation.

    path    : source path + filename
    dst     : destination path + filename
    arc     : operation is part of a story arc where the series already
              exists on the watchlist (source file must be kept)
    one_off : destination is the grab-bag dir or a story arc folder

    Returns True on success, False on failure.

    Fixes over the previous version: the Windows mklink calls passed the
    literal words 'dst path' instead of the actual paths; a stray `raise`
    made the Windows softlink copy-fallback unreachable; exception objects
    were concatenated to str (TypeError inside the handler); the Windows
    branch checked mylar.CONFIG.FILE_OPTS instead of the already-resolved
    action_op; and successful Windows operations returned None (falsy).
    """
    # story-arc / one-off grabs use their own configured operation
    if any([one_off, arc]):
        action_op = mylar.CONFIG.ARC_FILEOPS
    else:
        action_op = mylar.CONFIG.FILE_OPTS

    if action_op == 'copy' or (arc is True and any([action_op == 'copy', action_op == 'move'])):
        # arcs must keep the source file in the series directory, so an arc
        # 'move' is deliberately performed as a copy.
        try:
            shutil.copy(path, dst)
        except Exception as e:
            logger.error('[%s] error : %s' % (action_op, e))
            return False
        return True

    elif action_op == 'move':
        try:
            shutil.move(path, dst)
        except Exception as e:
            logger.error('[MOVE] error : %s' % e)
            return False
        return True

    elif any([action_op == 'hardlink', action_op == 'softlink']):
        if 'windows' not in mylar.OS_DETECT.lower():
            if action_op == 'hardlink':
                try:
                    # touch/open the source first to verify it is accessible
                    fd = os.open(path, os.O_RDWR | os.O_CREAT)
                    os.close(fd)
                    os.link(path, dst)
                    logger.info('Created hard link successfully!!')
                except OSError as e:
                    if e.errno == errno.EXDEV:
                        # hardlinks cannot span filesystems - fall back to copy
                        logger.warn('[' + str(e) + '] Hardlinking failure. Could not create hardlink - dropping down to copy mode so that this operation can complete. Intervention is required if you wish to continue using hardlinks.')
                        try:
                            shutil.copy(path, dst)
                            logger.fdebug('Successfully copied file to : ' + dst)
                            return True
                        except Exception as e:
                            logger.error('[COPY] error : %s' % e)
                            return False
                    else:
                        logger.warn('[' + str(e) + '] Hardlinking failure. Could not create hardlink - Intervention is required if you wish to continue using hardlinks.')
                        return False
                # sanity-check the link count on the destination
                hardlinks = os.lstat(dst).st_nlink
                if hardlinks > 1:
                    logger.info('Created hard link [' + str(hardlinks) + '] successfully!! (' + dst + ')')
                else:
                    logger.warn('Hardlink cannot be verified. You should probably verify that it is created properly.')
                return True

            elif action_op == 'softlink':
                try:
                    if not arc:
                        # move the file, then symlink from the original
                        # location (series dir) to the new one
                        shutil.move(path, dst)
                        if os.path.lexists(path):
                            os.remove(path)
                        os.symlink(dst, path)
                        logger.fdebug('Successfully created softlink [' + dst + ' --> ' + path + ']')
                    else:
                        # arcs keep the source; the arc dir gets a link back to it
                        os.symlink(path, dst)
                        logger.fdebug('Successfully created softlink [' + path + ' --> ' + dst + ']')
                except OSError as e:
                    logger.warn('[' + str(e) + '] Unable to create symlink. Dropping down to copy mode so that this operation can continue.')
                    try:
                        shutil.copy(dst, path)
                        logger.fdebug('Successfully copied file [' + dst + ' --> ' + path + ']')
                    except Exception as e:
                        logger.error('[COPY] error : %s' % e)
                        return False
                return True

        else:
            # Windows: use cmd's mklink builtin via os.system.
            # softlink here means a shortcut-style symlink; hardlinks MUST
            # reside on the same drive as the original. Junctions are not used.
            # NOTE(review): os.system with interpolated paths is shell-sensitive;
            # paths are quoted, but a shell-free API would be safer long-term.
            import lib.winlink as winlink
            if action_op == 'hardlink':
                try:
                    os.system('mklink /H "%s" "%s"' % (dst, path))
                    logger.fdebug('Successfully hardlinked file [' + dst + ' --> ' + path + ']')
                    return True
                except OSError as e:
                    logger.warn('[' + str(e) + '] Unable to create symlink. Dropping down to copy mode so that this operation can continue.')
                    try:
                        shutil.copy(dst, path)
                        logger.fdebug('Successfully copied file [' + dst + ' --> ' + path + ']')
                        return True
                    except Exception:
                        return False
            elif action_op == 'softlink':  # ie. shortcut.
                try:
                    shutil.move(path, dst)
                    if os.path.lexists(path):
                        os.remove(path)
                    os.system('mklink "%s" "%s"' % (dst, path))
                    logger.fdebug('Successfully created symlink [' + dst + ' --> ' + path + ']')
                    return True
                except OSError as e:
                    logger.warn('[' + str(e) + '] Unable to create softlink. Dropping down to copy mode so that this operation can continue.')
                    try:
                        shutil.copy(dst, path)
                        logger.fdebug('Successfully copied file [' + dst + ' --> ' + path + ']')
                        return True
                    except Exception:
                        return False
            else:
                return False

    else:
        return False
from threading import Thread


class ThreadWithReturnValue(Thread):
    """Thread subclass whose join() returns the target's return value.

    The previous implementation reached into Python-2-only name-mangled
    Thread internals (_Thread__target etc.) and passed the removed
    ``Verbose`` argument to Thread.__init__; this version keeps its own
    references so it works on both Python 2 and 3. ``Verbose`` is still
    accepted (and ignored) for call-site compatibility.
    """

    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs=None, Verbose=None):
        # kwargs default changed from a shared mutable {} to None
        call_kwargs = {} if kwargs is None else kwargs
        Thread.__init__(self, group=group, target=target, name=name,
                        args=args, kwargs=call_kwargs)
        # own copies of the callable and arguments so run() does not depend
        # on Thread's private attributes
        self._call_target = target
        self._call_args = args
        self._call_kwargs = call_kwargs
        self._return = None  # target's return value, captured by run()

    def run(self):
        """Execute the target and stash its return value."""
        if self._call_target is not None:
            self._return = self._call_target(*self._call_args, **self._call_kwargs)

    def join(self, timeout=None):
        """Wait for the thread to finish (optionally up to *timeout* seconds)
        and return the target's result (None if the target hasn't finished)."""
        Thread.join(self, timeout)
        return self._return
|
migrations | 0213_deprecated_old_tags | # Generated by Django 3.2.5 on 2022-02-17 18:11
import django.contrib.postgres.fields
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Deprecates the free-form ArrayField tags on Dashboard/Insight in favour
    # of the TaggedItem join model: the old columns are kept but renamed to
    # "deprecated_tags", and TaggedItem gains nullable FKs to dashboard,
    # insight, event definition and property definition (previously it only
    # pointed at actions), with a constraint enforcing exactly one non-null
    # relation per row.

    dependencies = [
        ("posthog", "0212_alter_persondistinctid_team"),
    ]

    operations = [
        # keep the legacy tag data around under a deprecated_* name
        migrations.RenameField(
            model_name="dashboard", old_name="tags", new_name="deprecated_tags"
        ),
        migrations.RenameField(
            model_name="insight", old_name="tags", new_name="deprecated_tags"
        ),
        # re-declare the renamed fields (nullable array of short strings)
        migrations.AlterField(
            model_name="dashboard",
            name="deprecated_tags",
            field=django.contrib.postgres.fields.ArrayField(
                base_field=models.CharField(max_length=32),
                blank=True,
                default=list,
                null=True,
                size=None,
            ),
        ),
        migrations.AlterField(
            model_name="insight",
            name="deprecated_tags",
            field=django.contrib.postgres.fields.ArrayField(
                base_field=models.CharField(max_length=32),
                blank=True,
                default=list,
                null=True,
                size=None,
            ),
        ),
        # dropped here and re-added below, extended to cover the new FKs
        migrations.RemoveConstraint(
            model_name="taggeditem",
            name="exactly_one_related_object",
        ),
        migrations.AddField(
            model_name="taggeditem",
            name="dashboard",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="tagged_items",
                to="posthog.dashboard",
            ),
        ),
        migrations.AddField(
            model_name="taggeditem",
            name="event_definition",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="tagged_items",
                to="posthog.eventdefinition",
            ),
        ),
        migrations.AddField(
            model_name="taggeditem",
            name="insight",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="tagged_items",
                to="posthog.insight",
            ),
        ),
        migrations.AddField(
            model_name="taggeditem",
            name="property_definition",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="tagged_items",
                to="posthog.propertydefinition",
            ),
        ),
        # a given (tag, related-objects) combination may only appear once
        migrations.AlterUniqueTogether(
            name="taggeditem",
            unique_together={
                (
                    "tag",
                    "dashboard",
                    "insight",
                    "event_definition",
                    "property_definition",
                    "action",
                )
            },
        ),
        # exactly one of the five FK columns must be non-null on each row
        migrations.AddConstraint(
            model_name="taggeditem",
            constraint=models.CheckConstraint(
                check=models.Q(
                    models.Q(
                        ("dashboard__isnull", False),
                        ("insight__isnull", True),
                        ("event_definition__isnull", True),
                        ("property_definition__isnull", True),
                        ("action__isnull", True),
                    ),
                    models.Q(
                        ("dashboard__isnull", True),
                        ("insight__isnull", False),
                        ("event_definition__isnull", True),
                        ("property_definition__isnull", True),
                        ("action__isnull", True),
                    ),
                    models.Q(
                        ("dashboard__isnull", True),
                        ("insight__isnull", True),
                        ("event_definition__isnull", False),
                        ("property_definition__isnull", True),
                        ("action__isnull", True),
                    ),
                    models.Q(
                        ("dashboard__isnull", True),
                        ("insight__isnull", True),
                        ("event_definition__isnull", True),
                        ("property_definition__isnull", False),
                        ("action__isnull", True),
                    ),
                    models.Q(
                        ("dashboard__isnull", True),
                        ("insight__isnull", True),
                        ("event_definition__isnull", True),
                        ("property_definition__isnull", True),
                        ("action__isnull", False),
                    ),
                    _connector="OR",
                ),
                name="exactly_one_related_object",
            ),
        ),
    ]
|
widgets | peak_meter | from sglib.lib import util
from sglib.lib.translate import _
from sglib.math import clip_min, lin_to_db
from sglib.models import theme
from sgui.sgqt import *
PEAK_GRADIENT_CACHE = {}
def peak_meter_gradient(a_height):
    """Return a cached vertical QLinearGradient (theme colors) for a meter of *a_height* px."""
    gradient = PEAK_GRADIENT_CACHE.get(a_height)
    if gradient is None:
        gradient = QLinearGradient(0.0, 0.0, 0.0, a_height)
        # apply the theme's configured color stops top-to-bottom
        for stop in theme.SYSTEM_COLORS.widgets.peak_meter.stops:
            gradient.setColorAt(stop.pos, QColor(stop.color))
        PEAK_GRADIENT_CACHE[a_height] = gradient
    return gradient
class peak_meter:
    """Stereo peak meter widget.

    Draws two vertical bars (left/right channel) on a plain QWidget whose
    paintEvent/mousePressEvent are monkey-patched onto this instance's
    methods. Values are linear amplitudes in [0.0, 1.0]; clicking the
    widget resets the held peak shown in the tooltip.
    """
    def __init__(
        self,
        a_width=14,
        a_text=False,
        invert=False,
        brush=None,
    ):
        # a_text: draw dB scale labels over the bars
        # invert: draw 1.0 - value instead of value
        # brush:  fixed fill brush; falls back to the cached theme gradient when None
        self.text = a_text
        self.widget = QWidget()
        self.widget.setFixedWidth(a_width)
        self.values = None
        self.set_value([0.0, 0.0])
        self.widget.paintEvent = self.paint_event
        self.high = 0.0  # highest linear value seen since last reset
        self.set_tooltip()
        self.widget.mousePressEvent = self.reset_high
        self.white_pen = QPen(QtCore.Qt.GlobalColor.white, 1.0)
        self.invert = invert
        self.brush = brush

    def set_value(self, a_vals):
        """Update the displayed [left, right] values; repaint only on change."""
        f_vals = [float(x) for x in a_vals]
        if f_vals != self.values:
            self.values = f_vals
            self.widget.update()

    def reset_high(self, a_val=None):
        """Mouse-press handler: clear the held peak and refresh the tooltip."""
        self.high = 0.0
        self.set_tooltip()

    def set_tooltip(self):
        """Show the held peak converted to dB (-100 dB stands in for silence)."""
        if self.high == 0:
            f_val = -100.0
        else:
            f_val = round(lin_to_db(self.high), 1)
        self.widget.setToolTip(_("Peak {}dB\nClick with mouse to reset").format(f_val))

    def paint_event(self, a_ev):
        """Paint both channel bars (and the optional dB scale) onto the widget."""
        p = QPainter(self.widget)
        p.fillRect(self.widget.rect(), QtCore.Qt.GlobalColor.black)
        p.setPen(QtCore.Qt.PenStyle.NoPen)
        f_height = self.widget.height()
        brush = self.brush if self.brush else peak_meter_gradient(f_height)
        p.setBrush(brush)
        f_rect_width = self.widget.width() * 0.5  # half the width per channel
        for f_val, f_i in zip(self.values, range(2)):
            if self.invert:
                f_val = 1.0 - f_val
            if f_val == 0.0:
                # silent channel: draw nothing
                continue
            elif f_val >= 1.0:
                # clipping: fill the whole bar
                f_rect_y = 0.0
                f_rect_height = f_height
            else:
                # map [-29 dB, 0 dB] onto the widget height
                f_db = lin_to_db(f_val)
                f_db = clip_min(f_db, -29.0)
                f_rect_y = f_height * f_db * -0.033333333 # / -30.0
                f_rect_height = f_height - f_rect_y
            if f_val > self.high:
                # remember the loudest value seen for the tooltip
                self.high = f_val
                self.set_tooltip()
            f_rect_x = f_i * f_rect_width
            f_rect = QtCore.QRectF(
                float(f_rect_x),
                float(f_rect_y),
                float(f_rect_width),
                float(f_rect_height),
            )
            p.drawRect(f_rect)
        if self.text:
            # dB scale: one label every 20% of the height, 6 dB apart
            p.setPen(self.white_pen)
            for f_y, f_db in zip(
                range(0, int(f_height), int(f_height * 0.2)), # / 5.0
                range(0, -30, -6),
            ):
                p.drawText(3, f_y, str(-f_db))
|
widgets | popup_menu | import os
from random import choice
from gi.repository import Gdk, Gio, Gtk
from sunflower.plugin_base.monitor import MonitorSignals
class PopupMenu:
"""Popup menu with path related functions."""
def __init__(self, application, plugin):
self._application = application
self._provider = None
self._selected_path = None
self._popover_visible = False
self._popover = Gtk.Popover.new()
self._popover.get_style_context().add_class("menu")
self._popover.set_border_width(5)
self._popover.set_size_request(250, -1)
self._popover.set_modal(True)
self._popover.connect("closed", self.__handle_popover_close)
left_object = application.get_left_object()
self._popover.set_position(
Gtk.PositionType.RIGHT if plugin is left_object else Gtk.PositionType.LEFT
)
# create stack to allow submenus
self._stack = Gtk.Stack.new()
self._stack.set_transition_type(Gtk.StackTransitionType.SLIDE_LEFT_RIGHT)
self._stack.set_vhomogeneous(False)
self._stack.set_interpolate_size(True)
self._popover.add(self._stack)
# main menu box
box = Gtk.VBox.new(False, 0)
self._stack.add_named(box, "main")
# operation items
hbox = Gtk.HBox.new(True, 0)
hbox.get_style_context().add_class("linked")
hbox.get_style_context().add_class("flat")
hbox.set_margin_bottom(5)
box.pack_start(hbox, True, False, 0)
cut_button = Gtk.Button.new_from_icon_name(
"edit-cut-symbolic", Gtk.IconSize.MENU
)
cut_button.connect("clicked", plugin._cut_files_to_clipboard)
hbox.pack_start(cut_button, True, True, 0)
copy_button = Gtk.Button.new_from_icon_name(
"edit-copy-symbolic", Gtk.IconSize.MENU
)
copy_button.connect("clicked", plugin._copy_files_to_clipboard)
hbox.pack_start(copy_button, True, True, 0)
paste_button = Gtk.Button.new_from_icon_name(
"edit-paste-symbolic", Gtk.IconSize.MENU
)
paste_button.connect("clicked", plugin._paste_files_from_clipboard)
hbox.pack_start(paste_button, True, True, 0)
remove_button = Gtk.Button.new_from_icon_name(
"edit-delete-symbolic", Gtk.IconSize.MENU
)
remove_button.connect("clicked", plugin._delete_files)
hbox.pack_start(remove_button, True, True, 0)
rename_button = Gtk.Button.new_from_icon_name(
"document-edit-symbolic", Gtk.IconSize.MENU
)
rename_button.connect("clicked", plugin._rename_file)
hbox.pack_start(rename_button, True, True, 0)
# options for opening path
self._create_menu_item(_("Open"), box, handler=plugin._execute_selected_item)
self._create_menu_item(
_("Open in new tab"), box, handler=plugin._open_in_new_tab
)
button, open_with = self._create_menu_item(_("Open with"), box, "open-with")
box.pack_start(Gtk.Separator.new(Gtk.Orientation.HORIZONTAL), False, False, 2)
self._create_menu_item(_("Copy to other"), box, handler=plugin._copy_files)
self._create_menu_item(_("Move to other"), box, handler=plugin._move_files)
box.pack_start(Gtk.Separator.new(Gtk.Orientation.HORIZONTAL), False, False, 2)
# path operations
button, path_operations = self._create_menu_item(
_("Path operations"), box, "path-operations"
)
if self._application.NAUTILUS_SEND_TO_INSTALLED:
self._create_menu_item(
_("Send to..."), path_operations, handler=plugin._send_to
)
self._create_menu_item(
_("Make link"), path_operations, handler=plugin._create_link
)
path_operations.pack_start(
Gtk.Separator.new(Gtk.Orientation.HORIZONTAL), False, False, 2
)
self._create_menu_item(
_("Copy file name"),
path_operations,
handler=plugin.copy_selected_item_name_to_clipboard,
)
self._create_menu_item(
_("Copy path"),
path_operations,
handler=plugin.copy_selected_path_to_clipboard,
)
# additional options
self._emblem_map = {}
self._emblems = Gtk.Grid.new()
self._emblems.set_row_spacing(2)
self._emblems.set_column_spacing(2)
self._emblems.set_row_homogeneous(True)
self._emblems.set_column_homogeneous(True)
button, menu = self._create_menu_item(_("Emblems"), box, "emblems")
menu.pack_start(self._emblems, True, True, 0)
box.pack_start(Gtk.Separator.new(Gtk.Orientation.HORIZONTAL), False, False, 2)
self._create_menu_item(_("Properties"), box, handler=plugin._item_properties)
self.__populate_emblem_menu()
# show all widgets
self._stack.show_all()
def __populate_emblem_menu(self):
"""Populate emblem menu with options."""
emblem_list = self._application.emblem_manager.get_available_emblems()
for index, emblem in enumerate(emblem_list):
image = Gtk.Image.new()
image.set_from_icon_name(emblem, Gtk.IconSize.LARGE_TOOLBAR)
button = Gtk.ToggleButton.new()
button.add(image)
button.get_style_context().add_class("flat")
button.connect("toggled", self.__handle_emblem_toggle, emblem)
self._emblem_map[emblem] = button
top = index // 5
left = index - (top * 5)
self._emblems.attach(button, left - 1, top - 1, 1, 1)
def __update_emblem_selection(self, full_path):
"""Update which emblems are selected for provided path."""
manager = self._application.emblem_manager
path, item_name = os.path.split(full_path)
active_emblems = manager.get_emblems(path, item_name)
if not active_emblems:
return
for emblem, button in self._emblem_map.items():
button.handler_block_by_func(self.__handle_emblem_toggle)
button.set_active(emblem in active_emblems)
button.handler_unblock_by_func(self.__handle_emblem_toggle)
def __populate_open_with_menu(self, path, mime_type):
"""Populate submenu for application selection."""
container = self._stack.get_child_by_name("open-with")
associations_manager = self._application.associations_manager
# remove old items skipping first which returns to main menu
old_items = container.get_children()[1:]
list(map(lambda item: container.remove(item), old_items))
# populate list with globally assigned applications
for application in associations_manager.get_application_list_for_type(
mime_type
):
menu_item = Gtk.ModelButton.new()
menu_item.set_property("text", application.name)
# assign icon if available
if application.icon:
icon = Gio.Icon.new_for_string(application.icon)
menu_item.set_property("icon", icon)
menu_item.get_child().get_children()[0].set_visible(
True
) # show it the hard way
# connect click handler
handler_data = {
"selection": [
path,
],
"application": application,
}
menu_item.connect("clicked", self.__handle_open_with_click, handler_data)
container.pack_start(menu_item, False, True, 0)
# add custom associations to the menu
custom_associations = self._application.association_options
custom_commands = custom_associations.get(mime_type)
if custom_commands:
# add menu separator so user can differentiate
separator = Gtk.Separator.new(Gtk.Orientation.HORIZONTAL)
container.pack_start(separator, False, True, 0)
# add custom commands to menu
for custom_command in custom_commands:
# create menu item
menu_item = Gtk.ModelButton.new()
menu_item.set_property("text", custom_command["name"])
# prepare data for item
handler_data = {
"selection": [
path,
],
"command": custom_command["command"],
}
menu_item.connect(
"clicked", self.__handle_open_with_click, handler_data
)
container.pack_start(menu_item, False, True, 0)
container.show_all()
def __handle_open_with_click(self, widget, data):
"""Handle clicking on application in open with menu."""
associations_manager = self._application.associations_manager
if "application" in data:
associations_manager.open_file(
data["selection"], application_info=data["application"]
)
elif "command" in data:
associations_manager.open_file(
data["selection"], exec_command=data["command"]
)
return True
def __handle_popover_open(self):
"""Handle popover opening."""
self._popover_visible = True
# # disable plugin accelerators
# groups = self._application.accelerator_manager.get_groups()
# for group_name in groups:
# group = self._application.accelerator_manager._get_group_by_name(group_name)
# group.deactivate()
def __handle_popover_close(self, widget, data=None):
"""Handle popover closing."""
self._popover_visible = False
# # enable plugin accelerators
# groups = self._application.accelerator_manager.get_groups()
# for group_name in groups:
# group = self._application.accelerator_manager._get_group_by_name(group_name)
# group.activate(self._application)
# remove references to help clear memory
self._provider = None
self._selected_path = None
def __handle_emblem_toggle(self, widget, emblem=None):
"""Handle toggling emblem for current path."""
manager = self._application.emblem_manager
path, item_name = os.path.split(self._selected_path)
update_method = (
self._application.emblem_manager.remove_emblem,
self._application.emblem_manager.add_emblem,
)[widget.get_active()]
update_method(path, item_name, emblem)
# notify monitor of our change
parent = self._provider.get_parent()
parent_path = self._provider.get_path()
if parent_path == self._provider.get_root_path(parent_path):
item_path = self._selected_path[len(parent_path) :]
else:
item_path = self._selected_path[len(parent_path) + 1 :]
queue = parent.get_monitor().get_queue()
queue.put((MonitorSignals.EMBLEM_CHANGED, item_path, None))
def _create_menu_item(self, label, container, submenu_name=None, handler=None):
"""Create menu item and pack in provided container."""
menu_item = Gtk.ModelButton.new()
menu_item.set_property("text", label)
container.pack_start(menu_item, False, False, 0)
submenu = None
if submenu_name:
menu_item.set_property("menu-name", submenu_name)
if self._stack.get_child_by_name(submenu_name) is None:
submenu = self._create_submenu(submenu_name, menu_item)
if handler:
menu_item.connect("clicked", handler)
return menu_item, submenu
def _create_submenu(self, name, button=None, label=None, container=None):
"""Create submenu for provided button and return its container."""
back_button = Gtk.ModelButton.new()
back_button.set_property("inverted", True)
back_button.set_property("menu-name", "main")
# set menu item label
if button:
back_button.set_label(button.get_property("text"))
elif label:
back_button.set_label(label)
# add container to the stack
if not container:
container = Gtk.VBox.new(False, 0)
container.pack_start(back_button, False, False, 0)
self._stack.add_named(container, name)
return container
def prepare(self, path, provider):
    """Prepare popup contents (emblems, open-with menu) for `path`."""
    self._provider = provider
    self._selected_path = path

    associations = self._application.associations_manager
    mime_type = associations.get_mime_type(path)

    # extension lookup failed -- try to detect type from file contents
    if associations.is_mime_type_unknown(mime_type):
        try:
            sample = associations.get_sample_data(path, provider)
            mime_type = associations.get_mime_type(data=sample)
        except IsADirectoryError:
            mime_type = "inode/directory"

    self.__update_emblem_selection(path)
    self.__populate_open_with_menu(path, mime_type)
def show(self, widget, position, page="main"):
    """Pop the menu up at `position` relative to `widget`, on stack page `page`."""
    self.__handle_popover_open()
    popover = self._popover
    popover.set_relative_to(widget)
    popover.set_pointing_to(position)
    self._stack.set_visible_child_name(page)
    popover.popup()
# Read-only property exposing the tracked popover visibility flag.
visible = property(lambda self: self._popover_visible)
|
keyinput | keyutils | # SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Our own QKeySequence-like class and related utilities.
Note that Qt's type safety (or rather, lack thereof) is somewhat scary when it
comes to keys/modifiers. Many places (such as QKeyEvent::key()) don't actually
return a Qt::Key, they return an int.
To make things worse, when talking about a "key", sometimes Qt means a Qt::Key
member. However, sometimes it means a Qt::Key member ORed with a
Qt.KeyboardModifier...
Because of that, _assert_plain_key() and _assert_plain_modifier() make sure we
handle what we actually think we do.
"""
import dataclasses
import itertools
from typing import Iterable, Iterator, List, Mapping, Optional, Union, cast, overload
from qutebrowser.qt import machinery
from qutebrowser.qt.core import QEvent, Qt
from qutebrowser.qt.gui import QKeyEvent, QKeySequence
if machinery.IS_QT6:
from qutebrowser.qt.core import QKeyCombination
else:
QKeyCombination = None # QKeyCombination was added in Qt 6
from qutebrowser.utils import debug, qtutils, utils
class InvalidKeyError(Exception):

    """Raised when a key can't be represented by PyQt.

    Typically triggered by Qt.Key(...) / QKeyCombination.key() raising
    ValueError for values PyQt cannot map to an enum member.

    WORKAROUND for https://www.riverbankcomputing.com/pipermail/pyqt/2022-April/044607.html
    Should be fixed in PyQt 6.3.1 (or 6.4.0?).
    """
# Map Qt::Key values to their Qt::KeyboardModifier value.
# Used to recognize standalone modifier presses (KeyInfo.is_modifier_key)
# and to avoid names like <Shift+Shift> in KeyInfo.__str__.
_MODIFIER_MAP = {
    Qt.Key.Key_Shift: Qt.KeyboardModifier.ShiftModifier,
    Qt.Key.Key_Control: Qt.KeyboardModifier.ControlModifier,
    Qt.Key.Key_Alt: Qt.KeyboardModifier.AltModifier,
    Qt.Key.Key_Meta: Qt.KeyboardModifier.MetaModifier,
    Qt.Key.Key_AltGr: Qt.KeyboardModifier.GroupSwitchModifier,
    Qt.Key.Key_Mode_switch: Qt.KeyboardModifier.GroupSwitchModifier,
}

# Sentinel for "no key": ideally Qt.Key(0), but falls back to a plain 0 where
# PyQt refuses to construct the enum from 0.
try:
    _NIL_KEY: Union[Qt.Key, int] = Qt.Key(0)
except ValueError:
    # WORKAROUND for
    # https://www.riverbankcomputing.com/pipermail/pyqt/2022-April/044607.html
    _NIL_KEY = 0

# Version-dependent aliases: Qt 6 wraps key+modifiers in QKeyCombination,
# while Qt 5 packs them into a plain int.
if machinery.IS_QT6:
    _KeyInfoType = QKeyCombination
    _ModifierType = Qt.KeyboardModifier
else:
    _KeyInfoType = int
    _ModifierType = Union[Qt.KeyboardModifiers, Qt.KeyboardModifier]
# Name overrides consulted by _key_to_string() before falling back to
# QKeySequence(key).toString().
_SPECIAL_NAMES = {
    # Some keys handled in a weird way by QKeySequence::toString.
    # See https://bugreports.qt.io/browse/QTBUG-40030
    # Most are unlikely to be ever needed, but you never know ;)
    # For dead/combining keys, we return the corresponding non-combining
    # key, as that's easier to add to the config.
    Qt.Key.Key_Super_L: "Super L",
    Qt.Key.Key_Super_R: "Super R",
    Qt.Key.Key_Hyper_L: "Hyper L",
    Qt.Key.Key_Hyper_R: "Hyper R",
    Qt.Key.Key_Direction_L: "Direction L",
    Qt.Key.Key_Direction_R: "Direction R",
    Qt.Key.Key_Shift: "Shift",
    Qt.Key.Key_Control: "Control",
    Qt.Key.Key_Meta: "Meta",
    Qt.Key.Key_Alt: "Alt",
    Qt.Key.Key_AltGr: "AltGr",
    Qt.Key.Key_Multi_key: "Multi key",
    Qt.Key.Key_SingleCandidate: "Single Candidate",
    Qt.Key.Key_Mode_switch: "Mode switch",
    Qt.Key.Key_Dead_Grave: "`",
    Qt.Key.Key_Dead_Acute: "´",
    Qt.Key.Key_Dead_Circumflex: "^",
    Qt.Key.Key_Dead_Tilde: "~",
    Qt.Key.Key_Dead_Macron: "¯",
    Qt.Key.Key_Dead_Breve: "˘",
    Qt.Key.Key_Dead_Abovedot: "˙",
    Qt.Key.Key_Dead_Diaeresis: "¨",
    Qt.Key.Key_Dead_Abovering: "˚",
    Qt.Key.Key_Dead_Doubleacute: "˝",
    Qt.Key.Key_Dead_Caron: "ˇ",
    Qt.Key.Key_Dead_Cedilla: "¸",
    Qt.Key.Key_Dead_Ogonek: "˛",
    Qt.Key.Key_Dead_Iota: "Iota",
    Qt.Key.Key_Dead_Voiced_Sound: "Voiced Sound",
    Qt.Key.Key_Dead_Semivoiced_Sound: "Semivoiced Sound",
    Qt.Key.Key_Dead_Belowdot: "Belowdot",
    Qt.Key.Key_Dead_Hook: "Hook",
    Qt.Key.Key_Dead_Horn: "Horn",
    Qt.Key.Key_Dead_Stroke: "\u0335",  # '̵'
    Qt.Key.Key_Dead_Abovecomma: "\u0313",  # '̓'
    Qt.Key.Key_Dead_Abovereversedcomma: "\u0314",  # '̔'
    Qt.Key.Key_Dead_Doublegrave: "\u030f",  # '̏'
    Qt.Key.Key_Dead_Belowring: "\u0325",  # '̥'
    Qt.Key.Key_Dead_Belowmacron: "\u0331",  # '̱'
    Qt.Key.Key_Dead_Belowcircumflex: "\u032d",  # '̭'
    Qt.Key.Key_Dead_Belowtilde: "\u0330",  # '̰'
    Qt.Key.Key_Dead_Belowbreve: "\u032e",  # '̮'
    Qt.Key.Key_Dead_Belowdiaeresis: "\u0324",  # '̤'
    Qt.Key.Key_Dead_Invertedbreve: "\u0311",  # '̑'
    Qt.Key.Key_Dead_Belowcomma: "\u0326",  # '̦'
    Qt.Key.Key_Dead_Currency: "¤",
    Qt.Key.Key_Dead_a: "a",
    Qt.Key.Key_Dead_A: "A",
    Qt.Key.Key_Dead_e: "e",
    Qt.Key.Key_Dead_E: "E",
    Qt.Key.Key_Dead_i: "i",
    Qt.Key.Key_Dead_I: "I",
    Qt.Key.Key_Dead_o: "o",
    Qt.Key.Key_Dead_O: "O",
    Qt.Key.Key_Dead_u: "u",
    Qt.Key.Key_Dead_U: "U",
    Qt.Key.Key_Dead_Small_Schwa: "ə",
    Qt.Key.Key_Dead_Capital_Schwa: "Ə",
    Qt.Key.Key_Dead_Greek: "Greek",
    Qt.Key.Key_Dead_Lowline: "\u0332",  # '̲'
    Qt.Key.Key_Dead_Aboveverticalline: "\u030d",  # '̍'
    Qt.Key.Key_Dead_Belowverticalline: "\u0329",  # combining vertical line below
    Qt.Key.Key_Dead_Longsolidusoverlay: "\u0338",  # '̸'
    Qt.Key.Key_MediaLast: "Media Last",
    Qt.Key.Key_unknown: "Unknown",
    # For some keys, we just want a different name
    Qt.Key.Key_Escape: "Escape",
    _NIL_KEY: "nil",
}
def _assert_plain_key(key: Qt.Key) -> None:
    """Make sure this is a key without KeyboardModifier mixed in."""
    mask = qtutils.extract_enum_val(Qt.KeyboardModifier.KeyboardModifierMask)
    value = qtutils.extract_enum_val(key)
    assert value & mask == 0, hex(value)
def _assert_plain_modifier(key: _ModifierType) -> None:
    """Make sure this is a modifier without a key mixed in."""
    mask = qtutils.extract_enum_val(Qt.KeyboardModifier.KeyboardModifierMask)
    value = qtutils.extract_enum_val(key)
    assert value & ~mask == 0, hex(value)
def _is_printable(key: Qt.Key) -> bool:
    """Check whether a plain key corresponds to a printable Latin-1 character."""
    _assert_plain_key(key)
    if key in (Qt.Key.Key_Space, _NIL_KEY):
        return False
    return key <= 0xFF
def _is_surrogate(key: Qt.Key) -> bool:
    """Check if a codepoint lies in the UTF-16 surrogate range.

    Surrogates (0xd800-0xdfff) are reserved Unicode code units used in
    UTF-16 to encode codepoints above the BMP (Base Multilingual Plane).
    """
    _assert_plain_key(key)
    return key in range(0xD800, 0xE000)
def _remap_unicode(key: Qt.Key, text: str) -> Qt.Key:
    """Work around QtKeyEvent's bad values for high codepoints.

    QKeyEvent handles higher unicode codepoints poorly: it uses UTF-16
    internally, and for codepoints needing surrogate pairs (e.g. emoji and
    some CJK characters) it sets the keycode to just the upper half of the
    surrogate -- useless, and it breaks UTF-8 encoding, causing crashes.
    Detect that case and recover the full codepoint from the event's text()
    instead.

    This is a WORKAROUND for https://bugreports.qt.io/browse/QTBUG-72776.
    """
    _assert_plain_key(key)
    if not _is_surrogate(key):
        return key

    if len(text) != 1:
        raise KeyParseError(
            text,
            f"Expected 1 character for surrogate, but got {len(text)}!",
        )
    # the text carries the full character; use its codepoint as the key
    return Qt.Key(ord(text[0]))
def _check_valid_utf8(s: str, data: Union[Qt.Key, _ModifierType]) -> None:
    """Make sure the given string is valid UTF-8.

    Guards against characters where Qt fell back to lone UTF-16 surrogates,
    which cannot be encoded.
    """
    try:
        s.encode("utf-8")
    except UnicodeEncodeError as err:  # pragma: no cover
        raw = qtutils.extract_enum_val(data)
        raise ValueError(f"Invalid encoding in 0x{raw:x} -> {s}: {err}")
def _key_to_string(key: Qt.Key) -> str:
    """Convert a Qt::Key member to a meaningful name.

    Args:
        key: A Qt::Key member.

    Return:
        A name of the key as a string.
    """
    _assert_plain_key(key)
    special = _SPECIAL_NAMES.get(key)
    if special is not None:
        return special

    name = QKeySequence(key).toString()
    _check_valid_utf8(name, key)
    return name
def _modifiers_to_string(modifiers: _ModifierType) -> str:
    """Convert the given Qt::KeyboardModifier to a string.

    Handles Qt.KeyboardModifier.GroupSwitchModifier explicitly, because Qt
    doesn't treat it as a modifier when stringifying.
    """
    _assert_plain_modifier(modifiers)
    altgr = Qt.KeyboardModifier.GroupSwitchModifier

    prefix = ""
    if modifiers & altgr:
        modifiers = _unset_modifier_bits(modifiers, altgr)
        prefix = "AltGr+"

    text = prefix + QKeySequence(qtutils.extract_enum_val(modifiers)).toString()
    _check_valid_utf8(text, modifiers)
    return text
class KeyParseError(Exception):

    """Raised by _parse_single_key/parse_keystring on parse errors."""

    def __init__(self, keystr: Optional[str], error: str) -> None:
        # keystr is None when no specific keystring is to blame
        message = (
            f"Could not parse keystring: {error}"
            if keystr is None
            else f"Could not parse {keystr!r}: {error}"
        )
        super().__init__(message)
def _parse_keystring(keystr: str) -> Iterator[str]:
    """Parse a config keystring into normalized per-key strings.

    "<...>" chunks are treated as special keys (normalized via
    _parse_special_key); everything else is parsed character by character.
    A trailing unterminated "<" is yielded literally, followed by its
    buffered contents as single keys.
    """
    buffer = ""
    in_special = False
    for ch in keystr:
        if ch == ">":
            if in_special:
                yield _parse_special_key(buffer)
                buffer = ""
                in_special = False
            else:
                yield ">"
                assert not buffer, buffer
        elif ch == "<":
            in_special = True
        elif in_special:
            buffer += ch
        else:
            yield _parse_single_key(ch)
    if in_special:
        # unterminated "<...": emit it as literal keys
        yield "<"
        for ch in buffer:
            yield _parse_single_key(ch)
def _parse_special_key(keystr: str) -> str:
"""Normalize a keystring like Ctrl-Q to a keystring like Ctrl+Q.
Args:
keystr: The key combination as a string.
Return:
The normalized keystring.
"""
keystr = keystr.lower()
replacements = (
("control", "ctrl"),
("windows", "meta"),
("mod4", "meta"),
("command", "meta"),
("cmd", "meta"),
("super", "meta"),
("mod1", "alt"),
("less", "<"),
("greater", ">"),
)
for orig, repl in replacements:
keystr = keystr.replace(orig, repl)
for mod in ["ctrl", "meta", "alt", "shift", "num"]:
keystr = keystr.replace(mod + "-", mod + "+")
return keystr
def _parse_single_key(keystr: str) -> str:
"""Get a keystring for QKeySequence for a single key."""
return "Shift+" + keystr if keystr.isupper() else keystr
def _unset_modifier_bits(
    modifiers: _ModifierType, mask: _ModifierType
) -> _ModifierType:
    """Unset all bits in modifiers which are given in mask.

    Equivalent to modifiers & ~mask, but with a WORKAROUND with PyQt 6,
    for a bug in Python 3.11.4 where that isn't possible with an enum.Flag...:
    https://github.com/python/cpython/issues/105497
    """
    if machinery.IS_QT5:
        # re-wrap explicitly: the & operation can lose the type if it's 0
        return Qt.KeyboardModifiers(modifiers & ~mask)
    return Qt.KeyboardModifier(modifiers.value & ~mask.value)
@dataclasses.dataclass(frozen=True, order=True)
class KeyInfo:

    """A key with optional modifiers.

    Frozen and ordered so instances can be compared, hashed, and used in
    sets/dicts safely.

    Attributes:
        key: A Qt::Key member.
        modifiers: A Qt::KeyboardModifier enum value.
    """

    key: Qt.Key
    modifiers: _ModifierType = Qt.KeyboardModifier.NoModifier

    def __post_init__(self) -> None:
        """Run some validation on the key/modifier values."""
        # This changed with Qt 6, and e.g. to_qt() relies on this.
        if machinery.IS_QT5:
            modifier_classes = (Qt.KeyboardModifier, Qt.KeyboardModifiers)
        elif machinery.IS_QT6:
            modifier_classes = Qt.KeyboardModifier
        assert isinstance(self.key, Qt.Key), self.key
        assert isinstance(self.modifiers, modifier_classes), self.modifiers
        # ensure key and modifiers were not accidentally swapped/mixed
        _assert_plain_key(self.key)
        _assert_plain_modifier(self.modifiers)

    def __repr__(self) -> str:
        return utils.get_repr(
            self,
            key=debug.qenum_key(Qt, self.key, klass=Qt.Key),
            modifiers=debug.qflags_key(Qt, self.modifiers, klass=Qt.KeyboardModifier),
            text=str(self),
        )

    @classmethod
    def from_event(cls, e: QKeyEvent) -> "KeyInfo":
        """Get a KeyInfo object from a QKeyEvent.

        This makes sure that key/modifiers are never mixed and also remaps
        UTF-16 surrogates to work around QTBUG-72776.

        Raises:
            InvalidKeyError: If PyQt can't represent the event's key.
        """
        try:
            key = Qt.Key(e.key())
        except ValueError as ex:
            raise InvalidKeyError(str(ex))
        key = _remap_unicode(key, e.text())
        modifiers = e.modifiers()
        return cls(key, modifiers)

    @classmethod
    def from_qt(cls, combination: _KeyInfoType) -> "KeyInfo":
        """Construct a KeyInfo from a Qt5-style int or Qt6-style QKeyCombination.

        Raises:
            InvalidKeyError: If the combination's key can't be represented.
        """
        if machinery.IS_QT5:
            assert isinstance(combination, int)
            # Qt 5 packs key and modifiers into one int; split via the mask.
            key = Qt.Key(int(combination) & ~Qt.KeyboardModifier.KeyboardModifierMask)
            modifiers = Qt.KeyboardModifier(
                int(combination) & Qt.KeyboardModifier.KeyboardModifierMask
            )
            return cls(key, modifiers)
        else:
            # QKeyCombination is now guaranteed to be available here
            assert isinstance(combination, QKeyCombination)
            try:
                key = combination.key()
            except ValueError as e:
                raise InvalidKeyError(str(e))
            return cls(
                key=key,
                modifiers=combination.keyboardModifiers(),
            )

    def __str__(self) -> str:
        """Convert this KeyInfo to a meaningful name.

        Return:
            A name of the key (combination) as a string.
        """
        key_string = _key_to_string(self.key)
        modifiers = self.modifiers

        if self.key in _MODIFIER_MAP:
            # Don't return e.g. <Shift+Shift>
            modifiers = _unset_modifier_bits(modifiers, _MODIFIER_MAP[self.key])
        elif _is_printable(self.key):
            # "normal" binding
            if not key_string:  # pragma: no cover
                raise ValueError("Got empty string for key 0x{:x}!".format(self.key))
            assert len(key_string) == 1, key_string
            if self.modifiers == Qt.KeyboardModifier.ShiftModifier:
                assert not self.is_special()
                return key_string.upper()
            elif self.modifiers == Qt.KeyboardModifier.NoModifier:
                assert not self.is_special()
                return key_string.lower()
            else:
                # Use special binding syntax, but <Ctrl-a> instead of <Ctrl-A>
                key_string = key_string.lower()
            modifiers = Qt.KeyboardModifier(modifiers)

        # "special" binding
        assert self.is_special()
        modifier_string = _modifiers_to_string(modifiers)
        return "<{}{}>".format(modifier_string, key_string)

    def text(self) -> str:
        """Get the text which would be displayed when pressing this key."""
        # control keys map to their control-character equivalents
        control = {
            Qt.Key.Key_Space: " ",
            Qt.Key.Key_Tab: "\t",
            Qt.Key.Key_Backspace: "\b",
            Qt.Key.Key_Return: "\r",
            Qt.Key.Key_Enter: "\r",
            Qt.Key.Key_Escape: "\x1b",
        }

        if self.key in control:
            return control[self.key]
        elif not _is_printable(self.key):
            return ""

        text = QKeySequence(self.key).toString()
        if not self.modifiers & Qt.KeyboardModifier.ShiftModifier:
            text = text.lower()
        return text

    def to_event(self, typ: QEvent.Type = QEvent.Type.KeyPress) -> QKeyEvent:
        """Get a QKeyEvent from this KeyInfo."""
        return QKeyEvent(typ, self.key, self.modifiers, self.text())

    def to_qt(self) -> _KeyInfoType:
        """Get something suitable for a QKeySequence."""
        if machinery.IS_QT5:
            return int(self.key) | int(self.modifiers)
        else:
            return QKeyCombination(self.modifiers, self.key)

    def with_stripped_modifiers(self, modifiers: Qt.KeyboardModifier) -> "KeyInfo":
        """Return a copy of this KeyInfo with the given modifier bits removed."""
        mods = _unset_modifier_bits(self.modifiers, modifiers)
        return KeyInfo(key=self.key, modifiers=mods)

    def is_special(self) -> bool:
        """Check whether this key requires special key syntax (<...>)."""
        return not (
            _is_printable(self.key)
            and self.modifiers
            in [
                Qt.KeyboardModifier.ShiftModifier,
                Qt.KeyboardModifier.NoModifier,
            ]
        )

    def is_modifier_key(self) -> bool:
        """Test whether the given key is a modifier.

        This only considers keys which are part of Qt::KeyboardModifier, i.e.
        which would interrupt a key chain like "yY" when handled.
        """
        return self.key in _MODIFIER_MAP
class KeySequence:

    """A sequence of key presses.

    This internally uses chained QKeySequence objects and exposes a nicer
    interface over it.

    NOTE: While private members of this class are in theory mutable, they must
    not be mutated in order to ensure consistent hashing.

    Attributes:
        _sequences: A list of QKeySequence

    Class attributes:
        _MAX_LEN: The maximum amount of keys in a QKeySequence.
    """

    _MAX_LEN = 4

    def __init__(self, *keys: KeyInfo) -> None:
        self._sequences: List[QKeySequence] = []
        # split into QKeySequence-sized chunks of at most _MAX_LEN keys
        for sub in utils.chunk(keys, self._MAX_LEN):
            try:
                args = [info.to_qt() for info in sub]
            except InvalidKeyError as e:
                raise KeyParseError(keystr=None, error=f"Got invalid key: {e}")
            sequence = QKeySequence(*args)
            self._sequences.append(sequence)
        if keys:
            assert self
        self._validate()

    def __str__(self) -> str:
        parts = []
        for info in self:
            parts.append(str(info))
        return "".join(parts)

    def __iter__(self) -> Iterator[KeyInfo]:
        """Iterate over KeyInfo objects."""
        # FIXME:mypy Stubs seem to be unaware that iterating a QKeySequence produces
        # _KeyInfoType
        sequences = cast(List[Iterable[_KeyInfoType]], self._sequences)
        for combination in itertools.chain.from_iterable(sequences):
            yield KeyInfo.from_qt(combination)

    def __repr__(self) -> str:
        return utils.get_repr(self, keys=str(self))

    # Ordering/equality delegate to the underlying QKeySequence lists.
    def __lt__(self, other: "KeySequence") -> bool:
        return self._sequences < other._sequences

    def __gt__(self, other: "KeySequence") -> bool:
        return self._sequences > other._sequences

    def __le__(self, other: "KeySequence") -> bool:
        return self._sequences <= other._sequences

    def __ge__(self, other: "KeySequence") -> bool:
        return self._sequences >= other._sequences

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, KeySequence):
            return NotImplemented
        return self._sequences == other._sequences

    def __ne__(self, other: object) -> bool:
        if not isinstance(other, KeySequence):
            return NotImplemented
        return self._sequences != other._sequences

    def __hash__(self) -> int:
        return hash(tuple(self._sequences))

    def __len__(self) -> int:
        """Total number of keys across all chunked QKeySequences."""
        return sum(len(seq) for seq in self._sequences)

    def __bool__(self) -> bool:
        return bool(self._sequences)

    @overload
    def __getitem__(self, item: int) -> KeyInfo:
        ...

    @overload
    def __getitem__(self, item: slice) -> "KeySequence":
        ...

    def __getitem__(self, item: Union[int, slice]) -> Union[KeyInfo, "KeySequence"]:
        infos = list(self)
        if isinstance(item, slice):
            return self.__class__(*infos[item])
        else:
            return infos[item]

    def _validate(self, keystr: Optional[str] = None) -> None:
        """Raise KeyParseError if any contained key is out of range or nil."""
        try:
            for info in self:
                if info.key < Qt.Key.Key_Space or info.key >= Qt.Key.Key_unknown:
                    raise KeyParseError(keystr, "Got invalid key!")
        except InvalidKeyError as e:
            raise KeyParseError(keystr, f"Got invalid key: {e}")

        for seq in self._sequences:
            if not seq:
                raise KeyParseError(keystr, "Got invalid key!")

    def matches(self, other: "KeySequence") -> QKeySequence.SequenceMatch:
        """Check whether the given KeySequence matches with this one.

        We store multiple QKeySequences with <= 4 keys each, so we need to
        match those pair-wise, and account for an unequal amount of sequences
        as well.
        """
        # pylint: disable=protected-access

        if len(self._sequences) > len(other._sequences):
            # If we entered more sequences than there are in the config,
            # there's no way there can be a match.
            return QKeySequence.SequenceMatch.NoMatch

        for entered, configured in zip(self._sequences, other._sequences):
            # If we get NoMatch/PartialMatch in a sequence, we can abort there.
            match = entered.matches(configured)
            if match != QKeySequence.SequenceMatch.ExactMatch:
                return match

        # We checked all common sequences and they had an ExactMatch.
        #
        # If there's still more sequences configured than entered, that's a
        # PartialMatch, as more keypresses can still follow and new sequences
        # will appear which we didn't check above.
        #
        # If there's the same amount of sequences configured and entered,
        # that's an EqualMatch.
        if len(self._sequences) == len(other._sequences):
            return QKeySequence.SequenceMatch.ExactMatch
        elif len(self._sequences) < len(other._sequences):
            return QKeySequence.SequenceMatch.PartialMatch
        else:
            raise utils.Unreachable("self={!r} other={!r}".format(self, other))

    def append_event(self, ev: QKeyEvent) -> "KeySequence":
        """Create a new KeySequence object with the given QKeyEvent added.

        Raises:
            KeyParseError: On an invalid or nil key.
        """
        try:
            key = Qt.Key(ev.key())
        except ValueError as e:
            raise KeyParseError(None, f"Got invalid key: {e}")

        _assert_plain_key(key)
        _assert_plain_modifier(ev.modifiers())

        key = _remap_unicode(key, ev.text())
        modifiers: _ModifierType = ev.modifiers()

        if key == _NIL_KEY:
            raise KeyParseError(None, "Got nil key!")

        # We always remove Qt.KeyboardModifier.GroupSwitchModifier because QKeySequence has no
        # way to mention that in a binding anyways...
        modifiers = _unset_modifier_bits(
            modifiers, Qt.KeyboardModifier.GroupSwitchModifier
        )

        # We change Qt.Key.Key_Backtab to Key_Tab here because nobody would
        # configure "Shift-Backtab" in their config.
        if modifiers & Qt.KeyboardModifier.ShiftModifier and key == Qt.Key.Key_Backtab:
            key = Qt.Key.Key_Tab

        # We don't care about a shift modifier with symbols (Shift-: should
        # match a : binding even though we typed it with a shift on an
        # US-keyboard)
        #
        # However, we *do* care about Shift being involved if we got an
        # upper-case letter, as Shift-A should match a Shift-A binding, but not
        # an "a" binding.
        #
        # In addition, Shift also *is* relevant when other modifiers are
        # involved. Shift-Ctrl-X should not be equivalent to Ctrl-X.
        shift_modifier = Qt.KeyboardModifier.ShiftModifier
        if (
            modifiers == shift_modifier
            and _is_printable(key)
            and not ev.text().isupper()
        ):
            modifiers = Qt.KeyboardModifier.NoModifier

        # On macOS, swap Ctrl and Meta back
        #
        # We don't use Qt.ApplicationAttribute.AA_MacDontSwapCtrlAndMeta because that also affects
        # Qt/QtWebEngine's own shortcuts. However, we do want "Ctrl" and "Meta"
        # (or "Cmd") in a key binding name to actually represent what's on the
        # keyboard.
        if utils.is_mac:
            if (
                modifiers & Qt.KeyboardModifier.ControlModifier
                and modifiers & Qt.KeyboardModifier.MetaModifier
            ):
                pass
            elif modifiers & Qt.KeyboardModifier.ControlModifier:
                modifiers = _unset_modifier_bits(
                    modifiers, Qt.KeyboardModifier.ControlModifier
                )
                modifiers |= Qt.KeyboardModifier.MetaModifier
            elif modifiers & Qt.KeyboardModifier.MetaModifier:
                modifiers = _unset_modifier_bits(
                    modifiers, Qt.KeyboardModifier.MetaModifier
                )
                modifiers |= Qt.KeyboardModifier.ControlModifier

        infos = list(self)
        infos.append(KeyInfo(key, modifiers))

        return self.__class__(*infos)

    def strip_modifiers(self) -> "KeySequence":
        """Strip optional modifiers (currently only KeypadModifier) from keys."""
        modifiers = Qt.KeyboardModifier.KeypadModifier
        infos = [info.with_stripped_modifiers(modifiers) for info in self]
        return self.__class__(*infos)

    def with_mappings(
        self, mappings: Mapping["KeySequence", "KeySequence"]
    ) -> "KeySequence":
        """Get a new KeySequence with the given mappings applied."""
        infos: List[KeyInfo] = []
        for info in self:
            key_seq = KeySequence(info)
            if key_seq in mappings:
                # replace the single key with its mapped sequence
                infos += mappings[key_seq]
            else:
                infos.append(info)
        return self.__class__(*infos)

    @classmethod
    def parse(cls, keystr: str) -> "KeySequence":
        """Parse a keystring like <Ctrl-x> or xyz and return a KeySequence."""
        new = cls()
        strings = list(_parse_keystring(keystr))
        for sub in utils.chunk(strings, cls._MAX_LEN):
            sequence = QKeySequence(", ".join(sub))
            new._sequences.append(sequence)

        if keystr:
            assert new, keystr

        # pylint: disable=protected-access
        new._validate(keystr)
        return new
|
models | feature_map_generators_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for feature map generators."""
import tensorflow as tf
from app.object_detection.models import feature_map_generators
# Feature map layout fixtures passed to multi_resolution_feature_maps below.
# "from_layer" names backbone endpoints; empty entries appear to request extra
# convolved layers on top of the last endpoint (see the expected
# "<name>_2_Conv2d_*" shapes in the tests) -- "layer_depth" gives the desired
# depth for those layers, with -1 meaning "keep the input depth".
INCEPTION_V2_LAYOUT = {
    "from_layer": ["Mixed_3c", "Mixed_4c", "Mixed_5c", "", "", ""],
    "layer_depth": [-1, -1, -1, 512, 256, 256],
    "anchor_strides": [16, 32, 64, -1, -1, -1],
    "layer_target_norm": [20.0, -1, -1, -1, -1, -1],
}

INCEPTION_V3_LAYOUT = {
    "from_layer": ["Mixed_5d", "Mixed_6e", "Mixed_7c", "", "", ""],
    "layer_depth": [-1, -1, -1, 512, 256, 128],
    "anchor_strides": [16, 32, 64, -1, -1, -1],
    "aspect_ratios": [1.0, 2.0, 1.0 / 2, 3.0, 1.0 / 3],
}

EMBEDDED_SSD_MOBILENET_V1_LAYOUT = {
    "from_layer": ["Conv2d_11_pointwise", "Conv2d_13_pointwise", "", "", ""],
    "layer_depth": [-1, -1, 512, 256, 256],
    "conv_kernel_size": [-1, -1, 3, 3, 2],
}

# TODO(rathodv): add tests with different anchor strides.
class MultiResolutionFeatureMapGeneratorTest(tf.test.TestCase):
    """Output-shape tests for feature_map_generators.multi_resolution_feature_maps.

    The three test methods previously duplicated the build/run/compare
    boilerplate verbatim; it is factored into _check_feature_map_shapes.
    """

    def _check_feature_map_shapes(
        self, feature_map_layout, image_features, expected_feature_map_shapes
    ):
        """Build feature maps for the layout and assert the resulting shapes.

        Args:
            feature_map_layout: Layout dict (see the module-level fixtures).
            image_features: Dict mapping endpoint name to an input tensor.
            expected_feature_map_shapes: Dict mapping output feature map name
                to its expected (batch, height, width, depth) shape tuple.
        """
        feature_maps = feature_map_generators.multi_resolution_feature_maps(
            feature_map_layout=feature_map_layout,
            depth_multiplier=1,
            min_depth=32,
            insert_1x1_conv=True,
            image_features=image_features,
        )
        init_op = tf.global_variables_initializer()
        with self.test_session() as sess:
            sess.run(init_op)
            out_feature_maps = sess.run(feature_maps)
        out_feature_map_shapes = {
            key: value.shape for key, value in out_feature_maps.items()
        }
        self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)

    def test_get_expected_feature_map_shapes_with_inception_v2(self):
        image_features = {
            "Mixed_3c": tf.random_uniform([4, 28, 28, 256], dtype=tf.float32),
            "Mixed_4c": tf.random_uniform([4, 14, 14, 576], dtype=tf.float32),
            "Mixed_5c": tf.random_uniform([4, 7, 7, 1024], dtype=tf.float32),
        }
        expected_feature_map_shapes = {
            "Mixed_3c": (4, 28, 28, 256),
            "Mixed_4c": (4, 14, 14, 576),
            "Mixed_5c": (4, 7, 7, 1024),
            "Mixed_5c_2_Conv2d_3_3x3_s2_512": (4, 4, 4, 512),
            "Mixed_5c_2_Conv2d_4_3x3_s2_256": (4, 2, 2, 256),
            "Mixed_5c_2_Conv2d_5_3x3_s2_256": (4, 1, 1, 256),
        }
        self._check_feature_map_shapes(
            INCEPTION_V2_LAYOUT, image_features, expected_feature_map_shapes
        )

    def test_get_expected_feature_map_shapes_with_inception_v3(self):
        image_features = {
            "Mixed_5d": tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),
            "Mixed_6e": tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),
            "Mixed_7c": tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32),
        }
        expected_feature_map_shapes = {
            "Mixed_5d": (4, 35, 35, 256),
            "Mixed_6e": (4, 17, 17, 576),
            "Mixed_7c": (4, 8, 8, 1024),
            "Mixed_7c_2_Conv2d_3_3x3_s2_512": (4, 4, 4, 512),
            "Mixed_7c_2_Conv2d_4_3x3_s2_256": (4, 2, 2, 256),
            "Mixed_7c_2_Conv2d_5_3x3_s2_128": (4, 1, 1, 128),
        }
        self._check_feature_map_shapes(
            INCEPTION_V3_LAYOUT, image_features, expected_feature_map_shapes
        )

    def test_get_expected_feature_map_shapes_with_embedded_ssd_mobilenet_v1(self):
        image_features = {
            "Conv2d_11_pointwise": tf.random_uniform(
                [4, 16, 16, 512], dtype=tf.float32
            ),
            "Conv2d_13_pointwise": tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32),
        }
        expected_feature_map_shapes = {
            "Conv2d_11_pointwise": (4, 16, 16, 512),
            "Conv2d_13_pointwise": (4, 8, 8, 1024),
            "Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_512": (4, 4, 4, 512),
            "Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256": (4, 2, 2, 256),
            "Conv2d_13_pointwise_2_Conv2d_4_2x2_s2_256": (4, 1, 1, 256),
        }
        self._check_feature_map_shapes(
            EMBEDDED_SSD_MOBILENET_V1_LAYOUT, image_features, expected_feature_map_shapes
        )
class GetDepthFunctionTest(tf.test.TestCase):
    """Tests for feature_map_generators.get_depth_fn."""

    def test_return_min_depth_when_multiplier_is_small(self):
        fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5, min_depth=16)
        # 16 * 0.5 would fall below min_depth, so min_depth is returned
        self.assertEqual(fn(16), 16)

    def test_return_correct_depth_with_multiplier(self):
        fn = feature_map_generators.get_depth_fn(depth_multiplier=0.5, min_depth=16)
        self.assertEqual(fn(64), 32)
# Allow running this test module directly.
if __name__ == "__main__":
    tf.test.main()
|
src | SongEdit | # -*- coding: utf-8 -*-
# MusicPlayer, https://github.com/albertz/music-player
# Copyright (c) 2012, Albert Zeyer, www.az2000.de
# All rights reserved.
# This code is under the 2-clause BSD license, see License.txt in the root directory of this project.
import gui
import Traits
import utils
from utils import Event, UserAttrib, initBy
# Note: I'm not too happy with all the complicated update handling here...
# In general, the design is ok. But it needs some more specification
# and then some drastic simplification. Most of it should be one-liners.
class SongEdit:
# This is used by the GUI update system.
@initBy
def _updateEvent(self):
return Event()
def _updateHandler(self):
self._updateEvent.push()
def __init__(self, ctx=None):
if not ctx:
import gui
ctx = gui.ctx()
assert ctx, "no gui context"
self.ctx = ctx
ctx.curSelectedSong_updateEvent.register(self._updateHandler)
@UserAttrib(type=Traits.Object)
@property
def song(self):
return self.ctx.curSelectedSong
@UserAttrib(type=Traits.EditableText)
def artist(self, updateText=None):
if self.song:
if updateText:
self.song.artist = updateText
return self.song.artist
return ""
@UserAttrib(type=Traits.EditableText)
def title(self, updateText=None):
if self.song:
if updateText:
self.song.title = updateText
return self.song.title
return ""
@staticmethod
def _convertTagsToText(tags):
def txtForTag(tag):
value = tags[tag]
if value >= 1:
return tag
return tag + ":" + str(value)
return " ".join(map(txtForTag, sorted(tags.keys())))
@staticmethod
def _convertTextToTags(txt):
pass
# todo...
# @UserAttrib(type=Traits.EditableText)
def tags(self, updateText=None):
if self.song:
return self._convertTagsToText(self.song.tags)
return ""
@staticmethod
def _formatGain(gain):
factor = 10.0 ** (gain / 20.0)
return "%f dB (factor %f)" % (gain, factor)
@UserAttrib(type=Traits.Table(keys=("key", "value")), variableHeight=True)
@property
def metadata(self):
d = dict(self.song.metadata)
for key, func in (
("artist", None),
("title", None),
("album", None),
("duration", utils.formatTime),
("url", None),
("rating", None),
("tags", self._convertTagsToText),
("gain", self._formatGain),
("completedCount", None),
("skipCount", None),
("lastPlayedDate", utils.formatDate),
("id", repr),
):
try:
value = getattr(self.song, key)
except AttributeError:
pass
else:
if func:
value = func(value)
if not isinstance(value, (str, unicode)):
value = str(value)
d[key] = utils.convertToUnicode(value)
l = []
for key, value in sorted(d.items()):
l += [{"key": key, "value": value}]
return l
def _queryAcoustId(self):
fingerprint = self.song.get("fingerprint_AcoustId", timeout=None)[0]
duration = self.song.get("duration", timeout=None, accuracy=0.5)[0]
import base64
fingerprint = base64.urlsafe_b64encode(fingerprint)
api_url = "http://api.acoustid.org/v2/lookup"
# "8XaBELgH" is the one from the web example from AcoustID.
# "cSpUJKpD" is from the example from pyacoustid
# get an own one here: http://acoustid.org/api-key
client_api_key = "Rexr3KDO"
params = {
"format": "json",
"client": client_api_key,
"duration": int(duration),
"fingerprint": fingerprint,
"meta": "recordings recordingids releasegroups releases tracks compress",
}
import urllib
body = urllib.urlencode(params)
import urllib2
req = urllib2.Request(api_url, body)
import contextlib
with contextlib.closing(urllib2.urlopen(req)) as f:
data = f.read()
headers = f.info()
import json
data = json.loads(data)
return data
def queryAcoustIdResults_selectionChangeHandler(self, selection):
self._queryAcoustId_selection = selection
@UserAttrib(
type=Traits.Table(keys=("artist", "title", "album", "track", "score")),
selectionChangeHandler=queryAcoustIdResults_selectionChangeHandler,
addUpdateEvent=True,
)
@property
def queryAcoustIdResults(self):
if getattr(self, "_queryAcoustIdResults_songId", "") != getattr(
self.song, "id", ""
):
return []
return list(getattr(self, "_queryAcoustIdResults", []))
    @UserAttrib(type=Traits.Action, variableWidth=False)
    def queryAcoustId(self):
        """Query AcoustID for the current song and fill the results table."""
        data = self._queryAcoustId()
        self._queryAcoustIdResults_songId = self.song.id
        self._queryAcoustIdResults = []
        for result in data.get("results", []):
            for recording in result.get("recordings", []):
                for resGroup in recording.get("releasegroups", []):
                    # Use the first artist/release/medium/track of each release
                    # group as its representative entry.
                    artist = resGroup["artists"][0]
                    release = resGroup["releases"][0]
                    medium = release["mediums"][0]
                    track = medium["tracks"][0]
                    if artist["name"] == "Various Artists":
                        # Compilation: prefer the per-track artist.
                        artist = track["artists"][0]
                    entry = {
                        "id": result["id"],
                        "score": result["score"],
                        "recording-id": recording["id"],
                        "releasegroup-id": resGroup["id"],
                        "artist-id": artist["id"],
                        "artist": artist["name"],
                        "title": track["title"],
                        "album": resGroup["title"],
                        "track": "%i/%i" % (track["position"], medium["track_count"]),
                    }
                    self._queryAcoustIdResults += [entry]
        if not self._queryAcoustIdResults:
            # Placeholder row so the table shows an explicit "no match" state.
            self._queryAcoustIdResults += [
                {"artist": "- None found -", "title": "", "album": "", "track": ""}
            ]
        self.__class__.queryAcoustIdResults.updateEvent(self).push()
    @UserAttrib(type=Traits.Action, variableWidth=False, alignRight=True)
    def apply(self):
        """Copy the selected AcoustID result into the current song's tags."""
        # Ignore stale results that belong to a previously loaded song.
        if getattr(self, "_queryAcoustIdResults_songId", "") != getattr(
            self.song, "id", ""
        ):
            return
        sel = getattr(self, "_queryAcoustId_selection", [])
        if not sel:
            return
        sel = sel[0]
        # The "- None found -" placeholder has empty artist/title; skip it.
        for key in ("artist", "title"):
            if not sel[key]:
                return
        for key in ("artist", "title", "album", "track"):
            setattr(self.song, key, sel[key])
        self._updateEvent.push()  # the song is updating itself - but the edit fields aren't atm...
# Register the song-edit panel as a context-root GUI object (shortcut "i").
gui.registerCtxRootObj(clazz=SongEdit, name="Song edit", priority=-2, keyShortcut="i")
|
QT | WBG_Tree | import collections
from Code import TrListas
from Code.Constantes import *
from Code.QT import (
Controles,
FormLayout,
Iconos,
PantallaColores,
QTUtil,
QTUtil2,
QTVarios,
WBG_Comun,
)
from PyQt4 import QtCore, QtGui
# Move-valuation codes; the numeric values follow PGN NAG numbering
# (1=good, 2=bad, 3=very good, 4=very bad, 5=interesting, 6=dubious).
SIN_VALORACION, MUY_MALO, MALO, BUENO, MUY_BUENO, INTERESANTE, DUDOSA = (
    0,
    4,
    2,
    1,
    3,
    5,
    6,
)
# Positional-advantage codes; the non-zero values follow PGN NAG numbering
# 11..19 (equal / white-black slightly better / better / much better).
(
    V_SIN,
    V_IGUAL,
    V_BLANCAS,
    V_NEGRAS,
    V_BLANCAS_MAS,
    V_NEGRAS_MAS,
    V_BLANCAS_MAS_MAS,
    V_NEGRAS_MAS_MAS,
) = (0, 11, 14, 15, 16, 17, 18, 19)
class TreeMoves(QtGui.QTreeWidget):
    """Tree view of the move graph of a book guide.

    Columns: moves, transposition marker, bookmark marker, analysis, comments,
    plus a hidden column 5 carrying the node position.  Each tree item is
    mapped back to its move object through ``self.dicItems``.
    """

    def __init__(self, wmoves):
        QtGui.QTreeWidget.__init__(self)
        self.wmoves = wmoves
        self.itemActivo = None
        self.setAlternatingRowColors(True)
        # str(item) -> move object; lets the handlers recover the move behind
        # any QTreeWidgetItem.
        self.dicItems = {}
        # Column indexes.
        self.posMoves = 0
        self.posTransposition = 1
        self.posBookmark = 2
        self.posAnalisis = 3
        self.posComment = 4
        self.setHeaderLabels((_("Moves"), "", "", _("Analysis"), _("Comments"), ""))
        self.setColumnHidden(5, True)  # column 5 only stores the node position
        self.setIndentation(14)
        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.customContextMenuRequested.connect(self.menuContexto)
        self.setStyleSheet(
            "selection-background-color: #F1D369; selection-color: #000000;"
        )
        ftxt = Controles.TipoLetra(puntos=9)
        self.setFont(ftxt)
        self.connect(
            self, QtCore.SIGNAL("itemExpanded(QTreeWidgetItem *)"), self.expandido
        )
        self.connect(
            self, QtCore.SIGNAL("itemSelectionChanged()"), self.seleccionadoISC
        )
        self.connect(
            self,
            QtCore.SIGNAL("itemDoubleClicked(QTreeWidgetItem *,int)"),
            self.dobleClick,
        )
        self.noIcon = QtGui.QIcon()
        self.iconBookmark = Iconos.Favoritos()
        self.iconTransposition = Iconos.Transposition()
        dicNAGs = TrListas.dicNAGs()
        # Valuation code -> (label, icon), kept in display order.
        self.dicValoracion = collections.OrderedDict()
        self.dicValoracion[BUENO] = (dicNAGs[1], PantallaColores.nag2ico(1, 16))
        self.dicValoracion[MALO] = (dicNAGs[2], PantallaColores.nag2ico(2, 16))
        self.dicValoracion[MUY_BUENO] = (dicNAGs[3], PantallaColores.nag2ico(3, 16))
        self.dicValoracion[MUY_MALO] = (dicNAGs[4], PantallaColores.nag2ico(4, 16))
        self.dicValoracion[INTERESANTE] = (dicNAGs[5], PantallaColores.nag2ico(5, 16))
        self.dicValoracion[DUDOSA] = (dicNAGs[6], PantallaColores.nag2ico(6, 16))
        self.dicValoracion[SIN_VALORACION] = (_("No rating"), self.noIcon)
        # Advantage code -> (label, icon).
        self.dicVentaja = collections.OrderedDict()
        self.dicVentaja[V_SIN] = (_("Undefined"), self.noIcon)
        self.dicVentaja[V_IGUAL] = (dicNAGs[11], Iconos.V_Blancas_Igual_Negras())
        self.dicVentaja[V_BLANCAS] = (dicNAGs[14], Iconos.V_Blancas())
        self.dicVentaja[V_BLANCAS_MAS] = (dicNAGs[16], Iconos.V_Blancas_Mas())
        self.dicVentaja[V_BLANCAS_MAS_MAS] = (dicNAGs[18], Iconos.V_Blancas_Mas_Mas())
        self.dicVentaja[V_NEGRAS] = (dicNAGs[15], Iconos.V_Negras())
        self.dicVentaja[V_NEGRAS_MAS] = (dicNAGs[17], Iconos.V_Negras_Mas())
        self.dicVentaja[V_NEGRAS_MAS_MAS] = (dicNAGs[19], Iconos.V_Negras_Mas_Mas())

    def dobleClick(self, item, col):
        """Double click: offer transpositions or show analysis for the move."""
        move = self.dicItems.get(str(item), None)
        if move is None:
            return
        elif col == self.posTransposition:
            tr = move.transpositions()
            ntr = len(tr)
            if ntr == 0:
                return
            menutr = QTVarios.LCMenu(self)
            menutr.opcion(
                None, move.allPGN(), Iconos.Transposition(), siDeshabilitado=True
            )
            menutr.separador()
            for n, mv in enumerate(tr):
                menutr.opcion("tr_%d" % n, mv.allPGN(), Iconos.PuntoVerde())
            resp = menutr.lanza()
            if resp:
                # Menu keys look like "tr_<index>".
                move = tr[int(resp[3:])]
                self.wmoves.seleccionaMove(move)
        elif col == self.posAnalisis:
            rm = move.analisis()
            if rm:
                fen = move.father().fen()
                pv = move.pv() + " " + rm.pv
                self.analisis.showAnalisis(fen, pv, rm)

    def setBookGuide(self, bookGuide, procesador):
        """Attach the book guide model and create the analysis helper."""
        self.bookGuide = bookGuide
        self.analisis = WBG_Comun.Analisis(
            self, bookGuide, self.resetAnalisis, procesador
        )

    def menuContexto(self, position):
        """Show the context menu for the current move and run the chosen action."""
        item, move = self.moveActual()
        if not move:
            return
        menu = QTVarios.LCMenu(self)
        menu1 = menu.submenu(_("Expand"), Iconos.Mas22())
        menu1.opcion("expandall", _("All"), Iconos.PuntoVerde())
        menu1.separador()
        menu1.opcion("expandthis", _("This branch"), Iconos.PuntoAmarillo())
        menu.separador()
        menu1 = menu.submenu(_("Collapse"), Iconos.Menos22())
        menu1.opcion("collapseall", _("All"), Iconos.PuntoVerde())
        menu1.separador()
        menu1.opcion("collapsethis", _("This branch"), Iconos.PuntoAmarillo())
        menu.separador()
        menu.opcion("remove", _("Remove"), Iconos.Borrar())
        menu.separador()
        padre = move.father()
        fenM2 = padre.fenM2()
        rmAnalisis = move.analisis()
        siShowMoves = rmAnalisis is not None
        self.analisis.menuAnalizar(fenM2, menu, siShowMoves)
        menu.separador()
        if move.mark():
            menu.opcion("mark_rem", _("Remove bookmark"), self.iconBookmark)
        else:
            menu.opcion("mark_add", _("Add bookmark"), self.iconBookmark)
        tr = move.transpositions()
        # FIX: the original condition was "tr and tr > 1", which compares a
        # list with an int (always True on Python 2 for any list), so the
        # submenu appeared even for a single transposition.  The apparent
        # intent of "> 1" is to offer the submenu only when there is more
        # than one transposition to choose from.
        if len(tr) > 1:
            menu.separador()
            menutr = menu.submenu(_("Transpositions"), Iconos.Transposition())
            for n, mv in enumerate(tr):
                menutr.opcion("tr_%d" % n, mv.allPGN(), Iconos.PuntoVerde())
            menutr.separador()
        resp = menu.lanza()
        if resp:
            if resp == "expandthis":
                self.showFrom(move, True)
                self.resizeColumnToContents(0)
            elif resp == "expandall":
                self.showFrom(self.bookGuide.root, True, siRoot=True)
                self.resizeColumnToContents(0)
            elif resp == "collapsethis":
                self.showFrom(move, False)
            elif resp == "collapseall":
                for move in self.bookGuide.root.children():
                    self.showFrom(move, False)
            elif resp == "remove":
                self.borrar()
            elif resp.startswith("mark_rem"):
                move.mark("")
                self.ponIconoBookmark(item, move.mark())
                self.wmoves.compruebaBookmarks()
            elif resp.startswith("mark_add"):
                self.newBookmark(move)
            elif resp.startswith("tr_"):
                self.wmoves.seleccionaMove(tr[int(resp[3:])])
            elif resp.startswith("an_"):
                fen = padre.fen()
                pv = move.pv() + " " + rmAnalisis.pv if siShowMoves else None
                self.analisis.exeAnalizar(fenM2, resp, padre, fen, pv, rmAnalisis)

    def showUnMove(self, itemBase, unMove):
        """Create the tree item for one move (if it does not exist yet)."""
        if unMove.item() is None:
            pgn = unMove.pgnNum()
            comentario = unMove.commentLine()
            posicion = str(unMove.pos())
            puntos = unMove.etiPuntos()
            item = QtGui.QTreeWidgetItem(
                itemBase, [pgn, "", "", puntos, comentario, posicion]
            )
            item.setTextAlignment(self.posAnalisis, QtCore.Qt.AlignRight)
            self.ponIconoValoracion(item, unMove.nag())
            self.ponIconoVentaja(item, unMove.adv())
            self.ponIconoBookmark(item, unMove.mark())
            if unMove.transpositions():
                item.setIcon(self.posTransposition, self.iconTransposition)
            self.dicItems[str(item)] = unMove
            unMove.item(item)

    def showFrom(self, unMove, siExpand, siRoot=False):
        """Expand or collapse the branch below unMove.

        When expanding, a cancellable wait dialog is shown and the whole
        subtree is materialized recursively.
        """
        if siExpand:
            mens = QTUtil2.mensEspera
            mens.inicio(self, _("Expanding"), siCancelar=True)

        def work(move):
            # mens only exists when expanding; short-circuit keeps this safe.
            if siExpand and mens.cancelado():
                return False
            itemBase = move.item()
            for uno in move.children():
                if uno.item() is None:
                    self.showUnMove(itemBase, uno)
                if siExpand:
                    uno.item().setExpanded(True)
                    work(uno)
            if not siRoot:
                itemBase.setExpanded(siExpand)

        work(unMove)
        if siExpand:
            mens.final()
        return True

    def showChildren(self, unMove, siNietos):
        """Materialize children of unMove (and grandchildren when siNietos)."""
        itemBase = unMove.item()
        for uno in unMove.children():
            if uno.item() is None:
                self.showUnMove(itemBase, uno)
            if siNietos:
                self.showChildren(uno, False)
        self.resizeColumnToContents(0)

    def focusInEvent(self, event):
        """Re-announce the active move when the widget regains focus."""
        self.seleccionado(self.itemActivo)
        self.wmoves.focusInEvent(event)

    def moveActual(self):
        """Return (item, move) for the active row, or (None, None)."""
        item = self.itemActivo
        if item:
            mov = self.dicItems[str(item)]
        else:
            mov = None
        return item, mov

    def seleccionadoISC(self):
        """Selection-changed slot: track and announce the current item."""
        self.itemActivo = self.currentItem()
        self.seleccionado(self.itemActivo)

    def seleccionado(self, item):
        """Notify the owner that the move behind item is selected."""
        if item:
            uno = self.dicItems[str(item)]
            self.wmoves.seleccionado(uno)
            self.resizeColumnToContents(self.posMoves)

    def expandido(self, item):
        """Item-expanded slot: lazily build children and grandchildren."""
        uno = self.dicItems[str(item)]
        self.showChildren(uno, True)

    def ponIconoVentaja(self, item, ventaja):
        """Set the advantage icon; unknown codes fall back to V_SIN."""
        if ventaja not in self.dicVentaja:
            ventaja = 0
        item.setIcon(self.posAnalisis, self.dicVentaja[ventaja][1])

    def ponIconoValoracion(self, item, valoracion):
        """Set the valuation (NAG) icon on the moves column."""
        item.setIcon(self.posMoves, self.dicValoracion[valoracion][1])

    def ponIconoBookmark(self, item, mark):
        """Show or clear the bookmark icon according to mark."""
        ico = self.iconBookmark if mark else self.noIcon
        item.setIcon(self.posBookmark, ico)

    def resetValoracion(self, move):
        """Refresh the valuation icon of move's item."""
        self.ponIconoValoracion(move.item(), move.nag())

    def resetVentaja(self, move):
        """Refresh the advantage icon of move's item."""
        self.ponIconoVentaja(move.item(), move.adv())

    def resetComentario(self, move):
        """Refresh the comment column of move's item."""
        if move.pv():
            move.item().setText(self.posComment, move.commentLine())

    def resetAnalisis(self, padre):
        """Refresh the analysis column for all children of padre."""
        for uno in padre.children():
            item = uno.item()
            item.setText(self.posAnalisis, uno.etiPuntos())

    def borrar(self):
        """Ask for confirmation and delete the active move from the guide."""
        item, mov = self.moveActual()
        if item:
            if QTUtil2.pregunta(self, _X(_("Delete %1?"), mov.pgn())):
                um = QTUtil2.unMomento(self)
                self.removeRow()
                liBorrados, liQuitarTransposition = self.bookGuide.borrar(mov)
                for mv in liBorrados:
                    item = mv.item()
                    if item:
                        del self.dicItems[str(item)]
                for mv in liQuitarTransposition:
                    item = mv.item()
                    if item:
                        item.setIcon(self.posTransposition, QtGui.QIcon())
                mov.father().delChildren(mov)
                um.final()
                self.wmoves.compruebaBookmarks()

    def removeRow(self):
        """Remove the currently selected row from the underlying model."""
        index = self.currentIndex()
        model = self.model()
        model.removeRow(index.row(), index.parent())

    def newBookmark(self, move):
        """Open the bookmark dialog for move and store the chosen name."""
        comment = move.comment()
        allpgn = move.allPGN()
        siComment = len(comment) > 0
        txt = comment if siComment else allpgn
        liGen = [(None, None)]
        liGen.append((_("Name") + ":", txt))
        liGen.append((_("Copy PGN") + ":", False))
        if siComment:
            liGen.append((_("Copy comment") + ":", False))
        reg = KRegistro()
        reg.allpgn = allpgn
        reg.comment = comment.split("\n")[0].strip()
        reg.form = None

        def dispatch(valor):
            # First call hands us the form; later calls react to the
            # "copy PGN"/"copy comment" checkboxes.
            if reg.form is None:
                reg.form = valor
                reg.wname = valor.getWidget(0)
                reg.wpgn = valor.getWidget(1)
                reg.wcomment = valor.getWidget(2)
                reg.wpgn.setText(reg.allpgn)
                if reg.wcomment:
                    reg.wcomment.setText(reg.comment)
            else:
                QTUtil.refreshGUI()
                if reg.wpgn.isChecked():
                    reg.wname.setText(reg.allpgn)
                elif reg.wcomment and reg.wcomment.isChecked():
                    reg.wname.setText(reg.comment)
                if reg.wcomment:
                    reg.wcomment.setChecked(False)
                reg.wpgn.setChecked(False)
                QTUtil.refreshGUI()

        resultado = FormLayout.fedit(
            liGen,
            title=_("Bookmark"),
            parent=self.wmoves,
            anchoMinimo=460,
            icon=Iconos.Favoritos(),
            dispatch=dispatch,
        )
        if resultado is None:
            return None
        accion, liResp = resultado
        txt = liResp[0].strip()
        if txt:
            move.mark(txt)
            self.ponIconoBookmark(move.item(), move.mark())
            self.wmoves.compruebaBookmarks()
|
engine | conftest | import datetime
import json
import os
import sys
import typing
import uuid
from importlib import import_module, reload
import pytest
from apps.alerts.models import (
Alert,
AlertGroupLogRecord,
AlertReceiveChannel,
MaintainableObject,
ResolutionNote,
listen_for_alertgrouplogrecord,
listen_for_alertreceivechannel_model_save,
)
from apps.alerts.signals import user_notification_action_triggered_signal
from apps.alerts.tests.factories import (
AlertFactory,
AlertGroupFactory,
AlertGroupLogRecordFactory,
AlertReceiveChannelFactory,
ChannelFilterFactory,
CustomActionFactory,
EscalationChainFactory,
EscalationPolicyFactory,
InvitationFactory,
ResolutionNoteFactory,
ResolutionNoteSlackMessageFactory,
)
from apps.api.permissions import (
ACTION_PREFIX,
GrafanaAPIPermission,
LegacyAccessControlCompatiblePermission,
LegacyAccessControlRole,
RBACPermission,
)
from apps.auth_token.models import ApiAuthToken, PluginAuthToken, SlackAuthToken
from apps.base.models.user_notification_policy_log_record import (
UserNotificationPolicyLogRecord,
listen_for_usernotificationpolicylogrecord_model_save,
)
from apps.base.tests.factories import (
LiveSettingFactory,
UserNotificationPolicyFactory,
UserNotificationPolicyLogRecordFactory,
)
from apps.email.tests.factories import EmailMessageFactory
from apps.heartbeat.tests.factories import IntegrationHeartBeatFactory
from apps.mobile_app.models import MobileAppAuthToken, MobileAppVerificationToken
from apps.phone_notifications.phone_backend import PhoneBackend
from apps.phone_notifications.tests.factories import (
PhoneCallRecordFactory,
SMSRecordFactory,
)
from apps.phone_notifications.tests.mock_phone_provider import MockPhoneProvider
from apps.schedules.models import OnCallScheduleWeb
from apps.schedules.tests.factories import (
CustomOnCallShiftFactory,
OnCallScheduleCalendarFactory,
OnCallScheduleFactory,
OnCallScheduleICalFactory,
ShiftSwapRequestFactory,
)
from apps.slack.client import SlackClient
from apps.slack.tests.factories import (
SlackChannelFactory,
SlackMessageFactory,
SlackTeamIdentityFactory,
SlackUserGroupFactory,
SlackUserIdentityFactory,
)
from apps.telegram.tests.factories import (
TelegramChannelFactory,
TelegramChannelVerificationCodeFactory,
TelegramMessageFactory,
TelegramToUserConnectorFactory,
TelegramVerificationCodeFactory,
)
from apps.user_management.models.user import User, listen_for_user_model_save
from apps.user_management.tests.factories import (
OrganizationFactory,
RegionFactory,
TeamFactory,
UserFactory,
)
from apps.webhooks.presets.preset_options import WebhookPresetOptions
from apps.webhooks.tests.factories import CustomWebhookFactory, WebhookResponseFactory
from apps.webhooks.tests.test_webhook_presets import (
TEST_WEBHOOK_PRESET_ID,
TestWebhookPreset,
)
from celery import Task
from django.db.models.signals import post_save
from django.urls import clear_url_caches
from django.utils import timezone
from pytest_factoryboy import register
from rest_framework.test import APIClient
from telegram import Bot
# Register factoryboy factories so pytest-factoryboy exposes them as fixtures.
register(OrganizationFactory)
register(UserFactory)
register(TeamFactory)
register(AlertReceiveChannelFactory)
register(ChannelFilterFactory)
register(EscalationPolicyFactory)
register(OnCallScheduleICalFactory)
register(OnCallScheduleCalendarFactory)
register(CustomOnCallShiftFactory)
register(ShiftSwapRequestFactory)
register(AlertFactory)
register(AlertGroupFactory)
register(AlertGroupLogRecordFactory)
register(InvitationFactory)
register(CustomActionFactory)
register(SlackUserGroupFactory)
register(SlackUserIdentityFactory)
register(SlackTeamIdentityFactory)
register(SlackMessageFactory)
register(TelegramToUserConnectorFactory)
register(TelegramChannelFactory)
register(TelegramVerificationCodeFactory)
register(TelegramChannelVerificationCodeFactory)
register(TelegramMessageFactory)
register(ResolutionNoteSlackMessageFactory)
register(PhoneCallRecordFactory)
register(SMSRecordFactory)
register(EmailMessageFactory)
register(IntegrationHeartBeatFactory)
register(LiveSettingFactory)
# Whether the test run exercises RBAC permissions (default) or legacy roles.
IS_RBAC_ENABLED = os.getenv("ONCALL_TESTING_RBAC_ENABLED", "True") == "True"
@pytest.fixture(autouse=True)
def mock_slack_api_call(monkeypatch):
    """Replace SlackClient.api_call with a canned successful response for all tests."""
    def mock_api_call(*args, **kwargs):
        return {
            "status": 200,
            "usergroups": [],
            "channel": {"id": "TEST_CHANNEL_ID"},
            "user": {
                "name": "TEST_SLACK_LOGIN",
                "real_name": "TEST_SLACK_NAME",
                "profile": {"image_512": "TEST_SLACK_IMAGE"},
            },
            "team": {"name": "TEST_TEAM"},
        }
    monkeypatch.setattr(SlackClient, "api_call", mock_api_call)
@pytest.fixture(autouse=True)
def mock_telegram_bot_username(monkeypatch):
    """Stub Bot.username so no Telegram API request is made during tests."""
    def mock_username(*args, **kwargs):
        return "oncall_bot"
    monkeypatch.setattr(Bot, "username", mock_username)
@pytest.fixture(autouse=True)
def mock_phone_provider(monkeypatch):
    """Make PhoneBackend use the in-memory mock phone provider for all tests."""
    def mock_get_provider(*args, **kwargs):
        return MockPhoneProvider()
    monkeypatch.setattr(PhoneBackend, "_get_phone_provider", mock_get_provider)
@pytest.fixture(autouse=True)
def mock_apply_async(monkeypatch):
    """Prevent Celery tasks from being enqueued; return a fake task id instead."""
    def mock_apply_async(*args, **kwargs):
        return uuid.uuid4()
    monkeypatch.setattr(Task, "apply_async", mock_apply_async)
@pytest.fixture
def make_organization():
    """Factory fixture: build an Organization honouring the RBAC test flag."""

    def _make_organization(**kwargs):
        return OrganizationFactory(**kwargs, is_rbac_permissions_enabled=IS_RBAC_ENABLED)

    return _make_organization
@pytest.fixture
def make_user_for_organization(make_user):
    """Factory fixture: create a user attached to an organization.

    The ``listen_for_user_model_save`` post_save handler is suspended while
    the factory runs so that building a test user does not trigger the
    production signal side effects, and is reconnected afterwards.
    """

    def _make_user_for_organization(
        organization, role: typing.Optional[LegacyAccessControlRole] = None, **kwargs
    ):
        post_save.disconnect(listen_for_user_model_save, sender=User)
        try:
            user = make_user(organization=organization, role=role, **kwargs)
        finally:
            # BUG FIX: the original called disconnect() a second time here,
            # leaving the signal permanently detached for the rest of the test
            # session; it must be re-connected (mirrors the other fixtures in
            # this file that use the disconnect/connect pattern).
            post_save.connect(listen_for_user_model_save, sender=User)
        return user

    return _make_user_for_organization
@pytest.fixture
def make_token_for_organization():
    """Factory fixture: create a PluginAuthToken for the given organization."""

    def _make_token_for_organization(organization):
        return PluginAuthToken.create_auth_token(organization)

    return _make_token_for_organization
@pytest.fixture
def make_mobile_app_verification_token_for_user():
    """Factory fixture: create a MobileAppVerificationToken for a user/org pair."""

    def _make_mobile_app_verification_token_for_user(user, organization):
        return MobileAppVerificationToken.create_auth_token(user, organization)

    return _make_mobile_app_verification_token_for_user
@pytest.fixture
def make_mobile_app_auth_token_for_user():
    """Factory fixture: create a MobileAppAuthToken for a user/org pair."""

    def _make_mobile_app_auth_token_for_user(user, organization):
        return MobileAppAuthToken.create_auth_token(user, organization)

    return _make_mobile_app_auth_token_for_user
@pytest.fixture
def make_slack_token_for_user():
    """Factory fixture: create a SlackAuthToken for a user's organization."""

    def _make_slack_token_for_user(user):
        return SlackAuthToken.create_auth_token(organization=user.organization, user=user)

    return _make_slack_token_for_user
@pytest.fixture
def make_public_api_token():
    """Factory fixture: create a named public ApiAuthToken."""

    def _make_public_api_token(user, organization, name="test_api_token"):
        return ApiAuthToken.create_auth_token(user, organization, name)

    return _make_public_api_token
@pytest.fixture
def make_user_auth_headers():
    """Factory fixture: build the HTTP auth headers the Grafana plugin sends."""

    def _make_user_auth_headers(
        user,
        token,
        grafana_token: typing.Optional[str] = None,
        grafana_context_data: typing.Optional[typing.Dict] = None,
    ):
        org = user.organization
        instance_context = {"stack_id": org.stack_id, "org_id": org.org_id}
        if grafana_token is not None:
            instance_context["grafana_token"] = grafana_token

        grafana_context = {"UserId": user.user_id}
        if grafana_context_data is not None:
            grafana_context.update(grafana_context_data)

        return {
            "HTTP_X-Instance-Context": json.dumps(instance_context),
            "HTTP_X-Grafana-Context": json.dumps(grafana_context),
            "HTTP_AUTHORIZATION": f"{token}",
        }

    return _make_user_auth_headers
# Maps a basic Grafana role to the list of RBAC permissions granted to it.
RoleMapping = typing.Dict[
    LegacyAccessControlRole, typing.List[LegacyAccessControlCompatiblePermission]
]
def get_user_permission_role_mapping_from_frontend_plugin_json() -> RoleMapping:
    """
    This is used to take the RBAC permission -> basic role grants on the frontend
    and test that the RBAC grants work the same way against the backend in terms of authorization
    """

    class PluginJSONRoleDefinition(typing.TypedDict):
        permissions: typing.List[GrafanaAPIPermission]

    class PluginJSONRole(typing.TypedDict):
        role: PluginJSONRoleDefinition
        grants: typing.List[str]

    class PluginJSON(typing.TypedDict):
        roles: typing.List[PluginJSONRole]

    with open("../grafana-plugin/src/plugin.json") as fp:
        plugin_json: PluginJSON = json.load(fp)

    role_mapping: RoleMapping = {
        LegacyAccessControlRole.VIEWER: [],
        LegacyAccessControlRole.EDITOR: [],
        LegacyAccessControlRole.ADMIN: [],
    }
    # action string (e.g. "grafana-oncall-app.alert-groups:read") -> permission
    all_permission_classes: typing.Dict[
        str, LegacyAccessControlCompatiblePermission
    ] = {
        getattr(RBACPermission.Permissions, attr).value: getattr(
            RBACPermission.Permissions, attr
        )
        for attr in dir(RBACPermission.Permissions)
        if not attr.startswith("_")
    }

    # we just care about getting the basic role grants, everything else can be ignored
    for role in plugin_json["roles"]:
        if grants := role["grants"]:
            for permission in role["role"]["permissions"]:
                # only concerned with grafana-oncall-app specific grants
                # ignore things like plugins.app:access actions
                action = permission["action"]
                permission_class = None
                if action.startswith(ACTION_PREFIX):
                    permission_class = all_permission_classes[action]
                if permission_class:
                    for grant in grants:
                        try:
                            basic_role = LegacyAccessControlRole[grant.upper()]
                        except KeyError:
                            # may come across grants like "Grafana Admin"
                            # which we can ignore
                            continue
                        # BUG FIX: the original rebound the loop variable
                        # ``role`` and tested ``role not in role_mapping[role]``,
                        # comparing a role enum against a list of permissions
                        # (always True), which could append duplicates.  Dedup
                        # on the permission class instead.
                        if permission_class not in role_mapping[basic_role]:
                            role_mapping[basic_role].append(permission_class)

    return role_mapping
# Computed once at import time; used by make_user to grant role-default permissions.
ROLE_PERMISSION_MAPPING = get_user_permission_role_mapping_from_frontend_plugin_json()
@pytest.fixture
def make_user():
    """Factory fixture: create a user, defaulting to the ADMIN role.

    When RBAC is enabled and no explicit permissions are passed, the user is
    granted the permissions plugin.json maps to their basic role.
    """
    def _make_user(role: typing.Optional[LegacyAccessControlRole] = None, **kwargs):
        role = LegacyAccessControlRole.ADMIN if role is None else role
        permissions = kwargs.pop("permissions", None)
        if permissions is None:
            permissions_to_grant = (
                ROLE_PERMISSION_MAPPING[role] if IS_RBAC_ENABLED else []
            )
            permissions = [
                GrafanaAPIPermission(action=perm.value) for perm in permissions_to_grant
            ]
        return UserFactory(role=role, permissions=permissions, **kwargs)
    return _make_user
@pytest.fixture
def make_organization_and_user(make_organization, make_user_for_organization):
    """Factory fixture: create an organization together with one user in it."""

    def _make_organization_and_user(
        role: typing.Optional[LegacyAccessControlRole] = None,
    ):
        org = make_organization()
        return org, make_user_for_organization(organization=org, role=role)

    return _make_organization_and_user
@pytest.fixture
def make_organization_and_user_with_slack_identities(
    make_organization_with_slack_team_identity, make_user_with_slack_user_identity
):
    """Factory fixture: org + user, each linked to matching Slack identities."""

    def _make_organization_and_user_with_slack_identities(
        role: typing.Optional[LegacyAccessControlRole] = None,
    ):
        org, team_identity = make_organization_with_slack_team_identity()
        user, user_identity = make_user_with_slack_user_identity(
            team_identity, org, role=role
        )
        return org, user, team_identity, user_identity

    return _make_organization_and_user_with_slack_identities
@pytest.fixture
def make_user_with_slack_user_identity(make_user):
    """Factory fixture: create a Slack user identity plus a user linked to it."""

    def _make_slack_user_identity_with_user(
        slack_team_identity,
        organization,
        role: typing.Optional[LegacyAccessControlRole] = None,
        **kwargs,
    ):
        identity = SlackUserIdentityFactory(
            slack_team_identity=slack_team_identity, **kwargs
        )
        user = make_user(
            slack_user_identity=identity, organization=organization, role=role
        )
        return user, identity

    return _make_slack_user_identity_with_user
@pytest.fixture
def make_organization_with_slack_team_identity(
    make_slack_team_identity, make_organization
):
    """Factory fixture: create a Slack team identity and an org bound to it."""

    def _make_slack_team_identity_with_organization(**kwargs):
        team_identity = make_slack_team_identity(**kwargs)
        return make_organization(slack_team_identity=team_identity), team_identity

    return _make_slack_team_identity_with_organization
@pytest.fixture
def make_slack_team_identity():
    """Factory fixture: create a SlackTeamIdentity."""

    def _make_slack_team_identity(**kwargs):
        return SlackTeamIdentityFactory(**kwargs)

    return _make_slack_team_identity
@pytest.fixture
def make_slack_user_identity():
    """Factory fixture: create a SlackUserIdentity."""

    def _make_slack_user_identity(**kwargs):
        return SlackUserIdentityFactory(**kwargs)

    return _make_slack_user_identity
@pytest.fixture
def make_slack_message():
    """Factory fixture: create a SlackMessage for an alert group or organization.

    When no organization is given, it is taken from the alert group's channel.
    """

    def _make_slack_message(alert_group=None, organization=None, **kwargs):
        org = organization or alert_group.channel.organization
        return SlackMessageFactory(
            alert_group=alert_group,
            organization=org,
            _slack_team_identity=org.slack_team_identity,
            **kwargs,
        )

    return _make_slack_message
@pytest.fixture
def client_with_user():
    """Factory fixture: DRF APIClient with the given user force-logged-in."""
    def _client_with_user(user):
        """The client with logged in user"""
        client = APIClient()
        client.force_login(user)
        return client
    return _client_with_user
@pytest.fixture
def make_team():
    """Factory fixture: create a Team inside an organization."""

    def _make_team(organization, **kwargs):
        return TeamFactory(organization=organization, **kwargs)

    return _make_team
@pytest.fixture
def make_alert_receive_channel():
    """Factory fixture: create an integration with its save signal suppressed."""

    def _make_alert_receive_channel(organization, **kwargs):
        kwargs.setdefault("integration", AlertReceiveChannel.INTEGRATION_GRAFANA)
        # Detach the post_save handler while building the test object, then
        # restore it so later saves still fire the signal.
        post_save.disconnect(
            listen_for_alertreceivechannel_model_save, sender=AlertReceiveChannel
        )
        channel = AlertReceiveChannelFactory(organization=organization, **kwargs)
        post_save.connect(
            listen_for_alertreceivechannel_model_save, sender=AlertReceiveChannel
        )
        return channel

    return _make_alert_receive_channel
@pytest.fixture
def make_alert_receive_channel_with_post_save_signal():
    """Factory fixture: create an integration with the post_save signal active."""

    def _make_alert_receive_channel(organization, **kwargs):
        kwargs.setdefault("integration", AlertReceiveChannel.INTEGRATION_GRAFANA)
        return AlertReceiveChannelFactory(organization=organization, **kwargs)

    return _make_alert_receive_channel
@pytest.fixture
def make_channel_filter():
    """Factory fixture: create a ChannelFilter (route) for an integration."""

    def _make_channel_filter(alert_receive_channel, filtering_term=None, **kwargs):
        return ChannelFilterFactory(
            filtering_term=filtering_term,
            alert_receive_channel=alert_receive_channel,
            **kwargs,
        )

    return _make_channel_filter
@pytest.fixture
def make_channel_filter_with_post_save():
    # NOTE(review): currently byte-identical to make_channel_filter — no
    # post_save signal is connected or disconnected here despite the name;
    # confirm whether signal handling was intended.
    def _make_channel_filter(alert_receive_channel, filtering_term=None, **kwargs):
        channel_filter = ChannelFilterFactory(
            filtering_term=filtering_term,
            alert_receive_channel=alert_receive_channel,
            **kwargs,
        )
        return channel_filter
    return _make_channel_filter
@pytest.fixture
def make_escalation_chain():
    """Factory fixture: create an EscalationChain in an organization."""

    def _make_escalation_chain(organization, **kwargs):
        return EscalationChainFactory(organization=organization, **kwargs)

    return _make_escalation_chain
@pytest.fixture
def make_escalation_policy():
    """Factory fixture: create an EscalationPolicy step on a chain."""

    def _make_escalation_policy(escalation_chain, escalation_policy_step, **kwargs):
        return EscalationPolicyFactory(
            escalation_chain=escalation_chain, step=escalation_policy_step, **kwargs
        )

    return _make_escalation_policy
@pytest.fixture
def make_user_notification_policy():
    """Factory fixture: create a UserNotificationPolicy step for a user."""

    def _make_user_notification_policy(user, step, **kwargs):
        return UserNotificationPolicyFactory(user=user, step=step, **kwargs)

    return _make_user_notification_policy
@pytest.fixture
def make_user_notification_policy_log_record():
    """Factory fixture: create a notification-policy log record.

    The model's post_save handler is suspended while the record is created
    and reconnected afterwards.
    """
    def _make_user_notification_policy_log_record(**kwargs):
        post_save.disconnect(
            listen_for_usernotificationpolicylogrecord_model_save,
            sender=UserNotificationPolicyLogRecord,
        )
        user_notification_policy_log_record = UserNotificationPolicyLogRecordFactory(
            **kwargs
        )
        post_save.connect(
            listen_for_usernotificationpolicylogrecord_model_save,
            sender=UserNotificationPolicyLogRecord,
        )
        return user_notification_policy_log_record
    return _make_user_notification_policy_log_record
@pytest.fixture
def make_integration_escalation_chain_route_escalation_policy(
    make_alert_receive_channel,
    make_escalation_chain,
    make_channel_filter,
    make_escalation_policy,
):
    """Factory fixture: build a full integration -> chain -> route -> policy set.

    Returns the tuple (alert_receive_channel, escalation_chain,
    default_channel_filter, escalation_policy).
    """
    def _make_integration_escalation_chain_route_escalation_policy(
        organization, escalation_policy_step
    ):
        alert_receive_channel = make_alert_receive_channel(organization)
        escalation_chain = make_escalation_chain(organization)
        default_channel_filter = make_channel_filter(
            alert_receive_channel, escalation_chain=escalation_chain, is_default=True
        )
        escalation_policy = make_escalation_policy(
            escalation_chain, escalation_policy_step
        )
        return (
            alert_receive_channel,
            escalation_chain,
            default_channel_filter,
            escalation_policy,
        )
    return _make_integration_escalation_chain_route_escalation_policy
@pytest.fixture
def make_invitation():
    """Factory fixture: create an Invitation on an alert group."""

    def _make_invitation(alert_group, author, invitee, **kwargs):
        return InvitationFactory(
            alert_group=alert_group, author=author, invitee=invitee, **kwargs
        )

    return _make_invitation
@pytest.fixture
def make_schedule():
    """Factory fixture: create a schedule using the factory for its class."""

    def _make_schedule(organization, schedule_class, **kwargs):
        factory = OnCallScheduleFactory.get_factory_for_class(schedule_class)
        return factory(organization=organization, **kwargs)

    return _make_schedule
@pytest.fixture
def make_on_call_shift():
    """Factory fixture: create a CustomOnCallShift of the given type."""

    def _make_on_call_shift(organization, shift_type, **kwargs):
        return CustomOnCallShiftFactory(
            organization=organization, type=shift_type, **kwargs
        )

    return _make_on_call_shift
@pytest.fixture
def make_alert_group():
    """Factory fixture: create an AlertGroup on an integration channel."""

    def _make_alert_group(alert_receive_channel, **kwargs):
        return AlertGroupFactory(channel=alert_receive_channel, **kwargs)

    return _make_alert_group
@pytest.fixture
def make_alert_group_log_record():
    """Factory fixture: create an AlertGroupLogRecord with its post_save
    handler suspended during creation."""
    def _make_alert_group_log_record(alert_group, type, author, **kwargs):
        post_save.disconnect(listen_for_alertgrouplogrecord, sender=AlertGroupLogRecord)
        log_record = AlertGroupLogRecordFactory(
            alert_group=alert_group, type=type, author=author, **kwargs
        )
        post_save.connect(listen_for_alertgrouplogrecord, sender=AlertGroupLogRecord)
        return log_record
    return _make_alert_group_log_record
@pytest.fixture
def make_resolution_note():
    """Factory fixture: create a ResolutionNote (defaults to the web source)."""

    def _make_resolution_note(
        alert_group, source=ResolutionNote.Source.WEB, author=None, **kwargs
    ):
        return ResolutionNoteFactory(
            alert_group=alert_group, source=source, author=author, **kwargs
        )

    return _make_resolution_note
@pytest.fixture
def make_resolution_note_slack_message():
    """Factory fixture: create a ResolutionNoteSlackMessage for an alert group."""
    def _make_resolution_note_slack_message(alert_group, user, added_by_user, **kwargs):
        return ResolutionNoteSlackMessageFactory(
            alert_group=alert_group, user=user, added_by_user=added_by_user, **kwargs
        )
    return _make_resolution_note_slack_message
@pytest.fixture
def make_alert():
    """Factory fixture: create an Alert inside an alert group."""

    def _make_alert(alert_group, raw_request_data, **kwargs):
        return AlertFactory(
            group=alert_group, raw_request_data=raw_request_data, **kwargs
        )

    return _make_alert
@pytest.fixture
def make_alert_with_custom_create_method():
    """Factory fixture: create an Alert through the model's own Alert.create()
    classmethod (exercising its full ingestion path) instead of the factory."""
    def _make_alert_with_custom_create_method(
        title,
        message,
        image_url,
        link_to_upstream_details,
        alert_receive_channel,
        integration_unique_data,
        raw_request_data,
        **kwargs,
    ):
        alert = Alert.create(
            title,
            message,
            image_url,
            link_to_upstream_details,
            alert_receive_channel,
            integration_unique_data,
            raw_request_data,
            **kwargs,
        )
        return alert
    return _make_alert_with_custom_create_method
@pytest.fixture
def make_custom_action():
    """Factory fixture: create a CustomAction in an organization."""

    def _make_custom_action(organization, **kwargs):
        return CustomActionFactory(organization=organization, **kwargs)

    return _make_custom_action
@pytest.fixture
def make_custom_webhook():
    """Factory fixture: create a custom webhook in an organization."""

    def _make_custom_webhook(organization, **kwargs):
        return CustomWebhookFactory(organization=organization, **kwargs)

    return _make_custom_webhook
@pytest.fixture
def make_webhook_response():
    """Factory fixture: create a WebhookResponse record."""

    def _make_webhook_response(**kwargs):
        return WebhookResponseFactory(**kwargs)

    return _make_webhook_response
@pytest.fixture
def make_slack_user_group():
    """Factory fixture: create a SlackUserGroup on a team identity."""

    def _make_slack_user_group(slack_team_identity, **kwargs):
        return SlackUserGroupFactory(
            slack_team_identity=slack_team_identity, **kwargs
        )

    return _make_slack_user_group
@pytest.fixture
def make_slack_channel():
    """Factory fixture: create a SlackChannel on a team identity."""

    def _make_slack_channel(slack_team_identity, **kwargs):
        # (The original bound this to a local misleadingly named "schedule".)
        return SlackChannelFactory(slack_team_identity=slack_team_identity, **kwargs)

    return _make_slack_channel
@pytest.fixture()
def mock_start_disable_maintenance_task(monkeypatch):
    """Stub MaintainableObject.start_disable_maintenance_task to avoid
    scheduling real work; returns a fake task id."""
    def mocked_start_disable_maintenance_task(*args, **kwargs):
        return uuid.uuid4()
    monkeypatch.setattr(
        MaintainableObject,
        "start_disable_maintenance_task",
        mocked_start_disable_maintenance_task,
    )
@pytest.fixture()
def make_organization_and_user_with_plugin_token(
    make_organization_and_user, make_token_for_organization
):
    """Fixture factory returning (organization, user, plugin auth token)."""
    def _make_organization_and_user_with_plugin_token(
        role: typing.Optional[LegacyAccessControlRole] = None,
    ):
        organization, user = make_organization_and_user(role)
        _, token = make_token_for_organization(organization)
        return organization, user, token
    return _make_organization_and_user_with_plugin_token
@pytest.fixture()
def make_organization_and_user_with_mobile_app_verification_token(
    make_organization_and_user, make_mobile_app_verification_token_for_user
):
    """Fixture factory returning (organization, user, mobile verification token)."""
    def _make_organization_and_user_with_mobile_app_verification_token(
        role: typing.Optional[LegacyAccessControlRole] = None,
    ):
        organization, user = make_organization_and_user(role)
        _, token = make_mobile_app_verification_token_for_user(user, organization)
        return organization, user, token
    return _make_organization_and_user_with_mobile_app_verification_token
@pytest.fixture()
def make_organization_and_user_with_mobile_app_auth_token(
    make_organization_and_user, make_mobile_app_auth_token_for_user
):
    """Fixture factory returning (organization, user, mobile app auth token)."""
    def _make_organization_and_user_with_mobile_app_auth_token(
        role: typing.Optional[LegacyAccessControlRole] = None,
    ):
        organization, user = make_organization_and_user(role)
        _, token = make_mobile_app_auth_token_for_user(user, organization)
        return organization, user, token
    return _make_organization_and_user_with_mobile_app_auth_token
@pytest.fixture()
def mock_send_user_notification_signal(monkeypatch):
    """Silence user_notification_action_triggered_signal during the test."""
    def mocked_send_signal(*args, **kwargs):
        return None
    monkeypatch.setattr(
        user_notification_action_triggered_signal, "send", mocked_send_signal
    )
@pytest.fixture()
def make_telegram_user_connector():
    """Fixture factory linking a user to a Telegram account."""
    def _make_telegram_user_connector(user, **kwargs):
        return TelegramToUserConnectorFactory(user=user, **kwargs)
    return _make_telegram_user_connector
@pytest.fixture()
def make_telegram_channel():
    """Fixture factory creating a TelegramChannel for an organization."""
    def _make_telegram_channel(organization, is_default_channel=False):
        return TelegramChannelFactory(
            organization=organization, is_default_channel=is_default_channel
        )
    return _make_telegram_channel
@pytest.fixture()
def make_telegram_verification_code():
    """Fixture factory creating a per-user Telegram verification code."""
    def _make_telegram_verification_code(user, **kwargs):
        return TelegramVerificationCodeFactory(user=user, **kwargs)
    return _make_telegram_verification_code
@pytest.fixture()
def make_telegram_channel_verification_code():
    """Fixture factory creating a per-channel Telegram verification code."""
    def _make_telegram_channel_verification_code(organization, author, **kwargs):
        return TelegramChannelVerificationCodeFactory(
            organization=organization, author=author, **kwargs
        )
    return _make_telegram_channel_verification_code
@pytest.fixture()
def make_telegram_message():
    """Fixture factory creating a TelegramMessage tied to an alert group."""
    def _make_telegram_message(alert_group, message_type, **kwargs):
        return TelegramMessageFactory(
            alert_group=alert_group, message_type=message_type, **kwargs
        )
    return _make_telegram_message
@pytest.fixture()
def make_phone_call_record():
    """Fixture factory creating a PhoneCallRecord for a receiver user."""
    def _make_phone_call_record(receiver, **kwargs):
        return PhoneCallRecordFactory(receiver=receiver, **kwargs)
    return _make_phone_call_record
@pytest.fixture()
def make_sms_record():
    """Fixture factory creating an SMSRecord for a receiver user."""
    def _make_sms_record(receiver, **kwargs):
        return SMSRecordFactory(receiver=receiver, **kwargs)
    return _make_sms_record
@pytest.fixture()
def make_email_message():
    """Fixture factory creating an EmailMessage for a receiver user."""
    def _make_email_message(receiver, **kwargs):
        return EmailMessageFactory(receiver=receiver, **kwargs)
    return _make_email_message
@pytest.fixture()
def make_live_setting():
    """Fixture factory creating a LiveSetting identified by name."""
    def _make_live_setting(name, **kwargs):
        return LiveSettingFactory(name=name, **kwargs)
    return _make_live_setting
@pytest.fixture()
def make_integration_heartbeat():
    """Fixture factory creating an IntegrationHeartBeat for a receive channel."""
    def _make_integration_heartbeat(
        alert_receive_channel, timeout_seconds=60, last_heartbeat_time=None, **kwargs
    ):
        # Defaults mirror a freshly configured heartbeat that has not
        # reported yet (no last_heartbeat_time).
        return IntegrationHeartBeatFactory(
            alert_receive_channel=alert_receive_channel,
            timeout_seconds=timeout_seconds,
            last_heartbeat_time=last_heartbeat_time,
            **kwargs,
        )
    return _make_integration_heartbeat
@pytest.fixture
def reload_urls(settings):
    """
    Reloads Django URLs, especially useful when testing conditionally registered URLs
    """
    def _reload_urls():
        # Drop Django's resolver caches so the reloaded URLconf takes effect.
        clear_url_caches()
        urlconf = settings.ROOT_URLCONF
        if urlconf in sys.modules:
            # Already imported once: force re-execution of the module so any
            # feature-flag-dependent url registrations are re-evaluated.
            reload(sys.modules[urlconf])
        else:
            import_module(urlconf)
    return _reload_urls
@pytest.fixture()
def load_slack_urls(settings, reload_urls):
    """Enable the Slack feature flag and re-register its conditional URLs."""
    settings.FEATURE_SLACK_INTEGRATION_ENABLED = True
    reload_urls()
@pytest.fixture
def make_region():
    """Fixture factory creating a Region."""
    def _make_region(**kwargs):
        region = RegionFactory(**kwargs)
        return region
    return _make_region
@pytest.fixture
def make_organization_and_region(make_organization, make_region):
    """Fixture factory creating an organization plus its migration region."""
    def _make_organization_and_region():
        organization = make_organization()
        region = make_region()
        # NOTE(review): in-memory assignment only — no .save() here; callers
        # that need the link persisted must save the organization themselves.
        organization.migration_destination = region
        return organization, region
    return _make_organization_and_region
@pytest.fixture()
def make_organization_and_user_with_token(
    make_organization_and_user, make_public_api_token
):
    """Fixture factory returning (organization, user, public API token)."""
    def _make_organization_and_user_with_token():
        organization, user = make_organization_and_user()
        _, token = make_public_api_token(user, organization)
        return organization, user, token
    return _make_organization_and_user_with_token
@pytest.fixture
def make_shift_swap_request():
    """Fixture factory creating a ShiftSwapRequest on a schedule."""
    def _make_shift_swap_request(schedule, beneficiary, **kwargs):
        return ShiftSwapRequestFactory(
            schedule=schedule, beneficiary=beneficiary, **kwargs
        )
    return _make_shift_swap_request
@pytest.fixture
def shift_swap_request_setup(
    make_schedule,
    make_organization_and_user,
    make_user_for_organization,
    make_shift_swap_request,
):
    """Build a full swap scenario: org, two users, a web schedule and a request.

    Returns (shift_swap_request, beneficiary, benefactor).
    """
    def _shift_swap_request_setup(**kwargs):
        organization, beneficiary = make_organization_and_user()
        benefactor = make_user_for_organization(organization)
        schedule = make_schedule(organization, schedule_class=OnCallScheduleWeb)
        # Swap window: one day long, starting tomorrow.
        swap_start = timezone.now() + datetime.timedelta(days=1)
        swap_end = swap_start + datetime.timedelta(days=1)
        swap_request = make_shift_swap_request(
            schedule,
            beneficiary,
            swap_start=swap_start,
            swap_end=swap_end,
            **kwargs,
        )
        return swap_request, beneficiary, benefactor
    return _shift_swap_request_setup
@pytest.fixture()
def webhook_preset_api_setup():
    """Register the test webhook preset (and its metadata choices) globally."""
    # NOTE(review): mutates module-level WebhookPresetOptions state and does
    # not restore it afterwards; tests needing a clean registry must reset it.
    WebhookPresetOptions.WEBHOOK_PRESETS = {TEST_WEBHOOK_PRESET_ID: TestWebhookPreset()}
    WebhookPresetOptions.WEBHOOK_PRESET_CHOICES = [
        preset.metadata for preset in WebhookPresetOptions.WEBHOOK_PRESETS.values()
    ]
|
dev | change_release | #!/usr/bin/env python3
# SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Change a description of a GitHub release."""
import argparse
import os.path
import sys
import github3
import github3.exceptions
class Error(Exception):
    """Raised for errors in this script (caught in main() to exit cleanly)."""
def read_github_token():
    """Read the GitHub API token from ~/.gh_token on disk."""
    token_path = os.path.join(os.path.expanduser("~"), ".gh_token")
    with open(token_path, encoding="ascii") as token_file:
        return token_file.read().strip()
def find_release(repo, tag):
    """Return the release of *repo* whose tag name equals *tag*.

    Raises Error when no release carries the requested tag.
    """
    for candidate in repo.releases():
        if candidate.tag_name == tag:
            return candidate
    raise Error("No release found for {!r}!".format(tag))
def change_release_description(release, filename, description):
    """Change the description of the *filename* asset of *release*.

    Raises Error when the asset is missing or matched more than once.
    """
    assets = [asset for asset in release.assets() if asset.name == filename]
    if not assets:
        # Include the asset name so the failure is actionable (the original
        # f-strings had lost their placeholder and printed a literal).
        raise Error(f"No assets found for {filename}")
    if len(assets) > 1:
        raise Error(f"Multiple assets found for {filename}: {assets}")
    asset = assets[0]
    # github3.py: edit(name, label) updates the asset's metadata.
    asset.edit(filename, description)
def parse_args(argv=None):
    """Parse command-line arguments.

    Args:
        argv: Optional argument list; None means sys.argv[1:] (argparse
              default). Parameterized so the parser is unit-testable.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("tag")
    parser.add_argument("filename")
    parser.add_argument("description")
    return parser.parse_args(argv)
def main():
    """Entry point: authenticate, locate the release, update the asset."""
    args = parse_args()
    token = read_github_token()
    gh = github3.login(token=token)
    repo = gh.repository("qutebrowser", "qutebrowser")
    try:
        release = find_release(repo, args.tag)
        change_release_description(release, args.filename, args.description)
    except Error as e:
        # Print the error message and exit with a non-zero status.
        sys.exit(str(e))
if __name__ == "__main__":
    main()
|
gpodder | sync | # -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2018 The gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# sync.py -- Device synchronization
# Thomas Perl <thp@perli.net> 2007-12-06
# based on libipodsync.py (2006-04-05 Thomas Perl)
# Ported to gPodder 3 by Joseph Wickremasinghe in June 2012
import logging
import os.path
import threading
import time
import gpodder
from gpodder import download, services, util
import gi # isort:skip
gi.require_version("Gio", "2.0") # isort:skip
from gi.repository import GLib, Gio # isort:skip
logger = logging.getLogger(__name__)
_ = gpodder.gettext
# Optional-dependency feature detection: each flag records whether the
# corresponding sync backend/helper is usable on this system.
gpod_available = True
try:
    from gpodder import libgpod_ctypes
except ImportError:
    # Only a missing module should disable the feature; the previous bare
    # `except:` would also have hidden unrelated errors (even SystemExit).
    logger.info("iPod sync not available")
    gpod_available = False
mplayer_available = util.find_command("mplayer") is not None
eyed3mp3_available = True
try:
    import eyed3.mp3
except ImportError:
    logger.info("eyeD3 MP3 not available")
    eyed3mp3_available = False
def open_device(gui):
    """Instantiate the sync Device matching the configured type, or None."""
    config = gui._config
    device_type = config.device_sync.device_type
    if device_type == "ipod":
        return iPodDevice(
            config, gui.download_status_model, gui.download_queue_manager
        )
    if device_type == "filesystem":
        return MP3PlayerDevice(
            config,
            gui.download_status_model,
            gui.download_queue_manager,
            gui.mount_volume_for_file,
        )
    # Unknown / disabled device type.
    return None
def get_track_length(filename):
    """Return the duration of *filename* in milliseconds.

    Tries MPlayer first, then eyeD3; falls back to a generous three-hour
    default when neither tool can determine the length.
    """
    attempted = False
    if mplayer_available:
        try:
            # NOTE(review): filename is interpolated into a shell command
            # string; a name containing '"' breaks the quoting — consider
            # subprocess with an argument list instead of os.popen.
            mplayer_output = os.popen(
                'mplayer -msglevel all=-1 -identify -vo null -ao null -frames 0 "%s" 2>/dev/null'
                % filename
            ).read()
            # MPlayer reports "ID_LENGTH=<seconds>"; slice past the key (10
            # chars) and convert seconds -> milliseconds.
            return int(
                float(
                    mplayer_output[mplayer_output.index("ID_LENGTH") :].splitlines()[0][
                        10:
                    ]
                )
                * 1000
            )
        except Exception:
            logger.error(
                "MPlayer could not determine length: %s", filename, exc_info=True
            )
            attempted = True
    if eyed3mp3_available:
        try:
            length = int(eyed3.mp3.Mp3AudioFile(filename).info.time_secs * 1000)
            # Notify user on eyed3 success if mplayer failed.
            # A warning is used to make it visible in gpo or on console.
            if attempted:
                logger.warning("eyed3.mp3 successfully determined length: %s", filename)
            return length
        except Exception:
            logger.error(
                "eyed3.mp3 could not determine length: %s", filename, exc_info=True
            )
            attempted = True
    if not attempted:
        logger.warning("Could not determine length: %s", filename)
        logger.warning(
            "Please install MPlayer or the eyed3.mp3 module for track length detection."
        )
    return int(60 * 60 * 1000 * 3)
    # Default is three hours (to be on the safe side)
def episode_filename_on_device(config, episode):
    """
    :param gpodder.config.Config config: configuration (for sync options)
    :param gpodder.model.PodcastEpisode episode: episode to get filename for
    :return str: basename minus extension to use to save episode on device
    """
    source_path = episode.local_filename(create=False)
    # Format and sanitize the episode title for the target device.
    sanitized_base = util.sanitize_filename(
        episode.sync_filename(
            config.device_sync.custom_sync_name_enabled,
            config.device_sync.custom_sync_name,
        ),
        config.device_sync.max_filename_length,
    )
    # Carry over the (lower-cased) extension of the local file.
    target_name = sanitized_base + os.path.splitext(source_path)[1].lower()
    # Fall back to the local basename when the formatted title is empty
    # (some feeds ship episodes with unusable titles).
    if os.path.splitext(target_name)[0] == "":
        target_name = os.path.basename(source_path)
    return target_name
def episode_foldername_on_device(config, episode):
    """
    :param gpodder.config.Config config: configuration (for sync options)
    :param gpodder.model.PodcastEpisode episode: episode to get folder name for
    :return str: folder name to save episode to on device
    """
    if not config.device_sync.one_folder_per_podcast:
        # Flat layout: everything goes into the device root.
        return None
    # One subfolder per podcast, derived from the sanitized channel title.
    return util.sanitize_filename(
        episode.channel.title, config.device_sync.max_filename_length
    )
class SyncTrack(object):
    """A track that already exists on a sync device.

    The GUI displays ``playcount`` and ``podcast``, so pass them as keyword
    arguments whenever they are known; any additional keyword arguments are
    stored as attributes (used e.g. by the iPod code to keep backend
    handles). Unknown fields should simply be omitted — they default to
    0 / None.
    """
    def __init__(self, title, length, modified, **kwargs):
        self.title = title
        self.length = length
        self.filesize = util.format_filesize(length)
        self.modified = modified
        # Defaults for the keyword attributes the GUI relies on; any caller
        # supplied values (plus extra backend handles) override them.
        attributes = {"playcount": 0, "podcast": None}
        attributes.update(kwargs)
        self.__dict__.update(attributes)
    def __repr__(self):
        return f"SyncTrack(title={self.title}, podcast={self.podcast})"
    @property
    def playcount_str(self):
        return str(self.playcount)
class Device(services.ObservableService):
    """Abstract base for sync targets (iPod, filesystem MP3 player).

    Emits the observable signals listed in __init__ so the GUI can follow
    sync progress; subclasses implement the actual transfer primitives.
    """
    def __init__(self, config):
        self._config = config
        self.cancelled = False
        # Only these file types are considered for synchronization.
        self.allowed_types = ["audio", "video"]
        self.errors = []
        # SyncTrack objects describing what is already on the device.
        self.tracks_list = []
        signals = ["progress", "sub-progress", "status", "done", "post-done"]
        services.ObservableService.__init__(self, signals)
    def open(self):
        pass
    def cancel(self):
        self.cancelled = True
        self.notify("status", _("Cancelled by user"))
    def close(self):
        self.notify("status", _("Writing data to disk"))
        if self._config.device_sync.after_sync.sync_disks and not gpodder.ui.win32:
            # Flush OS write buffers (POSIX `sync`; not available on win32).
            os.system("sync")
        else:
            logger.warning("Not syncing disks. Unmount your device before unplugging.")
        return True
    def create_task(self, track):
        return SyncTask(track)
    def cancel_task(self, task):
        pass
    def cleanup_task(self, task):
        pass
    def add_sync_tasks(self, tracklist, force_played=False, done_callback=None):
        # NOTE(review): force_played is currently unused here — confirm
        # whether callers still rely on it.
        for track in list(tracklist):
            # Filter tracks that are not meant to be synchronized
            does_not_exist = not track.was_downloaded(and_exists=True)
            exclude_played = (
                not track.is_new and self._config.device_sync.skip_played_episodes
            )
            wrong_type = track.file_type() not in self.allowed_types
            if does_not_exist:
                tracklist.remove(track)
            elif exclude_played or wrong_type:
                logger.info("Excluding %s from sync", track.title)
                tracklist.remove(track)
        if tracklist:
            # Queue one SyncTask per remaining episode, oldest first.
            for track in sorted(tracklist, key=lambda e: e.pubdate_prop):
                if self.cancelled:
                    break
                # XXX: need to check if track is added properly?
                sync_task = self.create_task(track)
                sync_task.status = sync_task.NEW
                sync_task.device = self
                # New Task, we must wait on the GTK Loop
                self.download_status_model.register_task(sync_task)
                # Executes after task has been registered
                util.idle_add(self.download_queue_manager.queue_task, sync_task)
        else:
            logger.warning("No episodes to sync")
            if done_callback:
                done_callback()
    def get_all_tracks(self):
        pass
    def add_track(self, track, reporthook=None):
        pass
    def remove_track(self, track):
        pass
    def get_free_space(self):
        pass
    def episode_on_device(self, episode):
        # Default lookup is by (non-unique) title; subclasses may override.
        return self._track_on_device(episode.title)
    def _track_on_device(self, track_name):
        # Linear scan of the cached device track list.
        for t in self.tracks_list:
            title = t.title
            if track_name == title:
                return t
        return None
class iPodDevice(Device):
    """Sync target backed by libgpod (classic iPod iTunesDB)."""
    def __init__(self, config, download_status_model, download_queue_manager):
        Device.__init__(self, config)
        self.mountpoint = self._config.device_sync.device_folder
        self.download_status_model = download_status_model
        self.download_queue_manager = download_queue_manager
        self.ipod = None
    def get_free_space(self):
        # Reserve 10 MiB for iTunesDB writing (to be on the safe side)
        RESERVED_FOR_ITDB = 1024 * 1024 * 10
        result = util.get_free_disk_space(self.mountpoint)
        if result == -1:
            # Can't get free disk space
            return -1
        return result - RESERVED_FOR_ITDB
    def open(self):
        """Open the iPod database; return True when it is usable."""
        Device.open(self)
        if not gpod_available:
            logger.error("Please install libgpod 0.8.3 to sync with an iPod device.")
            return False
        if not os.path.isdir(self.mountpoint):
            return False
        self.notify("status", _("Opening iPod database"))
        self.ipod = libgpod_ctypes.iPodDatabase(self.mountpoint)
        if (
            not self.ipod.itdb
            or not self.ipod.podcasts_playlist
            or not self.ipod.master_playlist
        ):
            return False
        self.notify("status", _("iPod opened"))
        # build the initial tracks_list
        self.tracks_list = self.get_all_tracks()
        return True
    def close(self):
        if self.ipod is not None:
            self.notify("status", _("Saving iPod database"))
            self.ipod.close()
            self.ipod = None
        Device.close(self)
        return True
    def get_all_tracks(self):
        """Return SyncTrack entries for all podcast tracks in the database."""
        tracks = []
        for track in self.ipod.get_podcast_tracks():
            filename = track.filename_on_ipod
            if filename is None:
                # Entry exists in the DB but the file is missing on disk.
                length = 0
                modified = ""
            else:
                length = util.calculate_size(filename)
                timestamp = util.file_modification_timestamp(filename)
                modified = util.format_date(timestamp)
            t = SyncTrack(
                track.episode_title,
                length,
                modified,
                ipod_track=track,
                playcount=track.playcount,
                podcast=track.podcast_title,
            )
            tracks.append(t)
        return tracks
    def episode_on_device(self, episode):
        # Match on both feed URL and episode URL (titles are not unique).
        return next(
            (
                track
                for track in self.tracks_list
                if track.ipod_track.podcast_rss == episode.channel.url
                and track.ipod_track.podcast_url == episode.url
            ),
            None,
        )
    def remove_track(self, track):
        """Remove a SyncTrack from the device and from the cached list."""
        self.notify("status", _("Removing %s") % track.title)
        logger.info("Removing track from iPod: %r", track.title)
        track.ipod_track.remove_from_device()
        try:
            # BUG FIX: compare backend handles — the original compared
            # `sync_track.ipod_track == track` (a SyncTrack), which never
            # matched, so next() yielded None and remove(None) raised a
            # silently-swallowed ValueError, leaving the stale entry cached.
            self.tracks_list.remove(
                next(
                    (
                        sync_track
                        for sync_track in self.tracks_list
                        if sync_track.ipod_track == track.ipod_track
                    ),
                    None,
                )
            )
        except ValueError:
            # Track not found in the cache; nothing to prune.
            ...
    def add_track(self, task, reporthook=None):
        """Copy the task's episode to the iPod; return True on success."""
        episode = task.episode
        self.notify("status", _("Adding %s") % episode.title)
        tracklist = self.ipod.get_podcast_tracks()
        episode_urls = [track.podcast_url for track in tracklist]
        if episode.url in episode_urls:
            # Mark as played on iPod if played locally (and set podcast flags)
            self.update_from_episode(
                tracklist[episode_urls.index(episode.url)], episode
            )
            return True
        local_filename = episode.local_filename(create=False)
        # The file has to exist, if we ought to transfer it, and therefore,
        # local_filename(create=False) must never return None as filename
        assert local_filename is not None
        if util.calculate_size(local_filename) > self.get_free_space():
            logger.error("Not enough space on %s, sync aborted...", self.mountpoint)
            d = {"episode": episode.title, "mountpoint": self.mountpoint}
            message = _(
                "Error copying %(episode)s: Not enough free space on %(mountpoint)s"
            )
            self.errors.append(message % d)
            self.cancelled = True
            return False
        (fn, extension) = os.path.splitext(local_filename)
        if extension.lower().endswith("ogg"):
            # XXX: Proper file extension/format support check for iPod
            logger.error("Cannot copy .ogg files to iPod.")
            return False
        track = self.ipod.add_track(
            local_filename,
            episode.title,
            episode.channel.title,
            episode._text_description,
            episode.url,
            episode.channel.url,
            episode.published,
            get_track_length(local_filename),
            episode.file_type() == "audio",
        )
        self.update_from_episode(track, episode, initial=True)
        # Report the transfer as complete in one step.
        reporthook(episode.file_size, 1, episode.file_size)
        return True
    def update_from_episode(self, track, episode, *, initial=False):
        """Synchronize play state between the iPod track and the episode."""
        if initial:
            # Set the initial bookmark on the device based on what we have locally
            track.initialize_bookmark(episode.is_new, episode.current_position * 1000)
        else:
            # Copy updated status from iPod
            if track.playcount > 0:
                episode.is_new = False
            if track.bookmark_time > 0:
                logger.info(
                    "Playback position from iPod: %s",
                    util.format_time(track.bookmark_time / 1000),
                )
                episode.is_new = False
                episode.current_position = int(track.bookmark_time / 1000)
                episode.current_position_updated = time.time()
            episode.save()
class MP3PlayerDevice(Device):
    """Sync target for filesystem-based players, accessed through Gio.

    Works with local folders and mounted/remote locations (e.g. MTP, smb)
    that Gio can address by URI.
    """
    def __init__(
        self,
        config,
        download_status_model,
        download_queue_manager,
        mount_volume_for_file,
    ):
        Device.__init__(self, config)
        folder = self._config.device_sync.device_folder
        self.destination = util.new_gio_file(folder)
        self.mount_volume_for_file = mount_volume_for_file
        self.download_status_model = download_status_model
        self.download_queue_manager = download_queue_manager
    def get_free_space(self):
        """Return the number of free bytes on the destination filesystem."""
        info = self.destination.query_filesystem_info(
            Gio.FILE_ATTRIBUTE_FILESYSTEM_FREE, None
        )
        return info.get_attribute_uint64(Gio.FILE_ATTRIBUTE_FILESYSTEM_FREE)
    def open(self):
        """Mount (if needed) and validate the destination; True when usable."""
        Device.open(self)
        self.notify("status", _("Opening MP3 player"))
        if not self.mount_volume_for_file(self.destination):
            return False
        try:
            info = self.destination.query_info(
                Gio.FILE_ATTRIBUTE_ACCESS_CAN_WRITE
                + ","
                + Gio.FILE_ATTRIBUTE_STANDARD_TYPE,
                Gio.FileQueryInfoFlags.NONE,
                None,
            )
        except GLib.Error as err:
            logger.error(
                "querying destination info for %s failed with %s",
                self.destination.get_uri(),
                err.message,
            )
            return False
        if info.get_file_type() != Gio.FileType.DIRECTORY:
            logger.error(
                "destination %s is not a directory", self.destination.get_uri()
            )
            return False
        # open is ok if the target is a directory, and it can be written to
        # for smb, query_info doesn't return FILE_ATTRIBUTE_ACCESS_CAN_WRITE,
        # -- if that's the case, just assume that it's writable
        if not info.has_attribute(
            Gio.FILE_ATTRIBUTE_ACCESS_CAN_WRITE
        ) or info.get_attribute_boolean(Gio.FILE_ATTRIBUTE_ACCESS_CAN_WRITE):
            self.notify("status", _("MP3 player opened"))
            self.tracks_list = self.get_all_tracks()
            return True
        logger.error("destination %s is not writable", self.destination.get_uri())
        return False
    def get_episode_folder_on_device(self, episode):
        """Return the Gio folder the episode belongs in (subfolder or root)."""
        folder = episode_foldername_on_device(self._config, episode)
        if folder:
            folder = self.destination.get_child(folder)
        else:
            folder = self.destination
        return folder
    def get_episode_file_on_device(self, episode):
        """Return the basename to use for the episode on the device."""
        return episode_filename_on_device(self._config, episode)
    def create_task(self, track):
        return GioSyncTask(track)
    def cancel_task(self, task):
        task.cancellable.cancel()
    # called by the sync task when it is removed and needs partial files cleaning up
    def cleanup_task(self, task):
        episode = task.episode
        folder = self.get_episode_folder_on_device(episode)
        file = self.get_episode_file_on_device(episode)
        file = folder.get_child(file)
        self.remove_track_file(file)
    def add_track(self, task, reporthook=None):
        """Copy the task's episode onto the device; return True on success."""
        episode = task.episode
        self.notify("status", _("Adding %s") % episode.title)
        # get the folder on the device
        folder = self.get_episode_folder_on_device(episode)
        filename = episode.local_filename(create=False)
        # The file has to exist, if we ought to transfer it, and therefore,
        # local_filename(create=False) must never return None as filename
        assert filename is not None
        from_file = filename
        # verify free space
        needed = util.calculate_size(from_file)
        free = self.get_free_space()
        if free == -1:
            logger.warning("Cannot determine free disk space on device")
        elif needed > free:
            d = {
                "path": self.destination,
                "free": util.format_filesize(free),
                "need": util.format_filesize(needed),
            }
            message = _(
                "Not enough space in %(path)s: %(free)s available, but need at least %(need)s"
            )
            raise SyncFailedException(message % d)
        # get the filename that will be used on the device
        to_file = self.get_episode_file_on_device(episode)
        to_file = folder.get_child(to_file)
        util.make_directory(folder)
        to_file_exists = to_file.query_exists()
        from_size = episode.file_size
        to_size = episode.file_size
        # An interrupted sync results in a partial file on the device that must be removed to fully sync it.
        # Comparing file size would detect such files and finish uploading.
        # However, some devices add metadata to files, increasing their size, and forcing an upload on every sync.
        # File size and checksum can not be used.
        if to_file_exists and self._config.device_sync.compare_episode_filesize:
            try:
                info = to_file.query_info(
                    Gio.FILE_ATTRIBUTE_STANDARD_SIZE, Gio.FileQueryInfoFlags.NONE
                )
                to_size = info.get_attribute_uint64(Gio.FILE_ATTRIBUTE_STANDARD_SIZE)
            except GLib.Error:
                # Assume same size and don't sync again
                pass
        if not to_file_exists or from_size != to_size:
            logger.info(
                "Copying %s (%d bytes) => %s (%d bytes)",
                os.path.basename(from_file),
                from_size,
                to_file.get_uri(),
                to_size,
            )
            from_file = Gio.File.new_for_path(from_file)
            try:
                # Adapt Gio's 2-argument progress callback to the
                # (count, blockSize, totalSize) reporthook convention.
                def hookconvert(current_bytes, total_bytes, user_data):
                    return reporthook(current_bytes, 1, total_bytes)
                from_file.copy(
                    to_file,
                    Gio.FileCopyFlags.OVERWRITE,
                    task.cancellable,
                    hookconvert,
                    None,
                )
            except GLib.Error as err:
                if err.matches(Gio.io_error_quark(), Gio.IOErrorEnum.CANCELLED):
                    raise SyncCancelledException()
                logger.error(
                    "Error copying %s to %s: %s",
                    from_file.get_uri(),
                    to_file.get_uri(),
                    err.message,
                )
                d = {
                    "from_file": from_file.get_uri(),
                    "to_file": to_file.get_uri(),
                    "message": err.message,
                }
                self.errors.append(
                    _("Error copying %(from_file)s to %(to_file)s: %(message)s") % d
                )
                return False
        return True
    def add_sync_track(self, tracks, file, info, podcast_name):
        """Append a SyncTrack built from a Gio file/info pair to *tracks*."""
        (title, extension) = os.path.splitext(info.get_name())
        timestamp = info.get_modification_time()
        modified = util.format_date(timestamp.tv_sec)
        t = SyncTrack(
            title,
            info.get_size(),
            modified,
            filename=file.get_uri(),
            podcast=podcast_name,
        )
        tracks.append(t)
    def get_all_tracks(self):
        """Enumerate files already on the device as SyncTrack entries."""
        tracks = []
        attributes = (
            Gio.FILE_ATTRIBUTE_STANDARD_NAME
            + ","
            + Gio.FILE_ATTRIBUTE_STANDARD_TYPE
            + ","
            + Gio.FILE_ATTRIBUTE_STANDARD_SIZE
            + ","
            + Gio.FILE_ATTRIBUTE_TIME_MODIFIED
        )
        root_path = self.destination
        for path_info in root_path.enumerate_children(
            attributes, Gio.FileQueryInfoFlags.NONE, None
        ):
            # BUG FIX: the flag lives under device_sync, as used everywhere
            # else in this module; plain _config has no such attribute.
            if self._config.device_sync.one_folder_per_podcast:
                if path_info.get_file_type() == Gio.FileType.DIRECTORY:
                    path_file = root_path.get_child(path_info.get_name())
                    try:
                        for child_info in path_file.enumerate_children(
                            attributes, Gio.FileQueryInfoFlags.NONE, None
                        ):
                            if child_info.get_file_type() == Gio.FileType.REGULAR:
                                child_file = path_file.get_child(child_info.get_name())
                                self.add_sync_track(
                                    tracks, child_file, child_info, path_info.get_name()
                                )
                    except GLib.Error as err:
                        logger.error(
                            "get all tracks for %s failed: %s",
                            path_file.get_uri(),
                            err.message,
                        )
            else:
                # BUG FIX: Gio has no FileTypeFlags enum; the regular-file
                # constant is Gio.FileType.REGULAR.
                if path_info.get_file_type() == Gio.FileType.REGULAR:
                    path_file = root_path.get_child(path_info.get_name())
                    self.add_sync_track(tracks, path_file, path_info, None)
        return tracks
    def episode_on_device(self, episode):
        # Look up by the sanitized filename the episode would get on device.
        e = util.sanitize_filename(
            episode.sync_filename(
                self._config.device_sync.custom_sync_name_enabled,
                self._config.device_sync.custom_sync_name,
            ),
            self._config.device_sync.max_filename_length,
        )
        return self._track_on_device(e)
    def remove_track_file(self, file):
        """Delete a file from the device, pruning its folder when it empties."""
        folder = file.get_parent()
        if file.query_exists():
            try:
                file.delete()
            except GLib.Error as err:
                # if the file went away don't worry about it
                if not err.matches(Gio.io_error_quark(), Gio.IOErrorEnum.NOT_FOUND):
                    logger.error(
                        "deleting file %s failed: %s", file.get_uri(), err.message
                    )
                    return
        # BUG FIX: flag is under device_sync, consistent with the rest of
        # this module.
        if self._config.device_sync.one_folder_per_podcast:
            try:
                if self.directory_is_empty(folder):
                    folder.delete()
            except GLib.Error as err:
                # if the folder went away don't worry about it (multiple threads could
                # make this happen if they both notice the folder is empty simultaneously)
                if not err.matches(Gio.io_error_quark(), Gio.IOErrorEnum.NOT_FOUND):
                    logger.error(
                        "deleting folder %s failed: %s", folder.get_uri(), err.message
                    )
    def remove_track(self, track):
        self.notify("status", _("Removing %s") % track.title)
        # get the folder on the device
        file = Gio.File.new_for_uri(track.filename)
        self.remove_track_file(file)
    def directory_is_empty(self, directory):
        """Return True when *directory* contains no children at all."""
        for child in directory.enumerate_children(
            Gio.FILE_ATTRIBUTE_STANDARD_NAME, Gio.FileQueryInfoFlags.NONE, None
        ):
            return False
        return True
class SyncCancelledException(Exception):
    """Signals that the user cancelled an in-progress sync transfer."""
    pass
class SyncFailedException(Exception):
    """Signals a fatal sync error (e.g. not enough free space on device)."""
    pass
class SyncTask(download.DownloadTask):
# An object representing the synchronization task of an episode
# Possible states this sync task can be in
STATUS_MESSAGE = (
_("Queued"),
_("Queued"),
_("Syncing"),
_("Finished"),
_("Failed"),
_("Cancelling"),
_("Cancelled"),
_("Pausing"),
_("Paused"),
)
(
NEW,
QUEUED,
DOWNLOADING,
DONE,
FAILED,
CANCELLING,
CANCELLED,
PAUSING,
PAUSED,
) = list(range(9))
    def __str__(self):
        return self.__episode.title
    # status: current state constant. Setting a new value flips the
    # status_changed flag so the GUI model knows to refresh this row.
    def __get_status(self):
        return self.__status
    def __set_status(self, status):
        if status != self.__status:
            self.__status_changed = True
            self.__status = status
    status = property(fget=__get_status, fset=__set_status)
    def __get_device(self):
        return self.__device
    def __set_device(self, device):
        self.__device = device
    device = property(fget=__get_device, fset=__set_device)
    # Reading status_changed resets the flag (read-once semantics).
    def __get_status_changed(self):
        if self.__status_changed:
            self.__status_changed = False
            return True
        else:
            return False
    status_changed = property(fget=__get_status_changed)
    def __get_activity(self):
        return self.__activity
    def __set_activity(self, activity):
        self.__activity = activity
    activity = property(fget=__get_activity, fset=__set_activity)
    # Sync tasks have no URLs; keep a DownloadTask-compatible API surface.
    def __get_empty_string(self):
        return ""
    url = property(fget=__get_empty_string)
    podcast_url = property(fget=__get_empty_string)
    def __get_episode(self):
        return self.__episode
    episode = property(fget=__get_episode)
    def can_queue(self):
        # Re-queueing is allowed from any resting cancel/pause/failure state.
        return self.status in (self.CANCELLED, self.PAUSED, self.FAILED)
    def can_pause(self):
        return self.status in (self.DOWNLOADING, self.QUEUED)
    def pause(self):
        with self:
            # Pause a queued download
            if self.status == self.QUEUED:
                self.status = self.PAUSED
            # Request pause of a running download
            elif self.status == self.DOWNLOADING:
                self.status = self.PAUSING
    def can_cancel(self):
        return self.status in (self.DOWNLOADING, self.QUEUED, self.PAUSED, self.FAILED)
    def cancel(self):
        with self:
            # Cancelling directly is allowed if the task isn't currently downloading
            if self.status in (self.QUEUED, self.PAUSED, self.FAILED):
                self.status = self.CANCELLED
                # Call run, so the partial file gets deleted
                self.run()
                self.recycle()
            # Otherwise request cancellation
            elif self.status == self.DOWNLOADING:
                self.status = self.CANCELLING
                self.device.cancel()
    def can_remove(self):
        return self.status in (self.CANCELLED, self.FAILED, self.DONE)
    def removed_from_list(self):
        # An unfinished task may have left a partial file on the device.
        if self.status != self.DONE:
            self.device.cleanup_task(self)
    def __init__(self, episode):
        # The task is also a context manager guarding its own state (below).
        self.__lock = threading.RLock()
        self.__status = SyncTask.NEW
        # ACTIVITY_SYNCHRONIZE is presumably inherited from
        # download.DownloadTask — TODO confirm; it is not declared here.
        self.__activity = SyncTask.ACTIVITY_SYNCHRONIZE
        self.__status_changed = True
        self.__episode = episode
        # Create the target filename and save it in the database
        self.filename = self.__episode.local_filename(create=False)
        self.total_size = self.__episode.file_size
        self.speed = 0.0
        self.progress = 0.0
        self.error_message = None
        self.custom_downloader = None
        # Have we already shown this task in a notification?
        self._notification_shown = False
        # Variables for speed limit and speed calculation
        self.__start_time = 0
        self.__start_blocks = 0
        # NOTE(review): 999 looks like a placeholder rate value — units and
        # intent are not evident from this file; confirm before relying on it.
        self.__limit_rate_value = 999
        self.__limit_rate = 999
        # Callbacks
        self._progress_updated = lambda x: None
    def __enter__(self):
        return self.__lock.acquire()
    def __exit__(self, type, value, traceback):
        self.__lock.release()
def notify_as_finished(self):
if self.status == SyncTask.DONE:
if self._notification_shown:
return False
else:
self._notification_shown = True
return True
return False
def notify_as_failed(self):
if self.status == SyncTask.FAILED:
if self._notification_shown:
return False
else:
self._notification_shown = True
return True
return False
def add_progress_callback(self, callback):
self._progress_updated = callback
def status_updated(self, count, blockSize, totalSize):
    """Reporthook called by the device during the transfer.

    Updates total size (servers may report a different size
    mid-transfer), recomputes the clamped progress fraction, notifies
    the registered callback, and honours pending cancel/pause requests.
    """
    if totalSize > 0 and totalSize != self.total_size:
        # Trust the size seen while downloading over the stored one
        self.total_size = float(totalSize)
    if self.total_size > 0:
        fraction = (count * blockSize) / self.total_size
        self.progress = min(1.0, max(0.0, fraction))
        self._progress_updated(self.progress)
    if self.status in (SyncTask.CANCELLING, SyncTask.PAUSING):
        self._signal_cancel_from_status()
# default implementation
def _signal_cancel_from_status(self):
    # Subclasses override this to interrupt their own transfer
    # mechanism (see GioSyncTask); the default aborts by raising.
    raise SyncCancelledException()
def recycle(self):
    # Detach this task from its episode so the episode can be synced again.
    # NOTE(review): reads `self.episode` while __init__ stores `self.__episode`
    # -- presumably `episode` is a property defined elsewhere in the class;
    # confirm before refactoring.
    self.episode.download_task = None
def run(self):
    """Execute the sync and return True on success, False otherwise.

    Honours cancel/pause requests made before the transfer starts and
    performs the matching status transitions afterwards.  The actual
    transfer is delegated to self.device.add_track(), which reports
    progress through status_updated().
    """
    # Speed calculation (re-)starts here
    self.__start_time = 0
    self.__start_blocks = 0
    # If the download has already been cancelled/paused, skip it
    with self:
        if self.status in (SyncTask.CANCELLING, SyncTask.CANCELLED):
            self.progress = 0.0
            self.speed = 0.0
            self.status = SyncTask.CANCELLED
            return False
        if self.status == SyncTask.PAUSING:
            self.status = SyncTask.PAUSED
            return False
        # We only start this download if its status is downloading
        if self.status != SyncTask.DOWNLOADING:
            return False
        # We are syncing this file right now
        self._notification_shown = False
    # Assume success; downgraded below on cancellation or error.
    sync_result = SyncTask.DOWNLOADING
    try:
        logger.info("Starting SyncTask")
        self.device.add_track(self, reporthook=self.status_updated)
    except SyncCancelledException as e:
        sync_result = SyncTask.CANCELLED
    except Exception as e:
        sync_result = SyncTask.FAILED
        logger.error("Sync failed: %s", str(e), exc_info=True)
        self.error_message = _("Error: %s") % (str(e),)
    with self:
        if sync_result == SyncTask.DOWNLOADING:
            # Everything went well - we're done
            self.status = SyncTask.DONE
            if self.total_size <= 0:
                self.total_size = util.calculate_size(self.filename)
                logger.info("Total size updated to %d", self.total_size)
            self.progress = 1.0
            gpodder.user_extensions.on_episode_synced(self.device, self.__episode)
            return True
        self.speed = 0.0
        if sync_result == SyncTask.FAILED:
            self.status = SyncTask.FAILED
        # cancelled/paused -- update state to mark it as safe to manipulate this task again
        elif self.status == SyncTask.PAUSING:
            self.status = SyncTask.PAUSED
        elif self.status == SyncTask.CANCELLING:
            self.status = SyncTask.CANCELLED
    # We finished, but not successfully (at least not really)
    return False
class GioSyncTask(SyncTask):
    """SyncTask variant whose transfer can be interrupted through a
    Gio.Cancellable instead of raising SyncCancelledException."""

    def __init__(self, episode):
        super().__init__(episode)
        # For cancelling the copy
        self.cancellable = Gio.Cancellable()

    def _signal_cancel_from_status(self):
        # Interrupt the in-flight GIO operation; overrides the base
        # implementation, which raises SyncCancelledException.
        self.cancellable.cancel()
|
constructors | time_helpers | # NOTE: t must be INT!!!
import datetime
import time
import warnings
try:
    from tzlocal import get_localzone

    LOCAL_ZONE = get_localzone()
# Narrowed from a bare `except:` -- a bare except also swallows
# KeyboardInterrupt/SystemExit; ImportError and any broken-install
# errors are still caught here.
except Exception:
    warnings.warn(
        "Please install or fix tzlocal library (pip install tzlocal) in order to make Date object work better. Otherwise I will assume DST is in effect all the time",
        Warning,
    )

    # Fallback stub: dst() returns a truthy value, i.e. "DST always on".
    class LOCAL_ZONE:
        @staticmethod
        def dst(*args):
            return 1
from lib.js2py.base import MakeError
# Cumulative day count at the start of each month in a non-leap year;
# CUM[m] == days before month m (0 == January), CUM[12] == 365.
CUM = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
msPerDay = 86400000
# Average Gregorian year (365.242 days) in ms; used as a first guess
# when mapping a time value back to a calendar year.
msPerYear = int(86400000 * 365.242)
msPerSecond = 1000
msPerMinute = 60000
msPerHour = 3600000
HoursPerDay = 24
MinutesPerHour = 60
SecondsPerMinute = 60
NaN = float("nan")
# Standard-time offset of the local zone from UTC in ms (ES5 LocalTZA);
# time.timezone is seconds west of UTC, hence the sign flip.
LocalTZA = -time.timezone * msPerSecond
def DaylightSavingTA(t):
    """Return the daylight-saving adjustment, in ms, for time value *t*.

    NaN propagates unchanged.  If the timestamp cannot be represented
    by datetime (or the tz object misbehaves), warn and fall back to a
    1 ms adjustment, i.e. "DST in effect".
    """
    # `t != t` is the robust NaN test; the original `t is NaN` identity
    # check only matched the module's own NaN object, not NaN values
    # produced by arithmetic elsewhere.
    if t != t:
        return t
    try:
        return (
            int(LOCAL_ZONE.dst(datetime.datetime.utcfromtimestamp(t // 1000)).seconds)
            * 1000
        )
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # are not swallowed; OverflowError/ValueError/AttributeError from
    # out-of-range dates or the fallback zone all land here.
    except Exception:
        warnings.warn(
            "Invalid datetime date, assumed DST time, may be inaccurate...", Warning
        )
        return 1
def GetTimeZoneName(t):
    """Return the local timezone abbreviation for time value *t*,
    picking the DST name when a DST adjustment is in effect."""
    in_dst = DaylightSavingTA(t) > 0
    return time.tzname[in_dst]
def LocalToUTC(t):
    """Convert a local-time value (ms) to UTC (ES5 UTC(t))."""
    standard = t - LocalTZA
    return standard - DaylightSavingTA(standard)
def UTCToLocal(t):
    """Convert a UTC time value (ms) to local time (ES5 LocalTime(t))."""
    offset = LocalTZA + DaylightSavingTA(t)
    return t + offset
def Day(t):
    """Whole days since the epoch for time value *t* (ES Day(t));
    floor division, so negative times round toward minus infinity."""
    days, _ = divmod(t, 86400000)
    return days
def TimeWithinDay(t):
    """Millisecond offset of *t* within its day; always non-negative
    because Python's modulo follows the floor-division convention."""
    _, ms = divmod(t, 86400000)
    return ms
def DaysInYear(y):
    """Return 365 or 366 for calendar year *y* per the Gregorian rule:
    leap when divisible by 4, except centuries not divisible by 400."""
    leap = y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)
    return 366 if leap else 365
def DayFromYear(y):
    """Day number of 1 January of year *y*, relative to the 1970 epoch
    (ES DayFromYear); negative for years before 1970."""
    leap_days = (y - 1969) // 4 - (y - 1901) // 100 + (y - 1601) // 400
    return 365 * (y - 1970) + leap_days
def TimeFromYear(y):
    """Time value (ms) of 00:00 UTC on 1 January of year *y*."""
    return DayFromYear(y) * 86400000
def YearFromTime(t):
    """Return the calendar year containing time value *t* (ES YearFromTime).

    Starts from an estimate based on the average year length, then
    walks year by year until TimeFromYear brackets t.  The walking
    loops converge from either side of the true year, so the estimate
    only affects speed, never the result.
    """
    # The original guess was `1970 - t // msPerYear`, which for t far
    # from the epoch starts ~twice the distance from the answer and
    # costs O(years) iterations; adding the offset lands next to it.
    guess = 1970 + t // 31556908800  # msPerYear
    gt = TimeFromYear(guess)
    if gt <= t:
        while gt <= t:
            guess += 1
            gt = TimeFromYear(guess)
        return guess - 1
    else:
        while gt > t:
            guess -= 1
            gt = TimeFromYear(guess)
        return guess
def DayWithinYear(t):
    """Zero-based day index of *t* within its calendar year."""
    year_start_day = DayFromYear(YearFromTime(t))
    return Day(t) - year_start_day
def InLeapYear(t):
    """Return 1 if time value *t* falls in a leap year, else 0."""
    y = YearFromTime(t)
    is_leap = y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)
    return 1 if is_leap else 0
def MonthFromTime(t):
    """Zero-based month (0 == January) of time value *t*."""
    day = DayWithinYear(t)
    if day < 31:
        return 0
    # Fold Feb 29 away so the non-leap cumulative bounds apply below.
    day -= InLeapYear(t)
    month_upper_bounds = (59, 90, 120, 151, 181, 212, 243, 273, 304, 334)
    for month, bound in enumerate(month_upper_bounds, start=1):
        if day < bound:
            return month
    return 11
def DateFromTime(t):
    """One-based day-of-month for time value *t*."""
    mon = MonthFromTime(t)
    day = DayWithinYear(t)
    # After February in a leap year, one extra day (Feb 29) must be
    # subtracted before comparing against the non-leap table CUM.
    feb29 = 1 if (mon >= 2 and InLeapYear(t)) else 0
    return day - CUM[mon] - feb29 + 1
def WeekDay(t):
    """Day of the week for *t*; 0 == Sunday.

    Day 0 of the epoch (1 Jan 1970) was a Thursday, hence the +4.
    """
    days_since_epoch = t // 86400000
    return (days_since_epoch + 4) % 7
def msFromTime(t):
    """Millisecond component (0-999) of time value *t*."""
    return divmod(t, 1000)[1]
def SecFromTime(t):
    """Seconds component (0-59) of time value *t*."""
    total_seconds = t // 1000
    return total_seconds % 60
def MinFromTime(t):
    """Minutes component (0-59) of time value *t*."""
    total_minutes = t // 60000
    return total_minutes % 60
def HourFromTime(t):
    """Hour component (0-23) of time value *t*."""
    total_hours = t // 3600000
    return total_hours % 24
def MakeTime(hour, Min, sec, ms):
    """Combine PyJs hour/minute/second/ms values into a ms count
    (ES MakeTime); NaN when any component is non-finite."""
    components = (hour, Min, sec, ms)
    if not all(c.is_finite() for c in components):
        return NaN
    h, m, s, milli = (c.to_int() for c in components)
    return ((h * 60 + m) * 60 + s) * 1000 + milli
def MakeDay(year, month, date):
    """Day number for PyJs year/month/date values (ES MakeDay);
    NaN when any component is non-finite.  Month overflow/underflow
    carries into the year via floor division."""
    if not (year.is_finite() and month.is_finite() and date.is_finite()):
        return NaN
    y, m, dt = year.to_int(), month.to_int(), date.to_int()
    y, mn = y + m // 12, m % 12
    # Months from March onward in a leap year sit one day later.
    leap_shift = 1 if (mn >= 2 and DaysInYear(y) == 366) else 0
    return DayFromYear(y) + CUM[mn] + (dt - 1) + leap_shift
def MakeDate(day, time):
    """Combine a day number and an intra-day ms offset into one
    time value (ES MakeDate)."""
    return day * 86400000 + time
def TimeClip(t):
    """Clamp a time value per ES TimeClip: NaN, infinities and values
    beyond +/-8.64e15 ms become NaN, otherwise truncate to int."""
    if t != t:
        return NaN
    magnitude = abs(t)
    if magnitude == float("inf") or magnitude > 8.64 * 10**15:
        return NaN
    return int(t)
|
mylar | PostProcessor | # This file is part of Mylar.
#
# Mylar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mylar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mylar. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import datetime
import logging
import os
import re
import shlex
import shutil
import subprocess
import sys
import time
from xml.dom.minidom import parseString
import mylar
import urllib2
from mylar import db, filechecker, helpers, logger, notifiers, updater, weeklypull
class PostProcessor(object):
"""
A class which will process a media file according to the post processing settings in the config.
"""
# Result codes for comparing an already-present file against a new download.
EXISTS_LARGER = 1
EXISTS_SAME = 2
EXISTS_SMALLER = 3
DOESNT_EXIST = 4
# Sources a name can be taken from during post-processing.
NZB_NAME = 1
FOLDER_NAME = 2
FILE_NAME = 3
def __init__(
    self,
    nzb_name,
    nzb_folder,
    issueid=None,
    module=None,
    queue=None,
    comicid=None,
    apicall=False,
    ddl=False,
):
    """
    Creates a new post processor with the given file path and optionally an NZB name.
    nzb_name: The name of the NZB which resulted in this file being downloaded
    nzb_folder: folder the download landed in
    issueid/comicid: post-process directly against a known issue/series
    module: log-prefix of the calling module (optional)
    queue: queue used to hand results back to the caller (optional)
    apicall: True when invoked through the API (takes the global APILOCK)
    ddl: True when the payload came from a DDL provider
    """
    # name of the NZB that resulted in this folder
    self.nzb_name = nzb_name
    self.nzb_folder = nzb_folder
    if module is not None:
        self.module = module + "[POST-PROCESSING]"
    else:
        self.module = "[POST-PROCESSING]"
    if queue:
        self.queue = queue
    # NOTE(review): returning a non-None value from __init__ raises
    # TypeError at runtime -- any call made while APILOCK is held will
    # crash rather than report "IN PROGRESS".  Needs a redesign
    # (flag/exception) rather than a return value.
    if mylar.APILOCK is True:
        return {"status": "IN PROGRESS"}
    if apicall is True:
        self.apicall = True
        mylar.APILOCK = True
    else:
        self.apicall = False
    if ddl is True:
        self.ddl = True
    else:
        self.ddl = False
    # fileop is applied to every processed file: copy keeps the source,
    # move removes it.
    if mylar.CONFIG.FILE_OPTS == "copy":
        self.fileop = shutil.copy
    else:
        self.fileop = shutil.move
    self.valreturn = []
    # comic archive extensions considered processable
    self.extensions = (".cbr", ".cbz", ".pdf")
    self.failed_files = 0
    self.log = ""
    if issueid is not None:
        self.issueid = issueid
    else:
        self.issueid = None
    if comicid is not None:
        self.comicid = comicid
    else:
        self.comicid = None
    # set later when a story-arc issueid ("xxx_yyy") is detected
    self.issuearcid = None
def _log(self, message, level=logger):  # .message): #level=logger.MESSAGE):
    """
    A wrapper for the internal logger which also keeps track of messages and saves them to a string for sabnzbd post-processing logging functions.
    message: The string to log (unicode)
    level: The log level to use (optional)
    """
    # NOTE(review): the `level` default binds the whole `logger` module
    # and the argument is never used below -- presumably a leftover from
    # the commented-out logger.log call; confirm before relying on it.
    # logger.log(message, level)
    self.log += message + "\n"
def _run_pre_scripts(self, nzb_name, nzb_folder, seriesmetadata):
    """
    Executes any pre scripts defined in the config.

    The configured script path is read to detect a shebang; .sh scripts
    run under their shebang interpreter (or /bin/bash), anything else
    runs under the same Python executable as Mylar.  The nzb name,
    folder and series metadata are appended as arguments.
    """
    logger.fdebug("initiating pre script detection.")
    self._log("initiating pre script detection.")
    logger.fdebug("mylar.PRE_SCRIPTS : " + mylar.CONFIG.PRE_SCRIPTS)
    self._log("mylar.PRE_SCRIPTS : " + mylar.CONFIG.PRE_SCRIPTS)
    # for currentScriptName in mylar.CONFIG.PRE_SCRIPTS:
    with open(mylar.CONFIG.PRE_SCRIPTS, "r") as f:
        first_line = f.readline()
    if mylar.CONFIG.PRE_SCRIPTS.endswith(".sh"):
        # honour the script's shebang; fall back to bash when absent
        shell_cmd = re.sub("#!", "", first_line).strip()
        if shell_cmd == "" or shell_cmd is None:
            shell_cmd = "/bin/bash"
    else:
        # forces mylar to use the executable that it was run with to run the extra script.
        shell_cmd = sys.executable
    # NOTE: str.decode("string_escape") is Python-2-only (this module is py2).
    currentScriptName = (
        shell_cmd + " " + str(mylar.CONFIG.PRE_SCRIPTS).decode("string_escape")
    )
    logger.fdebug("pre script detected...enabling: " + str(currentScriptName))
    # generate a safe command line string to execute the script and provide all the parameters
    script_cmd = shlex.split(currentScriptName, posix=False) + [
        str(nzb_name),
        str(nzb_folder),
        str(seriesmetadata),
    ]
    logger.fdebug("cmd to be executed: " + str(script_cmd))
    self._log("cmd to be executed: " + str(script_cmd))
    # use subprocess to run the command and capture output
    logger.fdebug("Executing command " + str(script_cmd))
    logger.fdebug("Absolute path to script: " + script_cmd[0])
    try:
        p = subprocess.Popen(
            script_cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            cwd=mylar.PROG_DIR,
        )
        out, err = p.communicate()  # @UnusedVariable
        logger.fdebug("Script result: " + out)
        self._log("Script result: " + out)
    except OSError, e:  # Python-2 except syntax
        logger.warn("Unable to run pre_script: " + str(script_cmd))
        self._log("Unable to run pre_script: " + str(script_cmd))
def _run_extra_scripts(self, nzb_name, nzb_folder, filen, folderp, seriesmetadata):
    """
    Executes any extra scripts defined in the config.

    Same mechanism as _run_pre_scripts(), with the destination file
    name and folder passed as two extra arguments.
    """
    logger.fdebug("initiating extra script detection.")
    self._log("initiating extra script detection.")
    logger.fdebug("mylar.EXTRA_SCRIPTS : " + mylar.CONFIG.EXTRA_SCRIPTS)
    self._log("mylar.EXTRA_SCRIPTS : " + mylar.CONFIG.EXTRA_SCRIPTS)
    # for curScriptName in mylar.CONFIG.EXTRA_SCRIPTS:
    with open(mylar.CONFIG.EXTRA_SCRIPTS, "r") as f:
        first_line = f.readline()
    if mylar.CONFIG.EXTRA_SCRIPTS.endswith(".sh"):
        # NOTE(review): unlike _run_pre_scripts this does not .strip()
        # the shebang, so a trailing newline survives -- confirm whether
        # intentional.
        shell_cmd = re.sub("#!", "", first_line)
        if shell_cmd == "" or shell_cmd is None:
            shell_cmd = "/bin/bash"
    else:
        # forces mylar to use the executable that it was run with to run the extra script.
        shell_cmd = sys.executable
    # NOTE: str.decode("string_escape") is Python-2-only (this module is py2).
    curScriptName = (
        shell_cmd + " " + str(mylar.CONFIG.EXTRA_SCRIPTS).decode("string_escape")
    )
    logger.fdebug("extra script detected...enabling: " + str(curScriptName))
    # generate a safe command line string to execute the script and provide all the parameters
    # NOTE(review): split here uses posix mode, while the pre-script
    # variant passes posix=False -- Windows paths may split differently.
    script_cmd = shlex.split(curScriptName) + [
        str(nzb_name),
        str(nzb_folder),
        str(filen),
        str(folderp),
        str(seriesmetadata),
    ]
    logger.fdebug("cmd to be executed: " + str(script_cmd))
    self._log("cmd to be executed: " + str(script_cmd))
    # use subprocess to run the command and capture output
    logger.fdebug("Executing command " + str(script_cmd))
    logger.fdebug("Absolute path to script: " + script_cmd[0])
    try:
        p = subprocess.Popen(
            script_cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            cwd=mylar.PROG_DIR,
        )
        out, err = p.communicate()  # @UnusedVariable
        logger.fdebug("Script result: " + out)
        self._log("Script result: " + out)
    except OSError, e:  # Python-2 except syntax
        logger.warn("Unable to run extra_script: " + str(script_cmd))
        self._log("Unable to run extra_script: " + str(script_cmd))
def duplicate_process(self, dupeinfo):
    """Handle a detected duplicate according to the dupe decision.

    dupeinfo: dict with 'to_dupe' (full path of the losing file) and
    'action' ('dupe_src' == the existing file loses, otherwise the new
    file loses).  In move mode the losing file is relocated to the
    Duplicate Dump folder; in copy mode the new file is simply left in
    place.  Returns True on success, False when the move failed.
    """
    # path to move 'should' be the entire path to the given file
    path_to_move = dupeinfo["to_dupe"]
    file_to_move = os.path.split(path_to_move)[1]
    if dupeinfo["action"] == "dupe_src" and mylar.CONFIG.FILE_OPTS == "move":
        logger.info(
            "[DUPLICATE-CLEANUP] New File will be post-processed. Moving duplicate [%s] to Duplicate Dump Folder for manual intervention."
            % path_to_move
        )
    else:
        if mylar.CONFIG.FILE_OPTS == "move":
            logger.info(
                "[DUPLICATE-CLEANUP][MOVE-MODE] New File will not be post-processed. Moving duplicate [%s] to Duplicate Dump Folder for manual intervention."
                % path_to_move
            )
        else:
            # copy mode keeps the new file where it is -- nothing to do
            logger.info(
                "[DUPLICATE-CLEANUP][COPY-MODE] NEW File will not be post-processed. Retaining file in original location [%s]"
                % path_to_move
            )
            return True
    # this gets tricky depending on if it's the new filename or the existing filename, and whether or not 'copy' or 'move' has been selected.
    if mylar.CONFIG.FILE_OPTS == "move":
        # check to make sure duplicate_dump directory exists:
        checkdirectory = filechecker.validateAndCreateDirectory(
            mylar.CONFIG.DUPLICATE_DUMP, True, module="[DUPLICATE-CLEANUP]"
        )
        # optionally bucket dumped duplicates into per-day folders
        if mylar.CONFIG.DUPLICATE_DATED_FOLDERS is True:
            todaydate = datetime.datetime.now().strftime("%Y-%m-%d")
            dump_folder = os.path.join(mylar.CONFIG.DUPLICATE_DUMP, todaydate)
            checkdirectory = filechecker.validateAndCreateDirectory(
                dump_folder, True, module="[DUPLICATE-DATED CLEANUP]"
            )
        else:
            dump_folder = mylar.CONFIG.DUPLICATE_DUMP
        try:
            shutil.move(path_to_move, os.path.join(dump_folder, file_to_move))
        except (OSError, IOError):
            logger.warn(
                "[DUPLICATE-CLEANUP] Failed to move %s ... to ... %s"
                % (path_to_move, os.path.join(dump_folder, file_to_move))
            )
            return False
        # NOTE(review): success is logged at warn level -- presumably to
        # make dump activity visible; confirm it isn't a typo for info.
        logger.warn(
            "[DUPLICATE-CLEANUP] Successfully moved %s ... to ... %s"
            % (path_to_move, os.path.join(dump_folder, file_to_move))
        )
        return True
def tidyup(
    self, odir=None, del_nzbdir=False, sub_path=None, cacheonly=False, filename=None
):
    """Clean up the source download location and/or the metadata cache.

    odir: temporary metadata cache dir; removed only when metatagging
        is enabled and the path looks like a 'mylar_' cache dir.
    del_nzbdir: also remove the original download directory (must be
        False for manual post-processing).
    sub_path: sub-directory within self.nzb_folder the file came from.
    cacheonly: only clean the cache location.
    filename: processed file name, used both to remove the original
        file and to detect a direct file (not folder) submission.
    """
    # del_nzbdir will remove the original directory location. Must be set to False for manual pp or else will delete manual dir that's provided (if empty).
    # move = cleanup/delete original location (self.nzb_folder) AND cache location (odir) if metatagging is enabled.
    # copy = cleanup/delete cache location (odir) only if enabled.
    # cacheonly = will only delete the cache location (useful if there's an error during metatagging, and/or the final location is out of space)
    try:
        # tidyup old path
        if cacheonly is False:
            logger.fdebug(
                "File Option: %s [META-ENABLED: %s]"
                % (mylar.CONFIG.FILE_OPTS, mylar.CONFIG.ENABLE_META)
            )
            logger.fdebug(
                "odir: %s [filename: %s][self.nzb_folder: %s]"
                % (odir, filename, self.nzb_folder)
            )
            logger.fdebug(
                "sub_path: %s [cacheonly: %s][del_nzbdir: %s]"
                % (sub_path, cacheonly, del_nzbdir)
            )
            # if sub_path exists, then we need to use that in place of self.nzb_folder since the file was in a sub-directory within self.nzb_folder
            if all(
                [sub_path is not None, sub_path != self.nzb_folder]
            ):  # , self.issueid is not None]):
                if self.issueid is None:
                    logger.fdebug(
                        "Sub-directory detected during cleanup. Will attempt to remove if empty: %s"
                        % sub_path
                    )
                    orig_folder = sub_path
                else:
                    logger.fdebug(
                        "Direct post-processing was performed against specific issueid. Using supplied filepath for deletion."
                    )
                    orig_folder = self.nzb_folder
            else:
                orig_folder = self.nzb_folder
            # make sure we don't delete the directory passed via manual-pp and ajust for trailling slashes or not
            if orig_folder.endswith("/") or orig_folder.endswith("\\"):
                tmp_folder = orig_folder[:-1]
            else:
                tmp_folder = orig_folder
            # a direct file submission arrives as a path to the file itself
            if os.path.split(tmp_folder)[1] == filename and not os.path.isdir(
                tmp_folder
            ):
                logger.fdebug(
                    "%s item to be deleted is file, not folder due to direct submission: %s"
                    % (self.module, tmp_folder)
                )
                tmp_folder = os.path.split(tmp_folder)[0]
            # if all([os.path.isdir(odir), self.nzb_folder != tmp_folder]) or any([odir.startswith('mylar_'),del_nzbdir is True]):
            # check to see if the directory is empty or not.
            if all(
                [
                    mylar.CONFIG.FILE_OPTS == "move",
                    self.nzb_name == "Manual Run",
                    tmp_folder != self.nzb_folder,
                ]
            ):
                if not os.listdir(tmp_folder):
                    logger.fdebug(
                        "%s Tidying up. Deleting sub-folder location : %s"
                        % (self.module, tmp_folder)
                    )
                    shutil.rmtree(tmp_folder)
                    self._log("Removed temporary directory : %s" % tmp_folder)
                else:
                    # folder not empty: remove just the processed file,
                    # then retry removing the (possibly now empty) folder
                    if filename is not None:
                        if os.path.isfile(os.path.join(tmp_folder, filename)):
                            logger.fdebug(
                                "%s Attempting to remove file: %s"
                                % (self.module, os.path.join(tmp_folder, filename))
                            )
                            try:
                                os.remove(os.path.join(tmp_folder, filename))
                            except Exception as e:
                                logger.warn(
                                    "%s [%s] Unable to remove file : %s"
                                    % (
                                        self.module,
                                        e,
                                        os.path.join(tmp_folder, filename),
                                    )
                                )
                        else:
                            if not os.listdir(tmp_folder):
                                logger.fdebug(
                                    "%s Tidying up. Deleting original folder location : %s"
                                    % (self.module, tmp_folder)
                                )
                                try:
                                    shutil.rmtree(tmp_folder)
                                except Exception as e:
                                    logger.warn(
                                        "%s [%s] Unable to delete original folder location: %s"
                                        % (self.module, e, tmp_folder)
                                    )
                                else:
                                    logger.fdebug(
                                        "%s Removed original folder location: %s"
                                        % (self.module, tmp_folder)
                                    )
                                    self._log(
                                        "Removed temporary directory : %s"
                                        % tmp_folder
                                    )
                            else:
                                self._log(
                                    "Failed to remove temporary directory: %s"
                                    % tmp_folder
                                )
                                logger.error(
                                    "%s %s not empty. Skipping removal of directory - this will either be caught in further post-processing or it will have to be manually deleted."
                                    % (self.module, tmp_folder)
                                )
                    else:
                        self._log(
                            "Failed to remove temporary directory: " + tmp_folder
                        )
                        logger.error(
                            "%s %s not empty. Skipping removal of directory - this will either be caught in further post-processing or it will have to be manually deleted."
                            % (self.module, tmp_folder)
                        )
            elif all(
                [
                    mylar.CONFIG.FILE_OPTS == "move",
                    self.nzb_name == "Manual Run",
                    filename is not None,
                ]
            ):
                # manual run against the manual folder itself: only the
                # processed file is removed, never the folder
                if os.path.isfile(os.path.join(tmp_folder, filename)):
                    logger.fdebug(
                        "%s Attempting to remove original file: %s"
                        % (self.module, os.path.join(tmp_folder, filename))
                    )
                    try:
                        os.remove(os.path.join(tmp_folder, filename))
                    except Exception as e:
                        logger.warn(
                            "%s [%s] Unable to remove file : %s"
                            % (self.module, e, os.path.join(tmp_folder, filename))
                        )
            elif mylar.CONFIG.FILE_OPTS == "move" and all(
                [del_nzbdir is True, self.nzb_name != "Manual Run"]
            ):  # tmp_folder != self.nzb_folder]):
                # automated run: caller explicitly allowed deleting the
                # original download directory
                if not os.listdir(tmp_folder):
                    logger.fdebug(
                        "%s Tidying up. Deleting original folder location : %s"
                        % (self.module, tmp_folder)
                    )
                    shutil.rmtree(tmp_folder)
                    self._log("Removed temporary directory : %s" % tmp_folder)
                else:
                    if filename is not None:
                        if os.path.isfile(os.path.join(tmp_folder, filename)):
                            logger.fdebug(
                                "%s Attempting to remove file: %s"
                                % (self.module, os.path.join(tmp_folder, filename))
                            )
                            try:
                                os.remove(os.path.join(tmp_folder, filename))
                            except Exception as e:
                                logger.warn(
                                    "%s [%s] Unable to remove file : %s"
                                    % (
                                        self.module,
                                        e,
                                        os.path.join(tmp_folder, filename),
                                    )
                                )
                        else:
                            if not os.listdir(tmp_folder):
                                logger.fdebug(
                                    "%s Tidying up. Deleting original folder location : %s"
                                    % (self.module, tmp_folder)
                                )
                                try:
                                    shutil.rmtree(tmp_folder)
                                except Exception as e:
                                    logger.warn(
                                        "%s [%s] Unable to delete original folder location: %s"
                                        % (self.module, e, tmp_folder)
                                    )
                                else:
                                    logger.fdebug(
                                        "%s Removed original folder location: %s"
                                        % (self.module, tmp_folder)
                                    )
                                    self._log(
                                        "Removed temporary directory : "
                                        + tmp_folder
                                    )
                            else:
                                self._log(
                                    "Failed to remove temporary directory: "
                                    + tmp_folder
                                )
                                logger.error(
                                    "%s %s not empty. Skipping removal of directory - this will either be caught in further post-processing or it will have to be manually deleted."
                                    % (self.module, tmp_folder)
                                )
                    else:
                        self._log(
                            "Failed to remove temporary directory: " + tmp_folder
                        )
                        logger.error(
                            "%s %s not empty. Skipping removal of directory - this will either be caught in further post-processing or it will have to be manually deleted."
                            % (self.module, tmp_folder)
                        )
        # cache cleanup runs regardless of cacheonly
        if mylar.CONFIG.ENABLE_META and all(
            [os.path.isdir(odir), "mylar_" in odir]
        ):
            # Regardless of the copy/move operation, we need to delete the files from within the cache directory, then remove the cache directory itself for the given issue.
            # sometimes during a meta, it retains the cbr as well after conversion depending on settings. Make sure to delete too thus the 'walk'.
            for filename in os.listdir(odir):
                filepath = os.path.join(odir, filename)
                try:
                    os.remove(filepath)
                except OSError:
                    pass
            if not os.listdir(odir):
                logger.fdebug(
                    "%s Tidying up. Deleting temporary cache directory : %s"
                    % (self.module, odir)
                )
                shutil.rmtree(odir)
                self._log("Removed temporary directory : %s" % odir)
            else:
                self._log("Failed to remove temporary directory: %s" % odir)
                logger.error(
                    "%s %s not empty. Skipping removal of temporary cache directory - this will either be caught in further post-processing or have to be manually deleted."
                    % (self.module, odir)
                )
    except (OSError, IOError):
        # best-effort cleanup: never fail post-processing over leftovers
        logger.fdebug(
            "%s Failed to remove directory - Processing will continue, but manual removal is necessary"
            % self.module
        )
        self._log("Failed to remove temporary directory")
def Process(self):
module = self.module
self._log("nzb name: %s" % self.nzb_name)
self._log("nzb folder: %s" % self.nzb_folder)
logger.fdebug("%s nzb name: %s" % (module, self.nzb_name))
logger.fdebug("%s nzb folder: %s" % (module, self.nzb_folder))
if self.ddl is False:
if mylar.USE_SABNZBD == 1:
if self.nzb_name != "Manual Run":
logger.fdebug("%s Using SABnzbd" % module)
logger.fdebug(
"%s NZB name as passed from SABnzbd: %s"
% (module, self.nzb_name)
)
if self.nzb_name == "Manual Run":
logger.fdebug("%s Manual Run Post-Processing enabled." % module)
else:
# if the SAB Directory option is enabled, let's use that folder name and append the jobname.
if all(
[
mylar.CONFIG.SAB_TO_MYLAR,
mylar.CONFIG.SAB_DIRECTORY is not None,
mylar.CONFIG.SAB_DIRECTORY != "None",
]
):
self.nzb_folder = os.path.join(
mylar.CONFIG.SAB_DIRECTORY, self.nzb_name
).encode(mylar.SYS_ENCODING)
logger.fdebug(
"%s SABnzbd Download folder option enabled. Directory set to : %s"
% (module, self.nzb_folder)
)
if mylar.USE_NZBGET == 1:
if self.nzb_name != "Manual Run":
logger.fdebug("%s Using NZBGET" % module)
logger.fdebug(
"%s NZB name as passed from NZBGet: %s"
% (module, self.nzb_name)
)
# if the NZBGet Directory option is enabled, let's use that folder name and append the jobname.
if self.nzb_name == "Manual Run":
logger.fdebug("%s Manual Run Post-Processing enabled." % module)
elif all(
[
mylar.CONFIG.NZBGET_DIRECTORY is not None,
mylar.CONFIG.NZBGET_DIRECTORY is not "None",
]
):
logger.fdebug(
"%s NZB name as passed from NZBGet: %s"
% (module, self.nzb_name)
)
self.nzb_folder = os.path.join(
mylar.CONFIG.NZBGET_DIRECTORY, self.nzb_name
).encode(mylar.SYS_ENCODING)
logger.fdebug(
"%s NZBGET Download folder option enabled. Directory set to : %s"
% (module, self.nzb_folder)
)
else:
logger.fdebug(
"%s Now performing post-processing of %s sent from DDL"
% (module, self.nzb_name)
)
myDB = db.DBConnection()
self.oneoffinlist = False
if any(
[
self.nzb_name == "Manual Run",
self.issueid is not None,
self.comicid is not None,
self.apicall is True,
]
):
if (
all(
[
self.issueid is None,
self.comicid is not None,
self.apicall is True,
]
)
or self.nzb_name == "Manual Run"
or all(
[
self.apicall is True,
self.comicid is None,
self.issueid is None,
self.nzb_name.startswith("0-Day"),
]
)
):
if self.comicid is not None:
logger.fdebug(
"%s Now post-processing pack directly against ComicID: %s"
% (module, self.comicid)
)
elif all(
[
self.apicall is True,
self.issueid is None,
self.comicid is None,
self.nzb_name.startswith("0-Day"),
]
):
logger.fdebug(
"%s Now post-processing 0-day pack: %s"
% (module, self.nzb_name)
)
else:
logger.fdebug("%s Manual Run initiated" % module)
# Manual postprocessing on a folder.
# first we get a parsed results list of the files being processed, and then poll against the sql to get a short list of hits.
flc = filechecker.FileChecker(
self.nzb_folder, justparse=True, pp_mode=True
)
filelist = flc.listFiles()
if filelist["comiccount"] == 0: # is None:
logger.warn(
"There were no files located - check the debugging logs if you think this is in error."
)
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
logger.info(
"I have located %s files that I should be able to post-process. Continuing..."
% filelist["comiccount"]
)
else:
if all([self.comicid is None, "_" not in self.issueid]):
cid = myDB.selectone(
"SELECT ComicID FROM issues where IssueID=?",
[str(self.issueid)],
).fetchone()
self.comicid = cid[0]
else:
if "_" in self.issueid:
logger.fdebug("Story Arc post-processing request detected.")
self.issuearcid = self.issueid
self.issueid = None
logger.fdebug(
"%s Now post-processing directly against StoryArcs - ComicID: %s / IssueArcID: %s"
% (module, self.comicid, self.issuearcid)
)
if self.issueid is not None:
logger.fdebug(
"%s Now post-processing directly against ComicID: %s / IssueID: %s"
% (module, self.comicid, self.issueid)
)
if self.issuearcid is None:
if self.nzb_name.lower().endswith(self.extensions):
flc = filechecker.FileChecker(
self.nzb_folder, file=self.nzb_name, pp_mode=True
)
fl = flc.listFiles()
filelist = {}
filelist["comiclist"] = [fl]
filelist["comiccount"] = len(filelist["comiclist"])
else:
flc = filechecker.FileChecker(
self.nzb_folder, justparse=True, pp_mode=True
)
filelist = flc.listFiles()
else:
filelist = {}
filelist["comiclist"] = []
filelist["comiccount"] = 0
# preload the entire ALT list in here.
alt_list = []
alt_db = myDB.select("SELECT * FROM Comics WHERE AlternateSearch != 'None'")
if alt_db is not None:
for aldb in alt_db:
as_d = filechecker.FileChecker(
AlternateSearch=helpers.conversion(aldb["AlternateSearch"])
)
as_dinfo = as_d.altcheck()
alt_list.append(
{
"AS_Alt": as_dinfo["AS_Alt"],
"AS_Tuple": as_dinfo["AS_Tuple"],
"AS_DyComicName": aldb["DynamicComicName"],
}
)
manual_arclist = []
oneoff_issuelist = []
manual_list = []
for fl in filelist["comiclist"]:
self.matched = False
as_d = filechecker.FileChecker()
as_dinfo = as_d.dynamic_replace(helpers.conversion(fl["series_name"]))
orig_seriesname = as_dinfo["mod_seriesname"]
mod_seriesname = as_dinfo["mod_seriesname"]
loopchk = []
if fl["alt_series"] is not None:
logger.fdebug(
"%s Alternate series naming detected: %s"
% (module, fl["alt_series"])
)
as_sinfo = as_d.dynamic_replace(
helpers.conversion(fl["alt_series"])
)
mod_altseriesname = as_sinfo["mod_seriesname"]
if all(
[mylar.CONFIG.ANNUALS_ON, "annual" in mod_altseriesname.lower()]
) or all(
[
mylar.CONFIG.ANNUALS_ON,
"special" in mod_altseriesname.lower(),
]
):
mod_altseriesname = re.sub(
"annual", "", mod_altseriesname, flags=re.I
).strip()
mod_altseriesname = re.sub(
"special", "", mod_altseriesname, flags=re.I
).strip()
if not any(
re.sub("[\|\s]", "", mod_altseriesname).lower() == x
for x in loopchk
):
loopchk.append(re.sub("[\|\s]", "", mod_altseriesname.lower()))
for x in alt_list:
cname = x["AS_DyComicName"]
for ab in x["AS_Alt"]:
tmp_ab = re.sub(" ", "", ab)
tmp_mod_seriesname = re.sub(" ", "", mod_seriesname)
if (
re.sub("\|", "", tmp_mod_seriesname.lower()).strip()
== re.sub("\|", "", tmp_ab.lower()).strip()
):
if not any(
re.sub("[\|\s]", "", cname.lower()) == x
for x in loopchk
):
loopchk.append(re.sub("[\|\s]", "", cname.lower()))
if all(
[mylar.CONFIG.ANNUALS_ON, "annual" in mod_seriesname.lower()]
) or all(
[mylar.CONFIG.ANNUALS_ON, "special" in mod_seriesname.lower()]
):
mod_seriesname = re.sub(
"annual", "", mod_seriesname, flags=re.I
).strip()
mod_seriesname = re.sub(
"special", "", mod_seriesname, flags=re.I
).strip()
# make sure we add back in the original parsed filename here.
if not any(
re.sub("[\|\s]", "", mod_seriesname).lower() == x for x in loopchk
):
loopchk.append(re.sub("[\|\s]", "", mod_seriesname.lower()))
if any([self.issueid is not None, self.comicid is not None]):
comicseries = myDB.select(
"SELECT * FROM comics WHERE ComicID=?", [self.comicid]
)
else:
if fl["issueid"] is not None:
logger.info("issueid detected in filename: %s" % fl["issueid"])
csi = myDB.selectone(
"SELECT i.ComicID, i.IssueID, i.Issue_Number, c.ComicName FROM comics as c JOIN issues as i ON c.ComicID = i.ComicID WHERE i.IssueID=?",
[fl["issueid"]],
).fetchone()
if csi is None:
csi = myDB.selectone(
"SELECT i.ComicID as comicid, i.IssueID, i.Issue_Number, a.ReleaseComicName, c.ComicName FROM comics as c JOIN annuals as a ON c.ComicID = a.ComicID WHERE a.IssueID=?",
[fl["issueid"]],
).fetchone()
if csi is not None:
annchk = "yes"
else:
continue
else:
annchk = "no"
if fl["sub"]:
logger.fdebug(
"%s[SUB: %s][CLOCATION: %s]"
% (module, fl["sub"], fl["comiclocation"])
)
clocation = os.path.join(
fl["comiclocation"],
fl["sub"],
helpers.conversion(fl["comicfilename"]),
)
else:
logger.fdebug(
"%s[CLOCATION] %s" % (module, fl["comiclocation"])
)
clocation = os.path.join(
fl["comiclocation"],
helpers.conversion(fl["comicfilename"]),
)
annualtype = None
if annchk == "yes":
if "Annual" in csi["ReleaseComicName"]:
annualtype = "Annual"
elif "Special" in csi["ReleaseComicName"]:
annualtype = "Special"
else:
if "Annual" in csi["ComicName"]:
annualtype = "Annual"
elif "Special" in csi["ComicName"]:
annualtype = "Special"
manual_list.append(
{
"ComicLocation": clocation,
"ComicID": csi["ComicID"],
"IssueID": csi["IssueID"],
"IssueNumber": csi["Issue_Number"],
"AnnualType": annualtype,
"ComicName": csi["ComicName"],
"Series": fl["series_name"],
"AltSeries": fl["alt_series"],
"One-Off": False,
"ForcedMatch": True,
}
)
logger.info("manual_list: %s" % manual_list)
break
else:
tmpsql = "SELECT * FROM comics WHERE DynamicComicName IN ({seq}) COLLATE NOCASE".format(
seq=",".join("?" * len(loopchk))
)
comicseries = myDB.select(tmpsql, tuple(loopchk))
if not comicseries or orig_seriesname != mod_seriesname:
if all(
[
"special" in orig_seriesname.lower(),
mylar.CONFIG.ANNUALS_ON,
orig_seriesname != mod_seriesname,
]
):
if not any(
re.sub("[\|\s]", "", orig_seriesname).lower() == x
for x in loopchk
):
loopchk.append(
re.sub("[\|\s]", "", orig_seriesname.lower())
)
tmpsql = "SELECT * FROM comics WHERE DynamicComicName IN ({seq}) COLLATE NOCASE".format(
seq=",".join("?" * len(loopchk))
)
comicseries = myDB.select(tmpsql, tuple(loopchk))
# if not comicseries:
# logger.error('[%s][%s] No Series named %s - checking against Story Arcs (just in case). If I do not find anything, maybe you should be running Import?' % (module, fl['comicfilename'], fl['series_name']))
# continue
watchvals = []
for wv in comicseries:
logger.info(
"Now checking: %s [%s]" % (wv["ComicName"], wv["ComicID"])
)
# do some extra checks in here to ignore these types:
# check for Paused status /
# check for Ended status and 100% completion of issues.
if wv["Status"] == "Paused" or (
wv["Have"] == wv["Total"]
and not any(
[
"Present" in wv["ComicPublished"],
helpers.now()[:4] in wv["ComicPublished"],
]
)
):
logger.warn(
"%s [%s] is either Paused or in an Ended status with 100%s completion. Ignoring for match."
% (wv["ComicName"], wv["ComicYear"], "%")
)
continue
wv_comicname = wv["ComicName"]
wv_comicpublisher = wv["ComicPublisher"]
wv_alternatesearch = wv["AlternateSearch"]
wv_comicid = wv["ComicID"]
if (
all([wv["Type"] != "Print", wv["Type"] != "Digital"])
and wv["Corrected_Type"] != "Print"
) or wv["Corrected_Type"] == "TPB":
wv_type = "TPB"
else:
wv_type = None
wv_seriesyear = wv["ComicYear"]
wv_comicversion = wv["ComicVersion"]
wv_publisher = wv["ComicPublisher"]
wv_total = wv["Total"]
if mylar.CONFIG.FOLDER_SCAN_LOG_VERBOSE:
logger.fdebug(
"Queuing to Check: %s [%s] -- %s"
% (wv["ComicName"], wv["ComicYear"], wv["ComicID"])
)
# force it to use the Publication Date of the latest issue instead of the Latest Date (which could be anything)
latestdate = myDB.select(
"SELECT IssueDate from issues WHERE ComicID=? order by ReleaseDate DESC",
[wv["ComicID"]],
)
if latestdate:
tmplatestdate = latestdate[0][0]
if tmplatestdate[:4] != wv["LatestDate"][:4]:
if tmplatestdate[:4] > wv["LatestDate"][:4]:
latestdate = tmplatestdate
else:
latestdate = wv["LatestDate"]
else:
latestdate = tmplatestdate
else:
latestdate = wv["LatestDate"]
if (
latestdate == "0000-00-00"
or latestdate == "None"
or latestdate is None
):
logger.fdebug(
"Forcing a refresh of series: %s as it appears to have incomplete issue dates."
% wv_comicname
)
updater.dbUpdate([wv_comicid])
logger.fdebug(
"Refresh complete for %s. Rechecking issue dates for completion."
% wv_comicname
)
latestdate = myDB.select(
"SELECT IssueDate from issues WHERE ComicID=? order by ReleaseDate DESC",
[wv["ComicID"]],
)
if latestdate:
tmplatestdate = latestdate[0][0]
if tmplatestdate[:4] != wv["LatestDate"][:4]:
if tmplatestdate[:4] > wv["LatestDate"][:4]:
latestdate = tmplatestdate
else:
latestdate = wv["LatestDate"]
else:
latestdate = tmplatestdate
else:
latestdate = wv["LatestDate"]
logger.fdebug(
"Latest Date (after forced refresh) set to :"
+ str(latestdate)
)
if (
latestdate == "0000-00-00"
or latestdate == "None"
or latestdate is None
):
logger.fdebug(
"Unable to properly attain the Latest Date for series: %s. Cannot check against this series for post-processing."
% wv_comicname
)
continue
watchvals.append(
{
"ComicName": wv_comicname,
"ComicPublisher": wv_comicpublisher,
"AlternateSearch": wv_alternatesearch,
"ComicID": wv_comicid,
"LastUpdated": wv["LastUpdated"],
"WatchValues": {
"SeriesYear": wv_seriesyear,
"LatestDate": latestdate,
"ComicVersion": wv_comicversion,
"Type": wv_type,
"Publisher": wv_publisher,
"Total": wv_total,
"ComicID": wv_comicid,
"IsArc": False,
},
}
)
ccnt = 0
nm = 0
for cs in watchvals:
wm = filechecker.FileChecker(
watchcomic=cs["ComicName"],
Publisher=cs["ComicPublisher"],
AlternateSearch=cs["AlternateSearch"],
manual=cs["WatchValues"],
)
watchmatch = wm.matchIT(fl)
if watchmatch["process_status"] == "fail":
nm += 1
continue
else:
try:
if (
cs["WatchValues"]["Type"] == "TPB"
and cs["WatchValues"]["Total"] > 1
):
if watchmatch["series_volume"] is not None:
just_the_digits = re.sub(
"[^0-9]", "", watchmatch["series_volume"]
).strip()
else:
just_the_digits = re.sub(
"[^0-9]", "", watchmatch["justthedigits"]
).strip()
else:
just_the_digits = watchmatch["justthedigits"]
except Exception as e:
logger.warn(
"[Exception: %s] Unable to properly match up/retrieve issue number (or volume) for this [CS: %s] [WATCHMATCH: %s]"
% (e, cs, watchmatch)
)
nm += 1
continue
if just_the_digits is not None:
temploc = just_the_digits.replace("_", " ")
temploc = re.sub("[\#']", "", temploc)
# logger.fdebug('temploc: %s' % temploc)
else:
temploc = None
datematch = "False"
if temploc is None and all(
[
cs["WatchValues"]["Type"] != "TPB",
cs["WatchValues"]["Type"] != "One-Shot",
]
):
logger.info(
"this should have an issue number to match to this particular series: %s"
% cs["ComicID"]
)
continue
if temploc is not None and (
any(
[
"annual" in temploc.lower(),
"special" in temploc.lower(),
]
)
and mylar.CONFIG.ANNUALS_ON is True
):
biannchk = re.sub("-", "", temploc.lower()).strip()
if "biannual" in biannchk:
logger.fdebug("%s Bi-Annual detected." % module)
fcdigit = helpers.issuedigits(
re.sub("biannual", "", str(biannchk)).strip()
)
else:
if "annual" in temploc.lower():
fcdigit = helpers.issuedigits(
re.sub(
"annual", "", str(temploc.lower())
).strip()
)
else:
fcdigit = helpers.issuedigits(
re.sub(
"special", "", str(temploc.lower())
).strip()
)
logger.fdebug(
"%s Annual/Special detected [%s]. ComicID assigned as %s"
% (module, fcdigit, cs["ComicID"])
)
annchk = "yes"
issuechk = myDB.select(
"SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?",
[cs["ComicID"], fcdigit],
)
else:
annchk = "no"
if temploc is not None:
fcdigit = helpers.issuedigits(temploc)
issuechk = myDB.select(
"SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?",
[cs["ComicID"], fcdigit],
)
else:
fcdigit = None
issuechk = myDB.select(
"SELECT * from issues WHERE ComicID=?",
[cs["ComicID"]],
)
if not issuechk:
try:
logger.fdebug(
"%s No corresponding issue #%s found for %s"
% (module, temploc, cs["ComicID"])
)
except:
continue
# check the last refresh date of the series, and if > than an hr try again:
c_date = cs["LastUpdated"]
if c_date is None:
logger.error(
"%s %s failed during a previous add /refresh as it has no Last Update timestamp. Forcing refresh now."
% (module, cs["ComicName"])
)
else:
c_obj_date = datetime.datetime.strptime(
c_date, "%Y-%m-%d %H:%M:%S"
)
n_date = datetime.datetime.now()
absdiff = abs(n_date - c_obj_date)
hours = (
absdiff.days * 24 * 60 * 60 + absdiff.seconds
) / 3600.0
if hours < 1:
logger.fdebug(
"%s %s [%s] Was refreshed less than 1 hours ago. Skipping Refresh at this time so we don't hammer things unnecessarily."
% (module, cs["ComicName"], cs["ComicID"])
)
continue
updater.dbUpdate([cs["ComicID"]])
logger.fdebug(
"%s Succssfully refreshed series - now re-querying against new data for issue #%s."
% (module, temploc)
)
if annchk == "yes":
issuechk = myDB.select(
"SELECT * from annuals WHERE ComicID=? AND Int_IssueNumber=?",
[cs["ComicID"], fcdigit],
)
else:
issuechk = myDB.select(
"SELECT * from issues WHERE ComicID=? AND Int_IssueNumber=?",
[cs["ComicID"], fcdigit],
)
if not issuechk:
logger.fdebug(
"%s No corresponding issue #%s found for %s even after refreshing. It might not have the information available as of yet..."
% (module, temploc, cs["ComicID"])
)
continue
for isc in issuechk:
datematch = "True"
datechkit = False
if (
isc["ReleaseDate"] is not None
and isc["ReleaseDate"] != "0000-00-00"
):
try:
if isc["DigitalDate"] != "0000-00-00" and int(
re.sub("-", "", isc["DigitalDate"]).strip()
) <= int(
re.sub("-", "", isc["ReleaseDate"]).strip()
):
monthval = isc["DigitalDate"]
watch_issueyear = isc["DigitalDate"][:4]
else:
monthval = isc["ReleaseDate"]
watch_issueyear = isc["ReleaseDate"][:4]
except:
monthval = isc["ReleaseDate"]
watch_issueyear = isc["ReleaseDate"][:4]
else:
try:
if isc["DigitalDate"] != "0000-00-00" and int(
re.sub("-", "", isc["DigitalDate"]).strip()
) <= int(
re.sub("-", "", isc["ReleaseDate"]).strip()
):
monthval = isc["DigitalDate"]
watch_issueyear = isc["DigitalDate"][:4]
else:
monthval = isc["IssueDate"]
watch_issueyear = isc["IssueDate"][:4]
except:
monthval = isc["IssueDate"]
watch_issueyear = isc["IssueDate"][:4]
if (
len(watchmatch) >= 1
and watchmatch["issue_year"] is not None
):
# if the # of matches is more than 1, we need to make sure we get the right series
# compare the ReleaseDate for the issue, to the found issue date in the filename.
# if ReleaseDate doesn't exist, use IssueDate
# if no issue date was found, then ignore.
logger.fdebug(
"%s[ISSUE-VERIFY] Now checking against %s - %s"
% (module, cs["ComicName"], cs["ComicID"])
)
issyr = None
# logger.fdebug(module + ' issuedate:' + str(isc['IssueDate']))
# logger.fdebug(module + ' isc: ' + str(isc['IssueDate'][5:7]))
# logger.info(module + ' ReleaseDate: ' + str(isc['ReleaseDate']))
# logger.info(module + ' IssueDate: ' + str(isc['IssueDate']))
if (
isc["DigitalDate"] is not None
and isc["DigitalDate"] != "0000-00-00"
):
if int(isc["DigitalDate"][:4]) < int(
watchmatch["issue_year"]
):
logger.fdebug(
"%s[ISSUE-VERIFY] %s is before the issue year of %s that was discovered in the filename"
% (
module,
isc["DigitalDate"],
watchmatch["issue_year"],
)
)
datematch = "False"
elif (
isc["ReleaseDate"] is not None
and isc["ReleaseDate"] != "0000-00-00"
):
if int(isc["ReleaseDate"][:4]) < int(
watchmatch["issue_year"]
):
logger.fdebug(
"%s[ISSUE-VERIFY] %s is before the issue year of %s that was discovered in the filename"
% (
module,
isc["ReleaseDate"],
watchmatch["issue_year"],
)
)
datematch = "False"
else:
if int(isc["IssueDate"][:4]) < int(
watchmatch["issue_year"]
):
logger.fdebug(
"%s[ISSUE-VERIFY] %s is before the issue year %s that was discovered in the filename"
% (
module,
isc["IssueDate"],
watchmatch["issue_year"],
)
)
datematch = "False"
if int(watch_issueyear) != int(
watchmatch["issue_year"]
):
if (
int(monthval[5:7]) == 11
or int(monthval[5:7]) == 12
):
issyr = int(monthval[:4]) + 1
logger.fdebug(
"%s[ISSUE-VERIFY] IssueYear (issyr) is %s"
% (module, issyr)
)
datechkit = True
elif (
int(monthval[5:7]) == 1
or int(monthval[5:7]) == 2
or int(monthval[5:7]) == 3
):
issyr = int(monthval[:4]) - 1
datechkit = True
if datechkit is True and issyr is not None:
logger.fdebug(
"%s[ISSUE-VERIFY] %s comparing to %s : rechecking by month-check versus year."
% (module, issyr, watchmatch["issue_year"])
)
datematch = "True"
if int(issyr) != int(watchmatch["issue_year"]):
logger.fdebug(
"%s[ISSUE-VERIFY][.:FAIL:.] Issue is before the modified issue year of %s"
% (module, issyr)
)
datematch = "False"
else:
if fcdigit is None:
logger.info(
"%s[ISSUE-VERIFY] Found matching issue for ComicID: %s / IssueID: %s"
% (module, cs["ComicID"], isc["IssueID"])
)
else:
logger.info(
"%s[ISSUE-VERIFY] Found matching issue # %s for ComicID: %s / IssueID: %s"
% (
module,
fcdigit,
cs["ComicID"],
isc["IssueID"],
)
)
if datematch == "True":
# need to reset this to False here so that the True doesn't carry down and avoid the year checks due to the True
datematch = "False"
lonevol = False
# if we get to here, we need to do some more comparisons just to make sure we have the right volume
# first we chk volume label if it exists, then we drop down to issue year
# if the above both don't exist, and there's more than one series on the watchlist (or the series is > v1)
# then spit out the error message and don't post-process it.
watch_values = cs["WatchValues"]
# logger.fdebug('WATCH_VALUES:' + str(watch_values))
if any(
[
watch_values["ComicVersion"] is None,
watch_values["ComicVersion"] == "None",
]
):
tmp_watchlist_vol = "1"
else:
tmp_watchlist_vol = re.sub(
"[^0-9]", "", watch_values["ComicVersion"]
).strip()
if all(
[
watchmatch["series_volume"] != "None",
watchmatch["series_volume"] is not None,
]
):
tmp_watchmatch_vol = re.sub(
"[^0-9]", "", watchmatch["series_volume"]
).strip()
if len(tmp_watchmatch_vol) == 4:
if int(tmp_watchmatch_vol) == int(
watch_values["SeriesYear"]
):
logger.fdebug(
"%s[ISSUE-VERIFY][SeriesYear-Volume MATCH] Series Year of %s matched to volume/year label of %s"
% (
module,
watch_values["SeriesYear"],
tmp_watchmatch_vol,
)
)
else:
logger.fdebug(
"%s[ISSUE-VERIFY][SeriesYear-Volume FAILURE] Series Year of %s DID NOT match to volume/year label of %s"
% (
module,
watch_values["SeriesYear"],
tmp_watchmatch_vol,
)
)
datematch = "False"
elif (
len(watchvals) > 1
and int(tmp_watchmatch_vol) >= 1
):
if int(tmp_watchmatch_vol) == int(
tmp_watchlist_vol
):
logger.fdebug(
"%s[ISSUE-VERIFY][SeriesYear-Volume MATCH] Volume label of series Year of %s matched to volume label of %s"
% (
module,
watch_values["ComicVersion"],
watchmatch["series_volume"],
)
)
lonevol = True
else:
logger.fdebug(
"%s[ISSUE-VERIFY][SeriesYear-Volume FAILURE] Volume label of Series Year of %s DID NOT match to volume label of %s"
% (
module,
watch_values["ComicVersion"],
watchmatch["series_volume"],
)
)
datematch = "False"
else:
if any(
[
tmp_watchlist_vol is None,
tmp_watchlist_vol == "None",
tmp_watchlist_vol == "",
]
):
logger.fdebug(
"%s[ISSUE-VERIFY][NO VOLUME PRESENT] No Volume label present for series. Dropping down to Issue Year matching."
% module
)
datematch = "False"
elif (
len(watchvals) == 1
and int(tmp_watchlist_vol) == 1
):
logger.fdebug(
"%s[ISSUE-VERIFY][Lone Volume MATCH] Volume label of %s indicates only volume for this series on your watchlist."
% (module, watch_values["ComicVersion"])
)
lonevol = True
elif int(tmp_watchlist_vol) > 1:
logger.fdebug(
"%s[ISSUE-VERIFY][Lone Volume FAILURE] Volume label of %s indicates that there is more than one volume for this series, but the one on your watchlist has no volume label set."
% (module, watch_values["ComicVersion"])
)
datematch = "False"
if datematch == "False" and all(
[
watchmatch["issue_year"] is not None,
watchmatch["issue_year"] != "None",
watch_issueyear is not None,
]
):
# now we see if the issue year matches exactly to what we have within Mylar.
if int(watch_issueyear) == int(
watchmatch["issue_year"]
):
logger.fdebug(
"%s[ISSUE-VERIFY][Issue Year MATCH] Issue Year of %s is a match to the year found in the filename of : %s"
% (
module,
watch_issueyear,
watchmatch["issue_year"],
)
)
datematch = "True"
else:
logger.fdebug(
"%s[ISSUE-VERIFY][Issue Year FAILURE] Issue Year of %s does NOT match the year found in the filename of : %s"
% (
module,
watch_issueyear,
watchmatch["issue_year"],
)
)
logger.fdebug(
"%s[ISSUE-VERIFY] Checking against complete date to see if month published could allow for different publication year."
% module
)
if issyr is not None:
if int(issyr) != int(
watchmatch["issue_year"]
):
logger.fdebug(
"%s[ISSUE-VERIFY][Issue Year FAILURE] Modified Issue year of %s is before the modified issue year of %s"
% (
module,
issyr,
watchmatch["issue_year"],
)
)
else:
logger.fdebug(
"%s[ISSUE-VERIFY][Issue Year MATCH] Modified Issue Year of %s is a match to the year found in the filename of : %s"
% (
module,
issyr,
watchmatch["issue_year"],
)
)
datematch = "True"
elif (
datematch == "False"
and watchmatch["issue_year"] is None
and lonevol is True
):
logger.fdebug(
"%s[LONE-VOLUME/NO YEAR][MATCH] Only Volume on watchlist matches, no year present in filename. Assuming match based on volume and title."
% module
)
datematch = "True"
if datematch == "True":
if watchmatch["sub"]:
logger.fdebug(
"%s[SUB: %s][CLOCATION: %s]"
% (
module,
watchmatch["sub"],
watchmatch["comiclocation"],
)
)
clocation = os.path.join(
watchmatch["comiclocation"],
watchmatch["sub"],
helpers.conversion(
watchmatch["comicfilename"]
),
)
if not os.path.exists(clocation):
scrubs = re.sub(
watchmatch["comiclocation"],
"",
watchmatch["sub"],
).strip()
if scrubs[:2] == "//" or scrubs[:2] == "\\":
scrubs = scrubs[1:]
if os.path.exists(scrubs):
logger.fdebug(
"[MODIFIED CLOCATION] %s"
% scrubs
)
clocation = scrubs
else:
logger.fdebug(
"%s[CLOCATION] %s"
% (module, watchmatch["comiclocation"])
)
if self.issueid is not None and os.path.isfile(
watchmatch["comiclocation"]
):
clocation = watchmatch["comiclocation"]
else:
clocation = os.path.join(
watchmatch["comiclocation"],
helpers.conversion(
watchmatch["comicfilename"]
),
)
annualtype = None
if annchk == "yes":
if "Annual" in isc["ReleaseComicName"]:
annualtype = "Annual"
elif "Special" in isc["ReleaseComicName"]:
annualtype = "Special"
else:
if "Annual" in isc["ComicName"]:
annualtype = "Annual"
elif "Special" in isc["ComicName"]:
annualtype = "Special"
manual_list.append(
{
"ComicLocation": clocation,
"ComicID": cs["ComicID"],
"IssueID": isc["IssueID"],
"IssueNumber": isc["Issue_Number"],
"AnnualType": annualtype,
"ComicName": cs["ComicName"],
"Series": watchmatch["series_name"],
"AltSeries": watchmatch["alt_series"],
"One-Off": False,
"ForcedMatch": False,
}
)
break
else:
logger.fdebug(
"%s[NON-MATCH: %s-%s] Incorrect series - not populating..continuing post-processing"
% (module, cs["ComicName"], cs["ComicID"])
)
continue
else:
logger.fdebug(
"%s[NON-MATCH: %s-%s] Incorrect series - not populating..continuing post-processing"
% (module, cs["ComicName"], cs["ComicID"])
)
continue
if datematch == "True":
xmld = filechecker.FileChecker()
xmld1 = xmld.dynamic_replace(
helpers.conversion(cs["ComicName"])
)
xseries = xmld1["mod_seriesname"].lower()
xmld2 = xmld.dynamic_replace(
helpers.conversion(watchmatch["series_name"])
)
xfile = xmld2["mod_seriesname"].lower()
if re.sub("\|", "", xseries) == re.sub("\|", "", xfile):
logger.fdebug(
"%s[DEFINITIVE-NAME MATCH] Definitive name match exactly to : %s [%s]"
% (module, watchmatch["series_name"], cs["ComicID"])
)
if len(manual_list) > 1:
manual_list = [
item
for item in manual_list
if all(
[
item["IssueID"] == isc["IssueID"],
item["AnnualType"] is not None,
]
)
or all(
[
item["IssueID"] == isc["IssueID"],
item["ComicLocation"] == clocation,
]
)
or all(
[
item["IssueID"] != isc["IssueID"],
item["ComicLocation"] != clocation,
]
)
]
self.matched = True
else:
continue # break
if datematch == "True":
logger.fdebug(
"%s[SUCCESSFUL MATCH: %s-%s] Match verified for %s"
% (
module,
cs["ComicName"],
cs["ComicID"],
helpers.conversion(fl["comicfilename"]),
)
)
break
elif self.matched is True:
logger.warn(
"%s[MATCH: %s - %s] We matched by name for this series, but cannot find a corresponding issue number in the series list."
% (module, cs["ComicName"], cs["ComicID"])
)
# we should setup for manual post-processing of story-arc issues here
# we can also search by ComicID to just grab those particular arcs as an alternative as well (not done)
# as_d = filechecker.FileChecker()
# as_dinfo = as_d.dynamic_replace(helpers.conversion(fl['series_name']))
# mod_seriesname = as_dinfo['mod_seriesname']
# arcloopchk = []
# for x in alt_list:
# cname = x['AS_DyComicName']
# for ab in x['AS_Alt']:
# if re.sub('[\|\s]', '', mod_seriesname.lower()).strip() in re.sub('[\|\s]', '', ab.lower()).strip():
# if not any(re.sub('[\|\s]', '', cname.lower()) == x for x in arcloopchk):
# arcloopchk.append(re.sub('[\|\s]', '', cname.lower()))
##make sure we add back in the original parsed filename here.
# if not any(re.sub('[\|\s]', '', mod_seriesname).lower() == x for x in arcloopchk):
# arcloopchk.append(re.sub('[\|\s]', '', mod_seriesname.lower()))
if self.issuearcid is None:
tmpsql = "SELECT * FROM storyarcs WHERE DynamicComicName IN ({seq}) COLLATE NOCASE".format(
seq=",".join("?" * len(loopchk))
) # len(arcloopchk)))
arc_series = myDB.select(tmpsql, tuple(loopchk)) # arcloopchk))
else:
if self.issuearcid[0] == "S":
self.issuearcid = self.issuearcid[1:]
arc_series = myDB.select(
"SELECT * FROM storyarcs WHERE IssueArcID=?", [self.issuearcid]
)
if arc_series is None:
logger.error(
"%s No Story Arcs in Watchlist that contain that particular series - aborting Manual Post Processing. Maybe you should be running Import?"
% module
)
return
else:
arcvals = []
for av in arc_series:
arcvals.append(
{
"ComicName": av["ComicName"],
"ArcValues": {
"StoryArc": av["StoryArc"],
"StoryArcID": av["StoryArcID"],
"IssueArcID": av["IssueArcID"],
"ComicName": av["ComicName"],
"DynamicComicName": av["DynamicComicName"],
"ComicPublisher": av["IssuePublisher"],
"Publisher": av["Publisher"],
"IssueID": av["IssueID"],
"IssueNumber": av["IssueNumber"],
"IssueYear": av[
"IssueYear"
], # for some reason this is empty
"ReadingOrder": av["ReadingOrder"],
"IssueDate": av["IssueDate"],
"Status": av["Status"],
"Location": av["Location"],
},
"WatchValues": {
"SeriesYear": av["SeriesYear"],
"LatestDate": av["IssueDate"],
"ComicVersion": av["Volume"],
"ComicID": av["ComicID"],
"Publisher": av["IssuePublisher"],
"Total": av[
"TotalIssues"
], # this will return the total issues in the arc (not needed for this)
"Type": av["Type"],
"IsArc": True,
},
}
)
ccnt = 0
nm = 0
from collections import defaultdict
res = defaultdict(list)
for acv in arcvals:
if len(manual_list) == 0:
res[acv["ComicName"]].append(
{
"ArcValues": acv["ArcValues"],
"WatchValues": acv["WatchValues"],
}
)
else:
acv_check = [
x
for x in manual_list
if x["ComicID"] == acv["WatchValues"]["ComicID"]
]
if acv_check:
res[acv["ComicName"]].append(
{
"ArcValues": acv["ArcValues"],
"WatchValues": acv["WatchValues"],
}
)
if len(res) > 0:
logger.fdebug(
"%s Now Checking if %s issue(s) may also reside in one of the storyarc's that I am watching."
% (module, len(res))
)
for k, v in res.items():
i = 0
# k is ComicName
# v is ArcValues and WatchValues
while i < len(v):
if k is None or k == "None":
pass
else:
arcm = filechecker.FileChecker(
watchcomic=k,
Publisher=v[i]["ArcValues"]["ComicPublisher"],
manual=v[i]["WatchValues"],
)
arcmatch = arcm.matchIT(fl)
# logger.fdebug('arcmatch: ' + str(arcmatch))
if arcmatch["process_status"] == "fail":
nm += 1
else:
try:
if all(
[
v[i]["WatchValues"]["Type"] == "TPB",
v[i]["WatchValues"]["Total"] > 1,
]
) or all(
[
v[i]["WatchValues"]["Type"] == "One-Shot",
v[i]["WatchValues"]["Total"] == 1,
]
):
if watchmatch["series_volume"] is not None:
just_the_digits = re.sub(
"[^0-9]", "", arcmatch["series_volume"]
).strip()
else:
just_the_digits = re.sub(
"[^0-9]", "", arcmatch["justthedigits"]
).strip()
else:
just_the_digits = arcmatch["justthedigits"]
except Exception as e:
logger.warn(
"[Exception: %s] Unable to properly match up/retrieve issue number (or volume) for this [CS: %s] [WATCHMATCH: %s]"
% (e, v[i]["ArcValues"], v[i]["WatchValues"])
)
nm += 1
continue
if just_the_digits is not None:
temploc = just_the_digits.replace("_", " ")
temploc = re.sub("[\#']", "", temploc)
# logger.fdebug('temploc: %s' % temploc)
else:
if any(
[
v[i]["WatchValues"]["Type"] == "TPB",
v[i]["WatchValues"]["Type"] == "One-Shot",
]
):
temploc = "1"
else:
temploc = None
if temploc is not None and helpers.issuedigits(
temploc
) != helpers.issuedigits(
v[i]["ArcValues"]["IssueNumber"]
):
# logger.fdebug('issues dont match. Skipping')
i += 1
continue
else:
if temploc is not None and (
any(
[
"annual" in temploc.lower(),
"special" in temploc.lower(),
]
)
and mylar.CONFIG.ANNUALS_ON is True
):
biannchk = re.sub(
"-", "", temploc.lower()
).strip()
if "biannual" in biannchk:
logger.fdebug(
"%s Bi-Annual detected." % module
)
fcdigit = helpers.issuedigits(
re.sub(
"biannual", "", str(biannchk)
).strip()
)
else:
if "annual" in temploc.lower():
fcdigit = helpers.issuedigits(
re.sub(
"annual",
"",
str(temploc.lower()),
).strip()
)
else:
fcdigit = helpers.issuedigits(
re.sub(
"special",
"",
str(temploc.lower()),
).strip()
)
logger.fdebug(
"%s Annual detected [%s]. ComicID assigned as %s"
% (
module,
fcdigit,
v[i]["WatchValues"]["ComicID"],
)
)
annchk = "yes"
issuechk = myDB.selectone(
"SELECT * from storyarcs WHERE ComicID=? AND Int_IssueNumber=?",
[v[i]["WatchValues"]["ComicID"], fcdigit],
).fetchone()
else:
annchk = "no"
if temploc is not None:
fcdigit = helpers.issuedigits(temploc)
issuechk = myDB.select(
"SELECT * from storyarcs WHERE ComicID=? AND Int_IssueNumber=?",
[
v[i]["WatchValues"]["ComicID"],
fcdigit,
],
)
else:
fcdigit = None
issuechk = myDB.select(
"SELECT * from storyarcs WHERE ComicID=?",
[v[i]["WatchValues"]["ComicID"]],
)
if issuechk is None:
try:
logger.fdebug(
"%s No corresponding issue # found for %s"
% (module, v[i]["WatchValues"]["ComicID"])
)
except:
continue
else:
for isc in issuechk:
datematch = "True"
datechkit = False
if (
isc["ReleaseDate"] is not None
and isc["ReleaseDate"] != "0000-00-00"
):
try:
if isc[
"DigitalDate"
] != "0000-00-00" and int(
re.sub(
"-", "", isc["DigitalDate"]
).strip()
) <= int(
re.sub(
"-", "", isc["ReleaseDate"]
).strip()
):
monthval = isc["DigitalDate"]
arc_issueyear = isc["DigitalDate"][
:4
]
else:
monthval = isc["ReleaseDate"]
arc_issueyear = isc["ReleaseDate"][
:4
]
except:
monthval = isc["ReleaseDate"]
arc_issueyear = isc["ReleaseDate"][:4]
else:
try:
if isc[
"DigitalDate"
] != "0000-00-00" and int(
re.sub(
"-", "", isc["DigitalDate"]
).strip()
) <= int(
re.sub(
"-", "", isc["ReleaseDate"]
).strip()
):
monthval = isc["DigitalDate"]
arc_issueyear = isc["DigitalDate"][
:4
]
else:
monthval = isc["IssueDate"]
arc_issueyear = isc["IssueDate"][:4]
except:
monthval = isc["IssueDate"]
arc_issueyear = isc["IssueDate"][:4]
if (
len(arcmatch) >= 1
and arcmatch["issue_year"] is not None
):
# if the # of matches is more than 1, we need to make sure we get the right series
# compare the ReleaseDate for the issue, to the found issue date in the filename.
# if ReleaseDate doesn't exist, use IssueDate
# if no issue date was found, then ignore.
logger.fdebug(
"%s[ARC ISSUE-VERIFY] Now checking against %s - %s"
% (
module,
k,
v[i]["WatchValues"]["ComicID"],
)
)
issyr = None
# logger.fdebug('issuedate: %s' % isc['IssueDate'])
# logger.fdebug('issuechk: %s' % isc['IssueDate'][5:7])
# logger.fdebug('StoreDate %s' % isc['ReleaseDate'])
# logger.fdebug('IssueDate: %s' % isc['IssueDate'])
if (
isc["DigitalDate"] is not None
and isc["DigitalDate"] != "0000-00-00"
):
if int(isc["DigitalDate"][:4]) < int(
arcmatch["issue_year"]
):
logger.fdebug(
"%s[ARC ISSUE-VERIFY] %s is before the issue year of %s that was discovered in the filename"
% (
module,
isc["DigitalDate"],
arcmatch["issue_year"],
)
)
datematch = "False"
elif all(
[
isc["ReleaseDate"] is not None,
isc["ReleaseDate"] != "0000-00-00",
]
):
if isc["ReleaseDate"] == "0000-00-00":
datevalue = isc["IssueDate"]
else:
datevalue = isc["ReleaseDate"]
if int(datevalue[:4]) < int(
arcmatch["issue_year"]
):
logger.fdebug(
"%s[ARC ISSUE-VERIFY] %s is before the issue year %s that was discovered in the filename"
% (
module,
datevalue[:4],
arcmatch["issue_year"],
)
)
datematch = "False"
elif all(
[
isc["IssueDate"] is not None,
isc["IssueDate"] != "0000-00-00",
]
):
if isc["IssueDate"] == "0000-00-00":
datevalue = isc["ReleaseDate"]
else:
datevalue = isc["IssueDate"]
if int(datevalue[:4]) < int(
arcmatch["issue_year"]
):
logger.fdebug(
"%s[ARC ISSUE-VERIFY] %s is before the issue year of %s that was discovered in the filename"
% (
module,
datevalue[:4],
arcmatch["issue_year"],
)
)
datematch = "False"
else:
if int(isc["IssueDate"][:4]) < int(
arcmatch["issue_year"]
):
logger.fdebug(
"%s[ARC ISSUE-VERIFY] %s is before the issue year %s that was discovered in the filename"
% (
module,
isc["IssueDate"],
arcmatch["issue_year"],
)
)
datematch = "False"
if int(arc_issueyear) != int(
arcmatch["issue_year"]
):
if (
int(monthval[5:7]) == 11
or int(monthval[5:7]) == 12
):
issyr = int(monthval[:4]) + 1
datechkit = True
logger.fdebug(
"%s[ARC ISSUE-VERIFY] IssueYear (issyr) is %s"
% (module, issyr)
)
elif (
int(monthval[5:7]) == 1
or int(monthval[5:7]) == 2
or int(monthval[5:7]) == 3
):
issyr = int(monthval[:4]) - 1
datechkit = True
if (
datechkit is True
and issyr is not None
):
logger.fdebug(
"%s[ARC ISSUE-VERIFY] %s comparing to %s : rechecking by month-check versus year."
% (
module,
issyr,
arcmatch["issue_year"],
)
)
datematch = "True"
if int(issyr) != int(
arcmatch["issue_year"]
):
logger.fdebug(
"%s[.:FAIL:.] Issue is before the modified issue year of %s"
% (module, issyr)
)
datematch = "False"
else:
if fcdigit is None:
logger.info(
"%s Found matching issue for ComicID: %s / IssueID: %s"
% (
module,
v[i]["WatchValues"]["ComicID"],
isc["IssueID"],
)
)
else:
logger.info(
"%s Found matching issue # %s for ComicID: %s / IssueID: %s"
% (
module,
fcdigit,
v[i]["WatchValues"]["ComicID"],
isc["IssueID"],
)
)
logger.fdebug("datematch: %s" % datematch)
logger.fdebug(
"temploc: %s" % helpers.issuedigits(temploc)
)
logger.fdebug(
"arcissue: %s"
% helpers.issuedigits(
v[i]["ArcValues"]["IssueNumber"]
)
)
if datematch == "True" and helpers.issuedigits(
temploc
) == helpers.issuedigits(
v[i]["ArcValues"]["IssueNumber"]
):
# reset datematch here so it doesn't carry the value down and avoid year checks
datematch = "False"
arc_values = v[i]["WatchValues"]
if any(
[
arc_values["ComicVersion"] is None,
arc_values["ComicVersion"]
== "None",
]
):
tmp_arclist_vol = "1"
else:
tmp_arclist_vol = re.sub(
"[^0-9]",
"",
arc_values["ComicVersion"],
).strip()
if all(
[
arcmatch["series_volume"] != "None",
arcmatch["series_volume"]
is not None,
]
):
tmp_arcmatch_vol = re.sub(
"[^0-9]",
"",
arcmatch["series_volume"],
).strip()
if len(tmp_arcmatch_vol) == 4:
if int(tmp_arcmatch_vol) == int(
arc_values["SeriesYear"]
):
logger.fdebug(
"%s[ARC ISSUE-VERIFY][SeriesYear-Volume MATCH] Series Year of %s matched to volume/year label of %s"
% (
module,
arc_values[
"SeriesYear"
],
tmp_arcmatch_vol,
)
)
else:
logger.fdebug(
"%s[ARC ISSUE-VERIFY][SeriesYear-Volume FAILURE] Series Year of %s DID NOT match to volume/year label of %s"
% (
module,
arc_values[
"SeriesYear"
],
tmp_arcmatch_vol,
)
)
datematch = "False"
if (
len(arcvals) > 1
and int(tmp_arcmatch_vol) >= 1
):
if int(tmp_arcmatch_vol) == int(
tmp_arclist_vol
):
logger.fdebug(
"%s[ARC ISSUE-VERIFY][SeriesYear-Volume MATCH] Volume label of series Year of %s matched to volume label of %s"
% (
module,
arc_values[
"ComicVersion"
],
arcmatch[
"series_volume"
],
)
)
else:
logger.fdebug(
"%s[ARC ISSUE-VERIFY][SeriesYear-Volume FAILURE] Volume label of Series Year of %s DID NOT match to volume label of %s"
% (
module,
arc_values[
"ComicVersion"
],
arcmatch[
"series_volume"
],
)
)
datematch = "False"
else:
if any(
[
tmp_arclist_vol is None,
tmp_arclist_vol == "None",
tmp_arclist_vol == "",
]
):
logger.fdebug(
"%s[ARC ISSUE-VERIFY][NO VOLUME PRESENT] No Volume label present for series. Dropping down to Issue Year matching."
% module
)
datematch = "False"
elif (
len(arcvals) == 1
and int(tmp_arclist_vol) == 1
):
logger.fdebug(
"%s[ARC ISSUE-VERIFY][Lone Volume MATCH] Volume label of %s indicates only volume for this series on your watchlist."
% (
module,
arc_values["ComicVersion"],
)
)
elif int(tmp_arclist_vol) > 1:
logger.fdebug(
"%s[ARC ISSUE-VERIFY][Lone Volume FAILURE] Volume label of %s indicates that there is more than one volume for this series, but the one on your watchlist has no volume label set."
% (
module,
arc_values["ComicVersion"],
)
)
datematch = "False"
if datematch == "False" and all(
[
arcmatch["issue_year"] is not None,
arcmatch["issue_year"] != "None",
arc_issueyear is not None,
]
):
# now we see if the issue year matches exactly to what we have within Mylar.
if int(arc_issueyear) == int(
arcmatch["issue_year"]
):
logger.fdebug(
"%s[ARC ISSUE-VERIFY][Issue Year MATCH] Issue Year of %s is a match to the year found in the filename of : %s"
% (
module,
arc_issueyear,
arcmatch["issue_year"],
)
)
datematch = "True"
else:
logger.fdebug(
"%s[ARC ISSUE-VERIFY][Issue Year FAILURE] Issue Year of %s does NOT match the year found in the filename of : %s"
% (
module,
arc_issueyear,
arcmatch["issue_year"],
)
)
logger.fdebug(
"%s[ARC ISSUE-VERIFY] Checking against complete date to see if month published could allow for different publication year."
% module
)
if issyr is not None:
if int(issyr) != int(
arcmatch["issue_year"]
):
logger.fdebug(
"%s[ARC ISSUE-VERIFY][Issue Year FAILURE] Modified Issue year of %s is before the modified issue year of %s"
% (
module,
issyr,
arcmatch[
"issue_year"
],
)
)
else:
logger.fdebug(
"%s[ARC ISSUE-VERIFY][Issue Year MATCH] Modified Issue Year of %s is a match to the year found in the filename of : %s"
% (
module,
issyr,
arcmatch[
"issue_year"
],
)
)
datematch = "True"
if datematch == "True":
passit = False
if len(manual_list) > 0:
if any(
[
v[i]["ArcValues"]["IssueID"]
== x["IssueID"]
for x in manual_list
]
):
logger.info(
"[STORY-ARC POST-PROCESSING] IssueID %s exists in your watchlist. Bypassing Story-Arc post-processing performed later."
% v[i]["ArcValues"][
"IssueID"
]
)
# add in the storyarcid into the manual list so it will perform story-arc functions after normal manual PP is finished.
for a in manual_list:
if (
a["IssueID"]
== v[i]["ArcValues"][
"IssueID"
]
):
a["IssueArcID"] = v[i][
"ArcValues"
]["IssueArcID"]
break
passit = True
if passit == False:
tmpfilename = helpers.conversion(
arcmatch["comicfilename"]
)
if arcmatch["sub"]:
clocation = os.path.join(
arcmatch["comiclocation"],
arcmatch["sub"],
tmpfilename,
)
else:
clocation = os.path.join(
arcmatch["comiclocation"],
tmpfilename,
)
logger.info(
"[%s #%s] MATCH: %s / %s / %s"
% (
k,
isc["IssueNumber"],
clocation,
isc["IssueID"],
v[i]["ArcValues"][
"IssueID"
],
)
)
if (
v[i]["ArcValues"]["Publisher"]
is None
):
arcpublisher = v[i][
"ArcValues"
]["ComicPublisher"]
else:
arcpublisher = v[i][
"ArcValues"
]["Publisher"]
manual_arclist.append(
{
"ComicLocation": clocation,
"Filename": tmpfilename,
"ComicID": v[i][
"WatchValues"
]["ComicID"],
"IssueID": v[i][
"ArcValues"
]["IssueID"],
"IssueNumber": v[i][
"ArcValues"
]["IssueNumber"],
"StoryArc": v[i][
"ArcValues"
]["StoryArc"],
"StoryArcID": v[i][
"ArcValues"
]["StoryArcID"],
"IssueArcID": v[i][
"ArcValues"
]["IssueArcID"],
"Publisher": arcpublisher,
"ReadingOrder": v[i][
"ArcValues"
]["ReadingOrder"],
"ComicName": k,
}
)
logger.info(
"%s[SUCCESSFUL MATCH: %s-%s] Match verified for %s"
% (
module,
k,
v[i]["WatchValues"][
"ComicID"
],
arcmatch["comicfilename"],
)
)
self.matched = True
break
else:
logger.fdebug(
"%s[NON-MATCH: %s-%s] Incorrect series - not populating..continuing post-processing"
% (
module,
k,
v[i]["WatchValues"]["ComicID"],
)
)
i += 1
if self.matched is False:
# one-off manual pp'd of torrents
if all(
[
"0-Day Week" in self.nzb_name,
mylar.CONFIG.PACK_0DAY_WATCHLIST_ONLY is True,
]
):
pass
else:
oneofflist = myDB.select(
"select s.Issue_Number, s.ComicName, s.IssueID, s.ComicID, s.Provider, w.format, w.PUBLISHER, w.weeknumber, w.year from snatched as s inner join nzblog as n on s.IssueID = n.IssueID inner join weekly as w on s.IssueID = w.IssueID WHERE n.OneOff = 1;"
) # (s.Provider ='32P' or s.Provider='WWT' or s.Provider='DEM') AND n.OneOff = 1;")
# oneofflist = myDB.select("select s.Issue_Number, s.ComicName, s.IssueID, s.ComicID, s.Provider, w.PUBLISHER, w.weeknumber, w.year from snatched as s inner join nzblog as n on s.IssueID = n.IssueID and s.Hash is not NULL inner join weekly as w on s.IssueID = w.IssueID WHERE n.OneOff = 1;") #(s.Provider ='32P' or s.Provider='WWT' or s.Provider='DEM') AND n.OneOff = 1;")
if not oneofflist:
pass # continue
else:
logger.fdebug(
"%s[ONEOFF-SELECTION][self.nzb_name: %s]"
% (module, self.nzb_name)
)
oneoffvals = []
for ofl in oneofflist:
# logger.info('[ONEOFF-SELECTION] ofl: %s' % ofl)
oneoffvals.append(
{
"ComicName": ofl["ComicName"],
"ComicPublisher": ofl["PUBLISHER"],
"Issue_Number": ofl["Issue_Number"],
"AlternateSearch": None,
"ComicID": ofl["ComicID"],
"IssueID": ofl["IssueID"],
"WatchValues": {
"SeriesYear": None,
"LatestDate": None,
"ComicVersion": None,
"Publisher": ofl["PUBLISHER"],
"Total": None,
"Type": ofl["format"],
"ComicID": ofl["ComicID"],
"IsArc": False,
},
}
)
# this seems redundant to scan in all over again...
# for fl in filelist['comiclist']:
for ofv in oneoffvals:
# logger.info('[ONEOFF-SELECTION] ofv: %s' % ofv)
wm = filechecker.FileChecker(
watchcomic=ofv["ComicName"],
Publisher=ofv["ComicPublisher"],
AlternateSearch=None,
manual=ofv["WatchValues"],
)
# if fl['sub'] is not None:
# pathtofile = os.path.join(fl['comiclocation'], fl['sub'], fl['comicfilename'])
# else:
# pathtofile = os.path.join(fl['comiclocation'], fl['comicfilename'])
watchmatch = wm.matchIT(fl)
if watchmatch["process_status"] == "fail":
nm += 1
continue
else:
try:
if (
ofv["WatchValues"]["Type"] is not None
and ofv["WatchValues"]["Total"] > 1
):
if watchmatch["series_volume"] is not None:
just_the_digits = re.sub(
"[^0-9]",
"",
watchmatch["series_volume"],
).strip()
else:
just_the_digits = re.sub(
"[^0-9]",
"",
watchmatch["justthedigits"],
).strip()
else:
just_the_digits = watchmatch[
"justthedigits"
]
except Exception as e:
logger.warn(
"[Exception: %s] Unable to properly match up/retrieve issue number (or volume) for this [CS: %s] [WATCHMATCH: %s]"
% (e, cs, watchmatch)
)
nm += 1
continue
if just_the_digits is not None:
temploc = just_the_digits.replace("_", " ")
temploc = re.sub("[\#']", "", temploc)
logger.fdebug("temploc: %s" % temploc)
else:
temploc = None
logger.info("watchmatch: %s" % watchmatch)
if temploc is not None:
if "annual" in temploc.lower():
biannchk = re.sub(
"-", "", temploc.lower()
).strip()
if "biannual" in biannchk:
logger.fdebug(
"%s Bi-Annual detected." % module
)
fcdigit = helpers.issuedigits(
re.sub(
"biannual", "", str(biannchk)
).strip()
)
else:
fcdigit = helpers.issuedigits(
re.sub(
"annual", "", str(temploc.lower())
).strip()
)
logger.fdebug(
"%s Annual detected [%s]. ComicID assigned as %s"
% (module, fcdigit, ofv["ComicID"])
)
annchk = "yes"
else:
fcdigit = helpers.issuedigits(temploc)
if (
temploc is not None
and fcdigit
== helpers.issuedigits(ofv["Issue_Number"])
or all(
[
temploc is None,
helpers.issuedigits(ofv["Issue_Number"])
== "1",
]
)
):
if watchmatch["sub"]:
clocation = os.path.join(
watchmatch["comiclocation"],
watchmatch["sub"],
helpers.conversion(
watchmatch["comicfilename"]
),
)
if not os.path.exists(clocation):
scrubs = re.sub(
watchmatch["comiclocation"],
"",
watchmatch["sub"],
).strip()
if scrubs[:2] == "//" or scrubs[:2] == "\\":
scrubs = scrubs[1:]
if os.path.exists(scrubs):
logger.fdebug(
"[MODIFIED CLOCATION] %s"
% scrubs
)
clocation = scrubs
else:
if self.issueid is not None and os.path.isfile(
watchmatch["comiclocation"]
):
clocation = watchmatch["comiclocation"]
else:
clocation = os.path.join(
watchmatch["comiclocation"],
helpers.conversion(
watchmatch["comicfilename"]
),
)
oneoff_issuelist.append(
{
"ComicLocation": clocation,
"ComicID": ofv["ComicID"],
"IssueID": ofv["IssueID"],
"IssueNumber": ofv["Issue_Number"],
"ComicName": ofv["ComicName"],
"One-Off": True,
}
)
self.oneoffinlist = True
else:
logger.fdebug(
"%s No corresponding issue # in dB found for %s # %s"
% (
module,
ofv["ComicName"],
ofv["Issue_Number"],
)
)
continue
logger.fdebug(
"%s[SUCCESSFUL MATCH: %s-%s] Match Verified for %s"
% (
module,
ofv["ComicName"],
ofv["ComicID"],
helpers.conversion(fl["comicfilename"]),
)
)
self.matched = True
break
if filelist["comiccount"] > 0:
logger.fdebug(
"%s There are %s files found that match on your watchlist, %s files are considered one-off's, and %s files do not match anything"
% (
module,
len(manual_list),
len(oneoff_issuelist),
int(filelist["comiccount"]) - len(manual_list),
)
)
delete_arc = []
if len(manual_arclist) > 0:
logger.info(
"[STORY-ARC MANUAL POST-PROCESSING] I have found %s issues that belong to Story Arcs. Flinging them into the correct directories."
% len(manual_arclist)
)
for ml in manual_arclist:
issueid = ml["IssueID"]
ofilename = orig_filename = ml["ComicLocation"]
logger.info(
"[STORY-ARC POST-PROCESSING] Enabled for %s" % ml["StoryArc"]
)
if all(
[
mylar.CONFIG.STORYARCDIR is True,
mylar.CONFIG.COPY2ARCDIR is True,
]
):
grdst = helpers.arcformat(
ml["StoryArc"],
helpers.spantheyears(ml["StoryArcID"]),
ml["Publisher"],
)
logger.info("grdst: %s" % grdst)
# tag the meta.
metaresponse = None
crcvalue = helpers.crc(ofilename)
if mylar.CONFIG.ENABLE_META:
logger.info(
"[STORY-ARC POST-PROCESSING] Metatagging enabled - proceeding..."
)
try:
import cmtagmylar
metaresponse = cmtagmylar.run(
self.nzb_folder, issueid=issueid, filename=ofilename
)
except ImportError:
logger.warn(
"%s comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/"
% module
)
metaresponse = "fail"
if metaresponse == "fail":
logger.fdebug(
"%s Unable to write metadata successfully - check mylar.log file. Attempting to continue without metatagging..."
% module
)
elif any(
[
metaresponse == "unrar error",
metaresponse == "corrupt",
]
):
logger.error(
"%s This is a corrupt archive - whether CRC errors or it is incomplete. Marking as BAD, and retrying it."
% module
)
continue
# launch failed download handling here.
elif metaresponse.startswith("file not found"):
filename_in_error = metaresponse.split("||")[1]
self._log(
"The file cannot be found in the location provided for metatagging to be used [%s]. Please verify it exists, and re-run if necessary. Attempting to continue without metatagging..."
% (filename_in_error)
)
logger.error(
"%s The file cannot be found in the location provided for metatagging to be used [%s]. Please verify it exists, and re-run if necessary. Attempting to continue without metatagging..."
% (module, filename_in_error)
)
else:
odir = os.path.split(metaresponse)[0]
ofilename = os.path.split(metaresponse)[1]
ext = os.path.splitext(metaresponse)[1]
logger.info(
"%s Sucessfully wrote metadata to .cbz (%s) - Continuing.."
% (module, ofilename)
)
self._log(
"Sucessfully wrote metadata to .cbz (%s) - proceeding..."
% ofilename
)
dfilename = ofilename
else:
dfilename = ml["Filename"]
if metaresponse:
src_location = odir
grab_src = os.path.join(src_location, ofilename)
else:
src_location = ofilename
grab_src = ofilename
logger.fdebug("%s Source Path : %s" % (module, grab_src))
checkdirectory = filechecker.validateAndCreateDirectory(
grdst, True, module=module
)
if not checkdirectory:
logger.warn(
"%s Error trying to validate/create directory. Aborting this process at this time."
% module
)
self.valreturn.append(
{"self.log": self.log, "mode": "stop"}
)
return self.queue.put(self.valreturn)
# send to renamer here if valid.
if mylar.CONFIG.RENAME_FILES:
renamed_file = helpers.rename_param(
ml["ComicID"],
ml["ComicName"],
ml["IssueNumber"],
dfilename,
issueid=ml["IssueID"],
arc=ml["StoryArc"],
)
if renamed_file:
dfilename = renamed_file["nfilename"]
logger.fdebug(
"%s Renaming file to conform to configuration: %s"
% (module, ofilename)
)
# if from a StoryArc, check to see if we're appending the ReadingOrder to the filename
if mylar.CONFIG.READ2FILENAME:
logger.fdebug(
"%s readingorder#: %s" % (module, ml["ReadingOrder"])
)
if int(ml["ReadingOrder"]) < 10:
readord = "00" + str(ml["ReadingOrder"])
elif (
int(ml["ReadingOrder"]) >= 10
and int(ml["ReadingOrder"]) <= 99
):
readord = "0" + str(ml["ReadingOrder"])
else:
readord = str(ml["ReadingOrder"])
dfilename = str(readord) + "-" + os.path.split(dfilename)[1]
grab_dst = os.path.join(grdst, dfilename)
logger.fdebug("%s Destination Path : %s" % (module, grab_dst))
logger.fdebug("%s Source Path : %s" % (module, grab_src))
logger.info(
"%s[ONE-OFF MODE][%s] %s into directory : %s"
% (
module,
mylar.CONFIG.ARC_FILEOPS.upper(),
grab_src,
grab_dst,
)
)
# this is also for issues that are part of a story arc, and don't belong to a watchlist series (ie. one-off's)
try:
checkspace = helpers.get_free_space(grdst)
if checkspace is False:
if all(
[metaresponse is not None, metaresponse != "fail"]
): # meta was done
self.tidyup(src_location, True, cacheonly=True)
raise OSError
fileoperation = helpers.file_ops(
grab_src, grab_dst, one_off=True
)
if not fileoperation:
raise OSError
except Exception as e:
logger.error(
"%s [ONE-OFF MODE] Failed to %s %s: %s"
% (module, mylar.CONFIG.ARC_FILEOPS, grab_src, e)
)
return
# tidyup old path
if any(
[
mylar.CONFIG.FILE_OPTS == "move",
mylar.CONFIG.FILE_OPTS == "copy",
]
):
self.tidyup(src_location, True, filename=orig_filename)
# delete entry from nzblog table
# if it was downloaded via mylar from the storyarc section, it will have an 'S' in the nzblog
# if it was downloaded outside of mylar and/or not from the storyarc section, it will be a normal issueid in the nzblog
# IssArcID = 'S' + str(ml['IssueArcID'])
myDB.action(
"DELETE from nzblog WHERE IssueID=? AND SARC=?",
["S" + str(ml["IssueArcID"]), ml["StoryArc"]],
)
myDB.action(
"DELETE from nzblog WHERE IssueID=? AND SARC=?",
[ml["IssueArcID"], ml["StoryArc"]],
)
logger.fdebug("%s IssueArcID: %s" % (module, ml["IssueArcID"]))
newVal = {"Status": "Downloaded", "Location": grab_dst}
else:
newVal = {
"Status": "Downloaded",
"Location": ml["ComicLocation"],
}
ctrlVal = {"IssueArcID": ml["IssueArcID"]}
logger.fdebug("writing: %s -- %s" % (newVal, ctrlVal))
myDB.upsert("storyarcs", newVal, ctrlVal)
if all(
[
mylar.CONFIG.STORYARCDIR is True,
mylar.CONFIG.COPY2ARCDIR is True,
]
):
logger.fdebug(
"%s [%s] Post-Processing completed for: %s"
% (module, ml["StoryArc"], grab_dst)
)
else:
logger.fdebug(
"%s [%s] Post-Processing completed for: %s"
% (module, ml["StoryArc"], ml["ComicLocation"])
)
if (
all([self.nzb_name != "Manual Run", self.apicall is False])
or (
self.oneoffinlist is True
or all([self.issuearcid is not None, self.issueid is None])
)
) and not self.nzb_name.startswith(
"0-Day"
): # and all([self.issueid is None, self.comicid is None, self.apicall is False]):
ppinfo = []
if self.oneoffinlist is False:
self.oneoff = False
if any([self.issueid is not None, self.issuearcid is not None]):
if self.issuearcid is not None:
s_id = self.issuearcid
else:
s_id = self.issueid
nzbiss = myDB.selectone(
"SELECT * FROM nzblog WHERE IssueID=?", [s_id]
).fetchone()
if nzbiss is None and self.issuearcid is not None:
nzbiss = myDB.selectone(
"SELECT * FROM nzblog WHERE IssueID=?", ["S" + s_id]
).fetchone()
else:
nzbname = self.nzb_name
# remove extensions from nzb_name if they somehow got through (Experimental most likely)
if nzbname.lower().endswith(self.extensions):
fd, ext = os.path.splitext(nzbname)
self._log("Removed extension from nzb: " + ext)
nzbname = re.sub(str(ext), "", str(nzbname))
# replace spaces
# let's change all space to decimals for simplicity
logger.fdebug("[NZBNAME]: " + nzbname)
# gotta replace & or escape it
nzbname = re.sub("\&", "and", nzbname)
nzbname = re.sub("[\,\:\?'\+]", "", nzbname)
nzbname = re.sub("[\(\)]", " ", nzbname)
logger.fdebug("[NZBNAME] nzbname (remove chars): " + nzbname)
nzbname = re.sub(".cbr", "", nzbname).strip()
nzbname = re.sub(".cbz", "", nzbname).strip()
nzbname = re.sub("[\.\_]", " ", nzbname).strip()
nzbname = re.sub(
"\s+", " ", nzbname
) # make sure we remove the extra spaces.
logger.fdebug(
"[NZBNAME] nzbname (remove extensions, double spaces, convert underscores to spaces): "
+ nzbname
)
nzbname = re.sub("\s", ".", nzbname)
logger.fdebug(
"%s After conversions, nzbname is : %s" % (module, nzbname)
)
self._log("nzbname: %s" % nzbname)
nzbiss = myDB.selectone(
"SELECT * from nzblog WHERE nzbname=? or altnzbname=?",
[nzbname, nzbname],
).fetchone()
if nzbiss is None:
self._log(
"Failure - could not initially locate nzbfile in my database to rename."
)
logger.fdebug(
"%s Failure - could not locate nzbfile initially" % module
)
# if failed on spaces, change it all to decimals and try again.
nzbname = re.sub("[\(\)]", "", str(nzbname))
self._log("trying again with this nzbname: %s" % nzbname)
logger.fdebug(
"%s Trying to locate nzbfile again with nzbname of : %s"
% (module, nzbname)
)
nzbiss = myDB.selectone(
"SELECT * from nzblog WHERE nzbname=? or altnzbname=?",
[nzbname, nzbname],
).fetchone()
if nzbiss is None:
logger.error(
"%s Unable to locate downloaded file within items I have snatched. Attempting to parse the filename directly and process."
% module
)
# set it up to run manual post-processing on self.nzb_folder
self._log(
"Unable to locate downloaded file within items I have snatched. Attempting to parse the filename directly and process."
)
self.valreturn.append(
{"self.log": self.log, "mode": "outside"}
)
return self.queue.put(self.valreturn)
else:
self._log("I corrected and found the nzb as : %s" % nzbname)
logger.fdebug(
"%s Auto-corrected and found the nzb as : %s"
% (module, nzbname)
)
# issueid = nzbiss['IssueID']
issueid = nzbiss["IssueID"]
logger.fdebug("%s Issueid: %s" % (module, issueid))
sarc = nzbiss["SARC"]
self.oneoff = nzbiss["OneOff"]
tmpiss = myDB.selectone(
"SELECT * FROM issues WHERE IssueID=?", [issueid]
).fetchone()
if tmpiss is None:
tmpiss = myDB.selectone(
"SELECT * FROM annuals WHERE IssueID=?", [issueid]
).fetchone()
comicid = None
comicname = None
issuenumber = None
if tmpiss is not None:
ppinfo.append(
{
"comicid": tmpiss["ComicID"],
"issueid": issueid,
"comicname": tmpiss["ComicName"],
"issuenumber": tmpiss["Issue_Number"],
"comiclocation": None,
"publisher": None,
"sarc": sarc,
"oneoff": self.oneoff,
}
)
elif all([self.oneoff is not None, issueid[0] == "S"]):
issuearcid = re.sub("S", "", issueid).strip()
oneinfo = myDB.selectone(
"SELECT * FROM storyarcs WHERE IssueArcID=?", [issuearcid]
).fetchone()
if oneinfo is None:
logger.warn(
"Unable to locate issue as previously snatched arc issue - it might be something else..."
)
self._log(
"Unable to locate issue as previously snatched arc issue - it might be something else..."
)
else:
# reverse lookup the issueid here to see if it possible exists on watchlist...
tmplookup = myDB.selectone(
"SELECT * FROM comics WHERE ComicID=?", [oneinfo["ComicID"]]
).fetchone()
if tmplookup is not None:
logger.fdebug(
"[WATCHLIST-DETECTION-%s] Processing as Arc, detected on watchlist - will PP for both."
% tmplookup["ComicName"]
)
self.oneoff = False
else:
self.oneoff = True
ppinfo.append(
{
"comicid": oneinfo["ComicID"],
"comicname": oneinfo["ComicName"],
"issuenumber": oneinfo["IssueNumber"],
"publisher": oneinfo["IssuePublisher"],
"comiclocation": None,
"issueid": issueid, # need to keep it so the 'S' is present to denote arc.
"sarc": sarc,
"oneoff": self.oneoff,
}
)
if all(
[
len(ppinfo) == 0,
self.oneoff is not None,
mylar.CONFIG.ALT_PULL == 2,
]
):
oneinfo = myDB.selectone(
"SELECT * FROM weekly WHERE IssueID=?", [issueid]
).fetchone()
if oneinfo is None:
oneinfo = myDB.selectone(
"SELECT * FROM oneoffhistory WHERE IssueID=?", [issueid]
).fetchone()
if oneinfo is None:
logger.warn(
"Unable to locate issue as previously snatched one-off"
)
self._log(
"Unable to locate issue as previously snatched one-off"
)
self.valreturn.append(
{"self.log": self.log, "mode": "stop"}
)
return self.queue.put(self.valreturn)
else:
OComicname = oneinfo["ComicName"]
OIssue = oneinfo["IssueNumber"]
OPublisher = None
else:
OComicname = oneinfo["COMIC"]
OIssue = oneinfo["ISSUE"]
OPublisher = oneinfo["PUBLISHER"]
ppinfo.append(
{
"comicid": oneinfo["ComicID"],
"comicname": OComicname,
"issuenumber": OIssue,
"publisher": OPublisher,
"comiclocation": None,
"issueid": issueid,
"sarc": None,
"oneoff": True,
}
)
self.oneoff = True
# logger.info(module + ' Discovered %s # %s by %s [comicid:%s][issueid:%s]' % (comicname, issuenumber, publisher, comicid, issueid))
# use issueid to get publisher, series, year, issue number
else:
for x in oneoff_issuelist:
if x["One-Off"] is True:
oneinfo = myDB.selectone(
"SELECT * FROM weekly WHERE IssueID=?", [x["IssueID"]]
).fetchone()
if oneinfo is not None:
ppinfo.append(
{
"comicid": oneinfo["ComicID"],
"comicname": oneinfo["COMIC"],
"issuenumber": oneinfo["ISSUE"],
"publisher": oneinfo["PUBLISHER"],
"issueid": x["IssueID"],
"comiclocation": x["ComicLocation"],
"sarc": None,
"oneoff": x["One-Off"],
}
)
self.oneoff = True
if len(ppinfo) > 0:
for pp in ppinfo:
logger.info("[PPINFO-POST-PROCESSING-ATTEMPT] %s" % pp)
self.nzb_or_oneoff_pp(tinfo=pp)
if any(
[
self.nzb_name == "Manual Run",
self.issueid is not None,
self.comicid is not None,
self.apicall is True,
]
):
# loop through the hits here.
if len(manual_list) == 0 and len(manual_arclist) == 0:
if self.nzb_name == "Manual Run":
logger.info("%s No matches for Manual Run ... exiting." % module)
if mylar.APILOCK is True:
mylar.APILOCK = False
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
elif len(manual_arclist) > 0 and len(manual_list) == 0:
logger.info(
"%s Manual post-processing completed for %s story-arc issues."
% (module, len(manual_arclist))
)
if mylar.APILOCK is True:
mylar.APILOCK = False
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
elif len(manual_arclist) > 0:
logger.info(
"%s Manual post-processing completed for %s story-arc issues."
% (module, len(manual_arclist))
)
i = 0
for ml in manual_list:
i += 1
comicid = ml["ComicID"]
issueid = ml["IssueID"]
issuenumOG = ml["IssueNumber"]
# check to see if file is still being written to.
waiting = True
while waiting is True:
try:
ctime = max(
os.path.getctime(ml["ComicLocation"]),
os.path.getmtime(ml["ComicLocation"]),
)
if time.time() > ctime > time.time() - 10:
time.sleep(max(time.time() - ctime, 0))
else:
break
except:
# file is no longer present in location / can't be accessed.
break
dupthis = helpers.duplicate_filecheck(
ml["ComicLocation"], ComicID=comicid, IssueID=issueid
)
if dupthis["action"] == "dupe_src" or dupthis["action"] == "dupe_file":
# check if duplicate dump folder is enabled and if so move duplicate file in there for manual intervention.
#'dupe_file' - do not write new file as existing file is better quality
#'dupe_src' - write new file, as existing file is a lesser quality (dupe)
if mylar.CONFIG.DDUMP and not all(
[
mylar.CONFIG.DUPLICATE_DUMP is None,
mylar.CONFIG.DUPLICATE_DUMP == "",
]
): # DUPLICATE_DUMP
dupchkit = self.duplicate_process(dupthis)
if dupchkit == False:
logger.warn(
"Unable to move duplicate file - skipping post-processing of this file."
)
continue
if any([dupthis["action"] == "write", dupthis["action"] == "dupe_src"]):
stat = " [%s/%s]" % (i, len(manual_list))
self.Process_next(comicid, issueid, issuenumOG, ml, stat)
dupthis = None
if self.failed_files == 0:
if all([self.comicid is not None, self.issueid is None]):
logger.info(
"%s post-processing of pack completed for %s issues."
% (module, i)
)
if self.issueid is not None:
if ml["AnnualType"] is not None:
logger.info(
"%s direct post-processing of issue completed for %s %s #%s."
% (
module,
ml["ComicName"],
ml["AnnualType"],
ml["IssueNumber"],
)
)
else:
logger.info(
"%s direct post-processing of issue completed for %s #%s."
% (module, ml["ComicName"], ml["IssueNumber"])
)
else:
logger.info(
"%s Manual post-processing completed for %s issues."
% (module, i)
)
else:
if self.comicid is not None:
logger.info(
"%s post-processing of pack completed for %s issues [FAILED: %s]"
% (module, i, self.failed_files)
)
else:
logger.info(
"%s Manual post-processing completed for %s issues [FAILED: %s]"
% (module, i, self.failed_files)
)
if mylar.APILOCK is True:
mylar.APILOCK = False
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
else:
pass
def nzb_or_oneoff_pp(self, tinfo=None, manual=None):
module = self.module
myDB = db.DBConnection()
manual_list = None
if tinfo is not None: # manual is None:
sandwich = None
issueid = tinfo["issueid"]
comicid = tinfo["comicid"]
comicname = tinfo["comicname"]
issuearcid = None
issuenumber = tinfo["issuenumber"]
publisher = tinfo["publisher"]
sarc = tinfo["sarc"]
oneoff = tinfo["oneoff"]
if all([oneoff is True, tinfo["comiclocation"] is not None]):
location = os.path.abspath(
os.path.join(tinfo["comiclocation"], os.pardir)
)
else:
location = self.nzb_folder
annchk = "no"
issuenzb = myDB.selectone(
"SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [issueid]
).fetchone()
if issuenzb is None:
logger.info(
"%s Could not detect as a standard issue - checking against annuals."
% module
)
issuenzb = myDB.selectone(
"SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL",
[issueid],
).fetchone()
if issuenzb is None:
logger.info("%s issuenzb not found." % module)
# if it's non-numeric, it contains a 'G' at the beginning indicating it's a multi-volume
# using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
if "S" in issueid:
sandwich = issueid
if oneoff is False:
onechk = myDB.selectone(
"SELECT * FROM storyarcs WHERE IssueArcID=?",
[re.sub("S", "", issueid).strip()],
).fetchone()
if onechk is not None:
issuearcid = onechk["IssueArcID"]
issuenzb = myDB.selectone(
"SELECT * FROM issues WHERE IssueID=? AND ComicName NOT NULL",
[onechk["IssueID"]],
).fetchone()
if issuenzb is None:
issuenzb = myDB.selectone(
"SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL",
[onechk["IssueID"]],
).fetchone()
if issuenzb is not None:
issueid = issuenzb["IssueID"]
logger.fdebug(
"Reverse lookup discovered watchlisted series [issueid: %s] - adjusting so we can PP both properly."
% issueid
)
elif "G" in issueid or "-" in issueid:
sandwich = 1
elif any([oneoff is True, issueid >= "900000", issueid == "1"]):
logger.info(
"%s [ONE-OFF POST-PROCESSING] One-off download detected. Post-processing as a non-watchlist item."
% module
)
sandwich = None # arbitrarily set it to None just to force one-off downloading below.
else:
logger.error(
"%s Unable to locate downloaded file as being initiated via Mylar. Attempting to parse the filename directly and process."
% module
)
self._log(
"Unable to locate downloaded file within items I have snatched. Attempting to parse the filename directly and process."
)
self.valreturn.append({"self.log": self.log, "mode": "outside"})
return self.queue.put(self.valreturn)
else:
logger.info(
"%s Successfully located issue as an annual. Continuing."
% module
)
annchk = "yes"
if issuenzb is not None:
logger.info("%s issuenzb found." % module)
if helpers.is_number(issueid):
sandwich = int(issuenzb["IssueID"])
if all([sandwich is not None, helpers.is_number(sandwich), sarc is None]):
if sandwich < 900000:
# if sandwich is less than 900000 it's a normal watchlist download. Bypass.
pass
else:
if (
any([oneoff is True, issuenzb is None])
or all([sandwich is not None, "S" in str(sandwich), oneoff is True])
or int(sandwich) >= 900000
):
# this has no issueID, therefore it's a one-off or a manual post-proc.
# At this point, let's just drop it into the Comic Location folder and forget about it..
if sandwich is not None and "S" in sandwich:
self._log(
"One-off STORYARC mode enabled for Post-Processing for %s"
% sarc
)
logger.info(
"%s One-off STORYARC mode enabled for Post-Processing for %s"
% (module, sarc)
)
else:
self._log(
"One-off mode enabled for Post-Processing. All I'm doing is moving the file untouched into the Grab-bag directory."
)
if mylar.CONFIG.GRABBAG_DIR is None:
mylar.CONFIG.GRABBAG_DIR = os.path.join(
mylar.CONFIG.DESTINATION_DIR, "Grabbag"
)
logger.info(
"%s One-off mode enabled for Post-Processing. Will move into Grab-bag directory: %s"
% (module, mylar.CONFIG.GRABBAG_DIR)
)
self._log(
"Grab-Bag Directory set to : %s" % mylar.CONFIG.GRABBAG_DIR
)
grdst = mylar.CONFIG.GRABBAG_DIR
odir = location
if odir is None:
odir = self.nzb_folder
ofilename = orig_filename = tinfo["comiclocation"]
if ofilename is not None:
path, ext = os.path.splitext(ofilename)
else:
# os.walk the location to get the filename...(coming from sab kinda thing) where it just passes the path.
for root, dirnames, filenames in os.walk(
odir, followlinks=True
):
for filename in filenames:
if filename.lower().endswith(self.extensions):
ofilename = orig_filename = filename
logger.fdebug(
"%s Valid filename located as : %s"
% (module, ofilename)
)
path, ext = os.path.splitext(ofilename)
break
if ofilename is None:
logger.error(
"%s Unable to post-process file as it is not in a valid cbr/cbz format or cannot be located in path. PostProcessing aborted."
% module
)
self._log(
"Unable to locate downloaded file to rename. PostProcessing aborted."
)
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
if sandwich is not None and "S" in sandwich:
issuearcid = re.sub("S", "", issueid)
logger.fdebug("%s issuearcid:%s" % (module, issuearcid))
arcdata = myDB.selectone(
"SELECT * FROM storyarcs WHERE IssueArcID=?", [issuearcid]
).fetchone()
if arcdata is None:
logger.warn(
"%s Unable to locate issue within Story Arcs. Cannot post-process at this time - try to Refresh the Arc and manual post-process if necessary."
% module
)
self._log(
"Unable to locate issue within Story Arcs in orde to properly assign metadata. PostProcessing aborted."
)
self.valreturn.append(
{"self.log": self.log, "mode": "stop"}
)
return self.queue.put(self.valreturn)
if arcdata["Publisher"] is None:
arcpub = arcdata["IssuePublisher"]
else:
arcpub = arcdata["Publisher"]
grdst = helpers.arcformat(
arcdata["StoryArc"],
helpers.spantheyears(arcdata["StoryArcID"]),
arcpub,
)
if comicid is None:
comicid = arcdata["ComicID"]
if comicname is None:
comicname = arcdata["ComicName"]
if issuenumber is None:
issuenumber = arcdata["IssueNumber"]
issueid = arcdata["IssueID"]
# tag the meta.
metaresponse = None
crcvalue = helpers.crc(os.path.join(location, ofilename))
# if a one-off download from the pull-list, will not have an issueid associated with it, and will fail to due conversion/tagging.
# if altpull/2 method is being used, issueid may already be present so conversion/tagging is possible with some additional fixes.
if all([mylar.CONFIG.ENABLE_META, issueid is not None]):
self._log("Metatagging enabled - proceeding...")
try:
import cmtagmylar
metaresponse = cmtagmylar.run(
location,
issueid=issueid,
filename=os.path.join(self.nzb_folder, ofilename),
)
except ImportError:
logger.warn(
"%s comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/"
% module
)
metaresponse = "fail"
if metaresponse == "fail":
logger.fdebug(
"%s Unable to write metadata successfully - check mylar.log file. Attempting to continue without metatagging..."
% module
)
elif any(
[metaresponse == "unrar error", metaresponse == "corrupt"]
):
logger.error(
"%s This is a corrupt archive - whether CRC errors or it is incomplete. Marking as BAD, and retrying it."
% module
)
# launch failed download handling here.
elif metaresponse.startswith("file not found"):
filename_in_error = metaresponse.split("||")[1]
self._log(
"The file cannot be found in the location provided for metatagging [%s]. Please verify it exists, and re-run if necessary."
% filename_in_error
)
logger.error(
"%s The file cannot be found in the location provided for metagging [%s]. Please verify it exists, and re-run if necessary."
% (module, filename_in_error)
)
else:
odir = os.path.split(metaresponse)[0]
ofilename = os.path.split(metaresponse)[1]
ext = os.path.splitext(metaresponse)[1]
logger.info(
"%s Sucessfully wrote metadata to .cbz (%s) - Continuing.."
% (module, ofilename)
)
self._log(
"Sucessfully wrote metadata to .cbz (%s) - proceeding..."
% ofilename
)
dfilename = ofilename
if metaresponse:
src_location = odir
else:
src_location = location
grab_src = os.path.join(src_location, ofilename)
self._log("Source Path : %s" % grab_src)
logger.info("%s Source Path : %s" % (module, grab_src))
checkdirectory = filechecker.validateAndCreateDirectory(
grdst, True, module=module
)
if not checkdirectory:
logger.warn(
"%s Error trying to validate/create directory. Aborting this process at this time."
% module
)
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
# send to renamer here if valid.
if mylar.CONFIG.RENAME_FILES:
renamed_file = helpers.rename_param(
comicid,
comicname,
issuenumber,
dfilename,
issueid=issueid,
arc=sarc,
)
if renamed_file:
dfilename = renamed_file["nfilename"]
logger.fdebug(
"%s Renaming file to conform to configuration: %s"
% (module, dfilename)
)
if sandwich is not None and "S" in sandwich:
# if from a StoryArc, check to see if we're appending the ReadingOrder to the filename
if mylar.CONFIG.READ2FILENAME:
logger.fdebug(
"%s readingorder#: %s"
% (module, arcdata["ReadingOrder"])
)
if int(arcdata["ReadingOrder"]) < 10:
readord = "00" + str(arcdata["ReadingOrder"])
elif (
int(arcdata["ReadingOrder"]) >= 10
and int(arcdata["ReadingOrder"]) <= 99
):
readord = "0" + str(arcdata["ReadingOrder"])
else:
readord = str(arcdata["ReadingOrder"])
dfilename = str(readord) + "-" + dfilename
else:
dfilename = ofilename
grab_dst = os.path.join(grdst, dfilename)
else:
grab_dst = os.path.join(grdst, dfilename)
if not os.path.exists(grab_dst) or grab_src == grab_dst:
# if it hits this, ofilename is the full path so we need to extract just the filename to path it back to a possible grab_bag dir
grab_dst = os.path.join(grdst, os.path.split(dfilename)[1])
self._log("Destination Path : %s" % grab_dst)
logger.info("%s Destination Path : %s" % (module, grab_dst))
logger.info(
"%s[%s] %s into directory : %s"
% (module, mylar.CONFIG.FILE_OPTS, ofilename, grab_dst)
)
try:
checkspace = helpers.get_free_space(grdst)
if checkspace is False:
if all(
[metaresponse != "fail", metaresponse is not None]
): # meta was done
self.tidyup(src_location, True, cacheonly=True)
raise OSError
fileoperation = helpers.file_ops(grab_src, grab_dst)
if not fileoperation:
raise OSError
except Exception as e:
logger.error(
"%s Failed to %s %s: %s"
% (module, mylar.CONFIG.FILE_OPTS, grab_src, e)
)
self._log(
"Failed to %s %s: %s"
% (mylar.CONFIG.FILE_OPTS, grab_src, e)
)
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
# tidyup old path
if any(
[
mylar.CONFIG.FILE_OPTS == "move",
mylar.CONFIG.FILE_OPTS == "copy",
]
):
self.tidyup(src_location, True, filename=orig_filename)
# delete entry from nzblog table
myDB.action("DELETE from nzblog WHERE issueid=?", [issueid])
if sandwich is not None and "S" in sandwich:
logger.info("%s IssueArcID is : %s" % (module, issuearcid))
ctrlVal = {"IssueArcID": issuearcid}
newVal = {"Status": "Downloaded", "Location": grab_dst}
myDB.upsert("storyarcs", newVal, ctrlVal)
logger.info("%s Updated status to Downloaded" % module)
logger.info(
"%s Post-Processing completed for: [%s] %s"
% (module, sarc, grab_dst)
)
self._log("Post Processing SUCCESSFUL! ")
elif oneoff is True:
logger.info("%s IssueID is : %s" % (module, issueid))
ctrlVal = {"IssueID": issueid}
newVal = {"Status": "Downloaded"}
logger.info(
"%s Writing to db: %s -- %s" % (module, newVal, ctrlVal)
)
myDB.upsert("weekly", newVal, ctrlVal)
logger.info("%s Updated status to Downloaded" % module)
myDB.upsert("oneoffhistory", newVal, ctrlVal)
logger.info(
"%s Updated history for one-off's for tracking purposes"
% module
)
logger.info(
"%s Post-Processing completed for: [ %s #%s ] %s"
% (module, comicname, issuenumber, grab_dst)
)
self._log("Post Processing SUCCESSFUL! ")
imageUrl = myDB.select(
"SELECT ImageURL from issues WHERE IssueID=?", [issueid]
)
if imageUrl:
imageUrl = imageUrl[0][0]
try:
self.sendnotify(
comicname,
issueyear=None,
issuenumOG=issuenumber,
annchk=annchk,
module=module,
imageUrl=imageUrl,
)
except:
pass
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
else:
try:
len(manual_arclist)
except:
manual_arclist = []
if tinfo["comiclocation"] is None:
cloc = self.nzb_folder
else:
cloc = tinfo["comiclocation"]
clocation = cloc
if os.path.isdir(cloc):
for root, dirnames, filenames in os.walk(
cloc, followlinks=True
):
for filename in filenames:
if filename.lower().endswith(self.extensions):
clocation = os.path.join(root, filename)
manual_list = {
"ComicID": tinfo["comicid"],
"IssueID": tinfo["issueid"],
"ComicLocation": clocation,
"SARC": tinfo["sarc"],
"IssueArcID": issuearcid,
"ComicName": tinfo["comicname"],
"IssueNumber": tinfo["issuenumber"],
"Publisher": tinfo["publisher"],
"OneOff": tinfo["oneoff"],
"ForcedMatch": False,
}
else:
manual_list = manual
if self.nzb_name == "Manual Run":
# loop through the hits here.
if len(manual_list) == 0 and len(manual_arclist) == 0:
logger.info("%s No matches for Manual Run ... exiting." % module)
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
elif len(manual_arclist) > 0 and len(manual_list) == 0:
logger.info(
"%s Manual post-processing completed for %s story-arc issues."
% (module, len(manual_arclist))
)
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
elif len(manual_arclist) > 0:
logger.info(
"%s Manual post-processing completed for %s story-arc issues."
% (module, len(manual_arclist))
)
# Process every matched file from the Manual Run, one at a time.
i = 0
for ml in manual_list:
    i += 1
    comicid = ml["ComicID"]
    issueid = ml["IssueID"]
    issuenumOG = ml["IssueNumber"]
    # check to see if file is still being written to.
    # Poll ctime/mtime: if the file changed within the last 10s, sleep and
    # re-check; leave the wait loop once it has settled (or disappeared).
    while True:
        waiting = False
        try:
            ctime = max(
                os.path.getctime(ml["ComicLocation"]),
                os.path.getmtime(ml["ComicLocation"]),
            )
            if time.time() > ctime > time.time() - 10:
                time.sleep(max(time.time() - ctime, 0))
                waiting = True
            else:
                break
        except:
            # file is no longer present in location / can't be accessed.
            break
    dupthis = helpers.duplicate_filecheck(
        ml["ComicLocation"], ComicID=comicid, IssueID=issueid
    )
    if dupthis["action"] == "dupe_src" or dupthis["action"] == "dupe_file":
        # check if duplicate dump folder is enabled and if so move duplicate file in there for manual intervention.
        #'dupe_file' - do not write new file as existing file is better quality
        #'dupe_src' - write new file, as existing file is a lesser quality (dupe)
        if mylar.CONFIG.DDUMP and not all(
            [
                mylar.CONFIG.DUPLICATE_DUMP is None,
                mylar.CONFIG.DUPLICATE_DUMP == "",
            ]
        ):  # DUPLICATE_DUMP
            dupchkit = self.duplicate_process(dupthis)
            if dupchkit == False:
                logger.warn(
                    "Unable to move duplicate file - skipping post-processing of this file."
                )
                continue
    if any([dupthis["action"] == "write", dupthis["action"] == "dupe_src"]):
        stat = " [%s/%s]" % (i, len(manual_list))
        self.Process_next(comicid, issueid, issuenumOG, ml, stat)
        dupthis = None
# Summarize the run (including any failures) and signal the queue to stop.
if self.failed_files == 0:
    logger.info(
        "%s Manual post-processing completed for %s issues." % (module, i)
    )
else:
    logger.info(
        "%s Manual post-processing completed for %s issues [FAILED: %s]"
        % (module, i, self.failed_files)
    )
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
else:
comicid = issuenzb["ComicID"]
issuenumOG = issuenzb["Issue_Number"]
# the self.nzb_folder should contain only the existing filename
dupthis = helpers.duplicate_filecheck(
self.nzb_folder, ComicID=comicid, IssueID=issueid
)
if dupthis["action"] == "dupe_src" or dupthis["action"] == "dupe_file":
# check if duplicate dump folder is enabled and if so move duplicate file in there for manual intervention.
#'dupe_file' - do not write new file as existing file is better quality
#'dupe_src' - write new file, as existing file is a lesser quality (dupe)
if mylar.CONFIG.DUPLICATE_DUMP:
if mylar.CONFIG.DDUMP and not all(
[
mylar.CONFIG.DUPLICATE_DUMP is None,
mylar.CONFIG.DUPLICATE_DUMP == "",
]
):
dupchkit = self.duplicate_process(dupthis)
if dupchkit == False:
logger.warn(
"Unable to move duplicate file - skipping post-processing of this file."
)
self.valreturn.append(
{
"self.log": self.log,
"mode": "stop",
"issueid": issueid,
"comicid": comicid,
}
)
return self.queue.put(self.valreturn)
if dupthis["action"] == "write" or dupthis["action"] == "dupe_src":
if manual_list is None:
return self.Process_next(comicid, issueid, issuenumOG)
else:
logger.info(
"Post-processing issue is found in more than one destination - let us do this!"
)
return self.Process_next(comicid, issueid, issuenumOG, manual_list)
else:
self.valreturn.append(
{
"self.log": self.log,
"mode": "stop",
"issueid": issueid,
"comicid": comicid,
}
)
return self.queue.put(self.valreturn)
def Process_next(self, comicid, issueid, issuenumOG, ml=None, stat=None):
if stat is None:
stat = " [1/1]"
module = self.module
annchk = "no"
snatchedtorrent = False
myDB = db.DBConnection()
comicnzb = myDB.selectone(
"SELECT * from comics WHERE comicid=?", [comicid]
).fetchone()
issuenzb = myDB.selectone(
"SELECT * from issues WHERE issueid=? AND comicid=? AND ComicName NOT NULL",
[issueid, comicid],
).fetchone()
if ml is not None and mylar.CONFIG.SNATCHEDTORRENT_NOTIFY:
snatchnzb = myDB.selectone(
"SELECT * from snatched WHERE IssueID=? AND ComicID=? AND (provider=? OR provider=? OR provider=? OR provider=?) AND Status='Snatched'",
[issueid, comicid, "TPSE", "DEM", "WWT", "32P"],
).fetchone()
if snatchnzb is None:
logger.fdebug(
"%s Was not snatched as a torrent. Using manual post-processing."
% module
)
else:
logger.fdebug(
"%s Was downloaded from %s. Enabling torrent manual post-processing completion notification."
% (module, snatchnzb["Provider"])
)
if issuenzb is None:
issuenzb = myDB.selectone(
"SELECT * from annuals WHERE issueid=? and comicid=?",
[issueid, comicid],
).fetchone()
annchk = "yes"
if annchk == "no":
logger.info(
"%s %s Starting Post-Processing for %s issue: %s"
% (module, stat, issuenzb["ComicName"], issuenzb["Issue_Number"])
)
else:
logger.info(
"%s %s Starting Post-Processing for %s issue: %s"
% (module, stat, issuenzb["ReleaseComicName"], issuenzb["Issue_Number"])
)
logger.fdebug("%s issueid: %s" % (module, issueid))
logger.fdebug("%s issuenumOG: %s" % (module, issuenumOG))
# issueno = str(issuenum).split('.')[0]
# new CV API - removed all decimals...here we go AGAIN!
# Normalize the ComicVine issue number: strip known alpha suffixes
# (AU/AI/INH/NOW/MU/HU or single letters) into issue_except and map
# unicode fractions / infinity to numeric strings.
issuenum = issuenzb["Issue_Number"]
issue_except = "None"
if "au" in issuenum.lower() and issuenum[:1].isdigit():
    # e.g. '12AU' -> issuenum '12', suffix remembered as ' AU'
    issuenum = re.sub("[^0-9]", "", issuenum)
    issue_except = " AU"
elif "ai" in issuenum.lower() and issuenum[:1].isdigit():
    issuenum = re.sub("[^0-9]", "", issuenum)
    issue_except = " AI"
elif "inh" in issuenum.lower() and issuenum[:1].isdigit():
    issuenum = re.sub("[^0-9]", "", issuenum)
    issue_except = ".INH"
elif "now" in issuenum.lower() and issuenum[:1].isdigit():
    if "!" in issuenum:
        issuenum = re.sub("\!", "", issuenum)
    issuenum = re.sub("[^0-9]", "", issuenum)
    issue_except = ".NOW"
elif "mu" in issuenum.lower() and issuenum[:1].isdigit():
    issuenum = re.sub("[^0-9]", "", issuenum)
    issue_except = ".MU"
elif "hu" in issuenum.lower() and issuenum[:1].isdigit():
    issuenum = re.sub("[^0-9]", "", issuenum)
    issue_except = ".HU"
elif "\xbd" in issuenum:
    # unicode one-half
    issuenum = "0.5"
elif "\xbc" in issuenum:
    # unicode one-quarter
    issuenum = "0.25"
elif "\xbe" in issuenum:
    # unicode three-quarters
    issuenum = "0.75"
elif "\u221e" in issuenum:
    # issnum = utf-8 will encode the infinity symbol without any help
    issuenum = "infinity"
else:
    issue_exceptions = ["A", "B", "C", "X", "O"]
    exceptionmatch = [
        x for x in issue_exceptions if x.lower() in issuenum.lower()
    ]
    if exceptionmatch:
        logger.fdebug("[FILECHECKER] We matched on : " + str(exceptionmatch))
        for x in exceptionmatch:
            issuenum = re.sub("[^0-9]", "", issuenum)
            issue_except = x
if "." in issuenum:
    # Split into integer part (iss_b4dec) and decimal part (iss_decval).
    iss_find = issuenum.find(".")
    iss_b4dec = issuenum[:iss_find]
    if iss_b4dec == "":
        iss_b4dec = "0"
    iss_decval = issuenum[iss_find + 1 :]
    if iss_decval.endswith("."):
        iss_decval = iss_decval[:-1]
    if int(iss_decval) == 0:
        # '.0' decimal - treat as a whole number.
        iss = iss_b4dec
        issdec = int(iss_decval)
        issueno = str(iss)
        self._log("Issue Number: %s" % issueno)
        logger.fdebug("%s Issue Number: %s" % (module, issueno))
    else:
        if len(iss_decval) == 1:
            iss = iss_b4dec + "." + iss_decval
            issdec = int(iss_decval) * 10
        else:
            iss = iss_b4dec + "." + iss_decval.rstrip("0")
            issdec = int(iss_decval.rstrip("0")) * 10
        issueno = iss_b4dec
        self._log("Issue Number: %s" % iss)
        logger.fdebug("%s Issue Number: %s" % (module, iss))
else:
    iss = issuenum
    issueno = iss
# issue zero-suppression here
if mylar.CONFIG.ZERO_LEVEL == "0":
    zeroadd = ""
else:
    # ZERO_LEVEL_N selects the zero-padding prefix for low issue numbers.
    if mylar.CONFIG.ZERO_LEVEL_N == "none":
        zeroadd = ""
    elif mylar.CONFIG.ZERO_LEVEL_N == "0x":
        zeroadd = "0"
    elif mylar.CONFIG.ZERO_LEVEL_N == "00x":
        zeroadd = "00"
logger.fdebug(
    "%s Zero Suppression set to : %s" % (module, mylar.CONFIG.ZERO_LEVEL_N)
)
prettycomiss = None
if issueno.isalpha():
    # Pure-alpha issue number (e.g. 'A') is used verbatim.
    logger.fdebug("issue detected as an alpha.")
    prettycomiss = str(issueno)
else:
    try:
        x = float(issueno)
        # validity check
        if x < 0:
            logger.info(
                "%s I've encountered a negative issue #: %s. Trying to accomodate"
                % (module, issueno)
            )
            prettycomiss = "-%s%s" % (zeroadd, issueno[1:])
        elif x >= 0:
            pass
        else:
            raise ValueError
    # NOTE(review): 'except ValueError, e' is Python 2-only syntax; it is a
    # SyntaxError under Python 3 - confirm this file still targets Python 2.
    except ValueError, e:
        logger.warn(
            "Unable to properly determine issue number [%s] - you should probably log this on github for help."
            % issueno
        )
        return
if prettycomiss is None and len(str(issueno)) > 0:
# if int(issueno) < 0:
# self._log("issue detected is a negative")
# prettycomiss = '-' + str(zeroadd) + str(abs(issueno))
if int(issueno) < 10:
logger.fdebug("issue detected less than 10")
if "." in iss:
if int(iss_decval) > 0:
issueno = str(iss)
prettycomiss = str(zeroadd) + str(iss)
else:
prettycomiss = str(zeroadd) + str(int(issueno))
else:
prettycomiss = str(zeroadd) + str(iss)
if issue_except != "None":
prettycomiss = str(prettycomiss) + issue_except
logger.fdebug(
"%s Zero level supplement set to %s. Issue will be set as : %s"
% (module, mylar.CONFIG.ZERO_LEVEL_N, prettycomiss)
)
elif int(issueno) >= 10 and int(issueno) < 100:
logger.fdebug("issue detected greater than 10, but less than 100")
if mylar.CONFIG.ZERO_LEVEL_N == "none":
zeroadd = ""
else:
zeroadd = "0"
if "." in iss:
if int(iss_decval) > 0:
issueno = str(iss)
prettycomiss = str(zeroadd) + str(iss)
else:
prettycomiss = str(zeroadd) + str(int(issueno))
else:
prettycomiss = str(zeroadd) + str(iss)
if issue_except != "None":
prettycomiss = str(prettycomiss) + issue_except
logger.fdebug(
"%s Zero level supplement set to %s. Issue will be set as : %s"
% (module, mylar.CONFIG.ZERO_LEVEL_N, prettycomiss)
)
else:
logger.fdebug("issue detected greater than 100")
if "." in iss:
if int(iss_decval) > 0:
issueno = str(iss)
prettycomiss = str(issueno)
if issue_except != "None":
prettycomiss = str(prettycomiss) + issue_except
logger.fdebug(
"%s Zero level supplement set to %s. Issue will be set as : %s"
% (module, mylar.CONFIG.ZERO_LEVEL_N, prettycomiss)
)
elif len(str(issueno)) == 0:
prettycomiss = str(issueno)
logger.fdebug(
"issue length error - cannot determine length. Defaulting to None: %s "
% prettycomiss
)
if annchk == "yes":
self._log("Annual detected.")
logger.fdebug("%s Pretty Comic Issue is : %s" % (module, prettycomiss))
issueyear = issuenzb["IssueDate"][:4]
self._log("Issue Year: %s" % issueyear)
logger.fdebug("%s Issue Year : %s" % (module, issueyear))
month = issuenzb["IssueDate"][5:7].replace("-", "").strip()
month_name = helpers.fullmonth(month)
if month_name is None:
month_name = "None"
publisher = comicnzb["ComicPublisher"]
self._log("Publisher: %s" % publisher)
logger.fdebug("%s Publisher: %s" % (module, publisher))
# we need to un-unicode this to make sure we can write the filenames properly for spec.chars
series = comicnzb["ComicName"].encode("ascii", "ignore").strip()
self._log("Series: %s" % series)
logger.fdebug("%s Series: %s" % (module, series))
if (
comicnzb["AlternateFileName"] is None
or comicnzb["AlternateFileName"] == "None"
):
seriesfilename = series
else:
seriesfilename = (
comicnzb["AlternateFileName"].encode("ascii", "ignore").strip()
)
logger.fdebug(
"%s Alternate File Naming has been enabled for this series. Will rename series to : %s"
% (module, seriesfilename)
)
seriesyear = comicnzb["ComicYear"]
self._log("Year: %s" % seriesyear)
logger.fdebug("%s Year: %s" % (module, seriesyear))
comlocation = comicnzb["ComicLocation"]
self._log("Comic Location: %s" % comlocation)
logger.fdebug("%s Comic Location: %s" % (module, comlocation))
comversion = comicnzb["ComicVersion"]
self._log("Comic Version: %s" % comversion)
logger.fdebug("%s Comic Version: %s" % (module, comversion))
if comversion is None:
comversion = "None"
# if comversion is None, remove it so it doesn't populate with 'None'
if comversion == "None":
chunk_f_f = re.sub("\$VolumeN", "", mylar.CONFIG.FILE_FORMAT)
chunk_f = re.compile(r"\s+")
chunk_file_format = chunk_f.sub(" ", chunk_f_f)
self._log(
"No version # found for series - tag will not be available for renaming."
)
logger.fdebug(
"%s No version # found for series, removing from filename" % module
)
logger.fdebug("%s New format is now: %s" % (module, chunk_file_format))
else:
chunk_file_format = mylar.CONFIG.FILE_FORMAT
if annchk == "no":
chunk_f_f = re.sub("\$Annual", "", chunk_file_format)
chunk_f = re.compile(r"\s+")
chunk_file_format = chunk_f.sub(" ", chunk_f_f)
logger.fdebug(
"%s Not an annual - removing from filename parameters" % module
)
logger.fdebug("%s New format: %s" % (module, chunk_file_format))
else:
logger.fdebug("%s Chunk_file_format is: %s" % (module, chunk_file_format))
if "$Annual" not in chunk_file_format:
# if it's an annual, but $Annual isn't specified in file_format, we need to
# force it in there, by default in the format of $Annual $Issue
prettycomiss = "Annual %s" % prettycomiss
logger.fdebug("%s prettycomiss: %s" % (module, prettycomiss))
ofilename = None
# if it's a Manual Run, use the ml['ComicLocation'] for the exact filename.
if ml is None:
    importissue = False
    # Walk the download folder; the last comic archive found wins.
    for root, dirnames, filenames in os.walk(self.nzb_folder, followlinks=True):
        for filename in filenames:
            if filename.lower().endswith(self.extensions):
                odir = root
                logger.fdebug("%s odir (root): %s" % (module, odir))
                ofilename = filename
                logger.fdebug("%s ofilename: %s" % (module, ofilename))
                path, ext = os.path.splitext(ofilename)
    try:
        if odir is None:
            logger.fdebug("%s No root folder set." % module)
            odir = self.nzb_folder
    except:
        # odir was never bound (no archive matched above) - fall back to
        # treating self.nzb_folder itself as the download (file or dir).
        logger.error(
            "%s unable to set root folder. Forcing it due to some error above most likely."
            % module
        )
        if os.path.isfile(self.nzb_folder) and self.nzb_folder.lower().endswith(
            self.extensions
        ):
            import ntpath
            odir, ofilename = ntpath.split(self.nzb_folder)
            path, ext = os.path.splitext(ofilename)
            importissue = True
        else:
            odir = self.nzb_folder
    if ofilename is None:
        self._log(
            "Unable to locate a valid cbr/cbz file. Aborting post-processing for this filename."
        )
        logger.error(
            "%s unable to locate a valid cbr/cbz file. Aborting post-processing for this filename."
            % module
        )
        self.failed_files += 1
        self.valreturn.append({"self.log": self.log, "mode": "stop"})
        return self.queue.put(self.valreturn)
    logger.fdebug("%s odir: %s" % (module, odir))
    logger.fdebug("%s ofilename: %s" % (module, ofilename))
# if meta-tagging is not enabled, we need to declare the check as being fail
# if meta-tagging is enabled, it gets changed just below to a default of pass
pcheck = "fail"
# make sure we know any sub-folder off of self.nzb_folder that is being used so when we do
# tidy-up we can remove the empty directory too. odir is the original COMPLETE path at this point
if ml is None:
    subpath = odir
    orig_filename = ofilename
    crcvalue = helpers.crc(os.path.join(odir, ofilename))
else:
    subpath, orig_filename = os.path.split(ml["ComicLocation"])
    crcvalue = helpers.crc(ml["ComicLocation"])
# tag the meta.
if mylar.CONFIG.ENABLE_META:
self._log("Metatagging enabled - proceeding...")
logger.fdebug("%s Metatagging enabled - proceeding..." % module)
pcheck = "pass"
if mylar.CONFIG.CMTAG_START_YEAR_AS_VOLUME:
vol_label = seriesyear
else:
vol_label = comversion
try:
import cmtagmylar
if ml is None:
pcheck = cmtagmylar.run(
self.nzb_folder,
issueid=issueid,
comversion=vol_label,
filename=os.path.join(odir, ofilename),
)
else:
pcheck = cmtagmylar.run(
self.nzb_folder,
issueid=issueid,
comversion=vol_label,
manual="yes",
filename=ml["ComicLocation"],
)
except ImportError:
logger.fdebug(
"%s comictaggerlib not found on system. Ensure the ENTIRE lib directory is located within mylar/lib/comictaggerlib/"
% module
)
logger.fdebug(
"%s continuing with PostProcessing, but I am not using metadata."
% module
)
pcheck = "fail"
if pcheck == "fail":
self._log(
"Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging..."
)
logger.fdebug(
"%s Unable to write metadata successfully - check mylar.log file. Attempting to continue without tagging..."
% module
)
self.failed_files += 1
# we need to set this to the cbz file since not doing it will result in nothing getting moved.
# not sure how to do this atm
elif any([pcheck == "unrar error", pcheck == "corrupt"]):
if ml is not None:
self._log(
"This is a corrupt archive - whether CRC errors or it's incomplete. Marking as BAD, and not post-processing."
)
logger.error(
"%s This is a corrupt archive - whether CRC errors or it is incomplete. Marking as BAD, and not post-processing."
% module
)
self.failed_files += 1
self.valreturn.append({"self.log": self.log, "mode": "stop"})
else:
self._log(
"This is a corrupt archive - whether CRC errors or it's incomplete. Marking as BAD, and retrying a different copy."
)
logger.error(
"%s This is a corrupt archive - whether CRC errors or it is incomplete. Marking as BAD, and retrying a different copy."
% module
)
self.valreturn.append(
{
"self.log": self.log,
"mode": "fail",
"issueid": issueid,
"comicid": comicid,
"comicname": comicnzb["ComicName"],
"issuenumber": issuenzb["Issue_Number"],
"annchk": annchk,
}
)
return self.queue.put(self.valreturn)
elif pcheck.startswith("file not found"):
filename_in_error = pcheck.split("||")[1]
self._log(
"The file cannot be found in the location provided [%s]. Please verify it exists, and re-run if necessary. Aborting."
% filename_in_error
)
logger.error(
"%s The file cannot be found in the location provided [%s]. Please verify it exists, and re-run if necessary. Aborting"
% (module, filename_in_error)
)
self.failed_files += 1
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
else:
# need to set the filename source as the new name of the file returned from comictagger.
odir = os.path.split(pcheck)[0]
ofilename = os.path.split(pcheck)[1]
ext = os.path.splitext(ofilename)[1]
self._log("Sucessfully wrote metadata to .cbz - Continuing..")
logger.info(
"%s Sucessfully wrote metadata to .cbz (%s) - Continuing.."
% (module, ofilename)
)
# Run Pre-script
if mylar.CONFIG.ENABLE_PRE_SCRIPTS:
nzbn = self.nzb_name # original nzb name
nzbf = self.nzb_folder # original nzb folder
# name, comicyear, comicid , issueid, issueyear, issue, publisher
# create the dic and send it.
seriesmeta = []
seriesmetadata = {}
seriesmeta.append(
{
"name": series,
"comicyear": seriesyear,
"comicid": comicid,
"issueid": issueid,
"issueyear": issueyear,
"issue": issuenum,
"publisher": publisher,
}
)
seriesmetadata["seriesmeta"] = seriesmeta
self._run_pre_scripts(nzbn, nzbf, seriesmetadata)
file_values = {
"$Series": seriesfilename,
"$Issue": prettycomiss,
"$Year": issueyear,
"$series": series.lower(),
"$Publisher": publisher,
"$publisher": publisher.lower(),
"$VolumeY": "V" + str(seriesyear),
"$VolumeN": comversion,
"$monthname": month_name,
"$month": month,
"$Annual": "Annual",
}
if ml:
if pcheck == "fail":
odir, ofilename = os.path.split(ml["ComicLocation"])
orig_filename = ofilename
elif pcheck:
# odir, ofilename already set. Carry it through.
pass
else:
odir, orig_filename = os.path.split(ml["ComicLocation"])
logger.fdebug("%s ofilename: %s" % (module, ofilename))
if any([ofilename == odir, ofilename == odir[:-1], ofilename == ""]):
self._log(
"There was a problem deciphering the filename/directory - please verify that the filename : [%s] exists in location [%s]. Aborting."
% (ofilename, odir)
)
logger.error(
module
+ " There was a problem deciphering the filename/directory - please verify that the filename : [%s] exists in location [%s]. Aborting."
% (ofilename, odir)
)
self.failed_files += 1
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
logger.fdebug("%s odir: %s" % (module, odir))
logger.fdebug("%s ofilename: %s" % (module, ofilename))
ext = os.path.splitext(ofilename)[1]
logger.fdebug("%s ext: %s" % (module, ext))
if ofilename is None or ofilename == "":
logger.error(
"%s Aborting PostProcessing - the filename does not exist in the location given. Make sure that %s exists and is the correct location."
% (module, self.nzb_folder)
)
self.failed_files += 1
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
self._log("Original Filename: %s [%s]" % (orig_filename, ext))
logger.fdebug("%s Original Filename: %s [%s]" % (module, orig_filename, ext))
if mylar.CONFIG.FILE_FORMAT == "" or not mylar.CONFIG.RENAME_FILES:
    self._log("Rename Files isn't enabled...keeping original filename.")
    logger.fdebug(
        "%s Rename Files is not enabled - keeping original filename." % module
    )
    # check if extension is in nzb_name - will screw up otherwise
    if ofilename.lower().endswith(self.extensions):
        nfilename = ofilename[:-4]
    else:
        nfilename = ofilename
else:
    # Build the new filename from the user's file-format template.
    nfilename = helpers.replace_all(chunk_file_format, file_values)
    if mylar.CONFIG.REPLACE_SPACES:
        # mylar.CONFIG.REPLACE_CHAR ...determines what to replace spaces with underscore or dot
        nfilename = nfilename.replace(" ", mylar.CONFIG.REPLACE_CHAR)
# Strip characters that are illegal/awkward in filenames.
nfilename = re.sub("[\,\:\?\"']", "", nfilename)
nfilename = re.sub("[\/\*]", "-", nfilename)
if ml is not None and ml["ForcedMatch"] is True:
    # Remove the embedded '[__issueid__]' marker a forced match carries.
    xyb = nfilename.find("[__")
    if xyb != -1:
        yyb = nfilename.find("__]", xyb)
        if yyb != -1:
            rem_issueid = nfilename[xyb + 3 : yyb]
            logger.fdebug("issueid: %s" % rem_issueid)
            # NOTE(review): .strip() here acts on the "%s %s" template (a
            # no-op), not on the formatted result - probably meant as
            # ("%s %s" % (...)).strip(); left unchanged.
            nfilename = "%s %s".strip() % (
                nfilename[:xyb],
                nfilename[yyb + 3 :],
            )
            logger.fdebug(
                "issueid information [%s] removed successfully: %s"
                % (rem_issueid, nfilename)
            )
self._log("New Filename: %s" % nfilename)
logger.fdebug("%s New Filename: %s" % (module, nfilename))
src = os.path.join(odir, ofilename)
checkdirectory = filechecker.validateAndCreateDirectory(
comlocation, True, module=module
)
if not checkdirectory:
logger.warn(
"%s Error trying to validate/create directory. Aborting this process at this time."
% module
)
self.failed_files += 1
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
if mylar.CONFIG.LOWERCASE_FILENAMES:
dst = os.path.join(comlocation, (nfilename + ext).lower())
else:
dst = os.path.join(comlocation, (nfilename + ext.lower()))
self._log("Source: %s" % src)
self._log("Destination: %s" % dst)
logger.fdebug("%s Source: %s" % (module, src))
logger.fdebug("%s Destination: %s" % (module, dst))
if ml is None:
# downtype = for use with updater on history table to set status to 'Downloaded'
downtype = "True"
# non-manual run moving/deleting...
logger.fdebug("%s self.nzb_folder: %s" % (module, self.nzb_folder))
logger.fdebug("%s odir: %s" % (module, odir))
logger.fdebug("%s ofilename: %s" % (module, ofilename))
logger.fdebug("%s nfilename: %s" % (module, nfilename + ext))
if mylar.CONFIG.RENAME_FILES:
if ofilename != (nfilename + ext):
logger.fdebug(
"%s Renaming %s ..to.. %s"
% (
module,
os.path.join(odir, ofilename),
os.path.join(odir, nfilename + ext),
)
)
else:
logger.fdebug(
"%s Filename is identical as original, not renaming." % module
)
src = os.path.join(odir, ofilename)
try:
self._log("[%s] %s - to - %s" % (mylar.CONFIG.FILE_OPTS, src, dst))
checkspace = helpers.get_free_space(comlocation)
if checkspace is False:
if all([pcheck is not None, pcheck != "fail"]): # meta was done
self.tidyup(odir, True, cacheonly=True)
raise OSError
fileoperation = helpers.file_ops(src, dst)
if not fileoperation:
raise OSError
except Exception as e:
self._log(
"Failed to %s %s - check log for exact error."
% (mylar.CONFIG.FILE_OPTS, src)
)
self._log("Post-Processing ABORTED.")
logger.error(
"%s Failed to %s %s: %s" % (module, mylar.CONFIG.FILE_OPTS, src, e)
)
logger.error("%s Post-Processing ABORTED" % module)
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
# tidyup old path
if any(
[mylar.CONFIG.FILE_OPTS == "move", mylar.CONFIG.FILE_OPTS == "copy"]
):
self.tidyup(odir, True, filename=orig_filename)
else:
# downtype = for use with updater on history table to set status to 'Post-Processed'
downtype = "PP"
# Manual Run, this is the portion.
src = os.path.join(odir, ofilename)
if mylar.CONFIG.RENAME_FILES:
if ofilename != (nfilename + ext):
logger.fdebug(
"%s Renaming %s ..to.. %s"
% (
module,
os.path.join(odir, ofilename),
os.path.join(odir, self.nzb_folder, str(nfilename + ext)),
)
)
else:
logger.fdebug(
"%s Filename is identical as original, not renaming." % module
)
logger.fdebug("%s odir src : %s" % (module, src))
logger.fdebug(
"%s[%s] %s ... to ... %s" % (module, mylar.CONFIG.FILE_OPTS, src, dst)
)
try:
checkspace = helpers.get_free_space(comlocation)
if checkspace is False:
if all([pcheck != "fail", pcheck is not None]): # meta was done
self.tidyup(odir, True, cacheonly=True)
raise OSError
fileoperation = helpers.file_ops(src, dst)
if not fileoperation:
raise OSError
except Exception as e:
logger.error(
"%s Failed to %s %s: %s" % (module, mylar.CONFIG.FILE_OPTS, src, e)
)
logger.error("%s Post-Processing ABORTED." % module)
self.failed_files += 1
self.valreturn.append({"self.log": self.log, "mode": "stop"})
return self.queue.put(self.valreturn)
logger.info(
"%s %s successful to : %s" % (module, mylar.CONFIG.FILE_OPTS, dst)
)
if any(
[mylar.CONFIG.FILE_OPTS == "move", mylar.CONFIG.FILE_OPTS == "copy"]
):
self.tidyup(odir, True, subpath, filename=orig_filename)
# Hopefully set permissions on downloaded file
if mylar.CONFIG.ENFORCE_PERMS:
if mylar.OS_DETECT != "windows":
filechecker.setperms(dst.rstrip())
else:
try:
permission = int(mylar.CONFIG.CHMOD_FILE, 8)
os.umask(0)
os.chmod(dst.rstrip(), permission)
except OSError:
logger.error(
"%s Failed to change file permissions. Ensure that the user running Mylar has proper permissions to change permissions in : %s"
% (module, dst)
)
logger.fdebug(
"%s Continuing post-processing but unable to change file permissions in %s"
% (module, dst)
)
# let's reset the fileop to the original setting just in case it's a manual pp run
if mylar.CONFIG.FILE_OPTS == "copy":
self.fileop = shutil.copy
else:
self.fileop = shutil.move
# delete entry from nzblog table
myDB.action("DELETE from nzblog WHERE issueid=?", [issueid])
updater.totals(comicid, havefiles="+1", issueid=issueid, file=dst)
# update snatched table to change status to Downloaded
if annchk == "no":
updater.foundsearch(
comicid, issueid, down=downtype, module=module, crc=crcvalue
)
dispiss = "#%s" % issuenumOG
updatetable = "issues"
else:
updater.foundsearch(
comicid,
issueid,
mode="want_ann",
down=downtype,
module=module,
crc=crcvalue,
)
if "annual" in issuenzb["ReleaseComicName"].lower(): # series.lower():
dispiss = "Annual #%s" % issuenumOG
elif "special" in issuenzb["ReleaseComicName"].lower():
dispiss = "Special #%s" % issuenumOG
else:
dispiss = "#%s" % issuenumOG
updatetable = "annuals"
logger.fdebug("[annchk:%s] issue to update: %s" % (annchk, dispiss))
# new method for updating status after pp
if os.path.isfile(dst):
ctrlVal = {"IssueID": issueid}
newVal = {"Status": "Downloaded", "Location": os.path.basename(dst)}
logger.fdebug("writing: %s -- %s" % (newVal, ctrlVal))
myDB.upsert(updatetable, newVal, ctrlVal)
try:
if ml["IssueArcID"]:
logger.info("Watchlist Story Arc match detected.")
logger.info(ml)
arcinfo = myDB.selectone(
"SELECT * FROM storyarcs where IssueArcID=?", [ml["IssueArcID"]]
).fetchone()
if arcinfo is None:
logger.warn(
"Unable to locate IssueID within givin Story Arc. Ensure everything is up-to-date (refreshed) for the Arc."
)
else:
if mylar.CONFIG.COPY2ARCDIR is True:
if arcinfo["Publisher"] is None:
arcpub = arcinfo["IssuePublisher"]
else:
arcpub = arcinfo["Publisher"]
grdst = helpers.arcformat(
arcinfo["StoryArc"],
helpers.spantheyears(arcinfo["StoryArcID"]),
arcpub,
)
logger.info("grdst:" + grdst)
checkdirectory = filechecker.validateAndCreateDirectory(
grdst, True, module=module
)
if not checkdirectory:
logger.warn(
"%s Error trying to validate/create directory. Aborting this process at this time."
% module
)
self.valreturn.append(
{"self.log": self.log, "mode": "stop"}
)
return self.queue.put(self.valreturn)
if mylar.CONFIG.READ2FILENAME:
logger.fdebug(
"%s readingorder#: %s"
% (module, arcinfo["ReadingOrder"])
)
if int(arcinfo["ReadingOrder"]) < 10:
readord = "00" + str(arcinfo["ReadingOrder"])
elif (
int(arcinfo["ReadingOrder"]) >= 10
and int(arcinfo["ReadingOrder"]) <= 99
):
readord = "0" + str(arcinfo["ReadingOrder"])
else:
readord = str(arcinfo["ReadingOrder"])
dfilename = str(readord) + "-" + os.path.split(dst)[1]
else:
dfilename = os.path.split(dst)[1]
grab_dst = os.path.join(grdst, dfilename)
logger.fdebug("%s Destination Path : %s" % (module, grab_dst))
grab_src = dst
logger.fdebug("%s Source Path : %s" % (module, grab_src))
logger.info(
"%s[%s] %s into directory: %s"
% (module, mylar.CONFIG.ARC_FILEOPS.upper(), dst, grab_dst)
)
try:
# need to ensure that src is pointing to the series in order to do a soft/hard-link properly
checkspace = helpers.get_free_space(grdst)
if checkspace is False:
raise OSError
fileoperation = helpers.file_ops(
grab_src, grab_dst, arc=True
)
if not fileoperation:
raise OSError
except Exception as e:
logger.error(
"%s Failed to %s %s: %s"
% (module, mylar.CONFIG.ARC_FILEOPS, grab_src, e)
)
return
else:
grab_dst = dst
# delete entry from nzblog table in case it was forced via the Story Arc Page
IssArcID = "S" + str(ml["IssueArcID"])
myDB.action(
"DELETE from nzblog WHERE IssueID=? AND SARC=?",
[IssArcID, arcinfo["StoryArc"]],
)
logger.fdebug("%s IssueArcID: %s" % (module, ml["IssueArcID"]))
ctrlVal = {"IssueArcID": ml["IssueArcID"]}
newVal = {"Status": "Downloaded", "Location": grab_dst}
logger.fdebug("writing: %s -- %s" % (newVal, ctrlVal))
myDB.upsert("storyarcs", newVal, ctrlVal)
logger.fdebug(
"%s [%s] Post-Processing completed for: %s"
% (module, arcinfo["StoryArc"], grab_dst)
)
except:
pass
if mylar.CONFIG.WEEKFOLDER or mylar.CONFIG.SEND2READ:
# mylar.CONFIG.WEEKFOLDER = will *copy* the post-processed file to the weeklypull list folder for the given week.
# mylar.CONFIG.SEND2READ = will add the post-processed file to the readinglits
weeklypull.weekly_check(
comicid,
issuenum,
file=(nfilename + ext),
path=dst,
module=module,
issueid=issueid,
)
# retrieve/create the corresponding comic objects
if mylar.CONFIG.ENABLE_EXTRA_SCRIPTS:
folderp = dst # folder location after move/rename
nzbn = self.nzb_name # original nzb name
filen = nfilename + ext # new filename
# name, comicyear, comicid , issueid, issueyear, issue, publisher
# create the dic and send it.
seriesmeta = []
seriesmetadata = {}
seriesmeta.append(
{
"name": series,
"comicyear": seriesyear,
"comicid": comicid,
"issueid": issueid,
"issueyear": issueyear,
"issue": issuenum,
"publisher": publisher,
}
)
seriesmetadata["seriesmeta"] = seriesmeta
self._run_extra_scripts(
nzbn, self.nzb_folder, filen, folderp, seriesmetadata
)
# if ml is not None:
# #we only need to return self.log if it's a manual run and it's not a snatched torrent
# #manual run + not snatched torrent (or normal manual-run)
# logger.info(module + ' Post-Processing completed for: ' + series + ' ' + dispiss)
# self._log(u"Post Processing SUCCESSFUL! ")
# self.valreturn.append({"self.log": self.log,
# "mode": 'stop',
# "issueid": issueid,
# "comicid": comicid})
# #if self.apicall is True:
# self.sendnotify(series, issueyear, dispiss, annchk, module)
# return self.queue.put(self.valreturn)
imageUrl = myDB.select("SELECT ImageURL from issues WHERE IssueID=?", [issueid])
if imageUrl:
imageUrl = imageUrl[0][0]
self.sendnotify(series, issueyear, dispiss, annchk, module, imageUrl)
logger.info(
"%s Post-Processing completed for: %s %s" % (module, series, dispiss)
)
self._log("Post Processing SUCCESSFUL! ")
self.valreturn.append(
{
"self.log": self.log,
"mode": "stop",
"issueid": issueid,
"comicid": comicid,
}
)
return self.queue.put(self.valreturn)
def sendnotify(self, series, issueyear, issuenumOG, annchk, module, imageUrl):
    """Fire a 'downloaded & post-processed' notification on every enabled agent.

    :param series: series title
    :param issueyear: publication year, or None to omit it from the headline
    :param issuenumOG: display issue number (e.g. '#12' or 'Annual #3')
    :param annchk: annual flag (unused here; kept for caller compatibility)
    :param module: log prefix passed through to the notifiers
    :param imageUrl: cover image URL (only Telegram uses it)
    """
    # Headline includes the year only when one is known.
    if issueyear is None:
        headline = "%s %s" % (series, issuenumOG)
    else:
        headline = "%s (%s) %s" % (series, issueyear, issuenumOG)
    body = "Mylar has downloaded and post-processed: " + headline

    cfg = mylar.CONFIG
    if cfg.PROWL_ENABLED:
        notifiers.PROWL().notify(
            headline, "Download and Postprocessing completed", module=module
        )
    if cfg.PUSHOVER_ENABLED:
        notifiers.PUSHOVER().notify(headline, body, module=module)
    if cfg.BOXCAR_ENABLED:
        notifiers.BOXCAR().notify(prline=headline, prline2=body, module=module)
    if cfg.PUSHBULLET_ENABLED:
        notifiers.PUSHBULLET().notify(prline=headline, prline2=body, module=module)
    if cfg.TELEGRAM_ENABLED:
        notifiers.TELEGRAM().notify(body, imageUrl)
    if cfg.SLACK_ENABLED:
        notifiers.SLACK().notify(
            "Download and Postprocessing completed", body, module=module
        )
    if cfg.EMAIL_ENABLED and cfg.EMAIL_ONPOST:
        logger.info("Sending email notification")
        notifiers.EMAIL().notify(body, "Mylar notification - Processed", module=module)
    return
class FolderCheck:
    """Folder-monitor job: scan ``CHECK_FOLDER`` for newly snatched downloads
    and hand them to a 'Manual Run' post-processing pass."""

    def __init__(self):
        # `Queue` was renamed `queue` in Python 3; keep a compat shim so the
        # job works on either interpreter. (Previously only the Python 2
        # spelling was imported, which raises ImportError on Python 3.)
        try:
            import queue as Queue
        except ImportError:
            import Queue

        self.module = "[FOLDER-CHECK]"
        # Post-processing requires a queue to report into, even though the
        # folder monitor itself never reads it.
        self.queue = Queue.Queue()

    def run(self):
        """Run one folder-monitor pass, unless an import is in progress."""
        if mylar.IMPORTLOCK:
            logger.info(
                "There is an import currently running. In order to ensure successful import - deferring this until the import is finished."
            )
            return
        # monitor a selected folder for 'snatched' files that haven't been processed
        # junk the queue as it's not needed for folder monitoring, but needed for post-processing to run without error.
        helpers.job_management(
            write=True,
            job="Folder Monitor",
            current_run=helpers.utctimestamp(),
            status="Running",
        )
        mylar.MONITOR_STATUS = "Running"
        logger.info(
            "%s Checking folder %s for newly snatched downloads"
            % (self.module, mylar.CONFIG.CHECK_FOLDER)
        )
        PostProcess = PostProcessor(
            "Manual Run", mylar.CONFIG.CHECK_FOLDER, queue=self.queue
        )
        # The Process() return value is unused; only its side effects matter.
        PostProcess.Process()
        logger.info("%s Finished checking for newly snatched downloads" % self.module)
        helpers.job_management(
            write=True,
            job="Folder Monitor",
            last_run_completed=helpers.utctimestamp(),
            status="Waiting",
        )
        mylar.MONITOR_STATUS = "Waiting"
|
extractor | fivetv | # coding: utf-8
from __future__ import unicode_literals
import re
from ..utils import int_or_none
from .common import InfoExtractor
class FiveTVIE(InfoExtractor):
    """Extractor for videos hosted on 5-tv.ru (Russian Channel 5)."""

    _VALID_URL = r"""(?x)
                    https?://
                        (?:www\.)?5-tv\.ru/
                        (?:
                            (?:[^/]+/)+(?P<id>\d+)|
                            (?P<path>[^/?#]+)(?:[/?#])?
                        )
                    """
    _TESTS = [
        {
            "url": "http://5-tv.ru/news/96814/",
            "md5": "bbff554ad415ecf5416a2f48c22d9283",
            "info_dict": {
                "id": "96814",
                "ext": "mp4",
                "title": "Россияне выбрали имя для общенациональной платежной системы",
                "description": "md5:a8aa13e2b7ad36789e9f77a74b6de660",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "duration": 180,
            },
        },
        {
            "url": "http://5-tv.ru/video/1021729/",
            "info_dict": {
                "id": "1021729",
                "ext": "mp4",
                "title": "3D принтер",
                "description": "md5:d76c736d29ef7ec5c0cf7d7c65ffcb41",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "duration": 180,
            },
        },
        {
            # redirect to https://www.5-tv.ru/projects/1000095/izvestia-glavnoe/
            "url": "http://www.5-tv.ru/glavnoe/#itemDetails",
            "info_dict": {
                "id": "glavnoe",
                "ext": "mp4",
                "title": r"re:^Итоги недели с \d+ по \d+ \w+ \d{4} года$",
                "thumbnail": r"re:^https?://.*\.jpg$",
            },
            "skip": "redirect to «Известия. Главное» project page",
        },
        {
            "url": "http://www.5-tv.ru/glavnoe/broadcasts/508645/",
            "only_matching": True,
        },
        {
            "url": "http://5-tv.ru/films/1507502/",
            "only_matching": True,
        },
        {
            "url": "http://5-tv.ru/programs/broadcast/508713/",
            "only_matching": True,
        },
        {
            "url": "http://5-tv.ru/angel/",
            "only_matching": True,
        },
        {
            "url": "http://www.5-tv.ru/schedule/?iframe=true&width=900&height=450",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        match = re.match(self._VALID_URL, url)
        # Numeric id when present, otherwise the trailing path component.
        video_id = match.group("id") or match.group("path")
        webpage = self._download_webpage(url, video_id)

        # The stream URL lives either on the player <div> or on a
        # "videoplayer" anchor, depending on page layout.
        video_url = self._search_regex(
            [
                r'<div[^>]+?class="(?:flow)?player[^>]+?data-href="([^"]+)"',
                r'<a[^>]+?href="([^"]+)"[^>]+?class="videoplayer"',
            ],
            webpage,
            "video url",
        )

        title = self._og_search_title(webpage, default=None)
        if not title:
            title = self._search_regex(r"<title>([^<]+)</title>", webpage, "title")

        raw_duration = self._og_search_property(
            "video:duration", webpage, "duration", default=None
        )
        duration = int_or_none(raw_duration)

        return {
            "id": video_id,
            "url": video_url,
            "title": title,
            "description": self._og_search_description(webpage, default=None),
            "thumbnail": self._og_search_thumbnail(webpage, default=None),
            "duration": duration,
        }
|
searx | external_urls | import math
from searx.data import EXTERNAL_URLS
# Map from the 2-character IMDB item-id prefix to the external-URL id used
# in data/external_urls.json.
IMDB_PREFIX_TO_URL_ID = {
    "tt": "imdb_title",
    "mn": "imdb_name",
    "ch": "imdb_character",
    "co": "imdb_company",
    "ev": "imdb_event",
}
HTTP_WIKIMEDIA_IMAGE = "http://commons.wikimedia.org/wiki/Special:FilePath/"


def get_imdb_url_id(imdb_item_id):
    """Return the external-URL id for an IMDB item, keyed on its two-character
    prefix, or None when the prefix is unknown."""
    return IMDB_PREFIX_TO_URL_ID.get(imdb_item_id[:2])


def get_wikimedia_image_id(url):
    """Strip the Wikimedia 'Special:FilePath' prefix or a leading 'File:' from
    *url*; anything else is returned unchanged."""
    for prefix in (HTTP_WIKIMEDIA_IMAGE, "File:"):
        if url.startswith(prefix):
            return url[len(prefix):]
    return url
def get_external_url(url_id, item_id, alternative="default"):
    """Return an external URL or None if url_id is not found.

    url_id can take value from data/external_urls.json
    The "imdb_id" value is automatically converted according to the item_id value.

    If item_id is None, the raw URL with the $1 is returned.
    """
    if item_id is not None:
        # Special-case ids that need pre-processing before template lookup.
        if url_id == "imdb_id":
            url_id = get_imdb_url_id(item_id)
        elif url_id == "wikimedia_image":
            item_id = get_wikimedia_image_id(item_id)

    url_description = EXTERNAL_URLS.get(url_id)
    if not url_description:
        return None

    url_template = url_description["urls"].get(alternative)
    if url_template is None:
        return None

    if item_id is None:
        return url_template
    return url_template.replace("$1", item_id)
def get_earth_coordinates_url(latitude, longitude, osm_zoom, alternative="default"):
    """Build a map URL for the given coordinates and OSM zoom level by filling
    the ``${latitude}``/``${longitude}``/``${zoom}`` placeholders of the
    configured "map" external-URL template."""
    url = get_external_url("map", None, alternative)
    for placeholder, value in (
        ("${latitude}", latitude),
        ("${longitude}", longitude),
        ("${zoom}", osm_zoom),
    ):
        url = url.replace(placeholder, str(value))
    return url
def area_to_osm_zoom(area):
    """Convert an area in km² into an OSM zoom. Less reliable if the shape is not round.

    logarithm regression using these data:
    * 9596961 -> 4 (China)
    * 3287263 -> 5 (India)
    * 643801 -> 6 (France)
    * 6028 -> 9
    * 1214 -> 10
    * 891 -> 12
    * 12 -> 13

    In WolframAlpha:
    >>> log fit {9596961,15},{3287263, 14},{643801,13},{6028,10},{1214,9},{891,7},{12,6}

    with 15 = 19-4 (China); 14 = 19-5 (India) and so on

    Args:
        area (int,float,str): area in km²

    Returns:
        int: OSM zoom, or 19 if area is not a usable number
    """
    try:
        amount = float(area)
        # math.log raises ValueError for amount <= 0, which lands on the
        # same "not a usable number" fallback below.
        return max(0, min(19, round(19 - 0.688297 * math.log(226.878 * amount))))
    except (ValueError, TypeError):
        # ValueError: non-numeric string or non-positive area.
        # TypeError: float(None) and other non-castable inputs previously
        # escaped this handler despite the documented fallback of 19.
        return 19
|
mopidy | zeroconf | import logging
import string
from typing import Optional
logger = logging.getLogger(__name__)

try:
    import dbus  # pyright: ignore[reportMissingImports]
except ImportError:
    # dbus-python is optional: without it, Zeroconf publishing is disabled
    # (publish() checks `dbus` before doing anything).
    dbus = None

# Avahi D-Bus API constants ("unspecified interface/protocol", no publish
# flags) — used verbatim in EntryGroup.AddService below.
_AVAHI_IF_UNSPEC = -1
_AVAHI_PROTO_UNSPEC = -1
_AVAHI_PUBLISHFLAGS_NONE = 0
def _is_loopback_address(host: str) -> bool:
return host.startswith(("127.", "::ffff:127.")) or host == "::1"
def _convert_text_list_to_dbus_format(text_list: list[str]):
    """Encode a list of strings as a D-Bus array of byte arrays ('aay'),
    the format Avahi expects for TXT records."""
    assert dbus
    result = dbus.Array(signature="ay")
    for entry in text_list:
        encoded = [dbus.Byte(ord(char)) for char in entry]
        result.append(encoded)
    return result
class Zeroconf:
    """Publish a network service with Zeroconf.

    Currently, this only works on Linux using Avahi via D-Bus.

    :param str name: human readable name of the service, e.g. 'MPD on neptune'
    :param str stype: service type, e.g. '_mpd._tcp'
    :param int port: TCP port of the service, e.g. 6600
    :param str domain: local network domain name, defaults to ''
    :param str host: interface to advertise the service on, defaults to ''
    :param text: extra information depending on ``stype``, defaults to empty
      list
    :type text: list of str
    """

    def __init__(  # noqa: PLR0913
        self,
        name: str,
        stype: str,
        port: int,
        domain: str = "",
        host: str = "",
        text: Optional[list[str]] = None,
    ) -> None:
        self.stype = stype
        self.port = port
        self.domain = domain
        self.host = host
        self.text = text or []
        # D-Bus handles; they stay None when dbus is missing or the Avahi
        # server cannot be reached, and publish() checks for that.
        self.bus = None
        self.server = None
        self.group = None
        self.display_hostname = None
        self.name = None

        if dbus:
            try:
                self.bus = dbus.SystemBus()
                self.server = dbus.Interface(
                    self.bus.get_object("org.freedesktop.Avahi", "/"),
                    "org.freedesktop.Avahi.Server",
                )
                # f-string coerces the D-Bus string return value to plain str.
                self.display_hostname = f"{self.server.GetHostName()}"
                # Expand optional $hostname/$port placeholders in the
                # human-readable service name.
                self.name = string.Template(name).safe_substitute(
                    hostname=self.display_hostname, port=port
                )
            except dbus.exceptions.DBusException as e:
                logger.debug("%s: Server failed: %s", self, e)

    def __str__(self) -> str:
        """Human-readable description used in the debug log lines below."""
        return (
            f"Zeroconf service {self.name!r} "
            f"({self.stype} at [{self.host}]:{self.port:d})"
        )

    def publish(self) -> bool:  # noqa: PLR0911
        """Publish the service.

        Call when your service starts.

        Returns True when the service was registered with Avahi, False on
        any unmet precondition or D-Bus error.
        """
        if _is_loopback_address(self.host):
            logger.debug("%s: Publish on loopback interface is not supported.", self)
            return False

        if not dbus:
            logger.debug("%s: dbus not installed; publish failed.", self)
            return False

        if not self.bus:
            logger.debug("%s: Bus not available; publish failed.", self)
            return False

        if not self.server:
            logger.debug("%s: Server not available; publish failed.", self)
            return False

        try:
            if not self.bus.name_has_owner("org.freedesktop.Avahi"):
                logger.debug("%s: Avahi service not running; publish failed.", self)
                return False

            # Create a new entry group, add our service to it, then commit.
            self.group = dbus.Interface(
                self.bus.get_object(
                    "org.freedesktop.Avahi", self.server.EntryGroupNew()
                ),
                "org.freedesktop.Avahi.EntryGroup",
            )

            self.group.AddService(
                _AVAHI_IF_UNSPEC,
                _AVAHI_PROTO_UNSPEC,
                dbus.UInt32(_AVAHI_PUBLISHFLAGS_NONE),
                self.name,
                self.stype,
                self.domain,
                self.host,
                dbus.UInt16(self.port),
                _convert_text_list_to_dbus_format(self.text),
            )

            self.group.Commit()
            logger.debug("%s: Published", self)
        except dbus.exceptions.DBusException as e:
            logger.debug("%s: Publish failed: %s", self, e)
            return False
        else:
            return True

    def unpublish(self) -> None:
        """Unpublish the service.

        Call when your service shuts down.  Safe to call when publish()
        never succeeded; the group handle is always cleared afterwards.
        """
        if not dbus or not self.group:
            return

        try:
            self.group.Reset()
            logger.debug("%s: Unpublished", self)
        except dbus.exceptions.DBusException as e:
            logger.debug("%s: Unpublish failed: %s", self, e)
        finally:
            self.group = None
|
models | printable | # The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2015 reddit
# Inc. All Rights Reserved.
###############################################################################
from pylons import request
from pylons import tmpl_context as c
from r2.lib import hooks
from r2.lib.strings import Score
class Printable(object):
    """Base mixin for renderable things (links, comments, etc.).

    Supplies the default display flags and the cache-key machinery used
    when caching rendered templates.  NOTE: this is Python 2 code —
    ``map`` returns a list and ``dict.has_key`` is used below.
    """

    # Default render-state flags; wrapped instances override them per viewer.
    show_spam = False
    show_reports = False
    is_special = False
    can_ban = False
    deleted = False
    rowstyle_cls = ""
    collapsed = False
    author = None
    margin = 0
    is_focal = False
    childlisting = None

    # Attributes that must NOT contribute to the render-cache key: they are
    # either injected after caching (CachedVariable placeholders) or vary
    # per request/viewer.
    cache_ignore = set(
        [
            "c",
            "author",
            "score_fmt",
            "child",
            # displayed score is cachable, so remove score
            # related fields.
            "voting_score",
            "display_score",
            "render_score",
            "score",
            "_score",
            "upvotes",
            "_ups",
            "downvotes",
            "_downs",
            "subreddit_slow",
            "_deleted",
            "_spam",
            "cachable",
            "make_permalink",
            "permalink",
            "timesince",
            "num",  # listings only, replaced by CachedVariable
            "rowstyle_cls",  # listings only, replaced by CachedVariable
            "upvote_ratio",
            "should_incr_counts",
            "keep_item",
        ]
    )

    @classmethod
    def update_nofollow(cls, user, wrapped):
        """Hook for subclasses to set nofollow on links; no-op by default."""
        pass

    @classmethod
    def add_props(cls, user, wrapped):
        """Attach render-time properties to each wrapped item, replacing
        viewer-dependent fields with CachedVariable placeholders so rendered
        templates can be cached and filled in per request."""
        from r2.lib.wrapped import CachedVariable

        for item in wrapped:
            # insert replacement variable for timesince to allow for
            # caching of thing templates
            item.display = CachedVariable("display")
            item.timesince = CachedVariable("timesince")
            item.childlisting = CachedVariable("childlisting")

            score_fmt = getattr(item, "score_fmt", Score.number_only)
            # Python 2 map: builds a list of formatted score strings.
            item.display_score = map(score_fmt, item.voting_score)

            if item.cachable:
                # Keep the real rendering, but cache with placeholders.
                item.render_score = item.display_score
                item.display_score = map(
                    CachedVariable, ["scoredislikes", "scoreunvoted", "scorelikes"]
                )

        hooks.get_hook("add_props").call(items=wrapped)

    @property
    def permalink(self, *a, **kw):
        # Subclasses must provide a permalink; the *a/**kw are vestigial
        # since a property getter only ever receives `self`.
        raise NotImplementedError

    def keep_item(self, wrapped):
        """Return True to keep this item in a listing; subclasses filter."""
        return True

    @staticmethod
    def wrapped_cache_key(wrapped, style):
        """Build the list of values that key the render cache for *wrapped*
        in the given render *style*."""
        s = [wrapped._fullname, wrapped._spam]
        # Printables can contain embedded WrappedUsers, which need to consider
        # the site and user's flair settings. Add something to the key
        # indicating there might be flair--we haven't built the WrappedUser yet
        # so we can't check to see if there's actually flair.
        if c.site.flair_enabled and c.user.pref_show_flair:
            s.append("user_flair_enabled")

        if style == "htmllite":
            # htmllite embeds are customized via query params and colors,
            # so those must participate in the key.
            s.extend(
                [
                    c.bgcolor,
                    c.bordercolor,
                    request.GET.has_key("style"),
                    request.GET.get("expanded"),
                    getattr(wrapped, "embed_voting_style", None),
                ]
            )
        return s
|
tasks | plugins | import argparse
import os
import re
import shutil
import textwrap
from collections import defaultdict
from contextlib import suppress
from pathlib import Path
from typing import List, Optional, Tuple
from httpie.compat import get_dist_name, importlib_metadata
from httpie.context import Environment
from httpie.manager.cli import missing_subcommand, parser
from httpie.manager.compat import PipError, run_pip
from httpie.status import ExitStatus
from httpie.utils import get_site_paths
# PEP 503 name normalization: runs of '-', '_', '.' in a package name are
# equivalent and collapse to a single separator.
PEP_503 = re.compile(r"[-_.]+")
class PluginInstaller:
    """Install, upgrade, uninstall and list httpie plugins.

    Plugins are installed with pip into the isolated
    ``env.config.plugins_dir`` prefix rather than the interpreter's real
    site-packages; uninstall/list are implemented on top of
    ``importlib_metadata`` because pip cannot uninstall from a custom
    ``--prefix`` scheme.
    """

    def __init__(self, env: Environment, debug: bool = False) -> None:
        self.env = env
        self.dir = env.config.plugins_dir
        self.debug = debug
        self.setup_plugins_dir()

    def setup_plugins_dir(self) -> None:
        """Ensure the plugins directory exists; re-raise permission errors."""
        try:
            self.dir.mkdir(exist_ok=True, parents=True)
        except OSError:
            self.env.stderr.write(
                f'Couldn\'t create "{self.dir!s}"'
                " directory for plugin installation."
                " Please re-check the permissions for that directory,"
                " and if needed, allow write-access."
            )
            raise

    def fail(
        self, command: str, target: Optional[str] = None, reason: Optional[str] = None
    ) -> ExitStatus:
        """Print a standardized failure message and return ERROR."""
        message = f"Can't {command}"
        if target:
            message += f" {target!r}"
        if reason:
            message += f": {reason}"

        self.env.stderr.write(message + "\n")
        return ExitStatus.ERROR

    def _install(self, targets: List[str], mode="install") -> Tuple[bytes, ExitStatus]:
        """Run pip install (optionally with --upgrade) into the plugins prefix.

        Returns pip's raw stdout and the resulting exit status.
        """
        pip_args = [
            "install",
            "--prefer-binary",
            f"--prefix={self.dir}",
            "--no-warn-script-location",
        ]
        if mode == "upgrade":
            pip_args.append("--upgrade")
        pip_args.extend(targets)

        try:
            stdout = run_pip(pip_args)
        except PipError as pip_error:
            error = pip_error
            stdout = pip_error.stdout
        else:
            error = None

        self.env.stdout.write(stdout.decode())

        if error:
            reason = None
            if error.stderr:
                stderr = error.stderr.decode()
                if self.debug:
                    self.env.stderr.write("Command failed: ")
                    self.env.stderr.write("pip " + " ".join(pip_args) + "\n")
                    # Fixed: textwrap.indent(text, prefix) — the arguments
                    # were swapped (indent(" ", stderr)), so the debug dump
                    # printed a single prefixed space instead of the
                    # indented stderr output.
                    self.env.stderr.write(textwrap.indent(stderr, " "))

                # pip's last stderr line is conventionally "ERROR: <msg>".
                last_line = stderr.strip().splitlines()[-1]
                severity, _, message = last_line.partition(": ")
                if severity == "ERROR":
                    reason = message

            stdout = error.stdout
            exit_status = self.fail(mode, ", ".join(targets), reason)
        else:
            exit_status = ExitStatus.SUCCESS

        return stdout, exit_status

    def install(self, targets: List[str]) -> ExitStatus:
        """Install the given plugin targets."""
        self.env.stdout.write(f"Installing {', '.join(targets)}...\n")
        self.env.stdout.flush()
        _, exit_status = self._install(targets)
        return exit_status

    def _clear_metadata(self, targets: List[str]) -> None:
        """Remove stale dist-info/egg-info directories for older versions.

        Due to an outstanding pip problem[0], we have to get rid of
        existing metadata for old versions manually.
        [0]: https://github.com/pypa/pip/issues/10727
        """
        result_deps = defaultdict(list)
        for site_dir in get_site_paths(self.dir):
            for child in site_dir.iterdir():
                if child.suffix in {".dist-info", ".egg-info"}:
                    name, _, version = child.stem.rpartition("-")
                    result_deps[name].append((version, child))

        for target in targets:
            name, _, version = target.rpartition("-")
            # Normalize per PEP 503, then underscore like on-disk dir names.
            name = PEP_503.sub("-", name).lower().replace("-", "_")
            if name not in result_deps:
                continue

            for result_version, meta_path in result_deps[name]:
                if version != result_version:
                    shutil.rmtree(meta_path)

    def upgrade(self, targets: List[str]) -> ExitStatus:
        """Upgrade the given plugin targets and clean up stale metadata."""
        self.env.stdout.write(f"Upgrading {', '.join(targets)}...\n")
        self.env.stdout.flush()

        raw_stdout, exit_status = self._install(targets, mode="upgrade")
        if not raw_stdout:
            return exit_status

        stdout = raw_stdout.decode()
        installation_line = stdout.splitlines()[-1]
        if installation_line.startswith("Successfully installed"):
            self._clear_metadata(installation_line.split()[2:])
        # Fixed: this method previously fell off the end and implicitly
        # returned None despite the declared ExitStatus return type, which
        # made run() report failed upgrades as success.
        return exit_status

    def _uninstall(self, target: str) -> Optional[ExitStatus]:
        """Uninstall one package installed under the plugins prefix.

        Returns an ERROR status on failure, None on success.
        """
        try:
            distribution = importlib_metadata.distribution(target)
        except importlib_metadata.PackageNotFoundError:
            return self.fail("uninstall", target, "package is not installed")

        base_dir = Path(distribution.locate_file(".")).resolve()
        if self.dir not in base_dir.parents:
            # If the package is installed somewhere else (e.g on the site packages
            # of the real python interpreter), than that means this package is not
            # installed through us.
            return self.fail(
                "uninstall",
                target,
                "package is not installed through httpie plugins" " interface",
            )

        files = distribution.files
        if files is None:
            return self.fail("uninstall", target, "couldn't locate the package")

        # TODO: Consider handling failures here (e.g if it fails,
        # just revert the operation and leave the site-packages
        # in a proper shape).
        for file in files:
            with suppress(FileNotFoundError):
                os.unlink(distribution.locate_file(file))

        # Remove the (now empty) metadata directory as well.
        metadata_path = getattr(distribution, "_path", None)
        if (
            metadata_path
            and metadata_path.exists()
            and not any(metadata_path.iterdir())
        ):
            metadata_path.rmdir()

        self.env.stdout.write(f"Successfully uninstalled {target}\n")

    def uninstall(self, targets: List[str]) -> ExitStatus:
        """Uninstall every target; overall status is the OR of each result."""
        # Unfortunately uninstall doesn't work with custom pip schemes. See:
        # - https://github.com/pypa/pip/issues/5595
        # - https://github.com/pypa/pip/issues/4575
        # so we have to implement our own uninstalling logic. Which works
        # on top of the importlib_metadata.
        exit_code = ExitStatus.SUCCESS
        for target in targets:
            exit_code |= self._uninstall(target) or ExitStatus.SUCCESS
        return ExitStatus(exit_code)

    def list(self) -> None:
        """Print installed plugins with their versions and entry points."""
        from httpie.plugins.registry import plugin_manager

        known_plugins = defaultdict(list)

        for entry_point in plugin_manager.iter_entry_points(self.dir):
            ep_info = (entry_point.group, entry_point.name)
            ep_name = get_dist_name(entry_point) or entry_point.module
            known_plugins[ep_name].append(ep_info)

        for plugin, entry_points in known_plugins.items():
            self.env.stdout.write(plugin)

            version = importlib_metadata.version(plugin)
            if version is not None:
                self.env.stdout.write(f" ({version})")
            self.env.stdout.write("\n")

            for group, entry_point in sorted(entry_points):
                self.env.stdout.write(f" {entry_point} ({group})\n")

    def run(
        self,
        action: Optional[str],
        args: argparse.Namespace,
    ) -> ExitStatus:
        """Dispatch one plugins subcommand: install/upgrade/uninstall/list."""
        from httpie.plugins.manager import enable_plugins

        if action is None:
            parser.error(missing_subcommand("plugins"))

        with enable_plugins(self.dir):
            if action == "install":
                status = self.install(args.targets)
            elif action == "upgrade":
                status = self.upgrade(args.targets)
            elif action == "uninstall":
                status = self.uninstall(args.targets)
            elif action == "list":
                status = self.list()

        return status or ExitStatus.SUCCESS
def cli_plugins(env: Environment, args: argparse.Namespace) -> ExitStatus:
    """Entry point for the plugins manager command."""
    installer = PluginInstaller(env, debug=args.debug)
    # Older parser namespaces expose the action as `plugins_action`.
    if hasattr(args, "cli_plugins_action"):
        action = args.cli_plugins_action
    else:
        action = args.plugins_action
    return installer.run(action, args)
|
frontend | user | # This file is part of Supysonic.
# Supysonic is a Python implementation of the Subsonic server API.
#
# Copyright (C) 2013-2023 Alban 'spl0k' Féron
#
# Distributed under terms of the GNU AGPLv3 license.
import logging
from functools import wraps
from flask import (
current_app,
flash,
redirect,
render_template,
request,
session,
url_for,
)
from ..db import ClientPrefs, User
from ..lastfm import LastFm
from ..managers.user import UserManager
from . import admin_only, frontend
logger = logging.getLogger(__name__)
def me_or_uuid(f, arg="uid"):
    """Decorator resolving a user id path segment into a user object.

    The id may be the literal 'me' (the current user) or a UUID; only
    admins may look at users other than themselves.  The resolved user is
    passed to the wrapped view either as the `user` keyword or as a second
    positional argument.
    """

    @wraps(f)
    def wrapper(*args, **kwargs):
        uid = kwargs[arg] if kwargs else args[0]

        if uid == "me":
            user = request.user
        elif not request.user.admin:
            # Non-admins may only inspect themselves.
            return redirect(url_for("frontend.index"))
        else:
            try:
                user = UserManager.get(uid)
            except ValueError as e:
                flash(str(e), "error")
                return redirect(url_for("frontend.index"))
            except User.DoesNotExist:
                flash("No such user", "error")
                return redirect(url_for("frontend.index"))

        if kwargs:
            kwargs["user"] = user
        else:
            args = (uid, user)
        return f(*args, **kwargs)

    return wrapper
@frontend.route("/user")
@admin_only
def user_index():
    """List every registered user (admin only)."""
    all_users = User.select()
    return render_template("users.html", users=all_users)
@frontend.route("/user/<uid>")
@me_or_uuid
def user_profile(uid, user):
    """Render a user's profile page with their registered clients."""
    lastfm_api_key = current_app.config["LASTFM"]["api_key"]
    return render_template(
        "profile.html",
        user=user,
        api_key=lastfm_api_key,
        clients=user.clients,
    )
@frontend.route("/user/<uid>", methods=["POST"])
@me_or_uuid
def update_clients(uid, user):
    """Apply the per-client preference changes submitted from the profile form.

    Form field names look like "<client>_<option>"; anything that doesn't
    match that shape is ignored.
    """
    clients_opts = {}
    for key, value in request.form.items():
        if "_" not in key:
            continue
        parts = key.split("_")
        if len(parts) != 2:
            continue
        client, opt = parts
        if not client or not opt:
            continue
        clients_opts.setdefault(client, {})[opt] = value
    logger.debug(clients_opts)

    truthy_delete = ("on", "true", "checked", "selected", "1")
    for client, opts in clients_opts.items():
        prefs = user.clients.where(ClientPrefs.client_name == client).first()
        if prefs is None:
            continue

        if opts.get("delete") in truthy_delete:
            prefs.delete_instance()
            continue

        prefs.format = opts.get("format") or None
        prefs.bitrate = int(opts["bitrate"]) if opts.get("bitrate") else None
        prefs.save()

    flash("Clients preferences updated.")
    return user_profile(uid, user)
@frontend.route("/user/<uid>/changeusername")
@admin_only
def change_username_form(uid):
    """Display the username-change form for the given user (admin only)."""
    try:
        user = UserManager.get(uid)
    except (ValueError, User.DoesNotExist) as e:
        message = str(e) if isinstance(e, ValueError) else "No such user"
        flash(message, "error")
        return redirect(url_for("frontend.index"))
    return render_template("change_username.html", user=user)
@frontend.route("/user/<uid>/changeusername", methods=["POST"])
@admin_only
def change_username_post(uid):
    """Handle a username and admin-flag update submitted by an admin."""
    try:
        user = UserManager.get(uid)
    except (ValueError, User.DoesNotExist) as e:
        message = str(e) if isinstance(e, ValueError) else "No such user"
        flash(message, "error")
        return redirect(url_for("frontend.index"))

    username = request.form.get("user")
    if username in ("", None):
        flash("The username is required")
        return render_template("change_username.html", user=user)

    if user.name != username:
        # Reject the new name if another account already uses it.
        try:
            User.get(name=username)
            flash("This name is already taken")
            return render_template("change_username.html", user=user)
        except User.DoesNotExist:
            pass

    admin = request.form.get("admin") is not None

    if user.name != username or user.admin != admin:
        user.name = username
        user.admin = admin
        user.save()
        flash(f"User '{username}' updated.")
    else:
        flash(f"No changes for '{username}'.")

    return redirect(url_for("frontend.user_profile", uid=uid))
@frontend.route("/user/<uid>/changemail")
@me_or_uuid
def change_mail_form(uid, user):
    """Display the e-mail change form for the given user."""
    context = {"user": user}
    return render_template("change_mail.html", **context)
@frontend.route("/user/<uid>/changemail", methods=["POST"])
@me_or_uuid
def change_mail_post(uid, user):
    """Update the user's e-mail address.

    NOTE(review): no format validation is performed on the address.
    """
    mail = request.form.get("mail", "")
    user.mail = mail
    # Persist the change: sibling handlers (e.g. change_username_post) call
    # save() after mutating the user; without it the new address is lost as
    # soon as the request ends.
    user.save()
    return redirect(url_for("frontend.user_profile", uid=uid))
@frontend.route("/user/<uid>/changepass")
@me_or_uuid
def change_password_form(uid, user):
    """Display the password-change form for the given user."""
    context = {"user": user}
    return render_template("change_pass.html", **context)
@frontend.route("/user/<uid>/changepass", methods=["POST"])
@me_or_uuid
def change_password_post(uid, user):
    """Validate and apply a password change.

    Users changing their own password must supply the current one; admins
    resetting someone else's password don't.
    """
    error = False
    is_self = user.id == request.user.id

    current = None
    if is_self:
        current = request.form.get("current")
        if not current:
            flash("The current password is required")
            error = True

    new = request.form.get("new")
    confirm = request.form.get("confirm")
    if not new:
        flash("The new password is required")
        error = True
    if new != confirm:
        flash("The new password and its confirmation don't match")
        error = True

    if not error:
        try:
            if is_self:
                UserManager.change_password(user.id, current, new)
            else:
                UserManager.change_password2(user.name, new)
            flash("Password changed")
            return redirect(url_for("frontend.user_profile", uid=uid))
        except ValueError as e:
            flash(str(e), "error")

    return change_password_form(uid, user)
@frontend.route("/user/add")
@admin_only
def add_user_form():
    """Display the new-account form (admin only)."""
    template = "adduser.html"
    return render_template(template)
@frontend.route("/user/add", methods=["POST"])
@admin_only
def add_user_post():
    """Create a new account from the add-user form (admin only)."""
    error = False
    args = request.form.copy()
    # Pop credentials out; whatever remains is forwarded to UserManager.add.
    name = args.pop("user", None)
    passwd = args.pop("passwd", None)
    passwd_confirm = args.pop("passwd_confirm", None)

    if not name:
        flash("The name is required.")
        error = True
    if not passwd:
        flash("Please provide a password.")
        error = True
    elif passwd != passwd_confirm:
        flash("The passwords don't match.")
        error = True

    if not error:
        try:
            UserManager.add(name, passwd, **args)
            flash(f"User '{name}' successfully added")
            return redirect(url_for("frontend.user_index"))
        except ValueError as e:
            flash(str(e), "error")

    return add_user_form()
@frontend.route("/user/del/<uid>")
@admin_only
def del_user(uid):
    """Delete the user identified by *uid*; always return to the user list."""
    try:
        UserManager.delete(uid)
    except ValueError as e:
        flash(str(e), "error")
    except User.DoesNotExist:
        flash("No such user", "error")
    else:
        flash("Deleted user")
    return redirect(url_for("frontend.user_index"))
@frontend.route("/user/<uid>/lastfm/link")
@me_or_uuid
def lastfm_reg(uid, user):
    """Link *user*'s account to LastFM using the token from the OAuth callback."""
    token = request.args.get("token")
    if token:
        lfm = LastFm(current_app.config["LASTFM"], user)
        status, error = lfm.link_account(token)
        flash("Successfully linked LastFM account" if status else error)
    else:
        flash("Missing LastFM auth token")
    return redirect(url_for("frontend.user_profile", uid=uid))
@frontend.route("/user/<uid>/lastfm/unlink")
@me_or_uuid
def lastfm_unreg(uid, user):
    """Remove the LastFM link for *user* and return to their profile."""
    lfm = LastFm(current_app.config["LASTFM"], user)
    lfm.unlink_account()
    flash("Unlinked LastFM account")
    return redirect(url_for("frontend.user_profile", uid=uid))
@frontend.route("/user/login", methods=["GET", "POST"])
def login():
    """Authenticate a user, redirecting to ``returnUrl`` (or the index) on success.

    GET renders the form; POST validates credentials via UserManager.try_auth.
    Failed attempts are logged with the client IP and the form is re-rendered.
    """
    return_url = request.args.get("returnUrl") or url_for("frontend.index")
    if request.user:
        flash("Already logged in")
        return redirect(return_url)
    if request.method == "GET":
        return render_template("login.html")

    name = request.form.get("user")
    password = request.form.get("password")
    missing = False
    if not name:
        flash("Missing user name")
        missing = True
    if not password:
        flash("Missing password")
        missing = True

    if not missing:
        user = UserManager.try_auth(name, password)
        if user:
            logger.info("Logged user %s (IP: %s)", name, request.remote_addr)
            session["userid"] = str(user.id)
            flash("Logged in!")
            return redirect(return_url)
        logger.error(
            "Failed login attempt for user %s (IP: %s)", name, request.remote_addr
        )
        flash("Wrong username or password")
    return render_template("login.html")
@frontend.route("/user/logout")
def logout():
    """Clear the whole session (drops "userid") and go back to the login page."""
    session.clear()
    flash("Logged out!")
    return redirect(url_for("frontend.login"))
|
CTFd | config | import configparser
import os
from distutils.util import strtobool
from typing import Union
from sqlalchemy.engine.url import URL
class EnvInterpolation(configparser.BasicInterpolation):
    """Interpolation that falls back to a same-named environment variable
    (coerced via process_string_var) whenever an option's value is empty."""

    def before_get(self, parser, section, option, value, defaults):
        resolved = super().before_get(parser, section, option, value, defaults)
        if resolved != "":
            # Non-empty ini value wins; the environment is never consulted.
            return resolved
        env_value = os.getenv(option)
        return process_string_var(env_value) if env_value else resolved
# Truthy/falsy spellings accepted by the legacy distutils.util.strtobool();
# inlined here because distutils was removed in Python 3.12 (PEP 632).
_BOOL_STRINGS = {
    "y": True, "yes": True, "t": True, "true": True, "on": True, "1": True,
    "n": False, "no": False, "f": False, "false": False, "off": False, "0": False,
}


def process_string_var(value):
    """Coerce a raw string setting to None, int, float, bool, or leave it as-is.

    Empty string -> None; all digits -> int; digits with a single dot ->
    float; a recognized boolean word (yes/no, on/off, t/f, ...) -> bool;
    anything else is returned unchanged.
    """
    if value == "":
        return None
    if value.isdigit():
        return int(value)
    if value.replace(".", "", 1).isdigit():
        return float(value)
    # Equivalent of bool(strtobool(value)); unrecognized words fall through
    # unchanged instead of raising, matching the original except-ValueError path.
    return _BOOL_STRINGS.get(value.lower(), value)
def process_boolean_str(value):
    """Normalize a setting that may be a bool, None, or a boolean-ish string.

    Booleans pass through unchanged, None means "unset" (False), the empty
    string maps to None, and any other string is parsed like the legacy
    distutils.util.strtobool() (ValueError on unrecognized words). The
    strtobool call is inlined because distutils was removed in Python 3.12.
    """
    if type(value) is bool:
        return value
    if value is None:
        return False
    if value == "":
        return None
    lowered = value.lower()
    if lowered in ("y", "yes", "t", "true", "on", "1"):
        return True
    if lowered in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError(f"invalid truth value {lowered!r}")
def empty_str_cast(value, default=None):
    """Map the empty string to *default*; pass every other value through."""
    return default if value == "" else value
def gen_secret_key():
    """Return the persisted secret key, generating and persisting one if absent.

    The key lives in ``.ctfd_secret_key`` in the working directory. Both the
    read and the write are best-effort: a missing file triggers generation,
    and a read-only filesystem silently skips persistence.
    """
    key = None
    try:
        with open(".ctfd_secret_key", "rb") as secret_file:
            key = secret_file.read()
    except OSError:
        pass  # not written yet — generate below

    if not key:
        key = os.urandom(64)
        try:
            with open(".ctfd_secret_key", "wb") as secret_file:
                secret_file.write(key)
                secret_file.flush()
        except OSError:
            pass  # read-only filesystem: key is ephemeral for this run
    return key
# Parser whose empty values fall back to same-named environment variables.
config_ini = configparser.ConfigParser(interpolation=EnvInterpolation())
config_ini.optionxform = str  # Preserve option-name case (keys become case-SENSITIVE; the default lowercases them)
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "config.ini")
config_ini.read(path)  # a missing config.ini is tolerated; read() just skips it
# fmt: off
class ServerConfig(object):
    """Flask configuration assembled from config.ini (with env-var fallbacks).

    Attributes are computed at class-definition time from ``config_ini``;
    empty ini values have already been replaced from the environment by
    EnvInterpolation.
    """

    SECRET_KEY: str = empty_str_cast(config_ini["server"]["SECRET_KEY"]) \
        or gen_secret_key()

    DATABASE_URL: str = empty_str_cast(config_ini["server"]["DATABASE_URL"])
    if not DATABASE_URL:
        if empty_str_cast(config_ini["server"]["DATABASE_HOST"]) is not None:
            # construct URL from individual variables
            DATABASE_URL = str(URL(
                drivername=empty_str_cast(config_ini["server"]["DATABASE_PROTOCOL"]) or "mysql+pymysql",
                username=empty_str_cast(config_ini["server"]["DATABASE_USER"]) or "ctfd",
                password=empty_str_cast(config_ini["server"]["DATABASE_PASSWORD"]),
                host=empty_str_cast(config_ini["server"]["DATABASE_HOST"]),
                port=empty_str_cast(config_ini["server"]["DATABASE_PORT"]),
                database=empty_str_cast(config_ini["server"]["DATABASE_NAME"]) or "ctfd",
            ))
        else:
            # default to local SQLite DB
            DATABASE_URL = f"sqlite:///{os.path.dirname(os.path.abspath(__file__))}/ctfd.db"

    REDIS_URL: str = empty_str_cast(config_ini["server"]["REDIS_URL"])
    REDIS_HOST: str = empty_str_cast(config_ini["server"]["REDIS_HOST"])
    REDIS_PROTOCOL: str = empty_str_cast(config_ini["server"]["REDIS_PROTOCOL"]) or "redis"
    REDIS_USER: str = empty_str_cast(config_ini["server"]["REDIS_USER"])
    REDIS_PASSWORD: str = empty_str_cast(config_ini["server"]["REDIS_PASSWORD"])
    REDIS_PORT: int = empty_str_cast(config_ini["server"]["REDIS_PORT"]) or 6379
    REDIS_DB: int = empty_str_cast(config_ini["server"]["REDIS_DB"]) or 0

    # An explicit REDIS_URL wins; with neither URL nor HOST configured this
    # assigns None, which selects the filesystem cache below.
    if REDIS_URL or REDIS_HOST is None:
        CACHE_REDIS_URL = REDIS_URL
    else:
        # construct URL from individual variables
        CACHE_REDIS_URL = f"{REDIS_PROTOCOL}://"
        if REDIS_USER:
            CACHE_REDIS_URL += REDIS_USER
        if REDIS_PASSWORD:
            CACHE_REDIS_URL += f":{REDIS_PASSWORD}"
        CACHE_REDIS_URL += f"@{REDIS_HOST}:{REDIS_PORT}/{REDIS_DB}"

    SQLALCHEMY_DATABASE_URI = DATABASE_URL

    if CACHE_REDIS_URL:
        CACHE_TYPE: str = "redis"
    else:
        CACHE_TYPE: str = "filesystem"
        CACHE_DIR: str = os.path.join(
            os.path.dirname(__file__), os.pardir, ".data", "filesystem_cache"
        )
        # Override the threshold of cached values on the filesystem. The default is 500. Don't change unless you know what you're doing.
        CACHE_THRESHOLD: int = 0

    # === SECURITY ===
    SESSION_COOKIE_HTTPONLY: bool = config_ini["security"].getboolean("SESSION_COOKIE_HTTPONLY", fallback=True)

    SESSION_COOKIE_SAMESITE: str = empty_str_cast(config_ini["security"]["SESSION_COOKIE_SAMESITE"]) \
        or "Lax"

    PERMANENT_SESSION_LIFETIME: int = config_ini["security"].getint("PERMANENT_SESSION_LIFETIME") \
        or 604800

    """
    TRUSTED_PROXIES:
    Defines a set of regular expressions used for finding a user's IP address if the CTFd instance
    is behind a proxy. If you are running a CTF and users are on the same network as you, you may choose to remove
    some proxies from the list.

    CTFd only uses IP addresses for cursory tracking purposes. It is ill-advised to do anything complicated based
    solely on IP addresses unless you know what you are doing.
    """
    TRUSTED_PROXIES = [
        r"^127\.0\.0\.1$",
        # Remove the following proxies if you do not trust the local network
        # For example if you are running a CTF on your laptop and the teams are
        # all on the same network
        r"^::1$",
        r"^fc00:",
        r"^10\.",
        r"^172\.(1[6-9]|2[0-9]|3[0-1])\.",
        r"^192\.168\.",
    ]

    # === EMAIL ===
    MAILFROM_ADDR: str = config_ini["email"]["MAILFROM_ADDR"] \
        or "noreply@examplectf.com"
    MAIL_SERVER: str = empty_str_cast(config_ini["email"]["MAIL_SERVER"])
    MAIL_PORT: int = empty_str_cast(config_ini["email"]["MAIL_PORT"])
    MAIL_USEAUTH: bool = process_boolean_str(config_ini["email"]["MAIL_USEAUTH"])
    MAIL_USERNAME: str = empty_str_cast(config_ini["email"]["MAIL_USERNAME"])
    MAIL_PASSWORD: str = empty_str_cast(config_ini["email"]["MAIL_PASSWORD"])
    MAIL_TLS: bool = process_boolean_str(config_ini["email"]["MAIL_TLS"])
    MAIL_SSL: bool = process_boolean_str(config_ini["email"]["MAIL_SSL"])
    MAILSENDER_ADDR: str = empty_str_cast(config_ini["email"]["MAILSENDER_ADDR"])
    MAILGUN_API_KEY: str = empty_str_cast(config_ini["email"]["MAILGUN_API_KEY"])
    # BUG FIX: this previously read MAILGUN_API_KEY again (copy-paste error),
    # so the base URL was always set to the API key. .get() keeps configs
    # without the key working.
    MAILGUN_BASE_URL: str = empty_str_cast(config_ini["email"].get("MAILGUN_BASE_URL", ""))
    MAIL_PROVIDER: str = empty_str_cast(config_ini["email"].get("MAIL_PROVIDER"))

    # === LOGS ===
    LOG_FOLDER: str = empty_str_cast(config_ini["logs"]["LOG_FOLDER"]) \
        or os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs")

    # === UPLOADS ===
    UPLOAD_PROVIDER: str = empty_str_cast(config_ini["uploads"]["UPLOAD_PROVIDER"]) \
        or "filesystem"
    UPLOAD_FOLDER: str = empty_str_cast(config_ini["uploads"]["UPLOAD_FOLDER"]) \
        or os.path.join(os.path.dirname(os.path.abspath(__file__)), "uploads")
    if UPLOAD_PROVIDER == "s3":
        AWS_ACCESS_KEY_ID: str = empty_str_cast(config_ini["uploads"]["AWS_ACCESS_KEY_ID"])
        AWS_SECRET_ACCESS_KEY: str = empty_str_cast(config_ini["uploads"]["AWS_SECRET_ACCESS_KEY"])
        AWS_S3_BUCKET: str = empty_str_cast(config_ini["uploads"]["AWS_S3_BUCKET"])
        AWS_S3_ENDPOINT_URL: str = empty_str_cast(config_ini["uploads"]["AWS_S3_ENDPOINT_URL"])
        AWS_S3_REGION: str = empty_str_cast(config_ini["uploads"]["AWS_S3_REGION"])
        AWS_S3_CUSTOM_DOMAIN: str = empty_str_cast(config_ini["uploads"].get("AWS_S3_CUSTOM_DOMAIN", ""))
        AWS_S3_ADDRESSING_STYLE: str = empty_str_cast(config_ini["uploads"].get("AWS_S3_ADDRESSING_STYLE", ""), default="auto")

    # === OPTIONAL ===
    REVERSE_PROXY: Union[str, bool] = empty_str_cast(config_ini["optional"]["REVERSE_PROXY"], default=False)
    TEMPLATES_AUTO_RELOAD: bool = process_boolean_str(empty_str_cast(config_ini["optional"]["TEMPLATES_AUTO_RELOAD"], default=True))
    THEME_FALLBACK: bool = process_boolean_str(empty_str_cast(config_ini["optional"]["THEME_FALLBACK"], default=True))
    SQLALCHEMY_TRACK_MODIFICATIONS: bool = process_boolean_str(empty_str_cast(config_ini["optional"]["SQLALCHEMY_TRACK_MODIFICATIONS"], default=False))
    SWAGGER_UI: bool = process_boolean_str(empty_str_cast(config_ini["optional"]["SWAGGER_UI"], default=False))
    SWAGGER_UI_ENDPOINT: str = "/" if SWAGGER_UI else None
    UPDATE_CHECK: bool = process_boolean_str(empty_str_cast(config_ini["optional"]["UPDATE_CHECK"], default=True))
    APPLICATION_ROOT: str = empty_str_cast(config_ini["optional"]["APPLICATION_ROOT"], default="/")
    SERVER_SENT_EVENTS: bool = process_boolean_str(empty_str_cast(config_ini["optional"]["SERVER_SENT_EVENTS"], default=True))
    HTML_SANITIZATION: bool = process_boolean_str(empty_str_cast(config_ini["optional"]["HTML_SANITIZATION"], default=False))
    SAFE_MODE: bool = process_boolean_str(empty_str_cast(config_ini["optional"].get("SAFE_MODE", False), default=False))

    # Connection-pool tuning only applies to real database servers, not SQLite.
    if not DATABASE_URL.startswith("sqlite"):
        SQLALCHEMY_ENGINE_OPTIONS = {
            "max_overflow": int(empty_str_cast(config_ini["optional"]["SQLALCHEMY_MAX_OVERFLOW"], default=20)),  # noqa: E131
            "pool_pre_ping": empty_str_cast(config_ini["optional"]["SQLALCHEMY_POOL_PRE_PING"], default=True),  # noqa: E131
        }

    # === OAUTH ===
    OAUTH_CLIENT_ID: str = empty_str_cast(config_ini["oauth"]["OAUTH_CLIENT_ID"])
    OAUTH_CLIENT_SECRET: str = empty_str_cast(config_ini["oauth"]["OAUTH_CLIENT_SECRET"])
# fmt: on
class TestingConfig(ServerConfig):
    """Overrides for the test suite: fixed secret, in-memory SQLite, simple
    in-process cache, no Redis, and no update checks."""

    SECRET_KEY = "AAAAAAAAAAAAAAAAAAAA"
    PRESERVE_CONTEXT_ON_EXCEPTION = False
    TESTING = True
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.getenv("TESTING_DATABASE_URL") or "sqlite://"
    MAIL_SERVER = os.getenv("TESTING_MAIL_SERVER")
    SERVER_NAME = "localhost"
    UPDATE_CHECK = False
    REDIS_URL = None
    CACHE_TYPE = "simple"
    CACHE_THRESHOLD = 500
    SAFE_MODE = True
# Actually initialize ServerConfig to allow us to add more attributes on
Config = ServerConfig()
# Copy every key/value from the [extra] ini section onto the instance verbatim.
for k, v in config_ini.items("extra"):
    setattr(Config, k, v)
|
shared | patterns | from collections import OrderedDict
from itertools import chain
# noinspection PyPackageRequirements
import wx
from gui.utils.sorter import smartSort
from service.damagePattern import DamagePattern as DmgPatternSvc
# Shorthand for wx's gettext-style translation lookup.
_t = wx.GetTranslation
class DamagePatternMixin:
    """Shared helpers for GUI widgets that list damage patterns."""

    def _getPatterns(self):
        """Return built-in plus user damage patterns as one ordered list.

        User patterns are naturally sorted by full name; the special
        "Uniform" and "Selected Ammo" patterns are moved to the front
        (sorted() is stable, so the remaining relative order is kept).
        """
        sDP = DmgPatternSvc.getInstance()
        builtinPatterns = sDP.getBuiltinDamagePatternList()
        userPatterns = sorted(
            sDP.getUserDamagePatternList(), key=lambda p: smartSort(p.fullName)
        )
        # Order here is important: patterns with duplicate names from the latter will overwrite
        # patterns from the former
        patterns = sorted(
            chain(builtinPatterns, userPatterns),
            # False (special names) sorts before True, pushing them to the front.
            key=lambda p: p.fullName not in ["Uniform", "Selected Ammo"],
        )
        return patterns

    def _getItems(self, patterns):
        """Build a nested (leaves, children) tree from *patterns*.

        Every node is a pair of OrderedDicts: index 0 maps short names to the
        patterns stored at that level, index 1 maps category names to child
        nodes, created on demand while walking each pattern's hierarchy.
        Built-in names are passed through the translation function.
        """
        items = (OrderedDict(), OrderedDict())
        for pattern in patterns:
            container = items
            for categoryName in pattern.hierarchy:
                categoryName = _t(categoryName) if pattern.builtin else categoryName
                container = container[1].setdefault(
                    categoryName, (OrderedDict(), OrderedDict())
                )
            shortName = _t(pattern.shortName) if pattern.builtin else pattern.shortName
            container[0][shortName] = pattern
        return items
|
migrations | 0018_auto_20211017_2136 | # Generated by Django 3.2.7 on 2021-10-17 21:36
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration refreshing the choice lists on Settings fields."""

    dependencies = [
        ("babybuddy", "0017_settings_hide_age"),
    ]

    operations = [
        migrations.AlterField(
            model_name="settings",
            name="dashboard_hide_age",
            field=models.DurationField(
                blank=True,
                null=True,
                default=None,
                verbose_name="Hide data older than",
                help_text="This setting controls which data will be shown in the dashboard.",
                # (None, label) keeps the "no limit" option selectable.
                choices=[(None, "show all data")] + [
                    (datetime.timedelta(days=days), label)
                    for days, label in (
                        (1, "1 day"),
                        (2, "2 days"),
                        (3, "3 days"),
                        (7, "1 week"),
                        (28, "4 weeks"),
                    )
                ],
            ),
        ),
        migrations.AlterField(
            model_name="settings",
            name="dashboard_refresh_rate",
            field=models.DurationField(
                blank=True,
                null=True,
                default=datetime.timedelta(seconds=60),
                verbose_name="Refresh rate",
                help_text=(
                    "If supported by browser, the dashboard will only refresh "
                    "when visible, and also when receiving focus."
                ),
                choices=[(None, "disabled")] + [
                    (datetime.timedelta(seconds=seconds), f"{seconds // 60} min.")
                    for seconds in (60, 120, 180, 240, 300, 600, 900, 1800)
                ],
            ),
        ),
        migrations.AlterField(
            model_name="settings",
            name="language",
            field=models.CharField(
                default="en-US",
                max_length=255,
                verbose_name="Language",
                choices=[
                    ("en-US", "English (US)"),
                    ("en-GB", "English (UK)"),
                    ("nl", "Dutch"),
                    ("fr", "French"),
                    ("fi", "Finnish"),
                    ("de", "German"),
                    ("it", "Italian"),
                    ("pl", "Polish"),
                    ("pt", "Portuguese"),
                    ("es", "Spanish"),
                    ("sv", "Swedish"),
                    ("tr", "Turkish"),
                ],
            ),
        ),
        migrations.AlterField(
            model_name="settings",
            name="timezone",
            field=models.CharField(
                default="UTC",
                max_length=100,
                verbose_name="Timezone",
                # Identical (value, label) pairs for every supported zone name.
                choices=[
                    (zone, zone)
                    for zone in (
                        "Africa/Abidjan Africa/Accra Africa/Addis_Ababa Africa/Algiers Africa/Asmara "
                        "Africa/Bamako Africa/Bangui Africa/Banjul Africa/Bissau Africa/Blantyre "
                        "Africa/Brazzaville Africa/Bujumbura Africa/Cairo Africa/Casablanca Africa/Ceuta "
                        "Africa/Conakry Africa/Dakar Africa/Dar_es_Salaam Africa/Djibouti Africa/Douala "
                        "Africa/El_Aaiun Africa/Freetown Africa/Gaborone Africa/Harare Africa/Johannesburg "
                        "Africa/Juba Africa/Kampala Africa/Khartoum Africa/Kigali Africa/Kinshasa "
                        "Africa/Lagos Africa/Libreville Africa/Lome Africa/Luanda Africa/Lubumbashi "
                        "Africa/Lusaka Africa/Malabo Africa/Maputo Africa/Maseru Africa/Mbabane "
                        "Africa/Mogadishu Africa/Monrovia Africa/Nairobi Africa/Ndjamena Africa/Niamey "
                        "Africa/Nouakchott Africa/Ouagadougou Africa/Porto-Novo Africa/Sao_Tome "
                        "Africa/Tripoli Africa/Tunis Africa/Windhoek "
                        "America/Adak America/Anchorage America/Anguilla America/Antigua America/Araguaina "
                        "America/Argentina/Buenos_Aires America/Argentina/Catamarca America/Argentina/Cordoba "
                        "America/Argentina/Jujuy America/Argentina/La_Rioja America/Argentina/Mendoza "
                        "America/Argentina/Rio_Gallegos America/Argentina/Salta America/Argentina/San_Juan "
                        "America/Argentina/San_Luis America/Argentina/Tucuman America/Argentina/Ushuaia "
                        "America/Aruba America/Asuncion America/Atikokan America/Bahia America/Bahia_Banderas "
                        "America/Barbados America/Belem America/Belize America/Blanc-Sablon America/Boa_Vista "
                        "America/Bogota America/Boise America/Cambridge_Bay America/Campo_Grande America/Cancun "
                        "America/Caracas America/Cayenne America/Cayman America/Chicago America/Chihuahua "
                        "America/Costa_Rica America/Creston America/Cuiaba America/Curacao America/Danmarkshavn "
                        "America/Dawson America/Dawson_Creek America/Denver America/Detroit America/Dominica "
                        "America/Edmonton America/Eirunepe America/El_Salvador America/Fort_Nelson "
                        "America/Fortaleza America/Glace_Bay America/Goose_Bay America/Grand_Turk "
                        "America/Grenada America/Guadeloupe America/Guatemala America/Guayaquil America/Guyana "
                        "America/Halifax America/Havana America/Hermosillo America/Indiana/Indianapolis "
                        "America/Indiana/Knox America/Indiana/Marengo America/Indiana/Petersburg "
                        "America/Indiana/Tell_City America/Indiana/Vevay America/Indiana/Vincennes "
                        "America/Indiana/Winamac America/Inuvik America/Iqaluit America/Jamaica America/Juneau "
                        "America/Kentucky/Louisville America/Kentucky/Monticello America/Kralendijk "
                        "America/La_Paz America/Lima America/Los_Angeles America/Lower_Princes America/Maceio "
                        "America/Managua America/Manaus America/Marigot America/Martinique America/Matamoros "
                        "America/Mazatlan America/Menominee America/Merida America/Metlakatla "
                        "America/Mexico_City America/Miquelon America/Moncton America/Monterrey "
                        "America/Montevideo America/Montserrat America/Nassau America/New_York "
                        "America/Nipigon America/Nome America/Noronha America/North_Dakota/Beulah "
                        "America/North_Dakota/Center America/North_Dakota/New_Salem America/Nuuk "
                        "America/Ojinaga America/Panama America/Pangnirtung America/Paramaribo America/Phoenix "
                        "America/Port-au-Prince America/Port_of_Spain America/Porto_Velho America/Puerto_Rico "
                        "America/Punta_Arenas America/Rainy_River America/Rankin_Inlet America/Recife "
                        "America/Regina America/Resolute America/Rio_Branco America/Santarem America/Santiago "
                        "America/Santo_Domingo America/Sao_Paulo America/Scoresbysund America/Sitka "
                        "America/St_Barthelemy America/St_Johns America/St_Kitts America/St_Lucia "
                        "America/St_Thomas America/St_Vincent America/Swift_Current America/Tegucigalpa "
                        "America/Thule America/Thunder_Bay America/Tijuana America/Toronto America/Tortola "
                        "America/Vancouver America/Whitehorse America/Winnipeg America/Yakutat "
                        "America/Yellowknife "
                        "Antarctica/Casey Antarctica/Davis Antarctica/DumontDUrville Antarctica/Macquarie "
                        "Antarctica/Mawson Antarctica/McMurdo Antarctica/Palmer Antarctica/Rothera "
                        "Antarctica/Syowa Antarctica/Troll Antarctica/Vostok Arctic/Longyearbyen "
                        "Asia/Aden Asia/Almaty Asia/Amman Asia/Anadyr Asia/Aqtau Asia/Aqtobe Asia/Ashgabat "
                        "Asia/Atyrau Asia/Baghdad Asia/Bahrain Asia/Baku Asia/Bangkok Asia/Barnaul Asia/Beirut "
                        "Asia/Bishkek Asia/Brunei Asia/Chita Asia/Choibalsan Asia/Colombo Asia/Damascus "
                        "Asia/Dhaka Asia/Dili Asia/Dubai Asia/Dushanbe Asia/Famagusta Asia/Gaza Asia/Hebron "
                        "Asia/Ho_Chi_Minh Asia/Hong_Kong Asia/Hovd Asia/Irkutsk Asia/Jakarta Asia/Jayapura "
                        "Asia/Jerusalem Asia/Kabul Asia/Kamchatka Asia/Karachi Asia/Kathmandu Asia/Khandyga "
                        "Asia/Kolkata Asia/Krasnoyarsk Asia/Kuala_Lumpur Asia/Kuching Asia/Kuwait Asia/Macau "
                        "Asia/Magadan Asia/Makassar Asia/Manila Asia/Muscat Asia/Nicosia Asia/Novokuznetsk "
                        "Asia/Novosibirsk Asia/Omsk Asia/Oral Asia/Phnom_Penh Asia/Pontianak Asia/Pyongyang "
                        "Asia/Qatar Asia/Qostanay Asia/Qyzylorda Asia/Riyadh Asia/Sakhalin Asia/Samarkand "
                        "Asia/Seoul Asia/Shanghai Asia/Singapore Asia/Srednekolymsk Asia/Taipei Asia/Tashkent "
                        "Asia/Tbilisi Asia/Tehran Asia/Thimphu Asia/Tokyo Asia/Tomsk Asia/Ulaanbaatar "
                        "Asia/Urumqi Asia/Ust-Nera Asia/Vientiane Asia/Vladivostok Asia/Yakutsk Asia/Yangon "
                        "Asia/Yekaterinburg Asia/Yerevan "
                        "Atlantic/Azores Atlantic/Bermuda Atlantic/Canary Atlantic/Cape_Verde Atlantic/Faroe "
                        "Atlantic/Madeira Atlantic/Reykjavik Atlantic/South_Georgia Atlantic/St_Helena "
                        "Atlantic/Stanley "
                        "Australia/Adelaide Australia/Brisbane Australia/Broken_Hill Australia/Darwin "
                        "Australia/Eucla Australia/Hobart Australia/Lindeman Australia/Lord_Howe "
                        "Australia/Melbourne Australia/Perth Australia/Sydney "
                        "Canada/Atlantic Canada/Central Canada/Eastern Canada/Mountain Canada/Newfoundland "
                        "Canada/Pacific "
                        "Europe/Amsterdam Europe/Andorra Europe/Astrakhan Europe/Athens Europe/Belgrade "
                        "Europe/Berlin Europe/Bratislava Europe/Brussels Europe/Bucharest Europe/Budapest "
                        "Europe/Busingen Europe/Chisinau Europe/Copenhagen Europe/Dublin Europe/Gibraltar "
                        "Europe/Guernsey Europe/Helsinki Europe/Isle_of_Man Europe/Istanbul Europe/Jersey "
                        "Europe/Kaliningrad Europe/Kiev Europe/Kirov Europe/Lisbon Europe/Ljubljana "
                        "Europe/London Europe/Luxembourg Europe/Madrid Europe/Malta Europe/Mariehamn "
                        "Europe/Minsk Europe/Monaco Europe/Moscow Europe/Oslo Europe/Paris Europe/Podgorica "
                        "Europe/Prague Europe/Riga Europe/Rome Europe/Samara Europe/San_Marino Europe/Sarajevo "
                        "Europe/Saratov Europe/Simferopol Europe/Skopje Europe/Sofia Europe/Stockholm "
                        "Europe/Tallinn Europe/Tirane Europe/Ulyanovsk Europe/Uzhgorod Europe/Vaduz "
                        "Europe/Vatican Europe/Vienna Europe/Vilnius Europe/Volgograd Europe/Warsaw "
                        "Europe/Zagreb Europe/Zaporozhye Europe/Zurich GMT "
                        "Indian/Antananarivo Indian/Chagos Indian/Christmas Indian/Cocos Indian/Comoro "
                        "Indian/Kerguelen Indian/Mahe Indian/Maldives Indian/Mauritius Indian/Mayotte "
                        "Indian/Reunion "
                        "Pacific/Apia Pacific/Auckland Pacific/Bougainville Pacific/Chatham Pacific/Chuuk "
                        "Pacific/Easter Pacific/Efate Pacific/Enderbury Pacific/Fakaofo Pacific/Fiji "
                        "Pacific/Funafuti Pacific/Galapagos Pacific/Gambier Pacific/Guadalcanal Pacific/Guam "
                        "Pacific/Honolulu Pacific/Kiritimati Pacific/Kosrae Pacific/Kwajalein Pacific/Majuro "
                        "Pacific/Marquesas Pacific/Midway Pacific/Nauru Pacific/Niue Pacific/Norfolk "
                        "Pacific/Noumea Pacific/Pago_Pago Pacific/Palau Pacific/Pitcairn Pacific/Pohnpei "
                        "Pacific/Port_Moresby Pacific/Rarotonga Pacific/Saipan Pacific/Tahiti Pacific/Tarawa "
                        "Pacific/Tongatapu Pacific/Wake Pacific/Wallis "
                        "US/Alaska US/Arizona US/Central US/Eastern US/Hawaii US/Mountain US/Pacific UTC"
                    ).split()
                ],
            ),
        ),
    ]
|
extractor | tiktok | # coding: utf-8
from __future__ import unicode_literals
from ..utils import (
ExtractorError,
compat_str,
float_or_none,
int_or_none,
str_or_none,
try_get,
url_or_none,
)
from .common import InfoExtractor
class TikTokBaseIE(InfoExtractor):
    def _extract_video(self, data, video_id=None):
        """Build a youtube-dl info dict from TikTok's item JSON structure."""
        video = data["video"]

        def _stat(key):
            # Counters live under data["stats"] as e.g. "playCount".
            return int_or_none(try_get(data, lambda x: x["stats"]["%sCount" % key]))

        seen_urls = set()
        formats = []
        for fmt_name in ("download", "play"):
            fmt_url = url_or_none(video.get("%sAddr" % fmt_name))
            # Skip missing entries and duplicates ("download" and "play"
            # frequently point at the same file).
            if not fmt_url or fmt_url in seen_urls:
                continue
            seen_urls.add(fmt_url)
            formats.append(
                {
                    "url": fmt_url,
                    "ext": "mp4",
                    "height": int_or_none(try_get(data, lambda x: video["height"])),
                    "width": int_or_none(try_get(data, lambda x: video["width"])),
                    "http_headers": {
                        "Referer": "https://www.tiktok.com/",
                    },
                }
            )
        self._sort_formats(formats)

        uploader = try_get(data, lambda x: x["author"]["nickname"], compat_str)
        aweme_id = data.get("id") or video_id
        return {
            "id": aweme_id,
            # TikTok items carry no real title; use the uploader name.
            "title": uploader or aweme_id,
            "description": str_or_none(try_get(data, lambda x: x["desc"])),
            "thumbnail": url_or_none(video.get("cover")),
            "duration": float_or_none(video.get("duration")),
            "uploader": uploader,
            "uploader_id": try_get(data, lambda x: x["author"]["id"], compat_str),
            "timestamp": int_or_none(data.get("createTime")),
            "view_count": _stat("play"),
            "like_count": _stat("digg"),
            "comment_count": _stat("comment"),
            "repost_count": _stat("share"),
            "formats": formats,
        }
class TikTokIE(TikTokBaseIE):
    """Extracts a single TikTok video from its canonical page URL."""

    _VALID_URL = r"https?://(?:www\.)?tiktok\.com/@[^/]+/video/(?P<id>\d+)"
    _TESTS = [
        {
            "url": "https://www.tiktok.com/@zureeal/video/6606727368545406213",
            "md5": "163ceff303bb52de60e6887fe399e6cd",
            "info_dict": {
                "id": "6606727368545406213",
                "ext": "mp4",
                "title": "Zureeal",
                "description": "#bowsette#mario#cosplay#uk#lgbt#gaming#asian#bowsettecosplay",
                "thumbnail": r"re:^https?://.*",
                "duration": 15,
                "uploader": "Zureeal",
                "uploader_id": "188294915489964032",
                "timestamp": 1538248586,
                "upload_date": "20180929",
                "view_count": int,
                "like_count": int,
                "comment_count": int,
                "repost_count": int,
            },
        }
    ]

    def _real_initialize(self):
        # Hitting the homepage first makes TikTok hand out the session
        # cookies later requests depend on.
        self._request_webpage(
            "https://www.tiktok.com/", None, note="Setting up session"
        )

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The page data lives in the Next.js __NEXT_DATA__ script tag.
        next_data = self._parse_json(
            self._search_regex(
                r'<script[^>]+\bid=["\']__NEXT_DATA__[^>]+>\s*({.+?})\s*</script',
                webpage,
                "data",
            ),
            video_id,
        )
        page_props = next_data["props"]["pageProps"]
        data = try_get(page_props, lambda x: x["itemInfo"]["itemStruct"], dict)
        if not data and page_props.get("statusCode") == 10216:
            raise ExtractorError("This video is private", expected=True)
        return self._extract_video(data, video_id)
class TikTokUserIE(TikTokBaseIE):
    """Extracts the list of videos on a TikTok user profile page."""

    _VALID_URL = r"https://(?:www\.)?tiktok\.com/@(?P<id>[^/?#&]+)"
    _TESTS = [
        {
            "url": "https://www.tiktok.com/@zureeal",
            "info_dict": {
                "id": "188294915489964032",
            },
            "playlist_mincount": 24,
        }
    ]
    _WORKING = False

    @classmethod
    def suitable(cls, url):
        # Single-video URLs belong to TikTokIE; only bare profile URLs
        # are handled here.
        if TikTokIE.suitable(url):
            return False
        return super(TikTokUserIE, cls).suitable(url)

    def _real_extract(self, url):
        user_id = self._match_id(url)
        data = self._download_json(
            "https://m.tiktok.com/h5/share/usr/list/%s/" % user_id,
            user_id,
            query={"_signature": "_"},
        )
        entries = []
        for aweme in data["aweme_list"]:
            try:
                entry = self._extract_video(aweme)
            except ExtractorError:
                # Skip items whose JSON lacks the expected video data.
                continue
            entry["extractor_key"] = TikTokIE.ie_key()
            entries.append(entry)
        return self.playlist_result(entries, user_id)
|
extractor | watchbox | # coding: utf-8
from __future__ import unicode_literals
import re
from ..compat import compat_str
from ..utils import (
int_or_none,
js_to_json,
strip_or_none,
try_get,
unescapeHTML,
unified_timestamp,
)
from .common import InfoExtractor
class WatchBoxIE(InfoExtractor):
    """Extractor for watchbox.de movies ("filme") and series episodes
    ("serien")."""

    _VALID_URL = r"https?://(?:www\.)?watchbox\.de/(?P<kind>serien|filme)/(?:[^/]+/)*[^/]+-(?P<id>\d+)"
    _TESTS = [
        {
            # film
            "url": "https://www.watchbox.de/filme/free-jimmy-12325.html",
            "info_dict": {
                "id": "341368",
                "ext": "mp4",
                "title": "Free Jimmy",
                "description": "md5:bcd8bafbbf9dc0ef98063d344d7cc5f6",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "duration": 4890,
                "age_limit": 16,
                "release_year": 2009,
            },
            "params": {
                "format": "bestvideo",
                "skip_download": True,
            },
            "expected_warnings": ["Failed to download m3u8 information"],
        },
        {
            # episode
            "url": "https://www.watchbox.de/serien/ugly-americans-12231/staffel-1/date-in-der-hoelle-328286.html",
            "info_dict": {
                "id": "328286",
                "ext": "mp4",
                "title": "S01 E01 - Date in der Hölle",
                "description": "md5:2f31c74a8186899f33cb5114491dae2b",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "duration": 1291,
                "age_limit": 12,
                "release_year": 2010,
                "series": "Ugly Americans",
                "season_number": 1,
                "episode": "Date in der Hölle",
                "episode_number": 1,
            },
            "params": {
                "format": "bestvideo",
                "skip_download": True,
            },
            "expected_warnings": ["Failed to download m3u8 information"],
        },
        {
            "url": "https://www.watchbox.de/serien/ugly-americans-12231/staffel-2/der-ring-des-powers-328270",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        kind, video_id = mobj.group("kind", "id")

        webpage = self._download_webpage(url, video_id)

        # Newer pages embed the player config HTML-escaped in a
        # data-player-conf attribute; older pages assign a playerConf
        # JS object.  Try the attribute first, then fall back.
        player_config = self._parse_json(
            self._search_regex(
                r'data-player-conf=(["\'])(?P<data>{.+?})\1',
                webpage,
                "player config",
                default="{}",
                group="data",
            ),
            video_id,
            transform_source=unescapeHTML,
            fatal=False,
        )
        if not player_config:
            player_config = (
                self._parse_json(
                    self._search_regex(
                        r"playerConf\s*=\s*({.+?})\s*;",
                        webpage,
                        "player config",
                        default="{}",
                    ),
                    video_id,
                    transform_source=js_to_json,
                    fatal=False,
                )
                or {}
            )

        source = player_config.get("source") or {}

        # The id in the page URL and the player's API id can differ;
        # prefer the player's own videoId.
        video_id = compat_str(source.get("videoId") or video_id)

        # Metadata/stream API; best effort — the player config alone may
        # still be enough to extract.
        devapi = self._download_json(
            "http://api.watchbox.de/devapi/id/%s" % video_id,
            video_id,
            query={
                "format": "json",
                "apikey": "hbbtv",
            },
            fatal=False,
        )

        item = try_get(devapi, lambda x: x["items"][0], dict) or {}

        # Deliberately fatal on source["title"] as the last resort.
        title = (
            item.get("title")
            or try_get(item, lambda x: x["movie"]["headline_movie"], compat_str)
            or source["title"]
        )

        # Collect HLS, DASH and plain MP4 variants; API fields take
        # precedence over the player config.
        formats = []
        hls_url = item.get("media_videourl_hls") or source.get("hls")
        if hls_url:
            formats.extend(
                self._extract_m3u8_formats(
                    hls_url,
                    video_id,
                    "mp4",
                    entry_protocol="m3u8_native",
                    m3u8_id="hls",
                    fatal=False,
                )
            )
        dash_url = item.get("media_videourl_wv") or source.get("dash")
        if dash_url:
            formats.extend(
                self._extract_mpd_formats(
                    dash_url, video_id, mpd_id="dash", fatal=False
                )
            )
        mp4_url = item.get("media_videourl")
        if mp4_url:
            formats.append(
                {
                    "url": mp4_url,
                    "format_id": "mp4",
                    "width": int_or_none(item.get("width")),
                    "height": int_or_none(item.get("height")),
                    "tbr": int_or_none(item.get("bitrate")),
                }
            )
        self._sort_formats(formats)

        description = strip_or_none(item.get("descr"))
        thumbnail = (
            item.get("media_content_thumbnail_large")
            or source.get("poster")
            or item.get("media_thumbnail")
        )
        duration = int_or_none(item.get("media_length") or source.get("length"))
        timestamp = unified_timestamp(item.get("pubDate"))
        view_count = int_or_none(item.get("media_views"))
        age_limit = int_or_none(try_get(item, lambda x: x["movie"]["fsk"]))
        release_year = int_or_none(try_get(item, lambda x: x["movie"]["rel_year"]))

        info = {
            "id": video_id,
            "title": title,
            "description": description,
            "thumbnail": thumbnail,
            "duration": duration,
            "timestamp": timestamp,
            "view_count": view_count,
            "age_limit": age_limit,
            "release_year": release_year,
            "formats": formats,
        }

        if kind.lower() == "serien":
            # Episode metadata: numbers come from the "S01 E01 - ..."
            # title prefix, with the season also derivable from the
            # "/staffel-N/" URL segment.
            series = try_get(
                item, lambda x: x["special"]["title"], compat_str
            ) or source.get("format")
            season_number = int_or_none(
                self._search_regex(
                    r"^S(\d{1,2})\s*E\d{1,2}", title, "season number", default=None
                )
                or self._search_regex(
                    r"/staffel-(\d+)/", url, "season number", default=None
                )
            )
            episode = source.get("title")
            episode_number = int_or_none(
                self._search_regex(
                    r"^S\d{1,2}\s*E(\d{1,2})", title, "episode number", default=None
                )
            )
            info.update(
                {
                    "series": series,
                    "season_number": season_number,
                    "episode": episode,
                    "episode_number": episode_number,
                }
            )

        return info
|
migrations | 0078_auto_20200731_1323 | # Generated by Django 3.0.7 on 2020-07-31 13:23
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (2020-07-31): adds creation metadata and a
    "saved" flag to DashboardItem, and makes its dashboard FK optional."""

    dependencies = [
        ("posthog", "0077_cohortpeople_id_to_bigautofield"),
    ]

    operations = [
        # Creation timestamp; nullable so pre-existing rows stay valid.
        migrations.AddField(
            model_name="dashboarditem",
            name="created_at",
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
        # Creator reference; SET_NULL keeps items when the user is deleted.
        migrations.AddField(
            model_name="dashboarditem",
            name="created_by",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        # Whether the item has been explicitly saved by a user.
        migrations.AddField(
            model_name="dashboarditem",
            name="saved",
            field=models.BooleanField(default=False),
        ),
        # Items may now exist without a dashboard (blank/null allowed).
        migrations.AlterField(
            model_name="dashboarditem",
            name="dashboard",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="items",
                to="posthog.Dashboard",
            ),
        ),
    ]
|
qltk | exfalsowindow | # Copyright 2004-2005 Joe Wreschnig, Michael Urman, Iñigo Serna
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
from gi.repository import GObject, Gtk, Pango
from quodlibet import _, app, config, formats, ngettext, qltk
from quodlibet.formats import AudioFileError
from quodlibet.plugins import PluginManager
from quodlibet.qltk import Icons
from quodlibet.qltk.about import AboutDialog
from quodlibet.qltk.appwindow import AppWindow
from quodlibet.qltk.delete import TrashMenuItem, trash_files
from quodlibet.qltk.edittags import EditTags
from quodlibet.qltk.filesel import MainFileSelector
from quodlibet.qltk.menubutton import MenuButton
from quodlibet.qltk.msg import CancelRevertSave
from quodlibet.qltk.pluginwin import PluginWindow
from quodlibet.qltk.prefs import PreferencesWindow as QLPreferencesWindow
from quodlibet.qltk.renamefiles import RenameFiles
from quodlibet.qltk.songsmenu import SongsMenuPluginHandler
from quodlibet.qltk.tagsfrompath import TagsFromPath
from quodlibet.qltk.tracknumbers import TrackNumbers
from quodlibet.qltk.window import PersistentWindowMixin, Window
from quodlibet.qltk.x import (
Align,
ConfigRHPaned,
MenuItem,
SeparatorMenuItem,
SymbolicIconImage,
)
from quodlibet.update import UpdateDialog
from quodlibet.util import connect_destroy, connect_obj, format_int_locale
from quodlibet.util.i18n import numeric_phrase
from quodlibet.util.path import mtime, normalize_path
from senf import fsnative
class ExFalsoWindow(Window, PersistentWindowMixin, AppWindow):
    """Ex Falso's main window: a file selector on the left and a notebook
    of tag-editing pages (EditTags, TagsFromPath, RenameFiles,
    TrackNumbers) on the right."""

    __gsignals__ = {
        # Emitted with the list of loaded songs for the current selection.
        "changed": (GObject.SignalFlags.RUN_LAST, None, (object,)),
    }

    # Shared songs-menu plugin handler used by the context menu.
    pm = SongsMenuPluginHandler()

    @classmethod
    def init_plugins(cls):
        PluginManager.instance.register_handler(cls.pm)

    def __init__(self, library, dir=None):
        super().__init__(dialog=False)
        self.set_title("Ex Falso")
        self.set_default_size(750, 475)
        self.enable_window_tracking("exfalso")
        self.__library = library
        # Left/right paned with a persisted split position.
        hp = ConfigRHPaned("memory", "exfalso_paned_position", 1.0)
        hp.set_border_width(0)
        hp.set_position(250)
        hp.show()
        self.add(hp)
        vb = Gtk.VBox()
        bbox = Gtk.HBox(spacing=6)

        def prefs_cb(*args):
            window = PreferencesWindow(self)
            window.show()

        def plugin_window_cb(*args):
            window = PluginWindow(self)
            window.show()

        def about_cb(*args):
            about = AboutDialog(self, app)
            about.run()
            about.destroy()

        def update_cb(*args):
            d = UpdateDialog(self)
            d.run()
            d.destroy()

        # Menu behind the gear button: about / updates / plugins / prefs.
        menu = Gtk.Menu()
        about_item = MenuItem(_("_About"), Icons.HELP_ABOUT)
        about_item.connect("activate", about_cb)
        menu.append(about_item)
        check_item = MenuItem(_("_Check for Updates…"), Icons.NETWORK_SERVER)
        check_item.connect("activate", update_cb)
        menu.append(check_item)
        menu.append(SeparatorMenuItem())
        plugin_item = MenuItem(_("_Plugins"), Icons.SYSTEM_RUN)
        plugin_item.connect("activate", plugin_window_cb)
        menu.append(plugin_item)
        pref_item = MenuItem(_("_Preferences"), Icons.PREFERENCES_SYSTEM)
        pref_item.connect("activate", prefs_cb)
        menu.append(pref_item)
        menu.show_all()
        menu_button = MenuButton(
            SymbolicIconImage(Icons.EMBLEM_SYSTEM, Gtk.IconSize.BUTTON),
            arrow=True,
            down=False,
        )
        menu_button.set_menu(menu)
        bbox.pack_start(menu_button, False, True, 0)
        # Right-aligned label showing the song count of the selection.
        l = Gtk.Label()
        l.set_alignment(1.0, 0.5)
        l.set_ellipsize(Pango.EllipsizeMode.END)
        bbox.pack_start(l, True, True, 0)
        self._fs = fs = MainFileSelector()
        vb.pack_start(fs, True, True, 0)
        vb.pack_start(Align(bbox, border=6), False, True, 0)
        vb.show_all()
        hp.pack1(vb, resize=True, shrink=False)
        # One notebook page per tag-editing tool.
        nb = qltk.Notebook()
        nb.props.scrollable = True
        nb.show()
        for Page in [EditTags, TagsFromPath, RenameFiles, TrackNumbers]:
            page = Page(self, self.__library)
            page.show()
            nb.append_page(page)
        hp.pack2(nb, resize=True, shrink=False)
        fs.connect("changed", self.__changed, l)
        if dir:
            fs.go_to(dir)
        connect_destroy(self.__library, "changed", self.__library_changed, fs)
        # Save button of the page with pending edits (None = nothing to save).
        self.__save = None
        connect_obj(self, "changed", self.set_pending, None)
        # Guard both selector panes so unsaved edits are not silently lost
        # when the selection is about to change.
        for c in fs.get_children():
            c.get_child().connect(
                "button-press-event", self.__pre_selection_changed, fs, nb
            )
            c.get_child().connect("focus", self.__pre_selection_changed, fs, nb)
        fs.get_children()[1].get_child().connect("popup-menu", self.__popup_menu, fs)
        self.emit("changed", [])
        self.get_child().show()
        # Ctrl+Q closes the window.
        self.__ag = Gtk.AccelGroup()
        key, mod = Gtk.accelerator_parse("<Primary>Q")
        self.__ag.connect(key, mod, 0, lambda *x: self.destroy())
        self.add_accel_group(self.__ag)
        # GtkosxApplication assumes the menu bar is mapped, so add
        # it but don't show it.
        self._dummy_osx_menu_bar = Gtk.MenuBar()
        vb.pack_start(self._dummy_osx_menu_bar, False, False, 0)

    def __library_changed(self, library, songs, fs):
        # Reflect external library changes in the file list.
        fs.rescan()

    def set_as_osx_window(self, osx_app):
        osx_app.set_menu_bar(self._dummy_osx_menu_bar)

    def get_is_persistent(self):
        return False

    def open_file(self, filename):
        """Navigate the file selector to *filename* (must be a directory)."""
        assert isinstance(filename, fsnative)
        if not os.path.isdir(filename):
            return False
        self._fs.go_to(filename)

    def set_pending(self, button, *excess):
        self.__save = button

    def __pre_selection_changed(self, view, event, fs, nb):
        # Confirm before abandoning unsaved edits; returning True cancels
        # the pending selection change.
        if self.__save:
            resp = CancelRevertSave(self).run()
            if resp == Gtk.ResponseType.YES:
                self.__save.clicked()
            elif resp == Gtk.ResponseType.NO:
                fs.rescan()
            else:
                nb.grab_focus()
                return True  # cancel or closed

    def __popup_menu(self, view, fs):
        # get all songs for the selection
        filenames = [
            normalize_path(f, canonicalise=True) for f in fs.get_selected_paths()
        ]
        maybe_songs = [self.__library.get(f) for f in filenames]
        songs = [s for s in maybe_songs if s]
        if songs:
            menu = self.pm.Menu(self.__library, songs)
            if menu is None:
                menu = Gtk.Menu()
            else:
                menu.prepend(SeparatorMenuItem())
        else:
            menu = Gtk.Menu()
        # Trash entry works on raw paths, even for files not in the library.
        b = TrashMenuItem()
        b.connect("activate", self.__delete, filenames, fs)
        menu.prepend(b)

        def selection_done_cb(menu):
            menu.destroy()

        menu.connect("selection-done", selection_done_cb)
        menu.show_all()
        return view.popup_menu(menu, 0, Gtk.get_current_event_time())

    def __delete(self, item, paths, fs):
        trash_files(self, paths)
        fs.rescan()

    def __changed(self, selector, selection, label):
        model, rows = selection.get_selected_rows()
        files = []
        # With fewer than two rows selected, the label shows the whole
        # directory's count; otherwise the selection count.
        if len(rows) < 2:
            count = len(model or [])
        else:
            count = len(rows)
        label.set_text(numeric_phrase("%d song", "%d songs", count))
        for row in rows:
            filename = model[row][0]
            if not os.path.exists(filename):
                pass
            elif filename in self.__library:
                song = self.__library[filename]
                # Reload if the file changed on disk since it was loaded
                # (1s slack for filesystems with coarse mtimes).
                if song("~#mtime") + 1.0 < mtime(filename):
                    try:
                        song.reload()
                    except AudioFileError:
                        pass
                files.append(song)
            else:
                files.append(formats.MusicFile(filename))
        # MusicFile may return None for unsupported files.
        files = list(filter(None, files))
        if len(files) == 0:
            self.set_title("Ex Falso")
        elif len(files) == 1:
            self.set_title("%s - Ex Falso" % files[0].comma("title"))
        else:
            params = {
                "title": files[0].comma("title"),
                "count": format_int_locale(len(files) - 1),
            }
            self.set_title(
                "%s - Ex Falso"
                % (
                    ngettext(
                        "%(title)s and %(count)s more",
                        "%(title)s and %(count)s more",
                        len(files) - 1,
                    )
                    % params
                )
            )
        self.__library.add(files)
        self.emit("changed", files)
class PreferencesWindow(QLPreferencesWindow):
    """Ex Falso's preferences window: the Quod Libet one restricted to
    the relevant pages (``all_pages=False``)."""

    def __init__(self, parent):
        # Singleton guard (provided by the base window class): bail out
        # before re-initialising if an instance already exists.
        if self.is_not_unique():
            return
        super().__init__(parent, all_pages=False)
        # Seems nicer when there's only one page
        self.set_resizable(True)
        self.set_title(_("Ex Falso Preferences"))
        self.get_child().show_all()

    def __destroy(self):
        # NOTE(review): not connected to any signal within this file —
        # presumably intended for the "destroy" signal; confirm before
        # relying on config being saved here.
        config.save()
|
storage | storage | """Load and save Gaphor models to Gaphors own XML format.
Three functions are exported: `load(file_obj)`loads a model from a
file. `save(file_obj)` stores the current model in a file.
"""
__all__ = ["load", "save"]
import io
import logging
from functools import partial
from typing import Callable, Iterable
from gaphor import application
from gaphor.core.modeling import Diagram, Element, ElementFactory, Presentation
from gaphor.core.modeling.collection import collection
from gaphor.core.modeling.modelinglanguage import ModelingLanguage
from gaphor.core.modeling.stylesheet import StyleSheet
from gaphor.storage.parser import GaphorLoader, element, parse_generator
from gaphor.storage.xmlwriter import XMLWriter
FILE_FORMAT_VERSION = "3.0"
NAMESPACE_MODEL = "http://gaphor.sourceforge.net/model"
log = logging.getLogger(__name__)
def save(out=None, element_factory=None, status_queue=None):
    """Save the model to *out*, optionally reporting progress via
    *status_queue*."""
    if status_queue:
        for status in save_generator(out, element_factory):
            status_queue(status)
    else:
        # Still drain the generator so the file gets written.
        for _ in save_generator(out, element_factory):
            pass
def save_generator(out, element_factory):
    """Write every element in *element_factory* to *out* as Gaphor XML.

    Generator: yields a progress percentage every 25 elements.
    """
    writer = XMLWriter(out)
    writer.startDocument()
    writer.startPrefixMapping("", NAMESPACE_MODEL)
    writer.startElementNS(
        (NAMESPACE_MODEL, "gaphor"),
        None,
        {
            (NAMESPACE_MODEL, "version"): FILE_FORMAT_VERSION,
            (NAMESPACE_MODEL, "gaphor-version"): application.distribution().version,
        },
    )

    total = element_factory.size()
    # Each element serializes itself through save_element.
    save_func = partial(save_element, element_factory=element_factory, writer=writer)
    for count, elem in enumerate(element_factory.values(), start=1):
        assert elem.id
        tag = elem.__class__.__name__
        writer.startElement(tag, {"id": str(elem.id)})
        elem.save(save_func)
        writer.endElement(tag)
        if count % 25 == 0:
            yield (count * 100) / total

    writer.endElementNS((NAMESPACE_MODEL, "gaphor"), None)
    writer.endPrefixMapping("")
    writer.endDocument()
def save_element(name, value, element_factory, writer):
    """Serialize one attribute or reference of a model element.

    *value* may be a primitive (string, int), an Element (saved as a
    reference), or a collection of Elements (saved as a reference list).
    """

    def known(v):
        # Only emit references to elements the factory still contains.
        if v.id and v in element_factory:
            return True
        log.warning(
            f"Model has unknown reference {v.id}. Reference will be skipped."
        )
        return False

    def write_ref(ref_name, ref_value):
        # <name><ref refid="..."/></name>
        if known(ref_value):
            writer.startElement(ref_name, {})
            writer.startElement("ref", {"refid": ref_value.id})
            writer.endElement("ref")
            writer.endElement(ref_name)

    def write_reflist(list_name, items):
        # <name><reflist><ref .../>...</reflist></name>
        if items:
            writer.startElement(list_name, {})
            writer.startElement("reflist", {})
            for item in items:
                if known(item):
                    writer.startElement("ref", {"refid": item.id})
                    writer.endElement("ref")
            writer.endElement("reflist")
            writer.endElement(list_name)

    def write_val(val_name, val):
        # <name><val>...</val></name>; booleans are stored as 0/1.
        if val is not None:
            writer.startElement(val_name, {})
            writer.startElement("val", {})
            text = str(int(val)) if isinstance(val, bool) else str(val)
            writer.characters(text)
            writer.endElement("val")
            writer.endElement(val_name)

    if isinstance(value, Element):
        write_ref(name, value)
    elif isinstance(value, collection):
        write_reflist(name, value)
    else:
        write_val(name, value)
def load_elements(elements, element_factory, modeling_language, gaphor_version="1.0.0"):
    """Load parsed *elements* into *element_factory*, discarding progress."""
    generator = load_elements_generator(
        elements, element_factory, modeling_language, gaphor_version
    )
    for _ in generator:
        pass
def load_elements_generator(
    elements: dict[str, element],
    element_factory: ElementFactory,
    modeling_language: ModelingLanguage,
    gaphor_version: str,
) -> Iterable[float]:
    """Load a file and create a model if possible.

    Exceptions: IOError, ValueError.
    """
    log.debug(f"Loading {len(elements)} elements")

    # The elements are iterated three times:
    size = len(elements) * 3
    progress = 0

    def update_status_queue() -> Iterable[float]:
        # Shared progress counter across all three passes; yields a
        # percentage every 30 processed elements.
        nonlocal progress, size
        progress += 1
        if progress % 30 == 0:
            yield (progress * 100) / size

    # First create elements and canvas items in the factory
    # The elements are stored as attribute 'element' on the parser objects:
    yield from _load_elements_and_canvasitems(
        elements,
        element_factory,
        modeling_language,
        gaphor_version,
        update_status_queue,
    )
    # Second pass: attribute values and references.
    yield from _load_attributes_and_references(elements, update_status_queue)
    # Models from before 2.2.0 may lack a style sheet.
    upgrade_ensure_style_sheet_is_present(element_factory)
    # Third pass: per-element post-load fixups.
    for _id, elem in list(elements.items()):
        yield from update_status_queue()
        assert elem.element
        elem.element.postload()
def _load_elements_and_canvasitems(
    elements: dict[str, element],
    element_factory: ElementFactory,
    modeling_language: ModelingLanguage,
    gaphor_version: str,
    update_status_queue: Callable[[], Iterable[float]],
):
    """First pass: create a model instance for every parsed element.

    Version-specific upgrade functions rewrite old parser data before the
    type is resolved.  Presentation items need their owning diagram to
    exist first, so creation may recurse via create_element.
    """

    def create_element(elem):
        # Already created (e.g. as the diagram of a presentation item).
        if elem.element:
            return
        # Apply data upgrades for models written by older Gaphor
        # versions; order matters, oldest first.
        if version_lower_than(gaphor_version, (2, 1, 0)):
            elem = upgrade_element_owned_comment_to_comment(elem)
        if version_lower_than(gaphor_version, (2, 3, 0)):
            elem = upgrade_package_owned_classifier_to_owned_type(elem)
            elem = upgrade_implementation_to_interface_realization(elem)
            elem = upgrade_feature_parameters_to_owned_parameter(elem)
            elem = upgrade_parameter_owner_formal_param(elem)
        if version_lower_than(gaphor_version, (2, 5, 0)):
            elem = upgrade_diagram_element(elem)
        if version_lower_than(gaphor_version, (2, 6, 0)):
            elem = upgrade_generalization_arrow_direction(elem)
        if version_lower_than(gaphor_version, (2, 9, 0)):
            elem = upgrade_flow_item_to_control_flow_item(elem, elements)
        if version_lower_than(gaphor_version, (2, 19, 0)):
            elem = upgrade_delete_property_information_flow(elem)
            elem = upgrade_decision_node_item_show_type(elem)
        if version_lower_than(gaphor_version, (2, 20, 0)):
            elem = upgrade_note_on_model_element_only(elem, elements)
        if not (cls := modeling_language.lookup_element(elem.type)):
            raise UnknownModelElementError(
                f"Type {elem.type} cannot be loaded: no such element"
            )
        if issubclass(cls, Presentation):
            if "diagram" not in elem.references:
                # Orphaned presentation items cannot be restored; drop them.
                log.warning(
                    "Removing element %s of type %s without diagram", elem.id, cls
                )
                assert all(elem.id not in e.references for e in elements.values())
                del elements[elem.id]
                return
            # Make sure the owning diagram exists before the item itself.
            diagram_id = elem.references["diagram"]
            diagram_elem = elements[diagram_id]
            create_element(diagram_elem)
            assert isinstance(diagram_elem.element, Diagram)
            elem.element = element_factory.create_as(cls, elem.id, diagram_elem.element)
        else:
            elem.element = element_factory.create_as(cls, elem.id)

    for _id, elem in list(elements.items()):
        yield from update_status_queue()
        create_element(elem)
def _load_attributes_and_references(elements, update_status_queue):
    """Second pass over the parsed elements: apply attribute values, then
    resolve reference ids (single or list) to the created instances."""
    for _id, elem in list(elements.items()):
        yield from update_status_queue()
        # The first pass must already have created the instance.
        assert elem.element
        instance = elem.element

        for name, value in list(elem.values.items()):
            try:
                instance.load(name, value)
            except AttributeError:
                log.exception(f"Invalid attribute name {elem.type}.{name}")

        for name, refids in list(elem.references.items()):
            # A reference may be a list of ids or a single id; normalize
            # the iteration but keep the original log messages.
            is_list = isinstance(refids, list)
            for refid in refids if is_list else [refids]:
                try:
                    target = elements[refid]
                except KeyError:
                    if is_list:
                        log.exception(
                            f"Invalid ID for reference ({refid}) for element {elem.type}.{name}"
                        )
                    else:
                        log.exception(f"Invalid ID for reference ({refid})")
                else:
                    instance.load(name, target.element)
def load(
    file_obj: io.TextIOBase, element_factory, modeling_language, status_queue=None
):
    """Load a model from *file_obj*.

    Optionally, a status queue function can be given, to which the
    progress is written (as status_queue(progress)).
    """
    gen = load_generator(file_obj, element_factory, modeling_language)
    for status in gen:
        if status_queue:
            status_queue(status)
def load_generator(
    file_obj: io.TextIOBase,
    element_factory: ElementFactory,
    modeling_language: ModelingLanguage,
) -> Iterable[int]:
    """Load a file and create a model if possible.

    This function is a generator. It will yield values from 0 to 100 (%)
    to indicate its progression.
    """
    assert isinstance(file_obj, io.TextIOBase)

    # Use the incremental parser and yield the percentage of the file.
    # Parsing accounts for the first half (0-50%) of the progress.
    loader = GaphorLoader()
    for percentage in parse_generator(file_obj, loader):
        if percentage:
            yield percentage / 2
        else:
            yield percentage

    elements = loader.elements
    gaphor_version = loader.gaphor_version
    if version_lower_than(gaphor_version, (0, 17, 0)):
        raise ValueError(
            f"Gaphor model version should be at least 0.17.0 (found {gaphor_version})"
        )
    log.info(f"Read {len(elements)} elements from file")

    element_factory.flush()
    # Suppress change events while the model is rebuilt; element loading
    # accounts for the second half (50-100%) of the progress.
    with element_factory.block_events():
        for percentage in load_elements_generator(
            elements, element_factory, modeling_language, gaphor_version
        ):
            if percentage:
                yield percentage / 2 + 50
            else:
                yield percentage
    yield 100
    element_factory.model_ready()
def version_lower_than(gaphor_version, version):
    """Only major and minor versions are checked.

    Tolerates pre-release suffixes (e.g. "2.6rc1", "3.0.dev2") by using
    only the leading digits of each component; the previous version
    raised ValueError on such strings.

    >>> version_lower_than("0.3.0", (0, 15, 0))
    True
    """

    def _num(part):
        # Keep the leading digits only; "" (no digits at all) counts as 0.
        digits = ""
        for ch in part:
            if not ch.isdigit():
                break
            digits += ch
        return int(digits or 0)

    parts = gaphor_version.split(".")
    return tuple(_num(p) for p in parts[:2]) < version[:2]
class UnknownModelElementError(Exception):
    """Raised when a parsed element type has no counterpart in the
    active modeling language."""

    pass
# since 2.2.0
def upgrade_ensure_style_sheet_is_present(factory):
    """Models saved before 2.2.0 have no StyleSheet; create one if the
    factory does not contain one yet."""
    if not next(factory.select(StyleSheet), None):
        factory.create(StyleSheet)
# since 2.1.0
def upgrade_element_owned_comment_to_comment(elem):
    """Rename the ``ownedComment`` reference to ``comment`` (model change
    in Gaphor 2.1.0)."""
    refs = elem.references
    if "ownedComment" in refs:
        refs["comment"] = refs["ownedComment"]
        del refs["ownedComment"]
    return elem
# since 2.3.0
def upgrade_package_owned_classifier_to_owned_type(elem):
    """Rename the ``ownedClassifier`` reference to ``ownedType`` (model
    change in Gaphor 2.3.0)."""
    refs = elem.references
    if "ownedClassifier" in refs:
        refs["ownedType"] = refs["ownedClassifier"]
        del refs["ownedClassifier"]
    return elem
# since 2.3.0
def upgrade_implementation_to_interface_realization(elem):
    """UML renamed Implementation to InterfaceRealization (since 2.3.0)."""
    if elem.type != "Implementation":
        return elem
    elem.type = "InterfaceRealization"
    return elem
# since 2.3.0
def upgrade_feature_parameters_to_owned_parameter(elem):
    """Merge ``formalParameter`` and ``returnResult`` into a single
    ``ownedParameter`` reference list (since 2.3.0).

    ``ownedParameter`` is always set, even when both sources are absent.
    """
    refs = elem.references
    formal = refs.pop("formalParameter", [])
    results = refs.pop("returnResult", [])
    refs["ownedParameter"] = formal + results
    return elem
# since 2.3.0
def upgrade_parameter_owner_formal_param(elem):
    """Rename the ``ownerReturnParam`` reference to ``ownerFormalParam``
    (since 2.3.0)."""
    refs = elem.references
    if "ownerReturnParam" in refs:
        refs["ownerFormalParam"] = refs["ownerReturnParam"]
        del refs["ownerReturnParam"]
    return elem
# since 2.5.0
def upgrade_diagram_element(elem):
    """Since 2.5.0 a Diagram references its owner via ``element`` instead
    of ``package``."""
    if elem.type == "Diagram":
        refs = elem.references
        if "package" in refs:
            refs["element"] = refs["package"]
            del refs["package"]
    return elem
# since 2.6.0
def upgrade_generalization_arrow_direction(elem):
    """Swap head and tail connections on generalization items; the arrow
    direction convention flipped in 2.6.0."""
    if elem.type != "GeneralizationItem":
        return elem
    refs = elem.references
    heads = refs.get("head-connection", 0)
    tails = refs.get("tail-connection", 0)
    # Only swap when both ends are actually connected.
    if heads and tails:
        refs["head-connection"] = tails
        refs["tail-connection"] = heads
    return elem
# since 2.19.0
def upgrade_decision_node_item_show_type(elem):
    """Rename the ``show_type`` attribute of decision node items to
    ``show_underlying_type`` (since 2.19.0)."""
    if elem.type == "DecisionNodeItem" and "show_type" in elem.values:
        elem.values["show_underlying_type"] = elem.values.pop("show_type")
    return elem
# since 2.9.0
def upgrade_flow_item_to_control_flow_item(elem, elements):
    """Split FlowItem into ControlFlowItem/ObjectFlowItem depending on the
    type of its subject (since 2.9.0)."""
    if elem.type == "FlowItem":
        subject_id = elem.references.get("subject")
        # Without a subject, default to a control flow.
        subject_type = elements[subject_id].type if subject_id else "ControlFlow"
        elem.type = subject_type + "Item"
    return elem
# since 2.19.0
def upgrade_delete_property_information_flow(elem):
    """Properties, ports and proxy ports no longer carry an
    ``informationFlow`` reference (since 2.19.0); drop it."""
    if elem.type in ("Property", "Port", "ProxyPort"):
        elem.references.pop("informationFlow", None)
    return elem
# since 2.20.0
def upgrade_note_on_model_element_only(
    elem: element, elements: dict[str, element]
) -> element:
    """Since 2.20.0 notes live on the model element, not the presentation
    item: merge an item's note into its subject's note and remove it."""
    if not elem.type.endswith("Item") or "note" not in elem.values:
        return elem
    subject = elements.get(elem.references.get("subject", None))  # type: ignore[arg-type]
    if subject:
        note = elem.values["note"]
        if subject.values.get("note"):
            subject.values["note"] += "\n\n" + note
        else:
            subject.values["note"] = note
        del elem.values["note"]
    return elem
|
Code | GM | import operator
import os
from Code import Jugada, Util, VarGen
from LCEngine4 import pv2xpv, xpv2pv
class GMpartida:
    """A single reference game of a master: the move list (PV) plus the
    metadata shown to the user."""

    def __init__(self, linea):
        # One record per line: xpv|event|opponent|date|opening|result|color
        campos = linea.split("|")
        (
            self.xpv,
            self.event,
            self.oponent,
            self.date,
            self.opening,
            self.result,
            self.color,
        ) = campos
        # Expand the compressed xpv into the game's list of moves.
        self.liPV = xpv2pv(self.xpv).split(" ")
        self.lenPV = len(self.liPV)

    def toline(self):
        """Serialize back to the pipe-separated storage format."""
        return "|".join(
            (
                self.xpv,
                self.event,
                self.oponent,
                self.date,
                self.opening,
                self.result,
                self.color,
            )
        )

    def isWhite(self, siWhite):
        # color may contain "W", "B" or both.
        return ("W" if siWhite else "B") in self.color

    def isValidMove(self, ply, move):
        return ply < self.lenPV and self.liPV[ply] == move

    def isFinished(self, ply):
        return not ply < self.lenPV

    def move(self, ply):
        if self.isFinished(ply):
            return None
        return self.liPV[ply]

    def rotulo(self, siGM=True):
        if not siGM:
            return "%s (%s)" % (self.oponent, self.date)
        return _("Opponent") + ": <b>%s (%s)</b>" % (self.oponent, self.date)

    def rotuloBasico(self, siGM=True):
        if not siGM:
            return "%s (%s)" % (self.oponent, self.date)
        return _("Opponent") + ": %s (%s)" % (self.oponent, self.date)
class GM:
def __init__(self, carpeta, gm):
self.gm = gm
self.carpeta = carpeta
self.dicAciertos = {}
self.liGMPartidas = self.read()
self.ply = 0
self.lastGame = None
    def __len__(self):
        # Number of candidate games still matching the line played so far.
        return len(self.liGMPartidas)
    def getLastGame(self):
        # Last game that matched the most recent move (set in play()).
        return self.lastGame
def read(self):
# (kupad fix) linux is case sensitive and can't find the xgm file because ficheroGM is all lower-case, but all
# the xgm files have the first letter capitalized (including ones recently downloaded)
ficheroGM = "%s%s.xgm" % (self.gm[0].upper(), self.gm[1:])
f = open(os.path.join(self.carpeta, ficheroGM), "rb")
li = []
for linea in f:
linea = linea.strip()
if linea:
li.append(GMpartida(linea))
f.close()
return li
    def colorFilter(self, isWhite):
        # Keep only the games where the master played the requested colour.
        self.liGMPartidas = [gmp for gmp in self.liGMPartidas if gmp.isWhite(isWhite)]
def play(self, move):
move = move.lower()
liP = []
ok = False
nextPly = self.ply + 1
for gmPartida in self.liGMPartidas:
if gmPartida.isValidMove(self.ply, move):
self.lastGame = gmPartida # - Siempre hay una ultima
ok = True
if not gmPartida.isFinished(nextPly):
liP.append(gmPartida)
self.liGMPartidas = liP
self.ply += 1
return ok
def isValidMove(self, move):
move = move.lower()
for gmPartida in self.liGMPartidas:
if gmPartida.isValidMove(self.ply, move):
return True
return False
def isFinished(self):
for gmp in self.liGMPartidas:
if not gmp.isFinished(self.ply):
return False
return True
def alternativas(self):
li = []
for gmPartida in self.liGMPartidas:
move = gmPartida.move(self.ply)
if move and move not in li:
li.append(move)
return li
def dameJugadasTXT(self, posicionBase, siGM):
li = []
dRepeticiones = {}
for gmPartida in self.liGMPartidas:
move = gmPartida.move(self.ply)
if move:
if move not in dRepeticiones:
dRepeticiones[move] = [len(li), 1]
desde, hasta, coronacion = move[:2], move[2:4], move[4:]
siBien, mens, jg = Jugada.dameJugada(
posicionBase, desde, hasta, coronacion
)
li.append(
[
desde,
hasta,
coronacion,
gmPartida.rotuloBasico(siGM),
jg.pgnSP(),
]
)
else:
dRepeticiones[move][1] += 1
pos = dRepeticiones[move][0]
li[pos][3] = _("%d games") % dRepeticiones[move][1]
return li
def rotuloPartidaSiUnica(self, siGM=True):
if len(self.liGMPartidas) == 1:
return self.liGMPartidas[0].rotulo(siGM)
else:
return ""
def resultado(self, partida):
gPartida = self.lastGame
apertura = partida.apertura.trNombre if partida.apertura else gPartida.opening
txt = _("Opponent") + " : <b>" + gPartida.oponent + "</b><br>"
event = gPartida.event
if event:
txt += _("Event") + " : <b>" + event + "</b><br>"
txt += _("Date") + " : <b>" + gPartida.date + "</b><br>"
txt += _("Opening") + " : <b>" + apertura + "</b><br>"
txt += _("Result") + " : <b>" + gPartida.result + "</b><br>"
txt += "<br>" * 2
aciertos = 0
for v in self.dicAciertos.itervalues():
if v:
aciertos += 1
total = len(self.dicAciertos)
if total:
porc = int(aciertos * 100.0 / total)
txt += _("Hints") + " : <b>%d%%</b>" % porc
else:
porc = 0
event = " - %s" % event if event else ""
txtResumen = "%s%s - %s - %s" % (
gPartida.oponent,
event,
gPartida.date,
gPartida.result,
)
return txt, porc, txtResumen
def ponPartidaElegida(self, numPartida):
self.liGMPartidas = [self.liGMPartidas[numPartida]]
def genToSelect(self):
liRegs = []
for num, part in enumerate(self.liGMPartidas):
dic = dict(
NOMBRE=part.oponent,
FECHA=part.date,
ECO=part.opening,
RESULT=part.result,
NUMERO=num,
EVENT=part.event,
)
liRegs.append(dic)
return liRegs
def write(self):
ficheroGM = self.gm + ".xgm"
with open(os.path.join(self.carpeta, ficheroGM), "wb") as q:
for part in self.liGMPartidas:
q.write(part.toline() + "\n")
def remove(self, num):
del self.liGMPartidas[num]
self.write()
def dicGM(siWoman):
    """Load the grandmaster display-name table.

    Reads "GM/listaGM.txt" (or "WGM/listaGM.txt" when siWoman) and returns a
    dict mapping lower-cased GM key -> display name. Lines appear to be
    XSEP-separated key/name records — TODO confirm format against the file.
    """
    dic = {}
    nomfich = "GM/listaGM.txt"
    if siWoman:
        nomfich = "W" + nomfich
    # Context manager: the original leaked the handle if a line was malformed.
    with open(nomfich, "rb") as f:
        for linea in f:
            if linea:
                li = linea.split(VarGen.XSEP)
                gm = li[0].lower()
                nombre = li[1]
                dic[gm] = nombre
    return dic
def listaGM(siWoman):
    """Return sorted (name, key, True, True) tuples for every listed GM.

    Scans the "GM"/"WGM" directory for .xgm files and keeps only those whose
    key appears in the listaGM.txt table; the result is sorted by name.
    """
    dic = dicGM(siWoman)
    li = []
    for entry in Util.listdir("WGM" if siWoman else "GM"):
        fich = entry.name.lower()
        if fich.endswith(".xgm"):
            gm = fich[:-4].lower()
            try:
                li.append((dic[gm], gm, True, True))
            except KeyError:
                # Narrowed from a bare except: only dic[gm] can legitimately
                # fail here; .xgm files with no table entry are skipped.
                pass
    li = sorted(li, key=operator.itemgetter(0))
    return li
def listaGMpersonal(carpeta):
    """Return sorted (key, key, plays_white, plays_black) for personal .xgm files.

    For each .xgm file in *carpeta*, parses its games to find out whether the
    player has games as White and/or Black; files with no parseable game with
    either color are omitted.
    """
    li = []
    for entry in Util.listdir(carpeta):
        fich = entry.name.lower()
        if fich.endswith(".xgm"):
            gm = fich[:-4]
            siW = siB = False
            with open(os.path.join(carpeta, fich)) as f:
                for linea in f:
                    try:
                        gmp = GMpartida(linea.strip())
                    except Exception:
                        # Narrowed from a bare except: skip malformed lines
                        # without swallowing KeyboardInterrupt/SystemExit.
                        continue
                    if not siW:
                        siW = gmp.isWhite(True)
                    if not siB:
                        siB = gmp.isWhite(False)
                    if siW and siB:
                        break  # both colors found, no need to parse further
            if siW or siB:
                li.append((gm, gm, siW, siB))
    li = sorted(li)
    return li
def hayGMpersonal(carpeta):
    """True when *carpeta* holds at least one usable personal .xgm file."""
    return bool(listaGMpersonal(carpeta))
class FabGM:
    """Builds a personal .xgm training file from PGN games matching player filters."""

    def __init__(self, configuracion, nomEntrenamiento, liJugadores):
        self.configuracion = configuracion
        self.nomEntrenamiento = nomEntrenamiento
        # Player name patterns: "*" is a prefix/suffix wildcard and a leading
        # "^" marks a pattern that matches the OPPONENT side (see masMadera).
        self.liJugadores = liJugadores
        self.f = None  # output file, opened lazily on first write()
        self.added = 0  # number of games written so far

    def write(self, txt):
        """Append one encoded game line, opening the output file on first use."""
        if self.f is None:
            fichero = (
                os.path.join(
                    self.configuracion.dirPersonalTraining, self.nomEntrenamiento
                )
                + ".xgm"
            )
            self.f = open(fichero, "wb")
        self.f.write(txt)
        self.added += 1

    def close(self):
        """Close the output file if it was opened."""
        if self.f:
            self.f.close()
            self.f = None

    def masMadera(self, pgn, partida, result):
        """Decide from the PGN tags whether, and for which colors, to add a game.

        With a player filter, the side whose name matches decides the trained
        color; "^"-patterns flip to the opposite color. Without a filter the
        game is added twice, once per color.
        """
        dic = pgn.dic
        # Require both player tags, and a Result tag when filtering by result.
        if (
            not ("White" in dic)
            or not ("Black" in dic)
            or (result and not ("Result" in dic))
        ):
            return
        if self.liJugadores:
            xblancas = False
            xnegras = False
            for x in ["Black", "White"]:
                if x in dic:
                    jugador = dic[x].upper()
                    si = False
                    for uno in self.liJugadores:
                        siContrario = uno.startswith("^")
                        if siContrario:
                            uno = uno[1:]
                        siZ = uno.endswith("*")  # trailing wildcard
                        siA = uno.startswith("*")  # leading wildcard
                        uno = uno.replace("*", "").strip().upper()
                        if siA:
                            if jugador.endswith(uno):
                                si = True
                            if siZ:  # form to apply both siA and siZ
                                si = uno in jugador
                        elif siZ:
                            if jugador.startswith(uno):
                                si = True
                        elif uno == jugador:
                            si = True
                        if si:
                            break
                    if si:
                        # siContrario still holds the value from the matching
                        # pattern (loop exited via break).
                        if x == "Black":
                            if siContrario:
                                xblancas = True
                            else:
                                xnegras = True
                        else:
                            if siContrario:
                                xnegras = True
                            else:
                                xblancas = True
            if not (xblancas or xnegras):
                return
            self.masMaderaUno(dic, partida, xblancas, xnegras, result)
        else:
            self.masMaderaUno(dic, partida, True, False, result)
            self.masMaderaUno(dic, partida, False, True, result)

    def masMaderaUno(self, dic, partida, xblancas, xnegras, tpResult):
        """Serialize one game for the given color(s), honoring the result filter.

        tpResult: falsy = keep any result; 1 = only wins of the trained
        color(s); anything else = wins or draws.
        NOTE(review): the color field only reflects xblancas ("W" else "B");
        confirm callers never pass both flags True.
        """
        pk = ""
        for jg in partida.liJugadas:
            pk += jg.movimiento() + " "
        event = dic.get("Event", "-")
        oponente = dic["White"] + "-" + dic["Black"]
        date = dic.get("Date", "-").replace("?", "").strip(".")
        eco = dic.get("Eco", "-")
        result = dic.get("Result", "-")
        color = "W" if xblancas else "B"
        if tpResult:
            # "1/2-1/2" contains "2"; "1-0" starts with "1"; "0-1" with "0".
            siEmpate = "2" in result
            siGanaBlancas = not siEmpate and result.startswith("1")
            siGanaNegras = not siEmpate and result.startswith("0")
            if tpResult == 1:
                if not ((xblancas and siGanaBlancas) or (xnegras and siGanaNegras)):
                    return
            else:
                if not (
                    siEmpate
                    or (xblancas and siGanaBlancas)
                    or (xnegras and siGanaNegras)
                ):
                    return

        def nopipe(txt):
            # "|" is the .xgm field separator; strip it out of tag values.
            return txt.replace("|", " ").strip() if "|" in txt else txt

        self.write(
            "%s|%s|%s|%s|%s|%s|%s\n"
            % (
                pv2xpv(pk.strip()),
                nopipe(event),
                nopipe(oponente),
                nopipe(date),
                eco,
                result,
                color,
            )
        )

    def xprocesa(self):
        """Finish: close the output file and return the number of games added."""
        self.close()
        return self.added
|
httpie | context | import argparse
import os
import sys
import warnings
from contextlib import contextmanager
from enum import Enum
from pathlib import Path
from typing import IO, TYPE_CHECKING, Iterator, Optional
try:
import curses
except ImportError:
curses = None # Compiled w/o curses
from .compat import cached_property, is_windows
from .config import DEFAULT_CONFIG_DIR, Config, ConfigFileError
from .encoding import UTF8
from .output.ui.palette import GenericColor
from .utils import repr_dict
if TYPE_CHECKING:
from rich.console import Console
class LogLevel(str, Enum):
    """Message severities; str-valued so members compare/serialize as plain strings."""
    INFO = "info"
    WARNING = "warning"
    ERROR = "error"
# Terminal color used when rendering a message of each level.
LOG_LEVEL_COLORS = {
    LogLevel.INFO: GenericColor.PINK,
    LogLevel.WARNING: GenericColor.ORANGE,
    LogLevel.ERROR: GenericColor.RED,
}
# Number of --quiet flags at which messages of this level start being
# silenced (consumed by Environment.log_error / apply_warnings_filter).
LOG_LEVEL_DISPLAY_THRESHOLDS = {
    LogLevel.INFO: 1,
    LogLevel.WARNING: 2,
    LogLevel.ERROR: float("inf"),  # Never hide errors.
}
class Environment:
    """
    Information about the execution context
    (standard streams, config directory, etc).
    By default, it represents the actual environment.
    All of the attributes can be overwritten though, which
    is used by the test suite to simulate various scenarios.
    """

    # Parsed CLI arguments; presumably filled in by the CLI entry point after
    # construction — TODO confirm against the caller.
    args = argparse.Namespace()
    is_windows: bool = is_windows
    config_dir: Path = DEFAULT_CONFIG_DIR
    stdin: Optional[IO] = sys.stdin  # `None` when closed fd (#791)
    stdin_isatty: bool = stdin.isatty() if stdin else False
    stdin_encoding: Optional[str] = None  # resolved in __init__
    stdout: IO = sys.stdout
    stdout_isatty: bool = stdout.isatty()
    stdout_encoding: Optional[str] = None  # resolved in __init__
    stderr: IO = sys.stderr
    stderr_isatty: bool = stderr.isatty()
    colors = 256  # may be overridden by terminfo below; 0 disables color
    program_name: str = "http"
    # Whether to show progress bars / status spinners etc.
    show_displays: bool = True

    # Class-body platform probe: on POSIX, ask terminfo for the real color
    # count; on Windows, wrap stdout/stderr with colorama for ANSI support.
    if not is_windows:
        if curses:
            try:
                curses.setupterm()
                colors = curses.tigetnum("colors")
            except curses.error:
                pass
    else:
        # noinspection PyUnresolvedReferences
        import colorama.initialise
        stdout = colorama.initialise.wrap_stream(
            stdout, convert=None, strip=None, autoreset=True, wrap=True
        )
        stderr = colorama.initialise.wrap_stream(
            stderr, convert=None, strip=None, autoreset=True, wrap=True
        )
        del colorama

    def __init__(self, devnull=None, **kwargs):
        """
        Use keyword arguments to overwrite
        any of the class attributes for this instance.
        """
        # Reject typos: every override must name an existing class attribute.
        assert all(hasattr(type(self), attr) for attr in kwargs.keys())
        self.__dict__.update(**kwargs)
        # The original STDERR unaffected by --quiet’ing.
        self._orig_stderr = self.stderr
        self._devnull = devnull
        # Keyword arguments > stream.encoding > default UTF-8
        if self.stdin and self.stdin_encoding is None:
            self.stdin_encoding = getattr(self.stdin, "encoding", None) or UTF8
        if self.stdout_encoding is None:
            actual_stdout = self.stdout
            if is_windows:
                # noinspection PyUnresolvedReferences
                from colorama import AnsiToWin32

                # Colorama's wrapper hides the underlying stream's encoding;
                # unwrap before reading it.
                if isinstance(self.stdout, AnsiToWin32):
                    # noinspection PyUnresolvedReferences
                    actual_stdout = self.stdout.wrapped
            self.stdout_encoding = getattr(actual_stdout, "encoding", None) or UTF8
        # Count of --quiet flags; compared against LOG_LEVEL_DISPLAY_THRESHOLDS.
        self.quiet = kwargs.pop("quiet", 0)

    def __str__(self):
        # Merge class defaults with instance overrides, hide private attrs.
        defaults = dict(type(self).__dict__)
        actual = dict(defaults)
        actual.update(self.__dict__)
        actual["config"] = self.config
        return repr_dict(
            {key: value for key, value in actual.items() if not key.startswith("_")}
        )

    def __repr__(self):
        return f"<{type(self).__name__} {self}>"

    _config: Optional[Config] = None

    @property
    def config(self) -> Config:
        """Lazily loaded user config; load errors are logged as warnings."""
        config = self._config
        if not config:
            self._config = config = Config(directory=self.config_dir)
            if not config.is_new():
                try:
                    config.load()
                except ConfigFileError as e:
                    self.log_error(e, level=LogLevel.WARNING)
        return config

    @property
    def devnull(self) -> IO:
        """Lazily opened sink stream used by as_silent()."""
        if self._devnull is None:
            self._devnull = open(os.devnull, "w+")
        return self._devnull

    @contextmanager
    def as_silent(self) -> Iterator[None]:
        """Temporarily redirect stdout and stderr to /dev/null."""
        original_stdout = self.stdout
        original_stderr = self.stderr
        try:
            self.stdout = self.devnull
            self.stderr = self.devnull
            yield
        finally:
            self.stdout = original_stdout
            self.stderr = original_stderr

    def log_error(self, msg: str, level: LogLevel = LogLevel.ERROR) -> None:
        """Print a colored "<prog>: <level>: <msg>" message to stderr.

        When quiet-ed past the level's threshold, writes to the (possibly
        silenced) stderr; otherwise to the original, un-quieted stderr.
        """
        if self.stdout_isatty and self.quiet >= LOG_LEVEL_DISPLAY_THRESHOLDS[level]:
            stderr = self.stderr  # Not directly /dev/null, since stderr might be mocked
        else:
            stderr = self._orig_stderr
        rich_console = self._make_rich_console(
            file=stderr, force_terminal=stderr.isatty()
        )
        rich_console.print(
            f"\n{self.program_name}: {level.value}: {msg}\n\n",
            style=LOG_LEVEL_COLORS[level],
            markup=False,
            highlight=False,
            soft_wrap=True,
        )

    def apply_warnings_filter(self) -> None:
        """Silence Python warnings when quiet-ed at or past the WARNING threshold."""
        if self.quiet >= LOG_LEVEL_DISPLAY_THRESHOLDS[LogLevel.WARNING]:
            warnings.simplefilter("ignore")

    def _make_rich_console(self, file: IO[str], force_terminal: bool) -> "Console":
        from httpie.output.ui.rich_palette import _make_rich_color_theme
        from rich.console import Console

        style = getattr(self.args, "style", None)
        theme = _make_rich_color_theme(style)
        # Rich infers the rest of the knowledge (e.g encoding)
        # dynamically by looking at the file/stderr.
        return Console(
            file=file,
            force_terminal=force_terminal,
            no_color=(self.colors == 0),
            theme=theme,
        )

    # Rich recommends separating the actual console (stdout) from
    # the error (stderr) console for better isolation between parts.
    # https://rich.readthedocs.io/en/stable/console.html#error-console
    @cached_property
    def rich_console(self):
        return self._make_rich_console(self.stdout, self.stdout_isatty)

    @cached_property
    def rich_error_console(self):
        return self._make_rich_console(self.stderr, self.stderr_isatty)
|
trust-calculation | trust_graph | import hashlib
import logging
import math
import networkx as nx
from tribler.core.components.bandwidth_accounting.trust_calculation.graph_positioning import (
GraphPositioning,
)
from tribler.core.exceptions import TrustGraphException
from tribler.core.utilities.unicode import hexlify
# Hard caps keeping the composed trust graph tractable (see TrustGraph).
MAX_NODES = 500
MAX_TRANSACTIONS = 2500
# Node id reserved for the graph owner's key; it is added first.
ROOT_NODE_ID = 0
class TrustGraph(nx.DiGraph):
    """Directed graph of bandwidth transactions around a root peer.

    Nodes are peers (sequential numeric ids; the root gets id 0). An edge
    records that at least one bandwidth transaction exists between the two
    peers. Size is bounded by max_nodes / max_transactions.
    """

    def __init__(
        self,
        root_key,
        bandwidth_db,
        max_nodes=MAX_NODES,
        max_transactions=MAX_TRANSACTIONS,
    ):
        nx.DiGraph.__init__(self)
        self._logger = logging.getLogger(self.__class__.__name__)
        self.root_key = root_key
        self.bandwidth_db = bandwidth_db
        self.max_nodes = max_nodes
        self.max_transactions = max_transactions
        # A peer's index in this list IS its numeric node id in the graph.
        self.node_public_keys = []
        # Edge ids (see compute_edge_id) already folded into the graph.
        self.edge_set = set()
        # The root node is added first so it gets the node id zero.
        self.get_or_create_node(root_key)

    def reset(self, root_key):
        """Clear all nodes/edges and re-add the root node (id 0)."""
        self.clear()
        self.node_public_keys = []
        self.edge_set = set()
        self.get_or_create_node(root_key)

    def set_limits(self, max_nodes=None, max_transactions=None):
        """Override the node/transaction caps; falsy values leave them unchanged."""
        if max_nodes:
            self.max_nodes = max_nodes
        if max_transactions:
            self.max_transactions = max_transactions

    def get_or_create_node(self, peer_key, add_if_not_exist=True):
        """Return the attribute dict of the node for *peer_key*.

        Creates the node when missing (unless add_if_not_exist is False, in
        which case None is returned). Raises TrustGraphException when the
        node cap would be exceeded.
        """
        if peer_key in self.node_public_keys:
            peer_graph_node_id = self.node_public_keys.index(peer_key)
            return self.nodes()[peer_graph_node_id]
        if not add_if_not_exist:
            return None
        if self.number_of_nodes() >= self.max_nodes:
            raise TrustGraphException(
                f"Max node peers ({self.max_nodes}) reached in the graph"
            )
        # Node does not exist in the graph so a new node at this point.
        # The numeric node id is used here so the id for the new node becomes
        # equal to the number of nodes in the graph.
        node_id = self.number_of_nodes()
        node_attrs = {
            "id": node_id,
            "key": hexlify(peer_key),
            "total_up": self.bandwidth_db.get_total_given(peer_key),
            "total_down": self.bandwidth_db.get_total_taken(peer_key),
        }
        self.add_node(node_id, **node_attrs)
        self.node_public_keys.append(peer_key)
        return self.nodes()[node_id]

    def compose_graph_data(self):
        """Rebuild the graph from the two transaction layers around the root.

        Stops early (logging a warning) once a node/transaction cap is hit.
        """
        # Reset the graph first
        self.reset(self.root_key)
        layer_1 = self.bandwidth_db.get_latest_transactions(self.root_key)
        try:
            for tx in layer_1:
                self.add_bandwidth_transaction(tx)
                # Stop at layer 2
                counter_party = (
                    tx.public_key_a
                    if self.root_key != tx.public_key_a
                    else tx.public_key_b
                )
                layer_2 = self.bandwidth_db.get_latest_transactions(counter_party)
                for tx2 in layer_2:
                    self.add_bandwidth_transaction(tx2)
        except TrustGraphException as tge:
            self._logger.warning("Error composing Trust graph: %s", tge)

    def compute_edge_id(self, transaction):
        """Stable, order-sensitive id for the transaction's (a, b) key pair."""
        sha2 = hashlib.sha3_224()  # any safe hashing should do
        sha2.update(transaction.public_key_a)
        sha2.update(transaction.public_key_b)
        # sha3-224 hexdigest is 56 chars, so [:64] keeps the whole digest.
        return sha2.hexdigest()[:64]

    def add_bandwidth_transaction(self, tx):
        """Fold one transaction into the graph as an edge between its peers.

        Raises TrustGraphException when the transaction cap is reached.
        NOTE(review): the cap is checked before the duplicate test, so a
        duplicate transaction can still raise once the cap is hit — confirm
        whether that is intended.
        """
        # First, compose a unique edge id for the transaction and check if it is already added.
        edge_id = self.compute_edge_id(tx)
        if len(self.edge_set) >= self.max_transactions:
            raise TrustGraphException(
                f"Max transactions ({self.max_transactions}) reached in the graph"
            )
        if edge_id not in self.edge_set:
            peer1 = self.get_or_create_node(tx.public_key_a, add_if_not_exist=True)
            peer2 = self.get_or_create_node(tx.public_key_b, add_if_not_exist=True)
            if peer1 and peer2 and peer2["id"] not in self.successors(peer1["id"]):
                self.add_edge(peer1["id"], peer2["id"])
            self.edge_set.add(edge_id)

    def compute_node_graph(self):
        """Return {"node": [...], "edge": [...]} with normalized 2D positions.

        Positions come from a radial layout of the BFS tree rooted at the
        root node, converted to cartesian and scaled into [-1, 1].
        """
        undirected_graph = self.to_undirected()
        num_nodes = undirected_graph.number_of_nodes()
        # Find bfs tree of the connected components
        bfs_tree = nx.bfs_tree(undirected_graph, ROOT_NODE_ID)
        # Position the nodes in a circular fashion according to the bfs tree
        pos = GraphPositioning.hierarchy_pos(
            bfs_tree, root=ROOT_NODE_ID, width=2 * math.pi, xcenter=0.5
        )
        graph_nodes = []
        graph_edges = []
        index_mapper = {}
        node_id = ROOT_NODE_ID
        max_x = max_y = 0.0001  # as close to zero
        for _id, (theta, r) in pos.items():
            index_mapper[_id] = node_id
            node = undirected_graph.nodes()[_id]
            node["id"] = node_id
            node_id += 1
            # convert from polar coordinates to cartesian coordinates
            x = r * math.sin(theta) * num_nodes
            y = r * math.cos(theta) * num_nodes
            node["pos"] = [x, y]
            graph_nodes.append(node)
            # max values to be used for normalization
            max_x = max(abs(x), max_x)
            max_y = max(abs(y), max_y)
        # Normalize the positions
        for node in graph_nodes:
            node["pos"][0] /= max_x
            node["pos"][1] /= max_y
        for edge in undirected_graph.edges():
            graph_edges.append((index_mapper[edge[0]], index_mapper[edge[1]]))
        return {"node": graph_nodes, "edge": graph_edges}
|
scripts | rpm | #!/usr/bin/python3 -B
"""
This file is part of the Stargate project, Copyright Stargate Team
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
import argparse
import json
import os
import shutil
import sys
def parse_args():
    """Parse command-line options for the RPM package build script.

    Returns an argparse.Namespace with:
      install (bool): install the package after creating it.
      plat_flags (str | None): non-default PLAT_FLAGS for the build.
    """
    parser = argparse.ArgumentParser(
        # Fixed: this is the RPM build script; the old text ("Debian package
        # creator script") was copied from the Debian packaging script.
        description="RPM package creator script",
    )
    parser.add_argument(
        "-i",
        "--install",
        action="store_true",
        dest="install",
        help="Install the package after creating it",
    )
    parser.add_argument(
        "--plat-flags",
        dest="plat_flags",
        default=None,
        help="Use non-default PLAT_FLAGS to compile",
    )
    return parser.parse_args()
# Parse the CLI once at import time: this file is a script, not a library.
args = parse_args()
if args.install:
    # Warm up sudo
    assert not os.system("sudo echo")
# Build command, optionally overriding PLAT_FLAGS for the compiler.
if args.plat_flags is None:
    BUILD_CMD = "make"
else:
    BUILD_CMD = f"PLAT_FLAGS='{args.plat_flags}' make"
# e.g. "311" for Python 3.11; interpolated into the spec template below.
PYTHON_VERSION = "".join(str(x) for x in sys.version_info[:2])
# Work from the repository root (the parent of this script's directory).
orig_wd = os.path.abspath(
    os.path.join(
        os.path.dirname(__file__),
        "..",
    ),
)
os.chdir(orig_wd)
# Create the source tarball (scripts/src.sh) and read the project version.
os.system("scripts/src.sh")
with open("src/meta.json") as f:
    j = json.load(f)
MAJOR_VERSION = j["version"]["major"]
MINOR_VERSION = j["version"]["minor"]
# RPM versions may not contain "-"; translate it to ".".
global_version_fedora = MINOR_VERSION.replace("-", ".")
PACKAGE_NAME = "{}-{}".format(MAJOR_VERSION, global_version_fedora)
global_home = os.path.expanduser("~")
rpm_build_path = os.path.join(
    global_home,
    "rpmbuild",
    "BUILD",
    MAJOR_VERSION,
)
# Drop any stale build tree left over from a previous run.
os.system(f"rm -rf {rpm_build_path}*")
# Create the ~/rpmbuild tree if missing, preferring the rpmdevtools helper.
if not os.path.isdir("{}/rpmbuild".format(global_home)):
    if shutil.which("rpmdev-setuptree"):
        os.system("rpmdev-setuptree")
    else:
        for dirname in (
            "BUILD",
            "BUILDROOT",
            "RPM",
            "SOURCES",
            "SPECS",
        ):
            path = os.path.join(global_home, "rpmbuild", dirname)
            os.makedirs(path)
SPEC_DIR = "{}/rpmbuild/SPECS/".format(global_home)
SOURCE_DIR = "{}/rpmbuild/SOURCES/".format(global_home)
TARBALL_NAME = "{}.tar.gz".format(PACKAGE_NAME)
# Implicit string concatenation; only used as Source0 metadata in the spec.
TARBALL_URL = "https://github.com/stargateaudio/stargate/archive" "/{}".format(
    TARBALL_NAME
)
# Stage the tarball produced by scripts/src.sh into the rpmbuild tree.
os.system('cp "{}" "{}"'.format(TARBALL_NAME, SOURCE_DIR))
global_spec_file = "{}.spec".format(
    MAJOR_VERSION,
)
f_spec_template = """
%global debug_package %{{nil}}
Name: {0}
Version: {1}
Release: 1%{{?dist}}
Summary: Digital audio workstations, instrument and effect plugins
License: GPLv3
URL: http://github.com/stargateaudio/stargate/
Source0: {2}
# Commented out to allow compiling from an old Debian VM, these
# dependencies are still required
#BuildRequires: \
# alsa-lib-devel \
# fftw-devel \
# gcc \
# gcc-c++ \
# libsndfile-devel \
# portaudio-devel \
# portmidi-devel \
# python3-devel \
Requires: \
(alsa-lib or alsa) \
(fftw3 or libfftw3-3) \
libsndfile \
(portaudio or libportaudio2) \
(portmidi or libportmidi0) \
python3 \
(python3-jinja2 or python3-Jinja2) \
python3-numpy \
python3-psutil \
(python3-pyyaml or python3-PyYAML) \
(python3-pyqt6 or python3-qt6 or python3-qt5 or python3-pyqt5) \
(rubberband or rubberband-cli) \
vorbis-tools \
Recommends: \
ffmpeg \
lame \
python3-mutagen \
%global __python %{{__python3}}
%description
Stargate is digital audio workstations (DAWs), instrument and effect plugins
%prep
%setup -q
%build
{3}
%install
rm -rf $RPM_BUILD_ROOT
DESTDIR="$RPM_BUILD_ROOT" make install
%post
update-mime-database %{{_usr}}/share/mime/ || true
xdg-mime default {0}.desktop text/{0}.project || true
%files
%defattr(644, root, root)
%attr(755, root, root) /opt/{0}
%attr(755, root, root) %{{_usr}}/bin/{0}
%{{_usr}}/share/applications/{0}.desktop
%{{_usr}}/share/doc/{0}
%{{_usr}}/share/mime/packages/{0}.xml
%{{_usr}}/share/pixmaps/{0}.ico
%{{_usr}}/share/pixmaps/{0}.png
%doc
""".format(
MAJOR_VERSION,
global_version_fedora,
TARBALL_URL,
BUILD_CMD,
PYTHON_VERSION,
)
# Write the rendered spec and hand it to rpmbuild; optionally (re)install.
# Context manager replaces open/write/close so the handle is closed on error.
with open(global_spec_file, "w") as f_spec_file:
    f_spec_file.write(f_spec_template)
os.system('cp "{}" "{}"'.format(global_spec_file, SPEC_DIR))
if args.install:
    # Remove artifacts of previous builds so the wildcard copy below is unambiguous.
    os.system("rm -f {}-*".format(MAJOR_VERSION))
os.chdir(SPEC_DIR)
f_rpm_result = os.system("rpmbuild -ba {}".format(global_spec_file))
if f_rpm_result:
    print("Error: rpmbuild returned {}".format(f_rpm_result))
    exit(f_rpm_result)
else:
    # Copy the built package(s) back to the repository root.
    pkg_name = "{}-*{}*rpm".format(MAJOR_VERSION, MINOR_VERSION)
    cp_cmd = "cp ~/rpmbuild/RPMS/*/{} '{}'".format(pkg_name, orig_wd)
    print(cp_cmd)
    os.system(cp_cmd)
    if args.install:
        os.system("sudo dnf remove -y {0} '{0}-*'".format(MAJOR_VERSION))
        # os.system("sudo rpm -e {0}".format(MAJOR_VERSION))
        # os.system("sudo rpm -e {0}-debuginfo".format(MAJOR_VERSION))
        os.system("sudo dnf install -y {}/{}".format(orig_wd, pkg_name))
|
bs4 | dammit | # -*- coding: utf-8 -*-
"""Beautiful Soup bonus library: Unicode, Dammit
This library converts a bytestream to Unicode through any means
necessary. It is heavily based on code from Mark Pilgrim's Universal
Feed Parser. It works best on XML and HTML, but it does not rewrite the
XML or HTML to reflect a new encoding; that's the tree builder's job.
"""
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__license__ = "MIT"
import codecs
import logging
import re
import string
from htmlentitydefs import codepoint2name
# Import a library to autodetect character encodings. Exactly one
# chardet_dammit(s) function ends up defined, fastest implementation first;
# each returns the detected encoding name or None.
chardet_type = None
try:
    # First try the fast C implementation.
    # PyPI package: cchardet
    import cchardet

    def chardet_dammit(s):
        return cchardet.detect(s)["encoding"]
except ImportError:
    try:
        # Fall back to the pure Python implementation
        # Debian package: python-chardet
        # PyPI package: chardet
        import chardet

        def chardet_dammit(s):
            return chardet.detect(s)["encoding"]
        # import chardet.constants
        # chardet.constants._debug = 1
    except ImportError:
        # No chardet available.
        def chardet_dammit(s):
            return None
# Available from http://cjkpython.i18n.org/.
try:
    import iconv_codec
except ImportError:
    pass
# Encoding declared in an XML prolog: <?xml version="1.0" encoding="..."?>
# Raw strings fix the invalid escape sequences (\?, \s, etc.) in the old
# literals; the compiled byte patterns are unchanged.
xml_encoding_re = re.compile(r"^<\?.*encoding=['\"](.*?)['\"].*\?>".encode(), re.I)
# Encoding declared in an HTML <meta ... charset=...> tag.
html_meta_re = re.compile(
    r"<\s*meta[^>]+charset\s*=\s*[\"']?([^>]*?)[ /;'\">]".encode(), re.I
)
class EntitySubstitution(object):
    """Substitute XML or HTML entities for the corresponding characters."""

    def _populate_class_variables():
        """Build the char<->entity lookup tables and the matching regexp.

        Plain function (no cls/self): it runs once, at class-definition time.
        """
        lookup = {}
        reverse_lookup = {}
        characters_for_re = []
        for codepoint, name in list(codepoint2name.items()):
            character = unichr(codepoint)
            if codepoint != 34:
                # There's no point in turning the quotation mark into
                # &quot;, unless it happens within an attribute value, which
                # is handled elsewhere.
                characters_for_re.append(character)
                lookup[character] = name
            # But we do want to turn &quot; into the quotation mark.
            reverse_lookup[name] = character
        re_definition = "[%s]" % "".join(characters_for_re)
        return lookup, reverse_lookup, re.compile(re_definition)

    (
        CHARACTER_TO_HTML_ENTITY,
        HTML_ENTITY_TO_CHARACTER,
        CHARACTER_TO_HTML_ENTITY_RE,
    ) = _populate_class_variables()

    CHARACTER_TO_XML_ENTITY = {
        "'": "apos",
        '"': "quot",
        "&": "amp",
        "<": "lt",
        ">": "gt",
    }

    # Angle brackets, plus ampersands that are NOT already part of an
    # entity reference. Raw strings fix the \d/\w escape-sequence warnings.
    BARE_AMPERSAND_OR_BRACKET = re.compile(
        r"([<>]|" r"&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" r")"
    )
    AMPERSAND_OR_BRACKET = re.compile(r"([<>&])")

    @classmethod
    def _substitute_html_entity(cls, matchobj):
        """Regexp callback: replace a character with its named HTML entity."""
        entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0))
        return "&%s;" % entity

    @classmethod
    def _substitute_xml_entity(cls, matchobj):
        """Used with a regular expression to substitute the
        appropriate XML entity for an XML special character."""
        entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)]
        return "&%s;" % entity

    @classmethod
    def quoted_attribute_value(cls, value):
        """Make a value into a quoted XML attribute, possibly escaping it.

        Most strings will be quoted using double quotes.
            Bob's Bar -> "Bob's Bar"
        If a string contains double quotes, it will be quoted using
        single quotes.
            Welcome to "my bar" -> 'Welcome to "my bar"'
        If a string contains both single and double quotes, the
        double quotes will be escaped, and the string will be quoted
        using double quotes.
            Welcome to "Bob's Bar" -> "Welcome to &quot;Bob's bar&quot;"
        """
        # Fixed: first parameter renamed self -> cls (this is a classmethod),
        # and the replacement string restored to "&quot;" — it had been
        # corrupted by HTML-entity decoding into a stray triple quote.
        quote_with = '"'
        if '"' in value:
            if "'" in value:
                # The string contains both single and double
                # quotes. Turn the double quotes into
                # entities. We quote the double quotes rather than
                # the single quotes because the entity name is
                # "&quot;" whether this is HTML or XML. If we
                # quoted the single quotes, we'd have to decide
                # between &apos; and &squot;.
                replace_with = "&quot;"
                value = value.replace('"', replace_with)
            else:
                # There are double quotes but no single quotes.
                # We can use single quotes to quote the attribute.
                quote_with = "'"
        return quote_with + value + quote_with

    @classmethod
    def substitute_xml(cls, value, make_quoted_attribute=False):
        """Substitute XML entities for special XML characters.

        :param value: A string to be substituted. The less-than sign
          will become &lt;, the greater-than sign will become &gt;,
          and any ampersands will become &amp;. If you want ampersands
          that appear to be part of an entity definition to be left
          alone, use substitute_xml_containing_entities() instead.
        :param make_quoted_attribute: If True, then the string will be
          quoted, as befits an attribute value.
        """
        # Escape angle brackets and ampersands.
        value = cls.AMPERSAND_OR_BRACKET.sub(cls._substitute_xml_entity, value)
        if make_quoted_attribute:
            value = cls.quoted_attribute_value(value)
        return value

    @classmethod
    def substitute_xml_containing_entities(cls, value, make_quoted_attribute=False):
        """Substitute XML entities for special XML characters.

        :param value: A string to be substituted. The less-than sign will
          become &lt;, the greater-than sign will become &gt;, and any
          ampersands that are not part of an entity defition will
          become &amp;.
        :param make_quoted_attribute: If True, then the string will be
          quoted, as befits an attribute value.
        """
        # Escape angle brackets, and ampersands that aren't part of
        # entities.
        value = cls.BARE_AMPERSAND_OR_BRACKET.sub(cls._substitute_xml_entity, value)
        if make_quoted_attribute:
            value = cls.quoted_attribute_value(value)
        return value

    @classmethod
    def substitute_html(cls, s):
        """Replace certain Unicode characters with named HTML entities.

        This differs from data.encode(encoding, 'xmlcharrefreplace')
        in that the goal is to make the result more readable (to those
        with ASCII displays) rather than to recover from
        errors. There's absolutely nothing wrong with a UTF-8 string
        containg a LATIN SMALL LETTER E WITH ACUTE, but replacing that
        character with "&eacute;" will make it more readable to some
        people.
        """
        return cls.CHARACTER_TO_HTML_ENTITY_RE.sub(cls._substitute_html_entity, s)
class EncodingDetector:
    """Suggests a number of possible encodings for a bytestring.

    Order of precedence:
    1. Encodings you specifically tell EncodingDetector to try first
       (the override_encodings argument to the constructor).
    2. An encoding declared within the bytestring itself, either in an
       XML declaration (if the bytestring is to be interpreted as an XML
       document), or in a <meta> tag (if the bytestring is to be
       interpreted as an HTML document.)
    3. An encoding detected through textual analysis by chardet,
       cchardet, or a similar external library.
    4. UTF-8.
    5. Windows-1252.
    """

    def __init__(
        self, markup, override_encodings=None, is_html=False, exclude_encodings=None
    ):
        self.override_encodings = override_encodings or []
        exclude_encodings = exclude_encodings or []
        # Lower-cased for case-insensitive matching in _usable().
        self.exclude_encodings = set([x.lower() for x in exclude_encodings])
        self.chardet_encoding = None  # computed lazily in encodings
        self.is_html = is_html
        self.declared_encoding = None  # computed lazily in encodings
        # First order of business: strip a byte-order mark.
        self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup)

    def _usable(self, encoding, tried):
        """True when *encoding* is non-None, not excluded and not yet tried.

        Side effect: records the (lower-cased) encoding in *tried*.
        """
        if encoding is not None:
            encoding = encoding.lower()
            if encoding in self.exclude_encodings:
                return False
            if encoding not in tried:
                tried.add(encoding)
                return True
        return False

    @property
    def encodings(self):
        """Yield a number of encodings that might work for this markup."""
        tried = set()
        for e in self.override_encodings:
            if self._usable(e, tried):
                yield e
        # Did the document originally start with a byte-order mark
        # that indicated its encoding?
        if self._usable(self.sniffed_encoding, tried):
            yield self.sniffed_encoding
        # Look within the document for an XML or HTML encoding
        # declaration.
        if self.declared_encoding is None:
            self.declared_encoding = self.find_declared_encoding(
                self.markup, self.is_html
            )
        if self._usable(self.declared_encoding, tried):
            yield self.declared_encoding
        # Use third-party character set detection to guess at the
        # encoding.
        if self.chardet_encoding is None:
            self.chardet_encoding = chardet_dammit(self.markup)
        if self._usable(self.chardet_encoding, tried):
            yield self.chardet_encoding
        # As a last-ditch effort, try utf-8 and windows-1252.
        for e in ("utf-8", "windows-1252"):
            if self._usable(e, tried):
                yield e

    @classmethod
    def strip_byte_order_mark(cls, data):
        """If a byte-order mark is present, strip it and return the encoding it implies."""
        encoding = None
        if isinstance(data, unicode):  # Python 2 name; unicode text has no BOM bytes
            # Unicode data cannot have a byte-order mark.
            return data, encoding
        # Fixed: the UTF-32 exclusion checks compared bytes against str
        # literals ("\x00\x00"); now byte literals, identical on Python 2.
        if (len(data) >= 4) and (data[:2] == b"\xfe\xff") and (data[2:4] != b"\x00\x00"):
            encoding = "utf-16be"
            data = data[2:]
        elif (
            (len(data) >= 4) and (data[:2] == b"\xff\xfe") and (data[2:4] != b"\x00\x00")
        ):
            encoding = "utf-16le"
            data = data[2:]
        elif data[:3] == b"\xef\xbb\xbf":
            encoding = "utf-8"
            data = data[3:]
        elif data[:4] == b"\x00\x00\xfe\xff":
            encoding = "utf-32be"
            data = data[4:]
        elif data[:4] == b"\xff\xfe\x00\x00":
            encoding = "utf-32le"
            data = data[4:]
        return data, encoding

    @classmethod
    def find_declared_encoding(
        cls, markup, is_html=False, search_entire_document=False
    ):
        """Given a document, tries to find its declared encoding.

        An XML encoding is declared at the beginning of the document.
        An HTML encoding is declared in a <meta> tag, hopefully near the
        beginning of the document.
        """
        if search_entire_document:
            xml_endpos = html_endpos = len(markup)
        else:
            xml_endpos = 1024
            html_endpos = max(2048, int(len(markup) * 0.05))
        declared_encoding = None
        declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos)
        if not declared_encoding_match and is_html:
            declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos)
        if declared_encoding_match is not None:
            declared_encoding = declared_encoding_match.groups()[0].decode(
                "ascii", "replace"
            )
        if declared_encoding:
            return declared_encoding.lower()
        return None
class UnicodeDammit:
"""A class for detecting the encoding of a *ML document and
converting it to a Unicode string. If the source encoding is
windows-1252, can replace MS smart quotes with their HTML or XML
equivalents."""
# This dictionary maps commonly seen values for "charset" in HTML
# meta tags to the corresponding Python codec names. It only covers
# values that aren't in Python's aliases and can't be determined
# by the heuristics in find_codec.
CHARSET_ALIASES = {"macintosh": "mac-roman", "x-sjis": "shift-jis"}
ENCODINGS_WITH_SMART_QUOTES = [
"windows-1252",
"iso-8859-1",
"iso-8859-2",
]
def __init__(
self,
markup,
override_encodings=[],
smart_quotes_to=None,
is_html=False,
exclude_encodings=[],
):
self.smart_quotes_to = smart_quotes_to
self.tried_encodings = []
self.contains_replacement_characters = False
self.is_html = is_html
self.log = logging.getLogger(__name__)
self.detector = EncodingDetector(
markup, override_encodings, is_html, exclude_encodings
)
# Short-circuit if the data is in Unicode to begin with.
if isinstance(markup, unicode) or markup == "":
self.markup = markup
self.unicode_markup = unicode(markup)
self.original_encoding = None
return
# The encoding detector may have stripped a byte-order mark.
# Use the stripped markup from this point on.
self.markup = self.detector.markup
u = None
for encoding in self.detector.encodings:
markup = self.detector.markup
u = self._convert_from(encoding)
if u is not None:
break
if not u:
# None of the encodings worked. As an absolute last resort,
# try them again with character replacement.
for encoding in self.detector.encodings:
if encoding != "ascii":
u = self._convert_from(encoding, "replace")
if u is not None:
self.log.warning(
"Some characters could not be decoded, and were "
"replaced with REPLACEMENT CHARACTER."
)
self.contains_replacement_characters = True
break
# If none of that worked, we could at this point force it to
# ASCII, but that would destroy so much data that I think
# giving up is better.
self.unicode_markup = u
if not u:
self.original_encoding = None
def _sub_ms_char(self, match):
    """Changes a MS smart quote character to an XML or HTML
    entity, or an ASCII character.

    Used as a re.sub callback; *match* captures one byte in \\x80-\\x9f.
    """
    orig = match.group(1)
    if self.smart_quotes_to == "ascii":
        # MS_CHARS_TO_ASCII has an entry for every byte the smart-quote
        # regex can match (\x80-\x9f), so .get() cannot return None here.
        sub = self.MS_CHARS_TO_ASCII.get(orig).encode()
    else:
        sub = self.MS_CHARS.get(orig)
        if type(sub) == tuple:
            if self.smart_quotes_to == "xml":
                # Numeric character reference, e.g. b"&#x201C;".
                sub = "&#x".encode() + sub[1].encode() + ";".encode()
            else:
                # Named HTML entity, e.g. b"&ldquo;".
                sub = "&".encode() + sub[0].encode() + ";".encode()
        else:
            # Plain replacement string (e.g. "?") with no entity form.
            sub = sub.encode()
    return sub
def _convert_from(self, proposed, errors="strict"):
    """Attempt to decode self.markup as *proposed*.

    On success, stores the decoded document in self.markup and the
    codec name in self.original_encoding, and returns the decoded
    string. Returns None if the codec is unknown, decoding fails, or
    this (encoding, errors) pair was already tried.
    """
    proposed = self.find_codec(proposed)
    if not proposed or (proposed, errors) in self.tried_encodings:
        return None
    self.tried_encodings.append((proposed, errors))
    markup = self.markup

    # Convert smart quotes to HTML if coming from an encoding
    # that might have them.
    if (
        self.smart_quotes_to is not None
        and proposed in self.ENCODINGS_WITH_SMART_QUOTES
    ):
        smart_quotes_re = b"([\x80-\x9f])"
        smart_quotes_compiled = re.compile(smart_quotes_re)
        markup = smart_quotes_compiled.sub(self._sub_ms_char, markup)

    try:
        # print "Trying to convert document to %s (errors=%s)" % (
        #     proposed, errors)
        u = self._to_unicode(markup, proposed, errors)
        # Success: from here on self.markup holds the decoded document.
        self.markup = u
        self.original_encoding = proposed
    except Exception as e:
        # print "That didn't work!"
        # print e
        return None
    # print "Correct encoding: %s" % proposed
    return self.markup
def _to_unicode(self, data, encoding, errors="strict"):
    """Given a string and its encoding, decodes the string into Unicode.
    %encoding is a string recognized by encodings.aliases"""
    # Python 2 idiom: unicode(bytes, encoding, errors) is equivalent to
    # bytes.decode(encoding, errors).
    return unicode(data, encoding, errors)
@property
def declared_html_encoding(self):
    """The encoding declared within the document itself, if any.

    Only meaningful for HTML input; None for non-HTML documents.
    """
    return self.detector.declared_encoding if self.is_html else None
def find_codec(self, charset):
    """Resolve *charset* to a usable codec name, if possible.

    Tries, in order: the CHARSET_ALIASES table, the name as given, the
    name with hyphens removed, the name with hyphens replaced by
    underscores, and finally falls back to the lowercased input itself.
    Returns a lowercase name, or None for a falsy charset.
    """
    # The `or` chain stops at the first truthy result; the
    # `charset and ...` guards keep None/"" from raising in .replace().
    value = (
        self._codec(self.CHARSET_ALIASES.get(charset, charset))
        or (charset and self._codec(charset.replace("-", "")))
        or (charset and self._codec(charset.replace("-", "_")))
        or (charset and charset.lower())
        or charset
    )
    if value:
        # Normalize so callers can compare codec names directly.
        return value.lower()
    return None
def _codec(self, charset):
    """Return *charset* unchanged if Python has a codec for it.

    A falsy charset is passed straight through; an unknown charset
    yields None.
    """
    if not charset:
        return charset
    try:
        codecs.lookup(charset)
    except (LookupError, ValueError):
        return None
    return charset
# A partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities.
# Each value is either an (entity-name, hex-codepoint) tuple, or a plain
# fallback string for bytes that have no entity equivalent.
MS_CHARS = {
    b"\x80": ("euro", "20AC"),
    b"\x81": " ",
    b"\x82": ("sbquo", "201A"),
    b"\x83": ("fnof", "192"),
    b"\x84": ("bdquo", "201E"),
    b"\x85": ("hellip", "2026"),
    b"\x86": ("dagger", "2020"),
    b"\x87": ("Dagger", "2021"),
    b"\x88": ("circ", "2C6"),
    b"\x89": ("permil", "2030"),
    b"\x8A": ("Scaron", "160"),
    b"\x8B": ("lsaquo", "2039"),
    b"\x8C": ("OElig", "152"),
    b"\x8D": "?",
    b"\x8E": ("#x17D", "17D"),
    b"\x8F": "?",
    b"\x90": "?",
    b"\x91": ("lsquo", "2018"),
    b"\x92": ("rsquo", "2019"),
    b"\x93": ("ldquo", "201C"),
    b"\x94": ("rdquo", "201D"),
    b"\x95": ("bull", "2022"),
    b"\x96": ("ndash", "2013"),
    b"\x97": ("mdash", "2014"),
    b"\x98": ("tilde", "2DC"),
    b"\x99": ("trade", "2122"),
    b"\x9a": ("scaron", "161"),
    b"\x9b": ("rsaquo", "203A"),
    b"\x9c": ("oelig", "153"),
    b"\x9d": "?",
    b"\x9e": ("#x17E", "17E"),
    b"\x9f": ("Yuml", ""),
}
# A parochial partial mapping of ISO-Latin-1 to ASCII. Contains
# horrors like stripping diacritical marks to turn á into a, but also
# contains non-horrors like turning “ into ".
# Every value is a plain replacement string; see the b"\xb4" fix below.
MS_CHARS_TO_ASCII = {
    b"\x80": "EUR",
    b"\x81": " ",
    b"\x82": ",",
    b"\x83": "f",
    b"\x84": ",,",
    b"\x85": "...",
    b"\x86": "+",
    b"\x87": "++",
    b"\x88": "^",
    b"\x89": "%",
    b"\x8a": "S",
    b"\x8b": "<",
    b"\x8c": "OE",
    b"\x8d": "?",
    b"\x8e": "Z",
    b"\x8f": "?",
    b"\x90": "?",
    b"\x91": "'",
    b"\x92": "'",
    b"\x93": '"',
    b"\x94": '"',
    b"\x95": "*",
    b"\x96": "-",
    b"\x97": "--",
    b"\x98": "~",
    b"\x99": "(TM)",
    b"\x9a": "s",
    b"\x9b": ">",
    b"\x9c": "oe",
    b"\x9d": "?",
    b"\x9e": "z",
    b"\x9f": "Y",
    b"\xa0": " ",
    b"\xa1": "!",
    b"\xa2": "c",
    b"\xa3": "GBP",
    b"\xa4": "$",  # This approximation is especially parochial--this is the
    # generic currency symbol.
    b"\xa5": "YEN",
    b"\xa6": "|",
    b"\xa7": "S",
    b"\xa8": "..",
    b"\xa9": "",
    b"\xaa": "(th)",
    b"\xab": "<<",
    b"\xac": "!",
    b"\xad": " ",
    b"\xae": "(R)",
    b"\xaf": "-",
    b"\xb0": "o",
    b"\xb1": "+-",
    b"\xb2": "2",
    b"\xb3": "3",
    # FIX: was the tuple ("'", "acute"), inconsistent with every other
    # value in this table, which is a plain string. The tuple form is
    # only meaningful in MS_CHARS; _sub_ms_char's ASCII branch calls
    # .encode() directly on the value, which a tuple does not support.
    b"\xb4": "'",
    b"\xb5": "u",
    b"\xb6": "P",
    b"\xb7": "*",
    b"\xb8": ",",
    b"\xb9": "1",
    b"\xba": "(th)",
    b"\xbb": ">>",
    b"\xbc": "1/4",
    b"\xbd": "1/2",
    b"\xbe": "3/4",
    b"\xbf": "?",
    b"\xc0": "A",
    b"\xc1": "A",
    b"\xc2": "A",
    b"\xc3": "A",
    b"\xc4": "A",
    b"\xc5": "A",
    b"\xc6": "AE",
    b"\xc7": "C",
    b"\xc8": "E",
    b"\xc9": "E",
    b"\xca": "E",
    b"\xcb": "E",
    b"\xcc": "I",
    b"\xcd": "I",
    b"\xce": "I",
    b"\xcf": "I",
    b"\xd0": "D",
    b"\xd1": "N",
    b"\xd2": "O",
    b"\xd3": "O",
    b"\xd4": "O",
    b"\xd5": "O",
    b"\xd6": "O",
    b"\xd7": "*",
    b"\xd8": "O",
    b"\xd9": "U",
    b"\xda": "U",
    b"\xdb": "U",
    b"\xdc": "U",
    b"\xdd": "Y",
    b"\xde": "b",
    b"\xdf": "B",
    b"\xe0": "a",
    b"\xe1": "a",
    b"\xe2": "a",
    b"\xe3": "a",
    b"\xe4": "a",
    b"\xe5": "a",
    b"\xe6": "ae",
    b"\xe7": "c",
    b"\xe8": "e",
    b"\xe9": "e",
    b"\xea": "e",
    b"\xeb": "e",
    b"\xec": "i",
    b"\xed": "i",
    b"\xee": "i",
    b"\xef": "i",
    b"\xf0": "o",
    b"\xf1": "n",
    b"\xf2": "o",
    b"\xf3": "o",
    b"\xf4": "o",
    b"\xf5": "o",
    b"\xf6": "o",
    b"\xf7": "/",
    b"\xf8": "o",
    b"\xf9": "u",
    b"\xfa": "u",
    b"\xfb": "u",
    b"\xfc": "u",
    b"\xfd": "y",
    b"\xfe": "b",
    b"\xff": "y",
}
# A map used when removing rogue Windows-1252/ISO-8859-1
# characters in otherwise UTF-8 documents.
#
# Note that \x81, \x8d, \x8f, \x90, and \x9d are undefined in
# Windows-1252.
WINDOWS_1252_TO_UTF8 = {
    0x80: b"\xe2\x82\xac",  # €
    0x82: b"\xe2\x80\x9a",  # ‚
    0x83: b"\xc6\x92",  # ƒ
    0x84: b"\xe2\x80\x9e",  # „
    0x85: b"\xe2\x80\xa6",  # …
    0x86: b"\xe2\x80\xa0",  # †
    0x87: b"\xe2\x80\xa1",  # ‡
    0x88: b"\xcb\x86",  # ˆ
    0x89: b"\xe2\x80\xb0",  # ‰
    0x8A: b"\xc5\xa0",  # Š
    0x8B: b"\xe2\x80\xb9",  # ‹
    0x8C: b"\xc5\x92",  # Œ
    0x8E: b"\xc5\xbd",  # Ž
    0x91: b"\xe2\x80\x98",  # ‘
    0x92: b"\xe2\x80\x99",  # ’
    0x93: b"\xe2\x80\x9c",  # “
    0x94: b"\xe2\x80\x9d",  # ”
    0x95: b"\xe2\x80\xa2",  # •
    0x96: b"\xe2\x80\x93",  # –
    0x97: b"\xe2\x80\x94",  # —
    0x98: b"\xcb\x9c",  # ˜
    0x99: b"\xe2\x84\xa2",  # ™
    0x9A: b"\xc5\xa1",  # š
    0x9B: b"\xe2\x80\xba",  # ›
    0x9C: b"\xc5\x93",  # œ
    0x9E: b"\xc5\xbe",  # ž
    0x9F: b"\xc5\xb8",  # Ÿ
    0xA0: b"\xc2\xa0",  # no-break space
    0xA1: b"\xc2\xa1",  # ¡
    0xA2: b"\xc2\xa2",  # ¢
    0xA3: b"\xc2\xa3",  # £
    0xA4: b"\xc2\xa4",  # ¤
    0xA5: b"\xc2\xa5",  # ¥
    0xA6: b"\xc2\xa6",  # ¦
    0xA7: b"\xc2\xa7",  # §
    0xA8: b"\xc2\xa8",  # ¨
    0xA9: b"\xc2\xa9",  # ©
    0xAA: b"\xc2\xaa",  # ª
    0xAB: b"\xc2\xab",  # «
    0xAC: b"\xc2\xac",  # ¬
    0xAD: b"\xc2\xad",  # soft hyphen
    0xAE: b"\xc2\xae",  # ®
    0xAF: b"\xc2\xaf",  # ¯
    0xB0: b"\xc2\xb0",  # °
    0xB1: b"\xc2\xb1",  # ±
    0xB2: b"\xc2\xb2",  # ²
    0xB3: b"\xc2\xb3",  # ³
    0xB4: b"\xc2\xb4",  # ´
    0xB5: b"\xc2\xb5",  # µ
    0xB6: b"\xc2\xb6",  # ¶
    0xB7: b"\xc2\xb7",  # ·
    0xB8: b"\xc2\xb8",  # ¸
    0xB9: b"\xc2\xb9",  # ¹
    0xBA: b"\xc2\xba",  # º
    0xBB: b"\xc2\xbb",  # »
    0xBC: b"\xc2\xbc",  # ¼
    0xBD: b"\xc2\xbd",  # ½
    0xBE: b"\xc2\xbe",  # ¾
    0xBF: b"\xc2\xbf",  # ¿
    0xC0: b"\xc3\x80",  # À
    0xC1: b"\xc3\x81",  # Á
    0xC2: b"\xc3\x82",  # Â
    0xC3: b"\xc3\x83",  # Ã
    0xC4: b"\xc3\x84",  # Ä
    0xC5: b"\xc3\x85",  # Å
    0xC6: b"\xc3\x86",  # Æ
    0xC7: b"\xc3\x87",  # Ç
    0xC8: b"\xc3\x88",  # È
    0xC9: b"\xc3\x89",  # É
    0xCA: b"\xc3\x8a",  # Ê
    0xCB: b"\xc3\x8b",  # Ë
    0xCC: b"\xc3\x8c",  # Ì
    0xCD: b"\xc3\x8d",  # Í
    0xCE: b"\xc3\x8e",  # Î
    0xCF: b"\xc3\x8f",  # Ï
    0xD0: b"\xc3\x90",  # Ð
    0xD1: b"\xc3\x91",  # Ñ
    0xD2: b"\xc3\x92",  # Ò
    0xD3: b"\xc3\x93",  # Ó
    0xD4: b"\xc3\x94",  # Ô
    0xD5: b"\xc3\x95",  # Õ
    0xD6: b"\xc3\x96",  # Ö
    0xD7: b"\xc3\x97",  # ×
    0xD8: b"\xc3\x98",  # Ø
    0xD9: b"\xc3\x99",  # Ù
    0xDA: b"\xc3\x9a",  # Ú
    0xDB: b"\xc3\x9b",  # Û
    0xDC: b"\xc3\x9c",  # Ü
    0xDD: b"\xc3\x9d",  # Ý
    0xDE: b"\xc3\x9e",  # Þ
    0xDF: b"\xc3\x9f",  # ß
    0xE0: b"\xc3\xa0",  # à
    # FIX: 0xE1 previously mapped to b"\xa1", a bare UTF-8 continuation
    # byte that is not valid UTF-8 on its own. The UTF-8 encoding of
    # U+00E1 (á) is b"\xc3\xa1", matching every other entry's pattern.
    0xE1: b"\xc3\xa1",  # á
    0xE2: b"\xc3\xa2",  # â
    0xE3: b"\xc3\xa3",  # ã
    0xE4: b"\xc3\xa4",  # ä
    0xE5: b"\xc3\xa5",  # å
    0xE6: b"\xc3\xa6",  # æ
    0xE7: b"\xc3\xa7",  # ç
    0xE8: b"\xc3\xa8",  # è
    0xE9: b"\xc3\xa9",  # é
    0xEA: b"\xc3\xaa",  # ê
    0xEB: b"\xc3\xab",  # ë
    0xEC: b"\xc3\xac",  # ì
    0xED: b"\xc3\xad",  # í
    0xEE: b"\xc3\xae",  # î
    0xEF: b"\xc3\xaf",  # ï
    0xF0: b"\xc3\xb0",  # ð
    0xF1: b"\xc3\xb1",  # ñ
    0xF2: b"\xc3\xb2",  # ò
    0xF3: b"\xc3\xb3",  # ó
    0xF4: b"\xc3\xb4",  # ô
    0xF5: b"\xc3\xb5",  # õ
    0xF6: b"\xc3\xb6",  # ö
    0xF7: b"\xc3\xb7",  # ÷
    0xF8: b"\xc3\xb8",  # ø
    0xF9: b"\xc3\xb9",  # ù
    0xFA: b"\xc3\xba",  # ú
    0xFB: b"\xc3\xbb",  # û
    0xFC: b"\xc3\xbc",  # ü
    0xFD: b"\xc3\xbd",  # ý
    0xFE: b"\xc3\xbe",  # þ
}
# (first_lead_byte, last_lead_byte, total_sequence_length) for UTF-8
# multibyte sequences; used by detwingle() to skip valid UTF-8.
MULTIBYTE_MARKERS_AND_SIZES = [
    (0xC2, 0xDF, 2),  # 2-byte characters start with a byte C2-DF
    (0xE0, 0xEF, 3),  # 3-byte characters start with E0-EF
    (0xF0, 0xF4, 4),  # 4-byte characters start with F0-F4
]
# Lowest and highest possible UTF-8 lead bytes, for a quick range test.
FIRST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[0][0]
LAST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[-1][1]
@classmethod
def detwingle(
    cls, in_bytes, main_encoding="utf8", embedded_encoding="windows-1252"
):
    """Fix characters from one encoding embedded in some other encoding.

    Currently the only situation supported is Windows-1252 (or its
    subset ISO-8859-1), embedded in UTF-8.

    The input must be a bytestring. If you've already converted
    the document to Unicode, you're too late.

    The output is a bytestring in which `embedded_encoding`
    characters have been converted to their `main_encoding`
    equivalents.
    """
    if embedded_encoding.replace("_", "-").lower() not in (
        "windows-1252",
        "windows_1252",
    ):
        raise NotImplementedError(
            "Windows-1252 and ISO-8859-1 are the only currently supported "
            "embedded encodings."
        )

    if main_encoding.lower() not in ("utf8", "utf-8"):
        raise NotImplementedError(
            "UTF-8 is the only currently supported main encoding."
        )

    # Walk the input byte by byte: skip over valid UTF-8 multibyte
    # sequences, and replace stray Windows-1252 high bytes with their
    # UTF-8 equivalents. Untouched spans are copied lazily as chunks.
    byte_chunks = []

    chunk_start = 0
    pos = 0
    while pos < len(in_bytes):
        byte = in_bytes[pos]
        if not isinstance(byte, int):
            # Python 2.x: indexing a str yields a 1-char str, not an int.
            byte = ord(byte)
        if byte >= cls.FIRST_MULTIBYTE_MARKER and byte <= cls.LAST_MULTIBYTE_MARKER:
            # This is the start of a UTF-8 multibyte character. Skip
            # to the end.
            for start, end, size in cls.MULTIBYTE_MARKERS_AND_SIZES:
                if byte >= start and byte <= end:
                    pos += size
                    break
        elif byte >= 0x80 and byte in cls.WINDOWS_1252_TO_UTF8:
            # We found a Windows-1252 character!
            # Save the string up to this point as a chunk.
            byte_chunks.append(in_bytes[chunk_start:pos])

            # Now translate the Windows-1252 character into UTF-8
            # and add it as another, one-byte chunk.
            byte_chunks.append(cls.WINDOWS_1252_TO_UTF8[byte])
            pos += 1
            chunk_start = pos
        else:
            # Go on to the next character.
            pos += 1
    if chunk_start == 0:
        # The string is unchanged.
        return in_bytes
    else:
        # Store the final chunk.
        byte_chunks.append(in_bytes[chunk_start:])
    return b"".join(byte_chunks)
|
widgets | notebook | # Copyright (C) 2010 Adam Olsen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from typing import Optional
from gi.repository import Gdk, GdkPixbuf, GObject, Gtk, Pango
from xl import providers
from xl.nls import gettext as _
from xlgui import guiutil
from xlgui.widgets import menu
# Custom tab style; fixes some Adwaita ugliness
# The provider is attached per-widget by apply_css() below rather than
# installed globally on the screen.
TAB_CSS = Gtk.CssProvider()
TAB_CSS.load_from_data(
    b"""
/* Most themes don't handle vertical notebooks well,
so we override everything. */
notebook.vertical tab {
padding: 3px 4px 3px 4px;
}
notebook.vertical tab * {
margin: 0;
padding: 0;
}
notebook.vertical tab label {
margin: 0 0 3px 0;
}
/* Remove gap between tabs */
header.top tab, header.bottom tab {
margin-left: -1px;
margin-right: -1px;
}
header.left tab, header.right tab {
margin-top: -1px;
margin-bottom: -1px;
}
"""
)
def apply_css(widget):
    """Attach the shared tab CSS provider to *widget*.

    Returns the widget's style context so callers can add style classes.
    """
    context = widget.get_style_context()
    context.add_provider(TAB_CSS, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
    return context
class SmartNotebook(Gtk.Notebook):
    """Gtk.Notebook with middle-click "new tab", per-tab context menus
    and optional vertical (left-side) tab layout."""

    def __init__(self, vertical: bool = False):
        Gtk.Notebook.__init__(self)
        self.set_scrollable(True)
        self.connect("button-press-event", self.on_button_press)
        self.connect("popup-menu", self.on_popup_menu)
        self.connect("notify::tab-pos", self.__on_notify_tab_pos)
        # When True, removing the last page triggers add_default_tab().
        self._add_tab_on_empty = True
        sc = apply_css(self)
        if vertical:
            sc.add_class("vertical")
            self.set_tab_pos(Gtk.PositionType.LEFT)

    def get_current_tab(self):
        """Return the currently visible page widget, or None if empty."""
        current_page = self.get_current_page()
        if current_page == -1:
            return None
        return self.get_nth_page(current_page)

    def add_tab(
        self,
        tab: "NotebookTab",
        page: "NotebookPage",
        position: int = -1,
        switch: bool = True,
    ) -> None:
        """
        Add a tab to the notebook.

        :param tab: The tab to use
        :param page: The page to use
        :param position: Index to insert page at, or -1 for append
        :param switch: Switch focus to the new tab
        """
        self.insert_page(page, tab, position=position)
        tab.notebook = self
        self.set_tab_reorderable(page, page.reorderable)
        tab_pos = self.get_tab_pos()
        if tab_pos in (Gtk.PositionType.TOP, Gtk.PositionType.BOTTOM):
            # Horizontal tab bars share available width between tabs.
            self.child_set_property(page, "tab-expand", True)
        tab.adjust_label_width(tab_pos)
        if switch:
            self.set_current_page(self.page_num(page))

    def add_default_tab(self):
        """
        Action taken when a generic "new tab" option is triggered.
        Subclasses need to override this if they want new tab
        functionality to work automatically.

        :return: The NotebookTab created, or None
        """
        pass

    def remove_page(self, page_num: int) -> None:
        """
        Overrides Gtk.Notebook.remove_page

        :param page_num: Index of the page, or -1 for the last page.
        """
        if page_num == -1:
            # Match Gtk semantics: -1 means the last page.
            page_num = self.get_n_pages() - 1
        tab = self.get_tab_label(self.get_nth_page(page_num))
        Gtk.Notebook.remove_page(self, page_num)
        # Detach the tab so it no longer points at this notebook.
        tab.notebook = None
        if self._add_tab_on_empty and self.get_n_pages() == 0:
            self.add_default_tab()

    def remove_tab(self, tab: "NotebookTab") -> None:
        """
        Remove a specific NotebookTab from the notebook
        """
        page_num = self.page_num(tab.page)
        if page_num >= 0:
            self.remove_page(page_num)

    def set_add_tab_on_empty(self, add_tab_on_empty: bool) -> None:
        """
        If set True, the SmartNotebook will always maintain at
        least one tab in the notebook
        """
        self._add_tab_on_empty = add_tab_on_empty

    def on_button_press(self, widget, event):
        # Middle-click on the notebook body opens a new default tab.
        if (
            event.type == Gdk.EventType.BUTTON_PRESS
            and event.button == Gdk.BUTTON_MIDDLE
        ):
            self.add_default_tab()

    def on_popup_menu(self, widget):
        # Keyboard-triggered context menu (Shift+F10 / Menu key).
        page = self.get_current_tab()
        tab_label = self.get_tab_label(self.get_current_tab())
        page.tab_menu.popup(None, None, guiutil.position_menu, tab_label, 0, 0)
        return True

    def __on_notify_tab_pos(self, _widget, _param):
        # Keep tab-expand and label widths consistent when the tab bar
        # moves between horizontal and vertical positions.
        tab_pos = self.get_tab_pos()
        expand = tab_pos in (Gtk.PositionType.TOP, Gtk.PositionType.BOTTOM)
        for i in range(self.get_n_pages()):
            page = self.get_nth_page(i)
            self.child_set_property(page, "tab-expand", expand)
            tab = self.get_tab_label(page)  # type: NotebookTab
            tab.adjust_label_width(tab_pos)
class NotebookTab(Gtk.EventBox):
    """
    Class to represent a generic tab in a Gtk.Notebook.

    Holds the icon, label, optional inline-rename entry, and close
    button shown in the tab strip for one NotebookPage.
    """

    # Whether the user may drag this tab to reorder it.
    reorderable = True

    def __init__(
        self, notebook: SmartNotebook, page: "NotebookPage", vertical: bool = False
    ):
        """
        :param notebook: The notebook this tab will belong to
        :param page: The page this tab will be associated with
        :param vertical: Whether the tab contents are to be laid out vertically
        """
        Gtk.EventBox.__init__(self)
        self.set_visible_window(False)

        self.closable = True
        self.notebook = notebook
        self.page = page
        self.page.tab_menu.attach_to_widget(self.page, None)
        self.connect("button-press-event", self.on_button_press)
        self.vertical = vertical

        if vertical:
            box = Gtk.Box(spacing=2, orientation=Gtk.Orientation.VERTICAL)
        else:
            box = Gtk.Box(spacing=2)
        self.add(box)
        apply_css(box)

        # Icon is hidden until set_icon() provides a pixbuf.
        self.icon = Gtk.Image()
        self.icon.set_no_show_all(True)
        apply_css(self.icon)

        self.label = Gtk.Label(label=self.page.get_page_name())
        self.label.set_tooltip_text(self.page.get_page_name())
        apply_css(self.label)
        if vertical:
            self.label.set_angle(90)
            self.label.props.valign = Gtk.Align.CENTER
            # Don't ellipsize but give a sane maximum length.
            self.label.set_max_width_chars(20)
        else:
            self.label.props.halign = Gtk.Align.CENTER
            self.label.set_ellipsize(Pango.EllipsizeMode.END)
            self.adjust_label_width(Gtk.PositionType.TOP)

        if self.can_rename():
            # Hidden entry that swaps in for the label during rename.
            self.entry = entry = Gtk.Entry()
            entry.set_width_chars(self.label.get_max_width_chars())
            entry.set_text(self.label.get_text())
            border = Gtk.Border.new()
            border.left = 1
            border.right = 1
            # NOTE(review): `border` is constructed but never applied to
            # the entry -- looks like leftover from a removed call; confirm.
            entry.connect("activate", self.on_entry_activate)
            entry.connect("focus-out-event", self.on_entry_focus_out_event)
            entry.connect("key-press-event", self.on_entry_key_press_event)
            entry.set_no_show_all(True)
            apply_css(entry)

        self.button = button = Gtk.Button()
        button.set_relief(Gtk.ReliefStyle.NONE)
        button.set_halign(Gtk.Align.CENTER)
        button.set_valign(Gtk.Align.CENTER)
        button.set_focus_on_click(False)
        button.set_tooltip_text(_("Close Tab"))
        button.add(Gtk.Image.new_from_icon_name("window-close", Gtk.IconSize.MENU))
        button.connect("clicked", self.close)
        button.connect("button-press-event", self.on_button_press)
        apply_css(button)

        # pack the widgets in
        if vertical:
            box.pack_start(button, False, False, 0)
            box.pack_end(self.icon, False, False, 0)
            box.pack_end(self.label, True, True, 0)
            if self.can_rename():
                box.pack_end(self.entry, True, True, 0)
        else:
            box.pack_start(self.icon, False, False, 0)
            box.pack_start(self.label, True, True, 0)
            if self.can_rename():
                box.pack_start(self.entry, True, True, 0)
            box.pack_end(button, False, False, 0)

        page.set_tab(self)
        page.connect("name-changed", self.on_name_changed)
        box.show_all()

    def adjust_label_width(self, tab_pos: Gtk.PositionType) -> None:
        """Change the label's minimum width according to tab position"""
        if self.vertical:
            # Vertical tabs don't care about tab position.
            return
        if tab_pos in (Gtk.PositionType.TOP, Gtk.PositionType.BOTTOM):
            # The number of characters here seems to be approximate.
            # 4 is enough for around 2 characters and an ellipsis.
            self.label.set_width_chars(4)
        else:
            self.label.set_width_chars(20)

    def set_icon(self, pixbuf: Optional[GdkPixbuf.Pixbuf]) -> None:
        """
        Set the primary icon for the tab.

        :param pixbuf: The icon to use, or None to hide
        """
        if pixbuf is None:
            self.icon.set_property("visible", False)
        else:
            self.icon.set_from_pixbuf(pixbuf)
            self.icon.set_property("visible", True)

    def set_closable(self, closable: bool) -> None:
        # A non-closable tab keeps its close button, but disabled.
        self.closable = closable
        self.button.set_sensitive(closable)

    def on_button_press(self, widget, event):
        """
        Handles mouse button events on the tab.

        Typically triggers renaming, closing and menu.
        """
        if (
            event.button == Gdk.BUTTON_PRIMARY
            and event.type == Gdk.EventType._2BUTTON_PRESS
        ):
            # Double-click: start in-place rename.
            self.start_rename()
        elif event.button == Gdk.BUTTON_MIDDLE:
            self.close()
        elif event.triggers_context_menu():
            self.page.tab_menu.popup(None, None, None, None, event.button, event.time)
            return True

    def on_entry_activate(self, entry):
        """
        Handles end of editing and triggers the actual rename.
        """
        self.entry.props.editing_canceled = False
        self.end_rename()

    def on_entry_focus_out_event(self, widget, event):
        """
        Make defocusing the rename box equivalent to activating it.
        """
        if not self.entry.props.editing_canceled:
            widget.activate()

    def on_entry_key_press_event(self, widget, event):
        """
        Cancel rename if Escape is pressed
        """
        if event.keyval == Gdk.KEY_Escape:
            self.entry.props.editing_canceled = True
            self.end_rename()
            return True

    def on_name_changed(self, *args):
        # Keep the tab label in sync with the page's name.
        self.label.set_text(self.page.get_page_name())

    def start_rename(self):
        """
        Initiates the renaming of a tab, if the page supports this.
        """
        if not self.can_rename():
            return
        self.entry.set_text(self.page.get_page_name())
        self.label.hide()
        self.button.hide()
        self.entry.show()
        self.entry.select_region(0, -1)
        self.entry.grab_focus()

    def end_rename(self, cancel=False):
        """
        Finishes or cancels the renaming
        """
        # NOTE(review): the `cancel` parameter is unused; cancellation is
        # signalled via entry.props.editing_canceled instead -- confirm
        # no external caller relies on it before removing.
        name = self.entry.get_text()
        if name.strip() != "" and not self.entry.props.editing_canceled:
            self.page.set_page_name(name)
            self.label.set_text(name)
            self.label.set_tooltip_text(name)

        self.entry.hide()
        self.label.show()
        self.button.show()

        self.entry.props.editing_canceled = False

    def can_rename(self):
        # Renaming is supported when the page implements set_page_name().
        return hasattr(self.page, "set_page_name")

    def close(self, *args):
        # The "closing" signal may veto the close by returning True.
        if self.closable and not self.page.emit("closing"):
            self.notebook.remove_page(self.notebook.page_num(self.page))
class NotebookPage(Gtk.Box):
    """
    Base class representing a page. Should never be used directly.
    """

    menu_provider_name = "tab-context"  # override this in subclasses
    # Whether the containing tab may be dragged to reorder it.
    reorderable = True
    __gsignals__ = {
        # Emitted when the page's display name changes.
        "name-changed": (GObject.SignalFlags.RUN_LAST, None, ()),
        # Emitted before closing; a True handler return vetoes the close.
        "closing": (GObject.SignalFlags.RUN_LAST, GObject.TYPE_BOOLEAN, ()),
    }

    def __init__(self, child=None, page_name=None, menu_provider_name=None):
        """
        :param child: Optional widget to pack into the page
        :param page_name: Optional fixed display name for the page
        :param menu_provider_name: Optional provider name for the
            tab context menu (defaults to the class attribute)
        """
        Gtk.Box.__init__(self, orientation=Gtk.Orientation.VERTICAL)
        # sometimes you just want to create a page
        if menu_provider_name is not None:
            self.menu_provider_name = menu_provider_name
        self.tab = None
        self.tab_menu = menu.ProviderMenu(self.menu_provider_name, self)
        if child is not None:
            self.pack_start(child, True, True, 0)
        if page_name is not None:
            self.page_name = page_name

    def focus(self):
        """
        Grabs focus for this page. Should be overridden in subclasses.
        """
        self.grab_focus()

    def get_page_name(self):
        """
        Returns the name of this tab. Should be overridden in subclasses.

        Subclasses can also implement set_page_name(self, name) to allow
        renaming, but this is not mandatory.
        """
        if hasattr(self, "page_name"):
            return self.page_name
        return "UNNAMED PAGE"

    def set_tab(self, tab):
        """
        Set the tab that holds this page. This will be called directly
        from the tab itself when it is created, and should not be used
        outside of that.
        """
        self.tab = tab

    def is_current_page(self):
        """
        Returns True if this page is the currently-visible page in
        the Notebook.
        """
        # NOTE(review): get_nth_page/get_current_page are notebook
        # methods, but self.tab is a NotebookTab (an EventBox); this
        # looks like it should read self.tab.notebook -- confirm.
        return self.tab.get_nth_page(self.tab.get_current_page()) == self

    def name_changed(self):
        # Notify listeners (e.g. the tab label) that the name changed.
        self.emit("name-changed")
class NotebookAction:
    """
    A custom action to be placed to the left or right of tabs in a notebook
    """

    # Provider name identifying this action; set by subclasses.
    name = None
    # Which side of the tab strip the action widget is packed on.
    position = Gtk.PackType.END

    def __init__(self, notebook):
        # The notebook this action instance is attached to.
        self.notebook = notebook
class NotebookActionService(providers.ProviderHandler):
    """
    Provides interface for action widgets to be dynamically attached
    detached from notebooks.

    Actions are widgets placed to the left or right of tabs on a notebook.
    """

    def __init__(self, notebook, servicename):
        """
        :param notebook: Notebook to attach to
        :param servicename: Provider service name to use
        """
        providers.ProviderHandler.__init__(self, servicename, notebook)
        self.notebook = notebook

        # Try to set up action widgets
        lw = Gtk.Box(spacing=3)
        lw.pack_end(Gtk.Separator.new(Gtk.Orientation.HORIZONTAL), False, False, 0)
        rw = Gtk.Box(spacing=3)
        # FIX: this separator was previously packed into `lw`, leaving
        # the right-hand action box with no separator and the left-hand
        # one with two.
        rw.pack_start(Gtk.Separator.new(Gtk.Orientation.HORIZONTAL), False, False, 0)
        notebook.set_action_widget(lw, Gtk.PackType.START)
        notebook.set_action_widget(rw, Gtk.PackType.END)

        self.__actions = {}
        # Attach any providers that are already registered.
        for provider in self.get_providers():
            self.on_provider_added(provider)

    def on_provider_added(self, provider):
        """
        Adds actions on provider addition
        """
        try:
            actions_box = self.notebook.get_action_widget(provider.position)
        except AttributeError:
            # Provider without a `position` attribute: ignore it.
            pass
        else:
            self.__actions[provider.name] = provider(self.notebook)
            actions_box.pack_start(self.__actions[provider.name], False, False, 0)
            actions_box.show_all()

    def on_provider_removed(self, provider):
        """
        Removes actions on provider removal
        """
        try:
            actions_box = self.notebook.get_action_widget(provider.position)
        except AttributeError:
            pass
        else:
            action = self.__actions[provider.name]
            actions_box.remove(action)
            action.destroy()
            del self.__actions[provider.name]
|
draftgeoutils | general | # ***************************************************************************
# * Copyright (c) 2009, 2010 Yorik van Havre <yorik@uncreated.net> *
# * Copyright (c) 2009, 2010 Ken Cline <cline@frii.com> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides general functions to work with topological shapes."""
## @package general
# \ingroup draftgeoutils
# \brief Provides general functions to work with topological shapes.
import math
import DraftVecUtils
import FreeCAD as App
import lazy_loader.lazy_loader as lz
# Delay import of module until first use because it is heavy
Part = lz.LazyLoader("Part", globals(), "Part")
## \addtogroup draftgeoutils
# @{
# Draft preferences group; read by precision() below.
PARAMGRP = App.ParamGet("User parameter:BaseApp/Preferences/Mod/Draft")

# Default normal direction for all geometry operations
NORM = App.Vector(0, 0, 1)
def precision():
    """Return the Draft precision setting, capped at 10 decimal places.

    The cap avoids overspecification: with too many digits the
    comparisons in this module would never consider two points
    coincident, while 10 digits is already beyond what FreeCAD/OCC
    distinguishes internally.
    """
    cap = 10
    return min(PARAMGRP.GetInt("precision", 6), cap)
def vec(edge, use_orientation=False):
    """Return a vector from an edge or a Part.LineSegment.

    If use_orientation is True, an Edge with "Reversed" orientation
    yields the flipped direction. Non-straight edges give strange
    results. Unsupported types yield None.
    """
    if isinstance(edge, Part.Shape):
        first = edge.Vertexes[0].Point
        last = edge.Vertexes[-1].Point
        is_reversed = (
            use_orientation
            and isinstance(edge, Part.Edge)
            and edge.Orientation == "Reversed"
        )
        return first.sub(last) if is_reversed else last.sub(first)
    if isinstance(edge, Part.LineSegment):
        return edge.EndPoint.sub(edge.StartPoint)
    return None
def edg(p1, p2):
    """Return an edge from 2 vectors.

    Returns None for coincident points or non-Vector arguments.
    """
    both_vectors = isinstance(p1, App.Vector) and isinstance(p2, App.Vector)
    if not both_vectors:
        return None
    if DraftVecUtils.equals(p1, p2):
        return None
    return Part.LineSegment(p1, p2).toShape()
def getVerts(shape):
    """Return a list containing vectors of each vertex of the shape.

    An object without a Vertexes attribute yields an empty list.
    """
    if not hasattr(shape, "Vertexes"):
        return []
    return [vertex.Point for vertex in shape.Vertexes]
def v1(edge):
    """Return the first point of an edge."""
    first_vertex = edge.Vertexes[0]
    return first_vertex.Point
def isNull(something):
    """Return True if the given shape, vector, or placement is Null.

    A vector is "null" when equal to (0, 0, 0); a placement when its
    base is the origin and its rotation is the identity quaternion.
    Any other type yields None.
    """
    if isinstance(something, Part.Shape):
        return something.isNull()
    if isinstance(something, App.Vector):
        return bool(something == App.Vector(0, 0, 0))
    if isinstance(something, App.Placement):
        identity_q = (0, 0, 0, 1)
        return bool(
            something.Base == App.Vector(0, 0, 0)
            and something.Rotation.Q == identity_q
        )
def isPtOnEdge(pt, edge):
    """Test if a point lies on an edge."""
    probe = Part.Vertex(pt)
    try:
        result = probe.distToShape(edge)
    except Part.OCCError:
        return False
    # distToShape returns (distance, ...); zero distance at the current
    # precision means the point is on the edge.
    return bool(result) and round(result[0], precision()) == 0
def hasCurves(shape):
    """Check if the given shape has curves."""
    straight_types = (Part.LineSegment, Part.Line)
    return any(not isinstance(e.Curve, straight_types) for e in shape.Edges)
def isAligned(edge, axis="x"):
    """Check if the given edge or line is aligned to the given axis.

    The axis can be 'x', 'y' or 'z'. Returns False for any other axis
    value, for edges without exactly two vertexes, and for unsupported
    object types.
    """

    def is_same(a, b):
        return round(a, precision()) == round(b, precision())

    # The previous version repeated the same comparison for each of the
    # three axes; getattr with the axis name collapses the duplication.
    # Edge vertexes use upper-case coordinate attributes (X, Y, Z);
    # LineSegment points use lower-case ones (x, y, z).
    if axis not in ("x", "y", "z"):
        return False
    if isinstance(edge, Part.Edge):
        if len(edge.Vertexes) == 2:
            coord = axis.upper()
            return is_same(
                getattr(edge.Vertexes[0], coord),
                getattr(edge.Vertexes[-1], coord),
            )
    elif isinstance(edge, Part.LineSegment):
        return is_same(
            getattr(edge.StartPoint, axis), getattr(edge.EndPoint, axis)
        )
    return False
def getQuad(face):
    """Return a list of 3 vectors if the face is a quad, otherwise None.

    Returns
    -------
    basepoint, Xdir, Ydir
        If the face is a quad.
    None
        If the face is not a quad.
    """
    if len(face.Edges) != 4:
        return None
    # Direction vectors of the four edges.
    v1 = vec(face.Edges[0])  # Warning redefinition of function v1
    v2 = vec(face.Edges[1])
    v3 = vec(face.Edges[2])
    v4 = vec(face.Edges[3])
    # Angles considered perpendicular (90/270 deg) and parallel
    # (0/180/360 deg) at the current precision.
    angles90 = [round(math.pi * 0.5, precision()), round(math.pi * 1.5, precision())]
    angles180 = [0, round(math.pi, precision()), round(math.pi * 2, precision())]
    # Every other edge must be parallel or perpendicular to the first.
    for ov in [v2, v3, v4]:
        if not (round(v1.getAngle(ov), precision()) in angles90 + angles180):
            return None
    # Use the first perpendicular edge as the Y direction.
    # NOTE(review): if no edge is perpendicular to v1 (degenerate face),
    # the loop falls through and the function implicitly returns None.
    for ov in [v2, v3, v4]:
        if round(v1.getAngle(ov), precision()) in angles90:
            v1.normalize()
            ov.normalize()
            return [face.Edges[0].Vertexes[0].Point, v1, ov]
def areColinear(e1, e2):
    """Return True if both edges are colinear.

    Both edges must be straight; the directions must be parallel (or
    anti-parallel) and the offset between their start points must lie
    along the same direction (or be null).
    """
    line_types = (Part.LineSegment, Part.Line)
    if not isinstance(e1.Curve, line_types):
        return False
    if not isinstance(e2.Curve, line_types):
        return False
    dir1 = vec(e1)
    dir2 = vec(e2)
    pi_rounded = round(math.pi, precision())
    angle = round(dir1.getAngle(dir2), precision())
    if angle != 0 and angle != pi_rounded:
        return False
    offset = e2.Vertexes[0].Point.sub(e1.Vertexes[0].Point)
    if DraftVecUtils.isNull(offset):
        return True
    offset_angle = round(dir1.getAngle(offset), precision())
    return offset_angle == 0 or offset_angle == pi_rounded
def hasOnlyWires(shape):
    """Return True if all edges are inside a wire."""
    wire_edge_total = sum(len(wire.Edges) for wire in shape.Wires)
    return wire_edge_total == len(shape.Edges)
def geomType(edge):
    """Return the type of geometry this edge is based on."""
    try:
        curve = edge.Curve
        # Checked in order; line types first so LineSegment wins.
        for classes, type_name in (
            ((Part.LineSegment, Part.Line), "Line"),
            ((Part.Circle,), "Circle"),
            ((Part.BSplineCurve,), "BSplineCurve"),
            ((Part.BezierCurve,), "BezierCurve"),
            ((Part.Ellipse,), "Ellipse"),
        ):
            if isinstance(curve, classes):
                return type_name
        return "Unknown"
    except Exception:  # catch all errors, not only TypeError
        return "Unknown"
def isValidPath(shape):
    """Return True if the shape can be used as an extrusion path.

    A valid path is a non-null, face-free shape with at most one wire,
    where neither the wire nor the shape itself is closed.
    """
    if shape.isNull() or shape.Faces or len(shape.Wires) > 1:
        return False
    if shape.Wires and shape.Wires[0].isClosed():
        return False
    return not shape.isClosed()
def findClosest(base_point, point_list):
    """Find closest point in a list of points to the base point.

    Returns
    -------
    int
        The index of the closest point in point_list. Ties go to the
        first such point.
    None
        If point_list is empty.
    """
    if not point_list:
        return None
    # FIX: the previous implementation initialized the running minimum
    # to the hard-coded value 1000000 and therefore returned None
    # whenever every point was farther than that from base_point.
    # min() over the indices has no such cutoff and keeps the original
    # first-wins tie-breaking.
    return min(
        range(len(point_list)),
        key=lambda i: base_point.sub(point_list[i]).Length,
    )
def getBoundaryAngles(angle, alist):
    """Return the two closest angles in `alist` that bracket `angle`.

    Negative entries of `alist` (and a negative `angle`) are first brought
    into the positive range by repeatedly adding 2*pi; note that `alist`
    is normalized IN PLACE. If no angle lies below (resp. above) `angle`,
    the bracket wraps around: the largest list angle (or 0) is used as the
    lower bound, and the smallest list angle (or 2*pi) as the upper bound.
    """
    two_pi = 2 * math.pi
    pending = True
    while pending:
        pending = False
        for idx in range(len(alist)):
            if alist[idx] < 0:
                alist[idx] += two_pi
                pending = True
        if angle < 0:
            angle += two_pi
            pending = True
    lower = None
    for candidate in alist:
        if candidate < angle and (lower is None or candidate > lower):
            lower = candidate
    if lower is None:
        # Nothing below the angle: wrap around to the largest angle (or 0).
        lower = max([0] + alist)
    higher = None
    for candidate in alist:
        if candidate > angle and (higher is None or candidate < higher):
            higher = candidate
    if higher is None:
        # Nothing above the angle: wrap around to the smallest angle (or 2*pi).
        higher = min([two_pi] + alist)
    return lower, higher
## @}
|
model | folder | # Copyright (C) 2011 Chris Dekter
# Copyright (C) 2019-2020 Thomas Hess <thomas.hess@udo.edu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import errno
import glob
import json
import os
import typing
from autokey.configmanager import configmanager_constants as cm_constants
from autokey.model.abstract_abbreviation import AbstractAbbreviation
from autokey.model.abstract_hotkey import AbstractHotkey
from autokey.model.abstract_window_filter import AbstractWindowFilter
from autokey.model.helpers import TriggerMode, get_safe_path
from autokey.model.phrase import Phrase
from autokey.model.script import Script
logger = __import__("autokey.logger").logger.get_logger(__name__)
class Folder(AbstractAbbreviation, AbstractHotkey, AbstractWindowFilter):
    """
    Manages a collection of subfolders/phrases/scripts, which may be associated
    with an abbreviation or hotkey.

    A Folder is backed by a directory on disk; its own metadata (title,
    trigger modes, abbreviation/hotkey/window-filter settings) lives in a
    ``folder.json`` file inside that directory, while children are stored
    as subdirectories (folders), ``.txt`` files (phrases) and ``.py`` files
    (scripts).
    """
    def __init__(self, title: str, show_in_tray_menu: bool = False, path: typing.Optional[str] = None):
        """
        :param title: display title of the folder
        :param show_in_tray_menu: whether this folder appears in the tray menu
        :param path: backing directory; computed lazily by build_path() if None
        """
        AbstractAbbreviation.__init__(self)
        AbstractHotkey.__init__(self)
        AbstractWindowFilter.__init__(self)
        self.title = title
        self.folders = []  # direct child Folder instances
        self.items = []  # direct child Phrase/Script instances
        self.modes = []  # type: typing.List[TriggerMode]
        self.usageCount = 0
        self.show_in_tray_menu = show_in_tray_menu
        self.parent = None  # type: typing.Optional[Folder]
        self.path = path
        # NOTE(review): presumably marks in-memory folders that should not be
        # persisted to disk -- confirm against callers.
        self.temporary = False
    def build_path(self, base_name=None):
        """Compute and set self.path under the parent's directory, or under
        the default configuration folder for a top-level folder."""
        if base_name is None:
            base_name = self.title
        if self.parent is not None:
            self.path = get_safe_path(self.parent.path, base_name)
        else:
            self.path = get_safe_path(cm_constants.CONFIG_DEFAULT_FOLDER, base_name)
    def persist(self):
        """Write this folder's metadata to <path>/folder.json, creating the
        backing directory first if it does not exist."""
        if self.path is None:
            self.build_path()
        if not os.path.exists(self.path):
            os.mkdir(self.path)
        with open(self.path + "/folder.json", "w") as outFile:
            json.dump(self.get_serializable(), outFile, indent=4)
    def get_serializable(self):
        """Return a JSON-serializable dict of this folder's metadata.

        Children are persisted separately (as files/directories) and are
        intentionally not included here.
        """
        d = {
            "type": "folder",
            "title": self.title,
            "modes": [
                mode.value for mode in self.modes
            ],  # Store the enum value for compatibility with old user data.
            "usageCount": self.usageCount,
            "showInTrayMenu": self.show_in_tray_menu,
            "abbreviation": AbstractAbbreviation.get_serializable(self),
            "hotkey": AbstractHotkey.get_serializable(self),
            "filter": AbstractWindowFilter.get_serializable(self),
        }
        return d
    def load(self, parent=None):
        """Load this folder from disk: metadata from folder.json if present
        (falling back to the directory name as title), then all children."""
        self.parent = parent
        if os.path.exists(self.get_json_path()):
            self.load_from_serialized()
        else:
            self.title = os.path.basename(self.path)
        self.load_children()
    def load_children(self):
        """Scan the backing directory and (re)build self.folders and
        self.items: subdirectories become Folders, .txt files Phrases,
        .py files Scripts. Other files are ignored."""
        entries = glob.glob(self.path + "/*")
        self.folders = []
        self.items = []
        for entryPath in entries:
            # entryPath = self.path + '/' + entry
            if os.path.isdir(entryPath):
                f = Folder("", path=entryPath)
                f.load(self)
                self.folders.append(f)
            if os.path.isfile(entryPath):
                i = None
                if entryPath.endswith(".txt"):
                    i = Phrase("", "", path=entryPath)
                elif entryPath.endswith(".py"):
                    i = Script("", "", path=entryPath)
                if i is not None:
                    i.load(self)
                    self.items.append(i)
    def load_from_serialized(self):
        """Read folder.json and inject its data; errors are logged rather
        than propagated, so a corrupt file leaves the folder partly loaded."""
        try:
            with open(self.path + "/folder.json", "r") as inFile:
                data = json.load(inFile)
                self.inject_json_data(data)
        except Exception:
            logger.exception("Error while loading json data for " + self.title)
            logger.error("JSON data not loaded (or loaded incomplete)")
    def inject_json_data(self, data):
        """Apply a deserialized folder.json dict to this instance.

        :raises KeyError: if a required key is missing from `data`
        """
        self.title = data["title"]
        self.modes = [TriggerMode(item) for item in data["modes"]]
        self.usageCount = data["usageCount"]
        self.show_in_tray_menu = data["showInTrayMenu"]
        AbstractAbbreviation.load_from_serialized(self, data["abbreviation"])
        AbstractHotkey.load_from_serialized(self, data["hotkey"])
        AbstractWindowFilter.load_from_serialized(self, data["filter"])
    def rebuild_path(self):
        """Recompute the on-disk path from the (possibly renamed) title and
        rename the backing directory accordingly, updating all children."""
        if self.path is not None:
            oldName = self.path
            self.path = get_safe_path(os.path.split(oldName)[0], self.title)
            self.update_children()
            os.rename(oldName, self.path)
        else:
            self.build_path()
    def update_children(self):
        """Recursively recompute the paths of all children after this
        folder's own path changed (keeps each child's base name)."""
        for childFolder in self.folders:
            childFolder.build_path(os.path.basename(childFolder.path))
            childFolder.update_children()
        for childItem in self.items:
            childItem.build_path(os.path.basename(childItem.path))
    def get_child_folders(self):
        """Return all descendant folders (depth-first), not including self."""
        out = []
        for folder in self.folders:
            out.append(folder)
            out.extend(folder.get_child_folders())
        return out
    def remove_data(self):
        """Delete this folder's on-disk data, recursing into children first."""
        if self.path is not None:
            for child in self.items:
                child.remove_data()
            for child in self.folders:
                child.remove_data()
            try:
                # The json file must be removed first. Otherwise the rmdir will fail.
                if os.path.exists(self.get_json_path()):
                    os.remove(self.get_json_path())
                os.rmdir(self.path)
            except OSError as err:
                # There may be user data in the removed directory. Only swallow the error, if it is caused by
                # residing user data. Other errors should propagate.
                if err.errno != errno.ENOTEMPTY:
                    raise
    def get_json_path(self):
        """Return the path of this folder's metadata file (folder.json)."""
        return self.path + "/folder.json"
    def get_tuple(self):
        """Return a display tuple: (kind, title, abbreviations, hotkey, self)."""
        return (
            "folder",
            self.title,
            self.get_abbreviations(),
            self.get_hotkey_string(),
            self,
        )
    def set_modes(self, modes: typing.List[TriggerMode]):
        """Replace the list of trigger modes for this folder."""
        self.modes = modes
    def add_folder(self, folder):
        """Attach `folder` as a child and set its parent to self."""
        folder.parent = self
        # self.folders[folder.title] = folder
        self.folders.append(folder)
    def remove_folder(self, folder):
        """Detach a child folder (does not touch its on-disk data)."""
        # del self.folders[folder.title]
        self.folders.remove(folder)
    def add_item(self, item):
        """
        Add a new script or phrase to the folder.
        """
        item.parent = self
        # self.phrases[phrase.description] = phrase
        self.items.append(item)
    def remove_item(self, item):
        """
        Removes the given phrase or script from the folder.
        """
        # del self.phrases[phrase.description]
        self.items.remove(item)
    def check_input(self, buffer, window_info):
        """Return True if the typed buffer and active window match this
        folder's abbreviation trigger (only in ABBREVIATION mode)."""
        if TriggerMode.ABBREVIATION in self.modes:
            return self._should_trigger_abbreviation(
                buffer
            ) and self._should_trigger_window_title(window_info)
        else:
            return False
    def increment_usage_count(self):
        """Increase the usage counter, propagating up to all ancestors."""
        self.usageCount += 1
        if self.parent is not None:
            self.parent.increment_usage_count()
    def get_backspace_count(self, buffer):
        """
        Given the input buffer, calculate how many backspaces are needed to erase the text
        that triggered this folder.
        """
        # self.backspace is provided by AbstractAbbreviation -- see that class.
        if TriggerMode.ABBREVIATION in self.modes and self.backspace:
            if self._should_trigger_abbreviation(buffer):
                abbr = self._get_trigger_abbreviation(buffer)
                stringBefore, typedAbbr, stringAfter = self._partition_input(
                    buffer, abbr
                )
                return len(abbr) + len(stringAfter)
        # Fall back to the parent folder's trigger, if any.
        if self.parent is not None:
            return self.parent.get_backspace_count(buffer)
        return 0
    def calculate_input(self, buffer):
        """
        Calculate how many keystrokes were used in triggering this folder (if applicable).
        """
        if TriggerMode.ABBREVIATION in self.modes and self.backspace:
            if self._should_trigger_abbreviation(buffer):
                # Non-immediate abbreviations need one extra (trigger) keystroke.
                if self.immediate:
                    return len(self._get_trigger_abbreviation(buffer))
                else:
                    return len(self._get_trigger_abbreviation(buffer)) + 1
        if self.parent is not None:
            return self.parent.calculate_input(buffer)
        return 0
    """def __cmp__(self, other):
        if self.usageCount != other.usageCount:
            return cmp(self.usageCount, other.usageCount)
        else:
            return cmp(other.title, self.title)"""
    def __str__(self):
        return "folder '{}'".format(self.title)
    def __repr__(self):
        return str(self)
|
nfoview | actions | # -*- coding: utf-8 -*-
# Copyright (C) 2015 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import nfoview
__all__ = (
"AboutAction",
"CloseAction",
"ExportImageAction",
"OpenAction",
"PreferencesAction",
"QuitAction",
"WrapLinesAction",
)
class AboutAction(nfoview.Action):
    """Action registered under the name "about"."""
    def __init__(self):
        nfoview.Action.__init__(self, "about")
class CloseAction(nfoview.Action):
    """Action named "close", bound to Ctrl+W and Escape."""
    def __init__(self):
        nfoview.Action.__init__(self, "close")
        self.accelerators = ["<Control>W", "Escape"]
class ExportImageAction(nfoview.Action):
    """Action named "export-image" (Ctrl+E); currently always disabled."""
    def __init__(self):
        nfoview.Action.__init__(self, "export-image")
        self.accelerators = ["<Control>E"]
    def _affirm_doable(self, window):
        # XXX: Disable for now.
        # affirm(False) makes the action permanently unavailable; the original
        # availability conditions are kept below for when this is re-enabled.
        nfoview.util.affirm(False)
        # nfoview.util.affirm(window.path is not None)
        # nfoview.util.affirm(window.view is not None)
        # nfoview.util.affirm(window.view.get_sensitive())
class OpenAction(nfoview.Action):
    """Action named "open", bound to Ctrl+O."""
    def __init__(self):
        nfoview.Action.__init__(self, "open")
        self.accelerators = ["<Control>O"]
class PreferencesAction(nfoview.Action):
    """Action registered under the name "preferences"."""
    def __init__(self):
        nfoview.Action.__init__(self, "preferences")
class QuitAction(nfoview.Action):
    """Action named "quit", bound to Ctrl+Q."""
    def __init__(self):
        nfoview.Action.__init__(self, "quit")
        self.accelerators = ["<Control>Q"]
class WrapLinesAction(nfoview.ToggleAction):
    """Toggle action named "wrap-lines", bound to Ctrl+R."""
    def __new__(cls):
        # ToggleAction instances come from the `new` factory; re-brand the
        # returned object as this subclass so its overridden methods apply.
        action = nfoview.ToggleAction.new("wrap-lines")
        action.__class__ = cls
        return action
    def __init__(self):
        nfoview.Action.__init__(self, "wrap-lines")
        self.accelerators = ["<Control>R"]
    def _affirm_doable(self, window):
        # Only doable for a window with a sensitive, non-empty text view
        # (affirm is expected to raise otherwise -- see nfoview.util).
        nfoview.util.affirm(window.view is not None)
        nfoview.util.affirm(window.view.get_sensitive())
        nfoview.util.affirm(window.view.get_text())
|
nyaa | backend | import json
import os
import re
from datetime import datetime, timedelta
from ipaddress import ip_address
import flask
import sqlalchemy
from nyaa import models, utils
from nyaa.extensions import db
from orderedset import OrderedSet
from werkzeug import secure_filename
app = flask.current_app
# Blacklists for _validate_torrent_filenames
# TODO: consider moving to config.py?
CHARACTER_BLACKLIST = [
"\u202E", # RIGHT-TO-LEFT OVERRIDE
]
FILENAME_BLACKLIST = [
# Windows reserved filenames
"con",
"nul",
"prn",
"aux",
"com0",
"com1",
"com2",
"com3",
"com4",
"com5",
"com6",
"com7",
"com8",
"com9",
"lpt0",
"lpt1",
"lpt2",
"lpt3",
"lpt4",
"lpt5",
"lpt6",
"lpt7",
"lpt8",
"lpt9",
]
# Invalid RSS characters regex, used to sanitize some strings
# (C0 control characters except tab/newline/CR, UTF-16 surrogates,
# and the non-characters U+FFFE/U+FFFF).
ILLEGAL_XML_CHARS_RE = re.compile(
    "[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]"
)
def sanitize_string(string, replacement="\uFFFD"):
    """Replace XML-illegal characters in `string` with `replacement`
    (default: U+FFFD REPLACEMENT CHARACTER)."""
    return ILLEGAL_XML_CHARS_RE.sub(replacement, string)
class TorrentExtraValidationException(Exception):
    """Raised when a torrent fails post-WTForms validation.

    The `errors` attribute maps field names to lists of error messages.
    """
    def __init__(self, errors=None):
        # A mutable default argument ({}) would be shared between every
        # instance created without arguments, so any mutation of one
        # exception's `errors` would leak into all others. Use a None
        # sentinel instead (backward-compatible: callers may still pass a dict).
        self.errors = {} if errors is None else errors
@utils.cached_function
def get_category_id_map():
    """Reads database for categories and turns them into a dict with
    ids as keys and name list as the value, e.g.
    {'1_0': ['Anime'], '1_2': ['Anime', 'English-translated'], ...}"""
    mapping = {}
    for main_cat in models.MainCategory.query:
        mapping[main_cat.id_as_string] = [main_cat.name]
        mapping.update(
            (sub_cat.id_as_string, [main_cat.name, sub_cat.name])
            for sub_cat in main_cat.sub_categories
        )
    return mapping
def _replace_utf8_values(dict_or_list):
"""Will replace 'property' with 'property.utf-8' and remove latter if it exists.
Thanks, bitcomet! :/"""
did_change = False
if isinstance(dict_or_list, dict):
for key in [key for key in dict_or_list.keys() if key.endswith(".utf-8")]:
dict_or_list[key.replace(".utf-8", "")] = dict_or_list.pop(key)
did_change = True
for value in dict_or_list.values():
did_change = _replace_utf8_values(value) or did_change
elif isinstance(dict_or_list, list):
for item in dict_or_list:
did_change = _replace_utf8_values(item) or did_change
return did_change
def _recursive_dict_iterator(source):
"""Iterates over a given dict, yielding (key, value) pairs,
recursing inside any dicts."""
# TODO Make a proper dict-filetree walker
for key, value in source.items():
yield (key, value)
if isinstance(value, dict):
for kv in _recursive_dict_iterator(value):
yield kv
def _validate_torrent_filenames(torrent):
    """Check every path component of the torrent's file tree against the
    filename and character blacklists, returning False on any match."""
    file_tree = json.loads(torrent.filelist.filelist_blob.decode("utf-8"))
    for path_part, _value in _recursive_dict_iterator(file_tree):
        stem = path_part.rsplit(".", 1)[0].lower()
        if stem in FILENAME_BLACKLIST:
            return False
        if any(c in path_part for c in CHARACTER_BLACKLIST):
            return False
    return True
def validate_torrent_post_upload(torrent, upload_form=None):
    """Validates a Torrent instance before it's saved to the database.

    Enforcing user-and-such-based validations is more flexible here vs WTForm context.
    Raises TorrentExtraValidationException when any check fails; if
    `upload_form` is given, the messages are also attached to its fields.
    """
    errors = {"torrent_file": []}
    # Enforce minimum size for userless uploads
    minimum_anonymous_torrent_size = app.config["MINIMUM_ANONYMOUS_TORRENT_SIZE"]
    if torrent.user is None and torrent.filesize < minimum_anonymous_torrent_size:
        errors["torrent_file"].append("Torrent too small for an anonymous uploader")
    if not _validate_torrent_filenames(torrent):
        errors["torrent_file"].append("Torrent has forbidden characters in filenames")
    # Remove keys with empty lists
    errors = {k: v for k, v in errors.items() if v}
    if errors:
        if upload_form:
            # Add error messages to the form fields
            for field_name, field_errors in errors.items():
                getattr(upload_form, field_name).errors.extend(field_errors)
            # Clear out the wtforms dict to force a regeneration
            upload_form._errors = None
        raise TorrentExtraValidationException(errors)
def check_uploader_ratelimit(user):
    """Figures out if user (or IP address from flask.request) may
    upload within upload ratelimit.

    Returns a tuple of (current datetime, count of torrents uploaded
    within burst duration, timestamp for next allowed upload).
    """
    now = datetime.utcnow()
    next_allowed_time = now
    Torrent = models.Torrent
    def filter_uploader(query):
        # Match by user OR their current IP when a user is given,
        # otherwise by the request IP alone.
        if user:
            return query.filter(
                sqlalchemy.or_(
                    Torrent.user == user,
                    Torrent.uploader_ip == ip_address(flask.request.remote_addr).packed,
                )
            )
        else:
            return query.filter(
                Torrent.uploader_ip == ip_address(flask.request.remote_addr).packed
            )
    time_range_start = datetime.utcnow() - timedelta(
        seconds=app.config["UPLOAD_BURST_DURATION"]
    )
    # Count torrents uploaded by user/ip within given time period
    torrent_count_query = db.session.query(sqlalchemy.func.count(Torrent.id))
    torrent_count = (
        filter_uploader(torrent_count_query)
        .filter(Torrent.created_time >= time_range_start)
        .scalar()
    )
    # If user has reached burst limit...
    if torrent_count >= app.config["MAX_UPLOAD_BURST"]:
        # Check how long ago their latest torrent was (we know at least one will exist)
        last_torrent = (
            filter_uploader(Torrent.query).order_by(Torrent.created_time.desc()).first()
        )
        after_timeout = last_torrent.created_time + timedelta(
            seconds=app.config["UPLOAD_TIMEOUT"]
        )
        if now < after_timeout:
            next_allowed_time = after_timeout
    return now, torrent_count, next_allowed_time
def handle_torrent_upload(upload_form, uploading_user=None, fromAPI=False):
    """Stores a torrent to the database.

    May throw TorrentExtraValidationException if the form/torrent fails
    post-WTForm validation! Exception messages will also be added to their
    relevant fields on the given form.

    :param upload_form: validated WTForms upload form (see forms.py)
    :param uploading_user: the uploader, or None for an anonymous upload
    :param fromAPI: True when called from the API
        # NOTE(review): fromAPI is not referenced in this body -- confirm callers.
    :return: the committed models.Torrent instance
    """
    torrent_data = upload_form.torrent_file.parsed_data
    # Anonymous uploaders and non-trusted uploaders
    no_or_new_account = not uploading_user or (
        uploading_user.age < app.config["RATELIMIT_ACCOUNT_AGE"]
        and not uploading_user.is_trusted
    )
    if app.config["RATELIMIT_UPLOADS"] and no_or_new_account:
        now, torrent_count, next_time = check_uploader_ratelimit(uploading_user)
        if next_time > now:
            # This will flag the dialog in upload.html red and tell API users what's wrong
            upload_form.ratelimit.errors = ["You've gone over the upload ratelimit."]
            raise TorrentExtraValidationException()
    if not uploading_user:
        if app.config["RAID_MODE_LIMIT_UPLOADS"]:
            # XXX TODO: rename rangebanned to something more generic
            upload_form.rangebanned.errors = [app.config["RAID_MODE_UPLOADS_MESSAGE"]]
            raise TorrentExtraValidationException()
        elif models.RangeBan.is_rangebanned(
            ip_address(flask.request.remote_addr).packed
        ):
            upload_form.rangebanned.errors = [
                "Your IP is banned from " "uploading anonymously."
            ]
            raise TorrentExtraValidationException()
    # Delete existing torrent which is marked as deleted
    if torrent_data.db_id is not None:
        old_torrent = models.Torrent.by_id(torrent_data.db_id)
        db.session.delete(old_torrent)
        db.session.commit()
        # Delete physical file after transaction has been committed
        _delete_info_dict(old_torrent)
    # The torrent has been validated and is safe to access with ['foo'] etc - all relevant
    # keys and values have been checked for (see UploadForm in forms.py for details)
    info_dict = torrent_data.torrent_dict["info"]
    changed_to_utf8 = _replace_utf8_values(torrent_data.torrent_dict)
    # Use uploader-given name or grab it from the torrent
    display_name = (
        upload_form.display_name.data.strip()
        or info_dict["name"].decode("utf8").strip()
    )
    information = (upload_form.information.data or "").strip()
    description = (upload_form.description.data or "").strip()
    # Sanitize fields
    display_name = sanitize_string(display_name)
    information = sanitize_string(information)
    description = sanitize_string(description)
    # Single-file torrents carry a top-level "length"; multi-file ones sum their files.
    torrent_filesize = info_dict.get("length") or sum(
        f["length"] for f in info_dict.get("files")
    )
    # In case no encoding, assume UTF-8.
    torrent_encoding = torrent_data.torrent_dict.get("encoding", b"utf-8").decode(
        "utf-8"
    )
    torrent = models.Torrent(
        id=torrent_data.db_id,
        info_hash=torrent_data.info_hash,
        display_name=display_name,
        torrent_name=torrent_data.filename,
        information=information,
        description=description,
        encoding=torrent_encoding,
        filesize=torrent_filesize,
        user=uploading_user,
        uploader_ip=ip_address(flask.request.remote_addr).packed,
    )
    # Store bencoded info_dict
    info_dict_path = torrent.info_dict_path
    info_dict_dir = os.path.dirname(info_dict_path)
    os.makedirs(info_dict_dir, exist_ok=True)
    with open(info_dict_path, "wb") as out_file:
        out_file.write(torrent_data.bencoded_info_dict)
    torrent.stats = models.Statistic()
    torrent.has_torrent = True
    # Fields with default value will be None before first commit, so set .flags
    torrent.flags = 0
    torrent.anonymous = upload_form.is_anonymous.data if uploading_user else True
    torrent.hidden = upload_form.is_hidden.data
    torrent.remake = upload_form.is_remake.data
    torrent.complete = upload_form.is_complete.data
    # Copy trusted status from user if possible
    can_mark_trusted = uploading_user and uploading_user.is_trusted
    # To do, automatically mark trusted if user is trusted unless user specifies otherwise
    torrent.trusted = upload_form.is_trusted.data if can_mark_trusted else False
    # Only allow mods to upload locked torrents
    can_mark_locked = uploading_user and uploading_user.is_moderator
    torrent.comment_locked = (
        upload_form.is_comment_locked.data if can_mark_locked else False
    )
    # Set category ids
    (
        torrent.main_category_id,
        torrent.sub_category_id,
    ) = upload_form.category.parsed_data.get_category_ids()
    # To simplify parsing the filelist, turn single-file torrent into a list
    torrent_filelist = info_dict.get("files")
    used_path_encoding = changed_to_utf8 and "utf-8" or torrent_encoding
    parsed_file_tree = dict()
    if not torrent_filelist:
        # If single-file, the root will be the file-tree (no directory)
        file_tree_root = parsed_file_tree
        torrent_filelist = [{"length": torrent_filesize, "path": [info_dict["name"]]}]
    else:
        # If multi-file, use the directory name as root for files
        file_tree_root = parsed_file_tree.setdefault(
            info_dict["name"].decode(used_path_encoding), {}
        )
    # Parse file dicts into a tree
    for file_dict in torrent_filelist:
        # Decode path parts from utf8-bytes
        path_parts = [
            path_part.decode(used_path_encoding) for path_part in file_dict["path"]
        ]
        filename = path_parts.pop()
        current_directory = file_tree_root
        for directory in path_parts:
            current_directory = current_directory.setdefault(directory, {})
        # Don't add empty filenames (BitComet directory)
        if filename:
            current_directory[filename] = file_dict["length"]
    parsed_file_tree = utils.sorted_pathdict(parsed_file_tree)
    json_bytes = json.dumps(parsed_file_tree, separators=(",", ":")).encode("utf8")
    torrent.filelist = models.TorrentFilelist(filelist_blob=json_bytes)
    db.session.add(torrent)
    db.session.flush()
    # Store the users trackers
    trackers = OrderedSet()
    announce = torrent_data.torrent_dict.get("announce", b"").decode("ascii")
    if announce:
        trackers.add(announce)
    # List of lists with single item
    announce_list = torrent_data.torrent_dict.get("announce-list", [])
    for announce in announce_list:
        trackers.add(announce[0].decode("ascii"))
    # Store webseeds
    # qBittorrent doesn't omit url-list but sets it as '' even when there are no webseeds
    webseed_list = torrent_data.torrent_dict.get("url-list") or []
    if isinstance(webseed_list, bytes):
        webseed_list = [webseed_list]  # qB doesn't contain a sole url in a list
    webseeds = OrderedSet(webseed.decode("utf-8") for webseed in webseed_list)
    # Remove our trackers, maybe? TODO ?
    # Search for/Add trackers in DB
    db_trackers = OrderedSet()
    for announce in trackers:
        tracker = models.Trackers.by_uri(announce)
        # Insert new tracker if not found
        if not tracker:
            tracker = models.Trackers(uri=announce)
            db.session.add(tracker)
            db.session.flush()
        elif tracker.is_webseed:
            # If we have an announce marked webseed (user error, malicy?), reset it.
            # Better to have "bad" announces than "hiding" proper announces in webseeds/url-list.
            tracker.is_webseed = False
            db.session.flush()
        db_trackers.add(tracker)
    # Same for webseeds
    for webseed_url in webseeds:
        webseed = models.Trackers.by_uri(webseed_url)
        if not webseed:
            webseed = models.Trackers(uri=webseed_url, is_webseed=True)
            db.session.add(webseed)
            db.session.flush()
        # Don't add trackers into webseeds
        if webseed.is_webseed:
            db_trackers.add(webseed)
    # Store tracker refs in DB
    for order, tracker in enumerate(db_trackers):
        torrent_tracker = models.TorrentTrackers(
            torrent_id=torrent.id, tracker_id=tracker.id, order=order
        )
        db.session.add(torrent_tracker)
    # Before final commit, validate the torrent again
    validate_torrent_post_upload(torrent, upload_form)
    # Add to tracker whitelist
    db.session.add(models.TrackerApi(torrent.info_hash, "insert"))
    db.session.commit()
    # Store the actual torrent file as well
    torrent_file = upload_form.torrent_file.data
    if app.config.get("BACKUP_TORRENT_FOLDER"):
        torrent_file.seek(0, 0)
        torrent_dir = app.config["BACKUP_TORRENT_FOLDER"]
        os.makedirs(torrent_dir, exist_ok=True)
        torrent_path = os.path.join(
            torrent_dir,
            "{}.{}".format(torrent.id, secure_filename(torrent_file.filename)),
        )
        torrent_file.save(torrent_path)
    torrent_file.close()
    return torrent
def _delete_info_dict(torrent):
info_dict_path = torrent.info_dict_path
if os.path.exists(info_dict_path):
os.remove(info_dict_path)
|
downloaders | BezvadataCz | # -*- coding: utf-8 -*-
import base64
import re
from datetime import timedelta
from ..base.simple_downloader import SimpleDownloader
class BezvadataCz(SimpleDownloader):
    __name__ = "BezvadataCz"
    __type__ = "downloader"
    __version__ = "0.36"
    __status__ = "testing"
    __pattern__ = r"http://(?:www\.)?bezvadata\.cz/stahnout/.+"
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        ("fallback", "bool", "Fallback to free download if premium fails", True),
        ("chk_filesize", "bool", "Check file size", True),
        ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
    ]
    __description__ = """BezvaData.cz downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
    NAME_PATTERN = r"<p><b>Soubor: (?P<N>.+?)</b></p>"
    SIZE_PATTERN = r"<li><strong>Velikost:</strong> (?P<S>.+?)</li>"
    OFFLINE_PATTERN = r"<title>BezvaData \| Soubor nenalezen</title>"
    def setup(self):
        """Enable resumable and parallel downloads for this host."""
        self.resume_download = True
        self.multi_dl = True
    def handle_free(self, pyfile):
        """Walk the free-download flow: follow the download button, solve
        the base64-embedded captcha, submit the form and wait out the
        countdown before setting self.link."""
        #: Download button
        m = re.search(r'<a class="stahnoutSoubor".*?href="(.*?)"', self.data)
        if m is None:
            self.error(self._("Page 1 URL not found"))
        url = "http://bezvadata.cz{}".format(m.group(1))
        #: Captcha form
        self.data = self.load(url)
        self.check_errors()
        action, inputs = self.parse_html_form("frm-stahnoutFreeForm")
        if not inputs:
            self.error(self._("FreeForm"))
        m = re.search(r'<img src="data:image/png;base64,(.*?)"', self.data)
        if m is None:
            self.retry_captcha()
        inputs["captcha"] = self.captcha.decrypt_image(
            base64.b64decode(m.group(1)), input_type="png"
        )
        #: Download url
        # BUGFIX: `post=inputs` was previously passed to str.format() -- which
        # silently ignores unknown keyword arguments -- so the captcha form
        # data was never submitted. It belongs to self.load().
        self.data = self.load("http://bezvadata.cz{}".format(action), post=inputs)
        self.check_errors()
        m = re.search(r'<a class="stahnoutSoubor2" href="(.*?)">', self.data)
        if m is None:
            self.error(self._("Page 2 URL not found"))
        url = "http://bezvadata.cz{}".format(m.group(1))
        self.log_debug(f"DL URL {url}")
        #: countdown (MM:SS); default to 120s when the timer is absent
        m = re.search(r'id="countdown">(\d\d):(\d\d)<', self.data)
        wait_time = (
            (timedelta(minutes=int(m.group(1))).total_seconds() + int(m.group(2)))
            if m
            else 120
        )
        self.wait(wait_time, False)
        self.link = url
    def check_errors(self):
        """Handle host-specific error pages before the generic checks."""
        if "images/button-download-disable.png" in self.data:
            #: Parallel dl limit
            self.retry(
                timedelta(minutes=5).total_seconds(),
                24,
                self._("Download limit reached"),
            )
        elif '<div class="infobox' in self.data:
            self.temp_offline()
        else:
            return SimpleDownloader.check_errors(self)
|
api | feature_flag_role_access | from ee.api.role import RoleSerializer
from ee.models.feature_flag_role_access import FeatureFlagRoleAccess
from ee.models.organization_resource_access import OrganizationResourceAccess
from ee.models.role import Role
from posthog.api.feature_flag import FeatureFlagSerializer
from posthog.api.routing import StructuredViewSetMixin
from posthog.models import FeatureFlag
from posthog.models.organization import OrganizationMembership
from rest_framework import exceptions, mixins, serializers, viewsets
from rest_framework.permissions import SAFE_METHODS, BasePermission, IsAuthenticated
class FeatureFlagRoleAccessPermissions(BasePermission):
    """DRF permission for editing the role access entries of a feature flag.

    Permission cascade for unsafe methods:
    1. org admins (and above) always may;
    2. if the organization has no OrganizationResourceAccess row for feature
       flags, everyone may (default edit access);
    3. if the org-wide access level is CAN_ALWAYS_EDIT, everyone may;
    4. the flag's creator may;
    5. otherwise only members of a role whose feature-flag access level is
       CAN_ALWAYS_EDIT may.
    Safe (read-only) methods are always permitted.
    """
    message = "You can't edit roles for this feature flag."
    def has_permission(self, request, view):
        if request.method in SAFE_METHODS:
            return True
        if (
            request.user.organization_memberships.get(
                organization=request.user.organization
            ).level
            >= OrganizationMembership.Level.ADMIN
        ):
            return True
        try:
            resource_access = OrganizationResourceAccess.objects.get(
                resource=OrganizationResourceAccess.Resources.FEATURE_FLAGS,
                organization=request.user.organization,
            )
            if (
                resource_access.access_level
                == OrganizationResourceAccess.AccessLevel.CAN_ALWAYS_EDIT
            ):
                return True
        except (
            OrganizationResourceAccess.DoesNotExist
        ):  # no organization resource access for this means full default edit access
            return True
        try:
            # Creators of a flag can always edit its role access.
            feature_flag: FeatureFlag = FeatureFlag.objects.get(
                id=view.parents_query_dict["feature_flag_id"]
            )
            if feature_flag.created_by.uuid == request.user.uuid:
                return True
        except FeatureFlag.DoesNotExist:
            raise exceptions.NotFound("Feature flag not found.")
        has_role_membership_with_access = request.user.role_memberships.filter(
            role__feature_flags_access_level=OrganizationResourceAccess.AccessLevel.CAN_ALWAYS_EDIT
        ).exists()
        return has_role_membership_with_access
class FeatureFlagRoleAccessSerializer(serializers.ModelSerializer):
    """Serializer for FeatureFlagRoleAccess: nested read-only feature_flag
    and role representations, with a write-only `role_id` for creation."""
    feature_flag = FeatureFlagSerializer(read_only=True)
    role = RoleSerializer(read_only=True)
    role_id = serializers.PrimaryKeyRelatedField(
        write_only=True, source="role", queryset=Role.objects.all()
    )
    class Meta:
        model = FeatureFlagRoleAccess
        fields = ["id", "feature_flag", "role", "role_id", "added_at", "updated_at"]
        read_only_fields = ["id", "added_at", "updated_at"]
    def create(self, validated_data):
        # The flag id comes from the URL (serializer context), not the payload.
        validated_data["feature_flag_id"] = self.context["feature_flag_id"]
        return super().create(validated_data)
class FeatureFlagRoleAccessViewSet(
    StructuredViewSetMixin,
    mixins.ListModelMixin,
    mixins.CreateModelMixin,
    mixins.DestroyModelMixin,
    mixins.RetrieveModelMixin,
    viewsets.GenericViewSet,
):
    """CRUD (minus update) endpoint for a feature flag's role access rows."""
    permission_classes = [IsAuthenticated, FeatureFlagRoleAccessPermissions]
    serializer_class = FeatureFlagRoleAccessSerializer
    queryset = FeatureFlagRoleAccess.objects.select_related("feature_flag")
    filter_rewrite_rules = {"team_id": "feature_flag__team_id"}
    def get_queryset(self):
        # NOTE(review): query-string params are passed verbatim into the ORM
        # filter; an unknown field name raises FieldError and clients can
        # filter on any model field. Consider whitelisting filterable fields.
        filters = self.request.GET.dict()
        return super().get_queryset().filter(**filters)
|
extractor | webofstories | # coding: utf-8
from __future__ import unicode_literals
import re
from ..utils import int_or_none, orderedSet
from .common import InfoExtractor
class WebOfStoriesIE(InfoExtractor):
    """Extractor for single webofstories.com story videos (MP4 + RTMP)."""
    _VALID_URL = r"https?://(?:www\.)?webofstories\.com/play/(?:[^/]+/)?(?P<id>[0-9]+)"
    _VIDEO_DOMAIN = "http://eu-mobile.webofstories.com/"
    _GREAT_LIFE_STREAMER = "rtmp://eu-cdn1.webofstories.com/cfx/st/"
    _USER_STREAMER = "rtmp://eu-users.webofstories.com/cfx/st/"
    _TESTS = [
        {
            "url": "http://www.webofstories.com/play/hans.bethe/71",
            "md5": "373e4dd915f60cfe3116322642ddf364",
            "info_dict": {
                "id": "4536",
                "ext": "mp4",
                "title": "The temperature of the sun",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "description": "Hans Bethe talks about calculating the temperature of the sun",
                "duration": 238,
            },
        },
        {
            "url": "http://www.webofstories.com/play/55908",
            "md5": "2985a698e1fe3211022422c4b5ed962c",
            "info_dict": {
                "id": "55908",
                "ext": "mp4",
                "title": "The story of Gemmata obscuriglobus",
                "thumbnail": r"re:^https?://.*\.jpg$",
                "description": "Planctomycete talks about The story of Gemmata obscuriglobus",
                "duration": 169,
            },
            "skip": "notfound",
        },
        {
            # malformed og:title meta
            "url": "http://www.webofstories.com/play/54215?o=MS",
            "info_dict": {
                "id": "54215",
                "ext": "mp4",
                "title": '"A Leg to Stand On"',
                "thumbnail": r"re:^https?://.*\.jpg$",
                "description": "Oliver Sacks talks about the death and resurrection of a limb",
                "duration": 97,
            },
            "params": {
                "skip_download": True,
            },
        },
    ]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # Sometimes og:title meta is malformed
        title = self._og_search_title(webpage, default=None) or self._html_search_regex(
            r"(?s)<strong>Title:\s*</strong>(.+?)<", webpage, "title"
        )
        description = self._html_search_meta("description", webpage)
        thumbnail = self._og_search_thumbnail(webpage)
        # The page embeds the player via getEmbedCode(...); pull out its
        # comma-separated argument list and strip quotes/whitespace.
        embed_params = [
            s.strip(" \r\n\t'")
            for s in self._search_regex(
                r'(?s)\$\("#embedCode"\).html\(getEmbedCode\((.*?)\)',
                webpage,
                "embed params",
            ).split(",")
        ]
        # Positional unpack of the getEmbedCode() arguments.
        (
            _,
            speaker_id,
            story_id,
            story_duration,
            speaker_type,
            great_life,
            _thumbnail,
            _has_subtitles,
            story_filename,
            _story_order,
        ) = embed_params
        is_great_life_series = great_life == "true"
        duration = int_or_none(story_duration)
        # URL building, see: http://www.webofstories.com/scripts/player.js
        ms_prefix = ""
        if speaker_type.lower() == "ms":
            ms_prefix = "mini_sites/"
        if is_great_life_series:
            mp4_url = "{0:}lives/{1:}/{2:}.mp4".format(
                self._VIDEO_DOMAIN, speaker_id, story_filename
            )
            rtmp_ext = "flv"
            streamer = self._GREAT_LIFE_STREAMER
            play_path = "stories/{0:}/{1:}".format(speaker_id, story_filename)
        else:
            mp4_url = "{0:}{1:}{2:}/{3:}.mp4".format(
                self._VIDEO_DOMAIN, ms_prefix, speaker_id, story_filename
            )
            rtmp_ext = "mp4"
            streamer = self._USER_STREAMER
            play_path = "mp4:{0:}{1:}/{2}.mp4".format(
                ms_prefix, speaker_id, story_filename
            )
        formats = [
            {
                "format_id": "mp4_sd",
                "url": mp4_url,
            },
            {
                "format_id": "rtmp_sd",
                "page_url": url,
                "url": streamer,
                "ext": rtmp_ext,
                "play_path": play_path,
            },
        ]
        self._sort_formats(formats)
        return {
            "id": story_id,
            "title": title,
            "formats": formats,
            "thumbnail": thumbnail,
            "description": description,
            "duration": duration,
        }
class WebOfStoriesPlaylistIE(InfoExtractor):
    """Extractor for webofstories.com "play all" speaker playlists."""

    _VALID_URL = r"https?://(?:www\.)?webofstories\.com/playAll/(?P<id>[^/]+)"
    _TEST = {
        "url": "http://www.webofstories.com/playAll/donald.knuth",
        "info_dict": {
            "id": "donald.knuth",
            "title": "Donald Knuth (Scientist)",
        },
        "playlist_mincount": 97,
    }

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)

        # Each story row carries an id="td_<number>" attribute.
        entries = [
            self.url_result(
                "http://www.webofstories.com/play/%s" % story_id,
                "WebOfStories",
                video_id=story_id,
            )
            for story_id in orderedSet(re.findall(r'\bid=["\']td_(\d+)', webpage))
        ]

        speaker = self._search_regex(
            r'<div id="speakerName">\s*<span>([^<]+)</span>',
            webpage,
            "speaker",
            default=None,
        )
        if speaker:
            field = self._search_regex(
                r'<span id="primaryField">([^<]+)</span>',
                webpage,
                "field",
                default=None,
            )
            title = (speaker + " (%s)" % field) if field else speaker
        else:
            # Fall back to the <title> tag (this one raises if missing).
            title = self._search_regex(
                r"<title>Play\s+all\s+stories\s*-\s*([^<]+)\s*-\s*Web\s+of\s+Stories</title>",
                webpage,
                "title",
            )

        return self.playlist_result(entries, playlist_id, title)
|
database | main | # neubot/database/main.py
#
# Copyright (c) 2011 Simone Basso <bassosimone@gmail.com>,
# NEXA Center for Internet & Society at Politecnico di Torino
#
# This file is part of Neubot <http://www.neubot.org/>.
#
# Neubot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Neubot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Neubot. If not, see <http://www.gnu.org/licenses/>.
#
import getopt
import sys
from neubot import compat, utils
from neubot.database import DATABASE, table_config, table_speedtest
USAGE = """\
Neubot database -- Low-level database operations
Usage: neubot database [-f FILE]
neubot database [-f FILE] delete_all
neubot database [-f FILE] dump
neubot database [-f FILE] prune
neubot database [-f FILE] regen_uuid
neubot database [-f FILE] show
"""
def main(args):
    """Entry point for 'neubot database': parse [-f FILE] plus an optional
    subcommand and run the requested low-level database operation.

    args: argv-style list; args[0] is the program name.
    """
    try:
        options, arguments = getopt.getopt(args[1:], "f:")
    except getopt.GetoptError:
        sys.stderr.write(USAGE)
        sys.exit(1)
    # Apply -f before connecting so the chosen file is the one opened.
    for key, value in options:
        if key == "-f":
            DATABASE.set_path(value)
    DATABASE.connect()
    if not arguments:
        # No subcommand: just print the path of the database in use.
        sys.stdout.write("%s\n" % DATABASE.path)
    elif arguments[0] == "regen_uuid":
        if DATABASE.readonly:
            sys.exit("ERROR: readonly database")
        # NOTE(review): dict.iteritems() is Python 2 only; on Python 3 this
        # raises AttributeError — confirm the target interpreter version.
        table_config.update(
            DATABASE.connection(), {"uuid": utils.get_uuid()}.iteritems()
        )
    elif arguments[0] == "prune":
        if DATABASE.readonly:
            sys.exit("ERROR: readonly database")
        table_speedtest.prune(DATABASE.connection())
    elif arguments[0] == "delete_all":
        if DATABASE.readonly:
            sys.exit("ERROR: readonly database")
        # Prune everything up to "now", then VACUUM to reclaim the pages.
        table_speedtest.prune(DATABASE.connection(), until=utils.timestamp())
        DATABASE.connection().execute("VACUUM;")
    elif arguments[0] in ("show", "dump"):
        d = {
            "config": table_config.dictionarize(DATABASE.connection()),
            "speedtest": table_speedtest.listify(DATABASE.connection()),
        }
        if arguments[0] == "show":
            # "show" pretty-prints; "dump" emits compact JSON.
            compat.json.dump(d, sys.stdout, indent=4)
        elif arguments[0] == "dump":
            compat.json.dump(d, sys.stdout)
    else:
        sys.stdout.write(USAGE)
        sys.exit(0)
|
tornado | conn | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
"""
sockjs.tornado.conn
~~~~~~~~~~~~~~~~~~~
SockJS connection interface
"""
class SockJSConnection(object):
    """Base class for application connection handlers.

    Subclasses override the on_* hooks; the session object supplied by
    the transport layer performs the actual message delivery.
    """

    def __init__(self, session):
        """Bind this connection to its transport `session`."""
        self.session = session

    # Public API
    def on_open(self, request):
        """Hook invoked when the connection is established.

        Override to run initialization or request validation. Returning
        False rejects the connection; a Tornado HTTPError may also be
        raised to close it.

        `request`
            ``ConnectionInfo`` object which contains caller IP address,
            query string parameters and cookies associated with this
            request (if any).
        """
        pass

    def on_message(self, message):
        """Hook invoked per incoming message; subclasses must override."""
        raise NotImplementedError()

    def on_close(self):
        """Hook invoked when the connection closes; no-op by default."""
        pass

    def send(self, message, binary=False):
        """Deliver `message` to the client unless the session is closed.

        `message`
            Message to send.
        """
        if self.is_closed:
            return
        self.session.send_message(message, binary=binary)

    def broadcast(self, clients, message):
        """Send the same `message` to every connection in `clients`.

        Prefer this over a hand-written loop: the session implements it
        with several optimizations and is faster.

        `clients`
            Clients iterable
        `message`
            Message to send.
        """
        self.session.broadcast(clients, message)

    def close(self):
        """Close the underlying session."""
        self.session.close()

    @property
    def is_closed(self):
        """True if the underlying session has been closed."""
        return self.session.is_closed
|
utils | exportHtml | import threading
import time
# noinspection PyPackageRequirements
import wx
from eos.db import getFit
from logbook import Logger
from service.const import PortEftOptions
from service.fit import Fit
from service.market import Market
from service.port import Port
from service.settings import HTMLExportSettings
pyfalog = Logger(__name__)
class exportHtml:
    """Singleton facade that owns the background HTML-export worker thread."""

    _instance = None

    @classmethod
    def getInstance(cls):
        """Return the shared instance, creating it on first use."""
        if cls._instance is None:
            cls._instance = exportHtml()
        return cls._instance

    def __init__(self):
        self.thread = exportHtmlThread()

    def refreshFittingHtml(self, force=False, callback=False):
        """Restart the export worker when forced or when export is enabled."""
        settings = HTMLExportSettings.getInstance()
        if not (force or settings.getEnabled()):
            return
        self.thread.stop()
        self.thread = exportHtmlThread(callback)
        self.thread.start()
class exportHtmlThread(threading.Thread):
    """Background worker that renders all saved fits to a single HTML file.

    The export can be interrupted via stop(); progress is reported through
    the optional callback, which is invoked on the wx main thread with the
    running fit count (and -1 when the export finishes).
    """

    def __init__(self, callback=False):
        threading.Thread.__init__(self)
        self.name = "HTMLExport"
        self.callback = callback
        self.stopRunning = False

    def stop(self):
        """Ask the worker to abort at its next checkpoint."""
        self.stopRunning = True

    def run(self):
        # wait 1 second just in case a lot of modifications get made
        time.sleep(1)
        if self.stopRunning:
            return

        sMkt = Market.getInstance()
        sFit = Fit.getInstance()
        settings = HTMLExportSettings.getInstance()

        minimal = settings.getMinimalEnabled()
        dnaUrl = "https://o.smium.org/loadout/dna/"

        if minimal:
            HTML = self.generateMinimalHTML(sMkt, sFit, dnaUrl)
        else:
            HTML = self.generateFullHTML(sMkt, sFit, dnaUrl)

        try:
            # Bug fix: use a context manager so the handle is released even
            # when the write fails part-way through.
            with open(settings.getPath(), "w", encoding="utf-8") as html_file:
                html_file.write(HTML)
        except IOError:
            pyfalog.warning("Failed to write to " + settings.getPath())
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # Was a silent "pass"; at least record that the export failed.
            pyfalog.warning("Failed to write HTML export")

        if self.callback:
            wx.CallAfter(self.callback, -1)

    def generateFullHTML(self, sMkt, sFit, dnaUrl):
        """Generate the complete HTML with styling and javascript"""
        timestamp = time.localtime(time.time())
        localDate = "%d/%02d/%02d %02d:%02d" % (
            timestamp[0],
            timestamp[1],
            timestamp[2],
            timestamp[3],
            timestamp[4],
        )
        HTML = """
<!DOCTYPE html>
<html>
<head>
<title>Pyfa Fittings</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta charset="utf-8" />
<link rel="stylesheet" href="https://code.jquery.com/mobile/1.4.2/jquery.mobile-1.4.2.min.css" />
<script src="https://code.jquery.com/jquery-1.11.0.min.js"></script>
<script>
//http://stackoverflow.com/questions/32453806/uncaught-securityerror-failed-to-execute-replacestate-on-history-cannot-be
$(document).bind('mobileinit',function(){
$.mobile.changePage.defaults.changeHash = false;
$.mobile.hashListeningEnabled = false;
$.mobile.pushStateEnabled = false;
});
</script>
<script src="https://code.jquery.com/mobile/1.4.2/jquery.mobile-1.4.2.min.js"></script>
<style>
/* Basic settings */
.ui-li-static.ui-collapsible {
padding: 0;
}
.ui-li-static.ui-collapsible > .ui-collapsible-content > .ui-listview,
.ui-li-static.ui-collapsible > .ui-collapsible-heading {
margin: 0;
}
.ui-li-static.ui-collapsible > .ui-collapsible-content {
padding-top: 0;
padding-bottom: 0;
padding-right: 0;
border-bottom-width: 0;
}
/* collapse vertical borders */
.ui-li-static.ui-collapsible > .ui-collapsible-content > .ui-listview > li.ui-last-child,
.ui-li-static.ui-collapsible.ui-collapsible-collapsed > .ui-collapsible-heading > a.ui-btn {
border-bottom-width: 0;
}
.ui-li-static.ui-collapsible > .ui-collapsible-content > .ui-listview > li.ui-first-child,
.ui-li-static.ui-collapsible > .ui-collapsible-content > .ui-listview > li.ui-first-child > a.ui-btn,
.ui-li-static.ui-collapsible > .ui-collapsible-heading > a.ui-btn {
border-top-width: 0;
}
/* Remove right borders */
.ui-li-static.ui-collapsible > .ui-collapsible-heading > a.ui-btn,
.ui-li-static.ui-collapsible > .ui-collapsible-content > .ui-listview > .ui-li-static,
.ui-li-static.ui-collapsible > .ui-collapsible-content > .ui-listview > li > a.ui-btn,
.ui-li-static.ui-collapsible > .ui-collapsible-content {
border-right-width: 0;
}
/* Remove left borders */
/* Here, we need class ui-listview-outer to identify the outermost listview */
.ui-listview-outer > .ui-li-static.ui-collapsible .ui-li-static.ui-collapsible.ui-collapsible,
.ui-listview-outer > .ui-li-static.ui-collapsible > .ui-collapsible-heading > a.ui-btn,
.ui-li-static.ui-collapsible > .ui-collapsible-content {
border-left-width: 0;
}
.ui-content { max-width: 800px !important; margin: 0 auto !important; }
.ui-listview > .ui-li-static.ui-li-has-count { padding-right: 0px }
</style>
<script>
$(document).ready(function() {
var start = new Date(%d * 1000);
setInterval(function() {
var diff = (new Date - start) / 1000;
var days = Math.floor((diff %% 31536000) / 86400);
var hours = Math.floor(((diff %% 31536000) %% 86400) / 3600);
var minutes = Math.floor((((diff %% 31536000) %% 86400) %% 3600) / 60);
var seconds = Math.floor(((diff %% 31536000) %% 86400) %% 3600) %% 60;
$('.timer').text(days+":"+hours+":"+minutes+":"+seconds+" ago");
}, 1000);
$('a[data-dna]').each(function( index ) {
var dna = $(this).data('dna');
if (typeof CCPEVE !== 'undefined') { // inside IGB
$(this).attr('href', 'javascript:CCPEVE.showFitting("'+dna+'");');}
else { // outside IGB
$(this).attr('href', '%s'+dna); }
});
});
</script>
</head>
<body>
<div id="canvas" data-role="page">
<div data-role="header">
<h1>Pyfa fits</h1>
</div>
<div data-role="content">
<div style="text-align: center;"><strong>Last updated:</strong> %s <small>(<span class="timer"></span>)</small></div>
""" % (
            time.time(),
            dnaUrl,
            localDate,
        )
        HTML += ' <ul data-role="listview" class="ui-listview-outer" data-inset="true" data-filter="true">\n'

        categoryList = list(sMkt.getShipRoot())
        categoryList.sort(key=lambda _ship: _ship.name)

        count = 0
        for group in categoryList:
            # init market group string to give ships something to attach to
            HTMLgroup = ""
            ships = list(sMkt.getShipList(group.ID))
            ships.sort(key=lambda _ship: _ship.name)

            # Keep track of how many ships per group
            groupFits = 0
            for ship in ships:
                fits = sFit.getFitsWithShip(ship.ID)
                if len(fits) > 0:
                    groupFits += len(fits)
                    HTMLship = (
                        ' <li data-role="collapsible" data-iconpos="right" data-shadow="false" '
                        'data-corners="false">\n'
                        " <h2>"
                        + ship.name
                        + ' <span class="ui-li-count">'
                        + str(len(fits))
                        + "</span></h2>\n"
                        ' <ul data-role="listview" data-shadow="false" data-inset="true" '
                        'data-corners="false">\n'
                    )
                    for fit in fits:
                        if self.stopRunning:
                            return
                        try:
                            eftFit = Port.exportEft(
                                getFit(fit[0]),
                                options={
                                    PortEftOptions.IMPLANTS: True,
                                    PortEftOptions.MUTATIONS: True,
                                    PortEftOptions.LOADED_CHARGES: True,
                                },
                            )
                            HTMLfit = (
                                ' <li data-role="collapsible" data-iconpos="right" data-shadow="false" '
                                'data-corners="false">\n'
                                " <h2>" + fit[1] + "</h2>\n"
                                ' <ul data-role="listview" data-shadow="false" data-inset="true" '
                                'data-corners="false">\n'
                            )
                            HTMLfit += (
                                " <li><pre>"
                                + eftFit
                                + "\n </pre></li>\n"
                            )
                            HTMLfit += " </ul>\n </li>\n"
                            HTMLship += HTMLfit
                        except (KeyboardInterrupt, SystemExit):
                            raise
                        # Bug fix: was a bare "except:"; KeyboardInterrupt and
                        # SystemExit are already re-raised above, so catching
                        # Exception keeps behavior while not masking exits.
                        except Exception:
                            pyfalog.warning("Failed to export line")
                            continue
                        finally:
                            # Report progress even for fits that failed.
                            if self.callback:
                                wx.CallAfter(self.callback, count)
                            count += 1
                    HTMLgroup += HTMLship + (" </ul>\n" " </li>\n")
            if groupFits > 0:
                # Market group header
                HTML += (
                    ' <li data-role="collapsible" data-iconpos="right" data-shadow="false" data-corners="false">\n'
                    " <h2>"
                    + group.groupName
                    + ' <span class="ui-li-count">'
                    + str(groupFits)
                    + "</span></h2>\n"
                    ' <ul data-role="listview" data-shadow="false" data-inset="true" data-corners="false">\n'
                    + HTMLgroup
                    + " </ul>\n"
                    " </li>"
                )
        HTML += """
</ul>
</div>
</div>
</body>
</html>"""
        return HTML

    def generateMinimalHTML(self, sMkt, sFit, dnaUrl):
        """Generate a minimal HTML version of the fittings, without any javascript or styling"""
        categoryList = list(sMkt.getShipRoot())
        categoryList.sort(key=lambda _ship: _ship.name)

        count = 0
        HTML = ""
        for group in categoryList:
            # init market group string to give ships something to attach to
            ships = list(sMkt.getShipList(group.ID))
            # Bug fix: the ship list was sorted twice with the same key;
            # once is enough.
            ships.sort(key=lambda _ship: _ship.name)
            for ship in ships:
                fits = sFit.getFitsWithShip(ship.ID)
                for fit in fits:
                    if self.stopRunning:
                        return
                    try:
                        dnaFit = Port.exportDna(getFit(fit[0]))
                        HTML += (
                            '<a class="outOfGameBrowserLink" target="_blank" href="'
                            + dnaUrl
                            + dnaFit
                            + '">'
                            + ship.name
                            + ": "
                            + fit[1]
                            + "</a><br> \n"
                        )
                    except (KeyboardInterrupt, SystemExit):
                        raise
                    # Bug fix: narrowed from a bare "except:" (exit
                    # exceptions are re-raised above).
                    except Exception:
                        pyfalog.error("Failed to export line")
                        continue
                    finally:
                        if self.callback:
                            wx.CallAfter(self.callback, count)
                        count += 1
        return HTML
|
deep-learning | model | from collections import OrderedDict
import torch
import torch.nn as nn
SIZE = 48
class Unet3D(nn.Module):
    # Based on https://github.com/mateuszbuda/brain-segmentation-pytorch/blob/master/unet.py
    """3D U-Net with four encoder/decoder stages and skip connections.

    Expects input of shape (N, in_channels, D, H, W) with D, H, W divisible
    by 16 (four 2x max-pools); returns a sigmoid-activated map with
    `out_channels` channels and the same spatial size (kernel 5 / padding 2
    convolutions are size-preserving).
    """

    def __init__(self, in_channels=1, out_channels=1, init_features=8):
        super().__init__()
        features = init_features
        # Encoder: channel count doubles at each stage.
        self.encoder1 = self._block(
            in_channels, features=features, padding=2, name="enc1"
        )
        self.pool1 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.encoder2 = self._block(
            features, features=features * 2, padding=2, name="enc2"
        )
        self.pool2 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.encoder3 = self._block(
            features * 2, features=features * 4, padding=2, name="enc3"
        )
        self.pool3 = nn.MaxPool3d(kernel_size=2, stride=2)
        self.encoder4 = self._block(
            features * 4, features=features * 8, padding=2, name="enc4"
        )
        self.pool4 = nn.MaxPool3d(kernel_size=2, stride=2)

        self.bottleneck = self._block(
            features * 8, features=features * 16, padding=2, name="bottleneck"
        )

        # Decoder: transpose convs double spatial size; each decoder block
        # consumes the upsampled features concatenated with the skip.
        self.upconv4 = nn.ConvTranspose3d(
            features * 16, features * 8, kernel_size=4, stride=2, padding=1
        )
        self.decoder4 = self._block(
            features * 16, features=features * 8, padding=2, name="dec4"
        )
        self.upconv3 = nn.ConvTranspose3d(
            features * 8, features * 4, kernel_size=4, stride=2, padding=1
        )
        # Bug fix: decoder3/2/1 blocks were all named "dec4" (copy-paste);
        # distinct names keep the submodule names unambiguous.
        # NOTE: this renames state_dict keys for decoder3/2/1, so checkpoints
        # saved under the old names need key remapping when loaded.
        self.decoder3 = self._block(
            features * 8, features=features * 4, padding=2, name="dec3"
        )
        self.upconv2 = nn.ConvTranspose3d(
            features * 4, features * 2, kernel_size=4, stride=2, padding=1
        )
        self.decoder2 = self._block(
            features * 4, features=features * 2, padding=2, name="dec2"
        )
        self.upconv1 = nn.ConvTranspose3d(
            features * 2, features, kernel_size=4, stride=2, padding=1
        )
        self.decoder1 = self._block(
            features * 2, features=features, padding=2, name="dec1"
        )
        # Final 1x1x1 projection to the requested number of output channels.
        self.conv = nn.Conv3d(
            in_channels=features, out_channels=out_channels, kernel_size=1
        )

    def forward(self, img):
        """Run the U-Net; returns sigmoid probabilities in [0, 1]."""
        enc1 = self.encoder1(img)
        enc2 = self.encoder2(self.pool1(enc1))
        enc3 = self.encoder3(self.pool2(enc2))
        enc4 = self.encoder4(self.pool3(enc3))

        bottleneck = self.bottleneck(self.pool4(enc4))

        # Upsample, concatenate the matching encoder output, then decode.
        upconv4 = self.upconv4(bottleneck)
        dec4 = torch.cat((upconv4, enc4), dim=1)
        dec4 = self.decoder4(dec4)
        upconv3 = self.upconv3(dec4)
        dec3 = torch.cat((upconv3, enc3), dim=1)
        dec3 = self.decoder3(dec3)
        upconv2 = self.upconv2(dec3)
        dec2 = torch.cat((upconv2, enc2), dim=1)
        dec2 = self.decoder2(dec2)
        upconv1 = self.upconv1(dec2)
        dec1 = torch.cat((upconv1, enc1), dim=1)
        dec1 = self.decoder1(dec1)

        conv = self.conv(dec1)
        sigmoid = torch.sigmoid(conv)
        return sigmoid

    def _block(self, in_channels, features, padding=1, kernel_size=5, name="block"):
        """Two (Conv3d -> BatchNorm3d -> ReLU) layers with named submodules."""
        return nn.Sequential(
            OrderedDict(
                (
                    (
                        f"{name}_conv1",
                        nn.Conv3d(
                            in_channels=in_channels,
                            out_channels=features,
                            kernel_size=kernel_size,
                            padding=padding,
                            bias=True,
                        ),
                    ),
                    (f"{name}_norm1", nn.BatchNorm3d(num_features=features)),
                    (f"{name}_relu1", nn.ReLU(inplace=True)),
                    (
                        f"{name}_conv2",
                        nn.Conv3d(
                            in_channels=features,
                            out_channels=features,
                            kernel_size=kernel_size,
                            padding=padding,
                            bias=True,
                        ),
                    ),
                    (f"{name}_norm2", nn.BatchNorm3d(num_features=features)),
                    (f"{name}_relu2", nn.ReLU(inplace=True)),
                )
            )
        )
class WrapModel(torch.nn.Module):
    """Adapter that selects channel 1 of the wrapped model's output and
    applies a sigmoid (assumes the wrapped model emits >= 2 channels on
    dim 1 — confirm against the wrapped model)."""

    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, img):
        logits = self.model(img)
        foreground = logits[:, 1]
        return torch.sigmoid(foreground)
def main():
    """Smoke-test the model: run a forward pass, render the autograd graph
    with torchviz, and save the full model to disk."""
    import torchviz

    dev = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model = Unet3D()
    model.to(dev)
    model.eval()
    print(next(model.parameters()).is_cuda)  # True iff CUDA is available

    # Bug fix: Conv3d expects channels-first input (N, C, D, H, W); the old
    # shape (1, SIZE, SIZE, SIZE, 1) put SIZE=48 in the channel dimension,
    # which conflicts with in_channels=1 and crashes the forward pass.
    img = torch.randn(1, 1, SIZE, SIZE, SIZE).to(dev)
    out = model(img)

    dot = torchviz.make_dot(
        out, params=dict(model.named_parameters()), show_attrs=True, show_saved=True
    )
    dot.render("unet", format="png")
    torch.save(model, "model.pth")
    print(dot)


if __name__ == "__main__":
    main()
|
downloaders | FilepupNet | # -*- coding: utf-8 -*-
#
# Test links:
# http://www.filepup.net/files/k5w4ZVoF1410184283.html
# http://www.filepup.net/files/R4GBq9XH1410186553.html
import re
from ..base.simple_downloader import SimpleDownloader
class FilepupNet(SimpleDownloader):
    """Free-download plugin for filepup.net."""

    __name__ = "FilepupNet"
    __type__ = "downloader"
    __version__ = "0.08"
    __status__ = "testing"

    __pattern__ = r"http://(?:www\.)?filepup\.net/files/\w+"
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        ("fallback", "bool", "Fallback to free download if premium fails", True),
        ("chk_filesize", "bool", "Check file size", True),
        ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
    ]

    __description__ = """Filepup.net downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [
        ("zapp-brannigan", "fuerst.reinje@web.de"),
        ("Walter Purcaro", "vuolter@gmail.com"),
    ]

    NAME_PATTERN = r">(?P<N>.+?)</h1>"
    SIZE_PATTERN = r'class="fa fa-archive"></i> \((?P<S>[\d.,]+) (?P<U>[\w^_]+)'

    OFFLINE_PATTERN = r">This file has been deleted"

    LINK_FREE_PATTERN = r"(http://www\.filepup\.net/get/.+?)\'"

    def setup(self):
        """Single-connection, single-chunk downloads only."""
        self.multi_dl = False
        self.chunk_limit = 1

    def handle_free(self, pyfile):
        """Locate the free-download link on the page and start the download."""
        match = re.search(self.LINK_FREE_PATTERN, self.data)
        if match is None:
            return
        self.download(match.group(1), post={"task": "download"})
|
qltk | wlw | # Copyright 2005 Joe Wreschnig, Michael Urman
# 2016 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import math
import time
from gi.repository import Gdk, Gtk, Pango
from quodlibet import _
from quodlibet.qltk import Button, Icons, ToggleButton, get_top_parent
from quodlibet.util import format_int_locale, format_time_display
class WaitLoadBase:
    """Abstract class providing a label, a progressbar, pause/stop buttons,
    and the stepping logic."""

    def __init__(self, count=0, text="", initial=None, limit=3):
        """count: the total amount of items expected, 0 for unknown/indefinite
        text: text to display in the label; may contain % formats
        initial: initial values for % formats (text % initial)
        limit: count must be greater than limit (or 0) for pause/stop to appear

        The current iteration of the counter can be gotten as
        self.current. count can be gotten as self.count.
        """
        super().__init__()

        self._label = Gtk.Label()
        self._label.set_use_markup(True)

        self._progress = Gtk.ProgressBar()
        self._progress.set_pulse_step(0.08)
        self.pulse = self._progress.pulse
        self.set_fraction = self._progress.set_fraction
        self.set_text = self._label.set_markup

        # Bug fix: `initial` used to default to a shared mutable dict ({})
        # that setup() mutates via setdefault(), leaking state across
        # instances. Default to None and let setup() create a fresh dict.
        self.setup(count, text, initial)

        if self.count > limit or self.count == 0:
            # Add stop/pause buttons. count = 0 means an indefinite
            # number of steps.
            self._cancel_button = Button(_("_Stop"), Icons.PROCESS_STOP)
            self._pause_button = ToggleButton(_("P_ause"), Icons.MEDIA_PLAYBACK_PAUSE)
            self._cancel_button.connect("clicked", self.__cancel_clicked)
            self._pause_button.connect("clicked", self.__pause_clicked)
        else:
            self._cancel_button = None
            self._pause_button = None

    def setup(self, count=0, text="", initial=None):
        """Reset counters and render the initial label text."""
        self.current = 0
        self.count = count
        self._text = text
        self.paused = False
        self.quit = False
        self._start_time = time.time()

        initial = initial or {}
        initial.setdefault("total", self.count)
        initial.setdefault("current", self.current)
        initial.setdefault("remaining", _("Unknown"))

        def localeify(k, v):
            # Integer placeholders get locale formatting; switch the matching
            # %(k)d placeholder to %(k)s so the formatted string still fits.
            foo = "%(" + k + ")d"
            if foo in self._text:
                self._text = self._text.replace(foo, "%(" + k + ")s")
                return k, format_int_locale(int(v))
            return k, v

        localed = dict(localeify(k, v) for k, v in initial.items())
        self._label.set_markup(self._text % localed)
        self._progress.set_fraction(0.0)

    def __pause_clicked(self, button):
        self.paused = button.get_active()

    def __cancel_clicked(self, button):
        self.quit = True

    def step(self, **values):
        """Advance the counter by one. Arguments are applied to the
        originally-supplied text as a format string.

        This function doesn't return if the dialog is paused (though
        the GTK main loop will still run), and returns True if stop
        was pressed.
        """
        if self.count:
            self.current += 1
            self._progress.set_fraction(
                max(0, min(1, self.current / float(self.count)))
            )
        else:
            self._progress.pulse()

        values.setdefault("total", format_int_locale(self.count))
        values.setdefault("current", format_int_locale(self.current))
        if self.count:
            # Estimate time remaining from the average time per step so far.
            t = (time.time() - self._start_time) / self.current
            remaining = math.ceil((self.count - self.current) * t)
            values.setdefault("remaining", format_time_display(remaining))
        self._label.set_markup(self._text % values)

        # Spin the GTK loop while paused or while events are pending.
        while not self.quit and (self.paused or Gtk.events_pending()):
            Gtk.main_iteration()
        return self.quit
class WaitLoadWindow(WaitLoadBase, Gtk.Window):
    """A window with a progress bar and some nice updating text,
    as well as pause/stop buttons.

    Example:
        w = WaitLoadWindow(None, 5, "%(current)d/%(total)d")
        for i in range(1, 6): w.step()
        w.destroy()
    """

    def __init__(self, parent, *args):
        """parent: the parent window, or None"""
        Gtk.Window.__init__(self, type=Gtk.WindowType.TOPLEVEL)
        self.set_decorated(False)
        WaitLoadBase.__init__(self)
        self.setup(*args)

        parent = get_top_parent(parent)
        if parent:
            # Track the parent: keep centered over it, mirror its visibility,
            # and clean the signal handlers/cursor up when we are destroyed.
            sig = parent.connect("configure-event", self.__recenter)
            self.connect("destroy", self.__reset_cursor, parent)
            self.connect("destroy", self.__disconnect, sig, parent)
            sig_vis = parent.connect("visibility-notify-event", self.__update_visible)
            self.connect("destroy", self.__disconnect, sig_vis, parent)
            self.set_transient_for(parent)
            window = parent.get_window()
            if window:
                # Show a busy cursor on the parent while loading.
                window.set_cursor(Gdk.Cursor.new(Gdk.CursorType.WATCH))

        # Note that this should not be modal as popups occurring during
        # progress will not be clickable
        self.add(Gtk.Frame())
        self.get_child().set_shadow_type(Gtk.ShadowType.OUT)
        vbox = Gtk.VBox(spacing=12)
        vbox.set_border_width(12)

        self._label.set_size_request(170, -1)
        self._label.set_line_wrap(True)
        self._label.set_justify(Gtk.Justification.CENTER)
        vbox.pack_start(self._label, True, True, 0)
        vbox.pack_start(self._progress, True, True, 0)

        if self._cancel_button and self._pause_button:
            # Display a stop/pause box. count = 0 means an indefinite
            # number of steps.
            hbox = Gtk.HBox(spacing=6, homogeneous=True)
            hbox.pack_start(self._cancel_button, True, True, 0)
            hbox.pack_start(self._pause_button, True, True, 0)
            vbox.pack_start(hbox, True, True, 0)

        self.get_child().add(vbox)

        self.set_position(Gtk.WindowPosition.CENTER_ON_PARENT)
        self.get_child().show_all()

        # Drain pending GTK events so the window paints immediately.
        while Gtk.events_pending():
            Gtk.main_iteration()

    def __update_visible(self, parent, event):
        # Mirror the parent's visibility so the popup never floats alone.
        if event.state == Gdk.VisibilityState.FULLY_OBSCURED:
            self.hide()
        else:
            self.show()

    def __recenter(self, parent, event):
        # Re-center over the parent whenever the parent moves or resizes.
        x, y = parent.get_position()
        dx, dy = parent.get_size()
        dx2, dy2 = self.get_size()
        self.move(x + dx // 2 - dx2 // 2, y + dy // 2 - dy2 // 2)

    def __disconnect(self, widget, sig, parent):
        parent.disconnect(sig)

    def __reset_cursor(self, widget, parent):
        # Restore the default cursor that __init__ replaced with WATCH.
        if parent.get_window():
            parent.get_window().set_cursor(None)
class WritingWindow(WaitLoadWindow):
    """A WaitLoadWindow preconfigured with text suitable for saving files."""

    def __init__(self, parent, count):
        message = (
            _("Saving the songs you changed.")
            + "\n\n"
            + _("%(current)d/%(total)d songs saved\n(%(remaining)s remaining)")
        )
        super().__init__(parent, count, message)

    def step(self):
        """Advance one song; returns True if the user pressed stop."""
        return super().step()
class WaitLoadBar(WaitLoadBase, Gtk.HBox):
    """Inline progress-bar variant of WaitLoadBase for embedding in a box.

    The base __init__ runs with its default count=0 (indefinite), so the
    pause/stop buttons are always created before being restyled here.
    """

    def __init__(self):
        super().__init__()

        self._label.set_alignment(0.0, 0.5)
        self._label.set_ellipsize(Pango.EllipsizeMode.END)

        # Swap the buttons' stock children for compact menu-size icons.
        self._cancel_button.remove(self._cancel_button.get_child())
        self._cancel_button.add(
            Gtk.Image.new_from_icon_name(Icons.PROCESS_STOP, Gtk.IconSize.MENU)
        )
        self._pause_button.remove(self._pause_button.get_child())
        self._pause_button.add(
            Gtk.Image.new_from_icon_name(Icons.MEDIA_PLAYBACK_PAUSE, Gtk.IconSize.MENU)
        )

        self.pack_start(self._label, True, True, 0)
        self.pack_start(self._progress, False, True, 6)
        self.pack_start(self._pause_button, False, True, 0)
        self.pack_start(self._cancel_button, False, True, 0)

        for child in self.get_children():
            child.show_all()

    def step(self, **values):
        """Advance the counter and also show "current of all" on the bar."""
        ret = super().step(**values)
        params = {
            "current": format_int_locale(self.current),
            "all": format_int_locale(self.count),
        }
        self._progress.set_text(_("%(current)s of %(all)s") % params)
        return ret
|
tools | pull_shared_translations | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2023 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import argparse
import logging
import os
import os.path
import sys
from wlc import Component, Weblate
from wlc.config import WeblateConfig
WEBLATE_URL = "https://translations.metabrainz.org/api/"
PROJECT_NAME = "musicbrainz"
PROJECT_COMPONENTS = (
"attributes",
"countries",
)
MIN_TRANSLATED_PERCENT = 10
logging.basicConfig(
force=True,
format="%(asctime)s:%(levelname)s: %(message)s",
level=logging.INFO,
stream=sys.stderr,
)
def fetch_translations(
    component_name: str, user_key: str = "", config: WeblateConfig = None
):
    """Download .po files (and the .pot template) for one Weblate component.

    Translations below MIN_TRANSLATED_PERCENT and template entries are
    skipped; files are written into the po/<component_name> directory.
    """
    weblate = Weblate(key=user_key, url=WEBLATE_URL, config=config)
    component = Component(weblate, f"components/{PROJECT_NAME}/{component_name}/")
    logging.info("Processing component %s...", component["name"])

    source_language = component["source_language"]["code"]
    output_dir = get_output_dir(component_name)
    logging.info("Output dir: %s", output_dir)

    for translation in component.list():
        language = translation["language"]
        # Skip incomplete translations and translation templates
        too_incomplete = translation["translated_percent"] < MIN_TRANSLATED_PERCENT
        if too_incomplete or translation["is_template"]:
            logging.info("Skipping translation file for %s.", language["name"])
            continue

        # The source language is exported as the .pot template.
        if language["code"] == source_language:
            filename = f"{component_name}.pot"
        else:
            filename = f"{language['code']}.po"

        logging.info("Downloading translation file %s...", filename)
        data = translation.download()
        with open(os.path.join(output_dir, filename), "bw") as output_file:
            output_file.write(data)
def get_output_dir(component_name: str) -> str:
    """Return the po/<component_name> directory path, creating it if needed."""
    base = os.path.dirname(__file__)
    path = os.path.join(base, "..", "..", "po", component_name)
    os.makedirs(path, exist_ok=True)
    return path
def load_config() -> "WeblateConfig":
    """Load the Weblate configuration from the repository's .weblate.ini.

    Returns None when the file does not exist.
    """
    config_path = os.path.join(os.path.dirname(__file__), "..", "..", ".weblate.ini")
    # Bug fix: the original tested the function object `os.path.exists`
    # (always truthy) instead of calling it with config_path, so the
    # "missing file" branch was unreachable and load() could crash.
    if os.path.exists(config_path):
        config = WeblateConfig()
        config.load(config_path)
        return config
    return None
def main():
    """CLI entry point: resolve the Weblate user key (flag or .weblate.ini)
    and pull translations for every shared component."""
    arg_parser = argparse.ArgumentParser(
        prog="pull-shared-translations",
        description=(
            "Fetches the translations for attributes and countries from "
            "the MusicBrainz Server project on Weblate."
        ),
        epilog=(
            "Instead of passing the --key parameter the key can also be set in "
            "a file .weblate.ini in the repositories root directory. See "
            "po/README.md for details."
        ),
    )
    arg_parser.add_argument("-k", "--key", help="Weblate user key")
    cli_args = arg_parser.parse_args()

    config = None
    if not cli_args.key:
        # No key on the command line: fall back to .weblate.ini.
        config = load_config()
        if not config:
            arg_parser.print_usage()
            arg_parser.error("No Weblate user key specified. See po/README.md for details.")
        url, key = config.get_url_key()
        if not key or url != WEBLATE_URL:
            arg_parser.print_usage()
            arg_parser.error("Invalid .weblate.ini. See po/README.md for details.")

    for component_name in PROJECT_COMPONENTS:
        fetch_translations(component_name, user_key=cli_args.key, config=config)


if __name__ == "__main__":
    logging.debug("Starting...")
    main()
|
escalation-snapshot | utils | import datetime
import typing
import pytz
from django.utils import timezone
def eta_for_escalation_step_notify_if_time(
    from_time: datetime.time, to_time: datetime.time
) -> typing.Optional[datetime.datetime]:
    """
    Counts eta for STEP_NOTIFY_IF_TIME: the next moment the [from_time,
    to_time) window opens, or None when we are currently inside the window.
    """
    now = timezone.now()
    today = now.date()
    tomorrow = (now + datetime.timedelta(days=1)).date()
    current_time = now.time()

    def _window_start(day):
        # Combine a date with the window-start wall-clock time, then
        # convert the (locally interpreted) datetime to UTC.
        return datetime.datetime.combine(day, from_time).astimezone(pytz.UTC)

    eta: typing.Optional[datetime.datetime] = None
    if from_time < to_time:
        # Window is within a single day.
        if from_time > current_time:
            eta = _window_start(today)
        elif current_time >= to_time:
            eta = _window_start(tomorrow)
    elif from_time > to_time:
        # Window wraps past midnight; only the gap needs an eta.
        if from_time > current_time >= to_time:
            eta = _window_start(today)
    else:  # from_time == to_time
        if from_time > current_time:
            eta = _window_start(today)
        elif current_time > to_time:
            eta = _window_start(tomorrow)
    return eta
|
utils | kronos | #!/usr/bin/python3
"""Module that provides a cron-like task scheduler.
This task scheduler is designed to be used from inside your own program.
You can schedule Python functions to be called at specific intervals or
days. It uses the standard 'sched' module for the actual task scheduling,
but provides much more:
* repeated tasks (at intervals, or on specific days)
* error handling (exceptions in tasks don't kill the scheduler)
* optional to run scheduler in its own thread or separate process
* optional to run a task in its own thread or separate process
If the threading module is available, you can use the various Threaded
variants of the scheduler and associated tasks. If threading is not
available, you could still use the forked variants. If fork is also
not available, all processing is done in a single process, sequentially.
There are three Scheduler classes:
Scheduler ThreadedScheduler ForkedScheduler
You usually add new tasks to a scheduler using the add_interval_task or
add_daytime_task methods, with the appropriate processmethod argument
to select sequential, threaded or forked processing. NOTE: it is impossible
to add new tasks to a ForkedScheduler, after the scheduler has been started!
For more control you can use one of the following Task classes
and use schedule_task or schedule_task_abs:
IntervalTask ThreadedIntervalTask ForkedIntervalTask
SingleTask ThreadedSingleTask ForkedSingleTask
WeekdayTask ThreadedWeekdayTask ForkedWeekdayTask
MonthdayTask ThreadedMonthdayTask ForkedMonthdayTask
Kronos is the Greek God of Time.
Kronos scheduler (c) Irmen de Jong.
This version has been extracted from the Turbogears source repository
and slightly changed to be completely stand-alone again. Also some fixes
have been made to make it work on Python 2.6 (sched module changes).
The version in Turbogears is based on the original stand-alone Kronos.
This is open-source software, released under the MIT Software License:
http://www.opensource.org/licenses/mit-license.php
Adapted to work on Python 3 by the SABnzbd-Team.
"""
__version__ = "2.1"
__all__ = [
"DayTaskRescheduler",
"ForkedIntervalTask",
"ForkedMonthdayTask",
"ForkedScheduler",
"ForkedSingleTask",
"ForkedTaskMixin",
"ForkedWeekdayTask",
"IntervalTask",
"MonthdayTask",
"Scheduler",
"SingleTask",
"Task",
"ThreadedIntervalTask",
"ThreadedMonthdayTask",
"ThreadedScheduler",
"ThreadedSingleTask",
"ThreadedTaskMixin",
"ThreadedWeekdayTask",
"WeekdayTask",
"method",
]
import logging
import os
import sched
import threading
import time
import weakref
class method:
    """Namespace of processing-method constants accepted by the
    Scheduler's add_* methods: run the task inline, in a thread, or in a
    forked process."""

    sequential = "sequential"
    threaded = "threaded"
    forked = "forked"
class Scheduler:
    """The Scheduler itself.

    Wraps a stdlib sched.scheduler with task bookkeeping, an
    interruptible delay function, and locking hooks that are no-ops here
    and overridden by ThreadedScheduler.
    """
    def __init__(self):
        self.running = True
        self.sched = sched.scheduler(time.time, self.__delayfunc)
    def __delayfunc(self, delay):
        # This delay function is basically a time.sleep() that is
        # divided up, so that we can check the self.running flag while delaying.
        # there is an additional check in here to ensure that the top item of
        # the queue hasn't changed
        if delay < 10:
            time.sleep(delay)
        else:
            toptime = self._getqueuetoptime()
            endtime = time.time() + delay
            period = 5
            stoptime = endtime - period
            while (
                self.running
                and stoptime > time.time()
                and self._getqueuetoptime() == toptime
            ):
                time.sleep(period)
            # Bail out early if we were stopped or a sooner task was queued.
            if not self.running or self._getqueuetoptime() != toptime:
                return
            now = time.time()
            if endtime > now:
                time.sleep(endtime - now)
    def _acquire_lock(self):
        """Lock the task queue; no-op here, overridden in ThreadedScheduler."""
        pass
    def _release_lock(self):
        """Unlock the task queue; no-op here, overridden in ThreadedScheduler."""
        pass
    def add_interval_task(
        self,
        action,
        taskname,
        initialdelay,
        interval,
        processmethod=method.sequential,
        args=None,
        kw=None,
    ):
        """Add a new Interval Task to the schedule.
        A very short initialdelay or one of zero cannot be honored, you will
        see a slight delay before the task is first executed. This is because
        the scheduler needs to pick it up in its loop.

        Note: initialdelay may be 0, but interval must be >= 1 second.
        """
        if initialdelay < 0 or interval < 1:
            raise ValueError("Delay or interval must be >0")
        # Select the correct IntervalTask class. Not all types may be available!
        if processmethod == method.sequential:
            TaskClass = IntervalTask
        elif processmethod == method.threaded:
            TaskClass = ThreadedIntervalTask
        elif processmethod == method.forked:
            TaskClass = ForkedIntervalTask
        else:
            raise ValueError("Invalid processmethod")
        if not args:
            args = []
        if not kw:
            kw = {}
        task = TaskClass(taskname, interval, action, args, kw)
        self.schedule_task(task, initialdelay)
        return task
    def add_single_task(
        self,
        action,
        taskname,
        initialdelay,
        processmethod=method.sequential,
        args=None,
        kw=None,
    ):
        """Add a new task to the scheduler that will only be executed once."""
        if initialdelay < 0:
            raise ValueError("Delay must be >0")
        # Select the correct SingleTask class. Not all types may be available!
        if processmethod == method.sequential:
            TaskClass = SingleTask
        elif processmethod == method.threaded:
            TaskClass = ThreadedSingleTask
        elif processmethod == method.forked:
            TaskClass = ForkedSingleTask
        else:
            raise ValueError("Invalid processmethod")
        if not args:
            args = []
        if not kw:
            kw = {}
        task = TaskClass(taskname, action, args, kw)
        self.schedule_task(task, initialdelay)
        return task
    def add_daytime_task(
        self,
        action,
        taskname,
        weekdays,
        monthdays,
        timeonday,
        processmethod=method.sequential,
        args=None,
        kw=None,
    ):
        """Add a new Day Task (Weekday or Monthday) to the schedule."""
        if weekdays and monthdays:
            raise ValueError("You can only specify weekdays or monthdays, " "not both")
        if not args:
            args = []
        if not kw:
            kw = {}
        if weekdays:
            # Select the correct WeekdayTask class.
            # Not all types may be available!
            if processmethod == method.sequential:
                TaskClass = WeekdayTask
            elif processmethod == method.threaded:
                TaskClass = ThreadedWeekdayTask
            elif processmethod == method.forked:
                TaskClass = ForkedWeekdayTask
            else:
                raise ValueError("Invalid processmethod")
            task = TaskClass(taskname, weekdays, timeonday, action, args, kw)
        if monthdays:
            # Select the correct MonthdayTask class.
            # Not all types may be available!
            if processmethod == method.sequential:
                TaskClass = MonthdayTask
            elif processmethod == method.threaded:
                TaskClass = ThreadedMonthdayTask
            elif processmethod == method.forked:
                TaskClass = ForkedMonthdayTask
            else:
                raise ValueError("Invalid processmethod")
            task = TaskClass(taskname, monthdays, timeonday, action, args, kw)
        # First run may happen today if the time of day has not passed yet.
        firsttime = task.get_schedule_time(True)
        self.schedule_task_abs(task, firsttime)
        return task
    def schedule_task(self, task, delay):
        """Add a new task to the scheduler with the given delay (seconds).
        Low-level method for internal use.
        """
        if self.running:
            # lock the sched queue, if needed
            self._acquire_lock()
            try:
                # The task itself is the callable; it gets a weakref to us
                # so it can reschedule itself without a reference cycle.
                task.event = self.sched.enter(delay, 0, task, (weakref.ref(self),))
            finally:
                self._release_lock()
        else:
            task.event = self.sched.enter(delay, 0, task, (weakref.ref(self),))
    def schedule_task_abs(self, task, abstime):
        """Add a new task to the scheduler for the given absolute time value.
        Low-level method for internal use.
        """
        if self.running:
            # lock the sched queue, if needed
            self._acquire_lock()
            try:
                task.event = self.sched.enterabs(abstime, 0, task, (weakref.ref(self),))
            finally:
                self._release_lock()
        else:
            task.event = self.sched.enterabs(abstime, 0, task, (weakref.ref(self),))
    def start(self):
        """Start the scheduler."""
        self._run()
    def stop(self):
        """Remove all pending tasks and stop the Scheduler."""
        self.running = False
        self._clearschedqueue()
    def cancel(self, task):
        """Cancel given scheduled task."""
        try:
            self.sched.cancel(task.event)
        except ValueError:
            # Ignore if the task was already removed from the queue
            pass
    def _getqueuetoptime(self):
        """Return the fire time of the earliest queued event, or 0.0 if empty."""
        try:
            # NOTE(review): reaches into sched's private _queue; relies on
            # the Python 2.6+ sched internals mentioned in the module docstring.
            return self.sched._queue[0].time
        except IndexError:
            return 0.0
    def _clearschedqueue(self):
        # Empty the underlying sched queue in place.
        self.sched._queue[:] = []
    def _run(self):
        # Low-level run method to do the actual scheduling loop.
        self.running = True
        while self.running:
            try:
                self.sched.run()
            except Exception as x:
                logging.error(
                    "Error during scheduler execution: %s" % str(x), exc_info=True
                )
            # queue is empty; sleep a short while before checking again
            if self.running:
                time.sleep(5)
class Task:
    """Abstract base class of all scheduler tasks.

    Stores the callable plus its positional/keyword arguments; concrete
    subclasses decide how (and whether) the task is rescheduled.
    """

    def __init__(self, name, action, args, kw):
        """This is an abstract class!"""
        self.name = name
        self.action = action
        self.args = args
        self.kw = kw

    def __call__(self, schedulerref):
        """Execute the task action in the scheduler's thread."""
        try:
            self.execute()
        except Exception as exc:
            self.handle_exception(exc)
        # Dereference the weakref to get the scheduler back.
        self.reschedule(schedulerref())

    def reschedule(self, scheduler):
        """This method should be defined in one of the sub classes!"""
        msg = "You're using the abstract base class 'Task', use a concrete class instead"
        raise NotImplementedError(msg)

    def execute(self):
        """Execute the actual task."""
        self.action(*self.args, **self.kw)

    def handle_exception(self, exc):
        """Handle any exception that occurred during task execution."""
        logging.error("Error during scheduler execution: %s", exc, exc_info=True)
class SingleTask(Task):
    """A task that only runs once."""
    def reschedule(self, scheduler):
        # One-shot task: deliberately never re-enters the queue.
        pass
class IntervalTask(Task):
    """A repeated task that occurs at certain intervals (in seconds)."""

    def __init__(self, name, interval, action, args=None, kw=None):
        """Create an interval task firing every `interval` seconds.

        args/kw are normalized to an empty list/dict so that execute()
        also works when the task is constructed directly rather than via
        Scheduler.add_interval_task (which normalizes them itself);
        previously a direct construction left them as None and execute()
        crashed on `*None`.
        """
        if args is None:
            args = []
        if kw is None:
            kw = {}
        Task.__init__(self, name, action, args, kw)
        self.interval = interval

    def reschedule(self, scheduler):
        """Reschedule this task according to its interval (in seconds)."""
        scheduler.schedule_task(self, self.interval)
class DayTaskRescheduler:
    """A mixin class that contains the reschedule logic for the DayTasks."""
    def __init__(self, timeonday):
        # (hour, minute) pair at which the task should fire each day.
        self.timeonday = timeonday
    def get_schedule_time(self, today):
        """Calculate the time value at which this task is to be scheduled.

        If `today` is true and the configured time of day has not passed
        yet, schedule for today; otherwise for tomorrow.
        """
        now = list(time.localtime())
        if today:
            # schedule for today. let's see if that is still possible
            if (now[3], now[4]) >= self.timeonday:
                # too bad, it will be tomorrow
                now[2] += 1
        else:
            # tomorrow
            now[2] += 1
        # set new time on day (hour,minute)
        now[3], now[4] = self.timeonday
        # seconds
        now[5] = 0
        # mktime normalizes out-of-range fields (e.g. day 32 rolls into
        # the next month), so the +1 on the day-of-month above is safe.
        return time.mktime(tuple(now))
    def reschedule(self, scheduler):
        """Reschedule this task according to the daytime for the task.
        The task is scheduled for tomorrow, for the given daytime.
        """
        # (The execute method in the concrete Task classes will check
        # if the current day is a day on which the task must run).
        abstime = self.get_schedule_time(False)
        scheduler.schedule_task_abs(self, abstime)
class WeekdayTask(DayTaskRescheduler, Task):
    """A task that fires on the given weekdays (1-7, where 1 is Monday)
    at a fixed (hour, minute) time of day."""

    def __init__(self, name, weekdays, timeonday, action, args=None, kw=None):
        if type(timeonday) not in (tuple, list) or len(timeonday) != 2:
            raise TypeError("timeonday must be a 2-tuple (hour,minute)")
        if type(weekdays) not in (tuple, list):
            raise TypeError(
                "weekdays must be a sequence of weekday numbers 1-7 (1 is Monday)"
            )
        DayTaskRescheduler.__init__(self, timeonday)
        Task.__init__(self, name, action, args, kw)
        self.days = weekdays

    def execute(self):
        # Invoked daily at the configured time; only fire the action when
        # today's weekday (converted to the 1-7 convention) was requested.
        today = time.localtime().tm_wday + 1
        if today in self.days:
            self.action(*self.args, **self.kw)
class MonthdayTask(DayTaskRescheduler, Task):
    """A task that fires on the given days of the month (1-31) at a
    fixed (hour, minute) time of day."""

    def __init__(self, name, monthdays, timeonday, action, args=None, kw=None):
        if type(timeonday) not in (tuple, list) or len(timeonday) != 2:
            raise TypeError("timeonday must be a 2-tuple (hour,minute)")
        if type(monthdays) not in (tuple, list):
            raise TypeError("monthdays must be a sequence of monthdays numbers 1-31")
        DayTaskRescheduler.__init__(self, timeonday)
        Task.__init__(self, name, action, args, kw)
        self.days = monthdays

    def execute(self):
        # Invoked daily at the configured time; only fire the action when
        # today's day-of-month was requested.
        today = time.localtime().tm_mday
        if today in self.days:
            self.action(*self.args, **self.kw)
class ThreadedScheduler(Scheduler):
    """A Scheduler that runs in its own thread."""
    def __init__(self):
        super().__init__()
        # we require a lock around the task queue
        self._lock = threading.Lock()
    def start(self):
        """Splice off a thread in which the scheduler will run."""
        self.thread = threading.Thread(target=self._run)
        # Daemon thread: do not keep the interpreter alive on shutdown.
        self.thread.daemon = True
        self.thread.start()
    def stop(self):
        """Stop the scheduler and wait for the thread to finish."""
        Scheduler.stop(self)
        try:
            self.thread.join()
        except AttributeError:
            # start() was never called, so there is no thread to join.
            pass
    def _acquire_lock(self):
        """Lock the thread's task queue."""
        self._lock.acquire()
    def _release_lock(self):
        """Release the lock on the thread's task queue."""
        self._lock.release()
class ThreadedTaskMixin:
    """Mixin that makes a Task execute on its own worker thread.

    The scheduler thread only spawns the worker and immediately
    reschedules the task; the actual execute() call and its exception
    handling happen inside the worker via threadedcall().
    """

    def __call__(self, schedulerref):
        """Spawn a worker thread for the action, then reschedule."""
        worker = threading.Thread(target=self.threadedcall)
        worker.start()
        scheduler = schedulerref()
        self.reschedule(scheduler)

    def threadedcall(self):
        # Runs inside the worker thread, so the execute() call and its
        # exception handling must live here rather than in __call__.
        try:
            self.execute()
        except Exception as exc:
            self.handle_exception(exc)
# Concrete thread-backed task types. ThreadedTaskMixin must come first in
# the bases so its __call__ (thread-spawning) overrides Task.__call__.
class ThreadedIntervalTask(ThreadedTaskMixin, IntervalTask):
    """Interval Task that executes in its own thread."""
    pass
class ThreadedSingleTask(ThreadedTaskMixin, SingleTask):
    """Single Task that executes in its own thread."""
    pass
class ThreadedWeekdayTask(ThreadedTaskMixin, WeekdayTask):
    """Weekday Task that executes in its own thread."""
    pass
class ThreadedMonthdayTask(ThreadedTaskMixin, MonthdayTask):
    """Monthday Task that executes in its own thread."""
    pass
# The forked variants only exist on platforms that provide os.fork()
# (i.e. not on Windows).
if hasattr(os, "fork"):
    import signal
    class ForkedScheduler(Scheduler):
        """A Scheduler that runs in its own forked process."""
        def __del__(self):
            # Make sure the child process does not outlive us.
            if hasattr(self, "childpid"):
                os.kill(self.childpid, signal.SIGKILL)
        def start(self):
            """Fork off a new process in which the scheduler will run."""
            pid = os.fork()
            if pid == 0:
                # we are the child
                signal.signal(signal.SIGUSR1, self.signalhandler)
                self._run()
                os._exit(0)
            else:
                # we are the parent
                self.childpid = pid
                # can no longer insert in the scheduler queue
                del self.sched
        def stop(self):
            """Stop the scheduler and wait for the process to finish."""
            # SIGUSR1 asks the child to stop its loop (see signalhandler).
            os.kill(self.childpid, signal.SIGUSR1)
            os.waitpid(self.childpid, 0)
        def signalhandler(self, sig, stack):
            Scheduler.stop(self)
    class ForkedTaskMixin:
        """A mixin class to make a Task execute in a separate process."""
        def __call__(self, schedulerref):
            """Execute the task action in its own process."""
            pid = os.fork()
            if pid == 0:
                # we are the child
                try:
                    self.execute()
                except Exception as x:
                    self.handle_exception(x)
                os._exit(0)
            else:
                # we are the parent
                self.reschedule(schedulerref())
    class ForkedIntervalTask(ForkedTaskMixin, IntervalTask):
        """Interval Task that executes in its own process."""
        pass
    class ForkedSingleTask(ForkedTaskMixin, SingleTask):
        """Single Task that executes in its own process."""
        pass
    class ForkedWeekdayTask(ForkedTaskMixin, WeekdayTask):
        """Weekday Task that executes in its own process."""
        pass
    class ForkedMonthdayTask(ForkedTaskMixin, MonthdayTask):
        """Monthday Task that executes in its own process."""
        pass
if __name__ == "__main__":
    # Manual smoke test: run a threaded interval task for 15 seconds.

    def testaction(arg):
        """Demo task: report start, sleep a bit, report end."""
        # The old 2to3 conversion left print((...)) behind, which printed
        # tuple reprs like (">>>TASK", ...) instead of the message.
        print(">>>TASK", arg, "sleeping 3 seconds")
        time.sleep(3)
        print("<<<END_TASK", arg)

    s = ThreadedScheduler()
    s.add_interval_task(
        testaction, "test action 1", 0, 4, method.threaded, ["task 1"], None
    )
    s.start()
    print("Scheduler started, waiting 15 sec....")
    time.sleep(15)
    print("STOP SCHEDULER")
    s.stop()
    print("EXITING")
|
Spreadsheet | InitGui | # ***************************************************************************
# * Copyright (c) 2002,2003 Juergen Riegel <juergen.riegel@web.de> *
# * Copyright (c) 2013 Eivind Kvedalen <eivind@kvedalen.name> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License (GPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# ***************************************************************************/
# Spreadsheet gui init module
#
# Gathering all the information to start FreeCAD
# This is the second one of three init scripts, the third one
# runs when the gui is up
class SpreadsheetWorkbench(Workbench):
    "Spreadsheet workbench object"

    def __init__(self):
        # Workbench metadata lives on the class, as FreeCAD expects.
        cls = self.__class__
        icon = (
            FreeCAD.getResourceDir()
            + "Mod/Spreadsheet/Resources/icons/SpreadsheetWorkbench.svg"
        )
        cls.Icon = icon
        cls.MenuText = "Spreadsheet"
        cls.ToolTip = "Spreadsheet workbench"

    def Initialize(self):
        # Importing the module registers the workbench's GUI commands.
        import SpreadsheetGui  # noqa: F401

    def GetClassName(self):
        return "SpreadsheetGui::Workbench"
# Register the workbench with the FreeCAD GUI.
Gui.addWorkbench(SpreadsheetWorkbench())
# Append the open handler
FreeCAD.addImportType("Spreadsheet formats (*.csv)", "SpreadsheetGui")
|
localModuleCargo | cargoToLocalModule | import eos.db
import gui.mainFrame
import wx
from gui import globalEvents as GE
from gui.fitCommands.calc.cargo.add import CalcAddCargoCommand
from gui.fitCommands.calc.cargo.remove import CalcRemoveCargoCommand
from gui.fitCommands.calc.module.changeCharges import CalcChangeModuleChargesCommand
from gui.fitCommands.calc.module.localReplace import CalcReplaceLocalModuleCommand
from gui.fitCommands.helpers import (
CargoInfo,
InternalCommandHistory,
ModuleInfo,
restoreRemovedDummies,
)
from service.fit import Fit
class GuiCargoToLocalModuleCommand(wx.Command):
    """
    Moves cargo to the fitting window. If target is not empty, take whatever we take off and put
    into the cargo hold. If we copy, we do the same but do not remove the item from the cargo hold.
    """
    def __init__(self, fitID, cargoItemID, modPosition, copy):
        wx.Command.__init__(self, True, "Cargo to Local Module")
        # All Calc* sub-commands go through this history so Undo() can
        # roll back the whole batch at once.
        self.internalHistory = InternalCommandHistory()
        self.fitID = fitID
        self.srcCargoItemID = cargoItemID
        self.dstModPosition = modPosition
        self.copy = copy
        # Filled in on success so Do()/Undo() can post the right GUI events.
        self.removedModItemID = None
        self.addedModItemID = None
        self.savedRemovedDummies = None
    def Do(self):
        """Perform the drag; returns True on success, False otherwise."""
        sFit = Fit.getInstance()
        fit = sFit.getFit(self.fitID)
        srcCargo = next((c for c in fit.cargo if c.itemID == self.srcCargoItemID), None)
        if srcCargo is None:
            return False
        dstMod = fit.modules[self.dstModPosition]
        # Moving/copying charge from cargo to fit
        if srcCargo.item.isCharge and not dstMod.isEmpty:
            # Whatever was loaded goes back to cargo; the dragged charge
            # goes into the module.
            newCargoChargeItemID = dstMod.chargeID
            newCargoChargeAmount = dstMod.numCharges
            newModChargeItemID = self.srcCargoItemID
            newModChargeAmount = dstMod.getNumCharges(srcCargo.item)
            if newCargoChargeItemID == newModChargeItemID:
                return False
            commands = []
            if not self.copy:
                commands.append(
                    CalcRemoveCargoCommand(
                        fitID=self.fitID,
                        cargoInfo=CargoInfo(
                            itemID=newModChargeItemID, amount=newModChargeAmount
                        ),
                    )
                )
            if newCargoChargeItemID is not None:
                commands.append(
                    CalcAddCargoCommand(
                        fitID=self.fitID,
                        cargoInfo=CargoInfo(
                            itemID=newCargoChargeItemID, amount=newCargoChargeAmount
                        ),
                    )
                )
            commands.append(
                CalcChangeModuleChargesCommand(
                    fitID=self.fitID,
                    projected=False,
                    chargeMap={self.dstModPosition: self.srcCargoItemID},
                )
            )
            # NOTE(review): unlike the module branch below, a failed batch
            # here is not explicitly undone before the flush — confirm
            # submitBatch leaves a clean state on failure.
            success = self.internalHistory.submitBatch(*commands)
        # Moving/copying/replacing module
        elif srcCargo.item.isModule:
            dstModItemID = dstMod.itemID
            dstModSlot = dstMod.slot
            if self.srcCargoItemID == dstModItemID:
                return False
            # To keep all old item properties, copy them over from old module, except for mutations
            newModInfo = ModuleInfo.fromModule(dstMod, unmutate=True)
            newModInfo.itemID = self.srcCargoItemID
            if dstMod.isEmpty:
                newCargoModItemID = None
                dstModChargeItemID = None
                dstModChargeAmount = None
            else:
                # We cannot put mutated items to cargo, so use unmutated item ID
                newCargoModItemID = ModuleInfo.fromModule(dstMod, unmutate=True).itemID
                dstModChargeItemID = dstMod.chargeID
                dstModChargeAmount = dstMod.numCharges
            commands = []
            # Keep cargo only in case we were copying
            if not self.copy:
                commands.append(
                    CalcRemoveCargoCommand(
                        fitID=self.fitID,
                        cargoInfo=CargoInfo(itemID=self.srcCargoItemID, amount=1),
                    )
                )
            # Add item to cargo only if we copied/moved to non-empty slot
            if newCargoModItemID is not None:
                commands.append(
                    CalcAddCargoCommand(
                        fitID=self.fitID,
                        cargoInfo=CargoInfo(itemID=newCargoModItemID, amount=1),
                    )
                )
            cmdReplace = CalcReplaceLocalModuleCommand(
                fitID=self.fitID,
                position=self.dstModPosition,
                newModInfo=newModInfo,
                unloadInvalidCharges=True,
            )
            commands.append(cmdReplace)
            # Submit batch now because we need to have updated info on fit to keep going
            success = self.internalHistory.submitBatch(*commands)
            newMod = fit.modules[self.dstModPosition]
            # Bail if drag happened to slot to which module cannot be dragged, will undo later
            if newMod.slot != dstModSlot:
                success = False
            if success:
                # If we had to unload charge, add it to cargo
                if cmdReplace.unloadedCharge and dstModChargeItemID is not None:
                    cmdAddCargoCharge = CalcAddCargoCommand(
                        fitID=self.fitID,
                        cargoInfo=CargoInfo(
                            itemID=dstModChargeItemID, amount=dstModChargeAmount
                        ),
                    )
                    success = self.internalHistory.submit(cmdAddCargoCharge)
                # If we did not unload charge and there still was a charge, see if amount differs and process it
                elif not cmdReplace.unloadedCharge and dstModChargeItemID is not None:
                    # How many extra charges do we need to take from cargo
                    extraChargeAmount = newMod.numCharges - dstModChargeAmount
                    if extraChargeAmount > 0:
                        cmdRemoveCargoExtraCharge = CalcRemoveCargoCommand(
                            fitID=self.fitID,
                            cargoInfo=CargoInfo(
                                itemID=dstModChargeItemID, amount=extraChargeAmount
                            ),
                        )
                        # Do not check if operation was successful or not, we're okay if we have no such
                        # charges in cargo
                        self.internalHistory.submit(cmdRemoveCargoExtraCharge)
                    elif extraChargeAmount < 0:
                        cmdAddCargoExtraCharge = CalcAddCargoCommand(
                            fitID=self.fitID,
                            cargoInfo=CargoInfo(
                                itemID=dstModChargeItemID, amount=abs(extraChargeAmount)
                            ),
                        )
                        success = self.internalHistory.submit(cmdAddCargoExtraCharge)
            if success:
                # Store info to properly send events later
                self.removedModItemID = dstModItemID
                self.addedModItemID = self.srcCargoItemID
            else:
                self.internalHistory.undoAll()
        else:
            # Dragged item is neither a charge (onto a fitted module) nor a module.
            return False
        eos.db.flush()
        sFit.recalc(self.fitID)
        self.savedRemovedDummies = sFit.fill(self.fitID)
        eos.db.commit()
        # Tell the GUI what changed; fall back to a generic event.
        events = []
        if self.removedModItemID is not None:
            events.append(
                GE.FitChanged(
                    fitIDs=(self.fitID,), action="moddel", typeID=self.removedModItemID
                )
            )
        if self.addedModItemID is not None:
            events.append(
                GE.FitChanged(
                    fitIDs=(self.fitID,), action="modadd", typeID=self.addedModItemID
                )
            )
        if not events:
            events.append(GE.FitChanged(fitIDs=(self.fitID,)))
        for event in events:
            wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), event)
        return success
    def Undo(self):
        """Roll back everything Do() submitted and notify the GUI."""
        sFit = Fit.getInstance()
        fit = sFit.getFit(self.fitID)
        restoreRemovedDummies(fit, self.savedRemovedDummies)
        success = self.internalHistory.undoAll()
        eos.db.flush()
        sFit.recalc(self.fitID)
        sFit.fill(self.fitID)
        eos.db.commit()
        # Mirror of Do(): the added module is now removed and vice versa.
        events = []
        if self.addedModItemID is not None:
            events.append(
                GE.FitChanged(
                    fitIDs=(self.fitID,), action="moddel", typeID=self.addedModItemID
                )
            )
        if self.removedModItemID is not None:
            events.append(
                GE.FitChanged(
                    fitIDs=(self.fitID,), action="modadd", typeID=self.removedModItemID
                )
            )
        if not events:
            events.append(GE.FitChanged(fitIDs=(self.fitID,)))
        for event in events:
            wx.PostEvent(gui.mainFrame.MainFrame.getInstance(), event)
        return success
|
dev | run_vulture | #!/usr/bin/env python
# SPDX-FileCopyrightText: Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Run vulture on the source files and filter out false-positives."""
import argparse
import inspect
import os
import re
import sys
import tempfile
import qutebrowser.app # pylint: disable=unused-import
import vulture
# pylint: enable=unused-import
from qutebrowser.browser import qutescheme
# To run the decorators from there
# pylint: disable=unused-import
from qutebrowser.browser.webkit.network import webkitqutescheme
from qutebrowser.config import configtypes
from qutebrowser.extensions import loader
from qutebrowser.misc import nativeeventfilter, objects, sql
from qutebrowser.utils import qtutils, utils, version
def whitelist_generator():  # noqa: C901
    """Generator which yields lines to add to a vulture whitelist.

    Each yielded line is a dotted name that vulture would otherwise
    report as unused; the caller writes them into a temporary whitelist
    file that is scanned alongside the real sources.
    """
    loader.load_components(skip_hooks=True)
    # qutebrowser commands
    for cmd in objects.commands.values():
        yield utils.qualname(cmd.handler)
    # PyQt properties
    yield "qutebrowser.mainwindow.statusbar.bar.StatusBar.color_flags"
    yield "qutebrowser.mainwindow.statusbar.url.UrlText.urltype"
    # Not used yet, but soon (or when debugging)
    yield "qutebrowser.utils.debug.log_events"
    yield "qutebrowser.utils.debug.log_signals"
    yield "qutebrowser.utils.debug.qflags_key"
    yield "qutebrowser.utils.qtutils.QtOSError.qt_errno"
    yield "scripts.utils.bg_colors"
    yield "qutebrowser.misc.sql.SqliteErrorCode.CONSTRAINT"
    yield "qutebrowser.misc.throttle.Throttle.set_delay"
    yield "qutebrowser.misc.guiprocess.GUIProcess.stderr"
    # Qt attributes
    yield "PyQt5.QtWebKit.QWebPage.ErrorPageExtensionReturn().baseUrl"
    yield "PyQt5.QtWebKit.QWebPage.ErrorPageExtensionReturn().content"
    yield "PyQt5.QtWebKit.QWebPage.ErrorPageExtensionReturn().encoding"
    yield "PyQt5.QtWebKit.QWebPage.ErrorPageExtensionReturn().fileNames"
    yield "PyQt5.QtWidgets.QStyleOptionViewItem.backgroundColor"
    # qute://... handlers
    for name in qutescheme._HANDLERS:  # pylint: disable=protected-access
        name = name.replace("-", "_")
        yield "qutebrowser.browser.qutescheme.qute_" + name
    # Other false-positives
    yield "qutebrowser.completion.models.listcategory.ListCategory().lessThan"
    yield "qutebrowser.utils.jinja.Loader.get_source"
    yield "qutebrowser.utils.log.QtWarningFilter.filter"
    yield "qutebrowser.browser.pdfjs.is_available"
    yield "qutebrowser.utils.usertypes.ExitStatus.reserved"
    yield "QEvent.posted"
    yield "log_stack"  # from message.py
    yield "propagate"  # logging.getLogger('...).propagate = False
    # vulture doesn't notice the hasattr() and thus thinks netrc_used is unused
    # in NetworkManager.on_authentication_required
    yield "PyQt5.QtNetwork.QNetworkReply.netrc_used"
    yield "qutebrowser.browser.downloads.last_used_directory"
    yield "PaintContext.clip"  # from completiondelegate.py
    yield "logging.LogRecord.log_color"  # from logging.py
    yield "scripts.utils.use_color"  # from asciidoc2html.py
    for attr in ["pyeval_output", "log_clipboard", "fake_clipboard"]:
        yield "qutebrowser.misc.utilcmds." + attr
    for attr in ["fileno", "truncate", "closed", "readable"]:
        yield "qutebrowser.utils.qtutils.PyQIODevice." + attr
    for attr in ["msgs", "priority", "visit_attribute"]:
        yield "scripts.dev.pylint_checkers.config." + attr
    for name, _member in inspect.getmembers(configtypes, inspect.isclass):
        yield "qutebrowser.config.configtypes." + name
    yield "qutebrowser.config.configexc.ConfigErrorDesc.traceback"
    yield "qutebrowser.config.configfiles.ConfigAPI.load_autoconfig"
    yield "types.ModuleType.c"  # configfiles:read_config_py
    for name in ["configdir", "datadir"]:
        yield "qutebrowser.config.configfiles.ConfigAPI." + name
    yield "include_aliases"
    for attr in [
        "_get_default_metavar_for_optional",
        "_get_default_metavar_for_positional",
        "_metavar_formatter",
    ]:
        yield "scripts.dev.src2asciidoc.UsageFormatter." + attr
    yield "scripts.dev.build_release.pefile.PE.OPTIONAL_HEADER.CheckSum"
    for dist in version.Distribution:
        yield "qutebrowser.utils.version.Distribution.{}".format(dist.name)
    for opcode in nativeeventfilter.XcbInputOpcodes:
        yield f"qutebrowser.misc.nativeeventfilter.XcbInputOpcodes.{opcode.name}"
    # attrs
    yield "qutebrowser.browser.webkit.network.networkmanager.ProxyId.hostname"
    yield "qutebrowser.command.command.ArgInfo._validate_exclusive"
    yield "scripts.get_coredumpctl_traces.Line.uid"
    yield "scripts.get_coredumpctl_traces.Line.gid"
    yield "scripts.importer.import_moz_places.places.row_factory"
    # component hooks
    yield "qutebrowser.components.hostblock.on_lists_changed"
    yield "qutebrowser.components.braveadblock.on_lists_changed"
    yield "qutebrowser.components.hostblock.on_method_changed"
    yield "qutebrowser.components.braveadblock.on_method_changed"
    # used in type comments
    yield "pending_download_type"
    yield "world_id_type"
    yield "ParserDictType"
    yield "qutebrowser.config.configutils.Values._VmapKeyType"
    # used in tests
    yield "qutebrowser.qt.machinery.SelectionReason.fake"
    # ELF
    yield "qutebrowser.misc.elf.Endianness.big"
    for name in ["phoff", "ehsize", "phentsize", "phnum"]:
        yield f"qutebrowser.misc.elf.Header.{name}"
    for name in ["addr", "addralign", "entsize"]:
        yield f"qutebrowser.misc.elf.SectionHeader.{name}"
    # For completeness
    for name in list(qtutils.LibraryPath):
        yield f"qutebrowser.utils.qtutils.LibraryPath.{name}"
    for name in list(sql.SqliteErrorCode):
        yield f"qutebrowser.misc.sql.SqliteErrorCode.{name}"
# camelCase names (qutebrowser uses them for Qt overrides) are expected
# to look unused to vulture.
_CAMELCASE_RE = re.compile(r"[a-z]+[A-Z][a-zA-Z]+")


def filter_func(item):
    """Check if a missing function should be filtered or not.

    Return:
        True if the missing function should be filtered/ignored, False
        otherwise.
    """
    return _CAMELCASE_RE.fullmatch(item.name) is not None
def report(items):
    """Generate a report based on the given vulture.Item's.

    Based on vulture.Vulture.report, but we can't use that as we can't set the
    properties which get used for the items.
    """
    def sort_key(item):
        # Case-insensitive by file, then by first line number.
        return (str(item.filename).lower(), item.first_lineno)

    return [item.get_report() for item in sorted(items, key=sort_key)]
def run(files):
    """Run vulture over the given files."""
    # vulture only accepts paths, so dump the whitelist into a temp file
    # that is scanned together with the real sources.
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as whitelist_file:
        whitelist_file.write("".join(line + "\n" for line in whitelist_generator()))
        whitelist_file.close()
        vult = vulture.Vulture(verbose=False)
        vult.scavenge(
            files + [whitelist_file.name],
            exclude=["qutebrowser/qt/_core_pyqtproperty.py"],
        )
    os.remove(whitelist_file.name)
    # Per-category predicates: True means "filter this finding out".
    filters = {
        "unused_funcs": filter_func,
        "unused_props": lambda item: False,
        "unused_vars": lambda item: False,
        "unused_attrs": lambda item: False,
    }
    items = [
        item
        for attr, is_filtered in filters.items()
        for item in getattr(vult, attr)
        if not is_filtered(item)
    ]
    return report(items)
def main():
    """Parse arguments, run vulture, and exit non-zero on findings."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "files", nargs="*", default=["qutebrowser", "scripts", "setup.py"]
    )
    args = parser.parse_args()
    findings = run(args.files)
    for line in findings:
        print(line)
    # Exit status 1 when there is any output, 0 otherwise.
    sys.exit(bool(findings))
# Allow running this checker directly as a script.
if __name__ == "__main__":
    main()
|
versions | 500117641608_add_bans | """Add bans table
Revision ID: 500117641608
Revises: b79d2fcafd88
Create Date: 2017-08-17 01:44:39.205126
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "500117641608"
# This migration applies on top of b79d2fcafd88.
down_revision = "b79d2fcafd88"
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``bans`` table and its unique IP indexes."""
    op.create_table(
        "bans",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("created_time", sa.DateTime(), nullable=True),
        sa.Column("admin_id", sa.Integer(), nullable=False),
        sa.Column("user_id", sa.Integer(), nullable=True),
        # sa.Binary was a deprecated alias removed in SQLAlchemy 1.4;
        # LargeBinary renders the exact same DDL.
        sa.Column("user_ip", sa.LargeBinary(length=16), nullable=True),
        sa.Column("reason", sa.String(length=2048), nullable=False),
        sa.ForeignKeyConstraint(
            ["admin_id"],
            ["users.id"],
        ),
        sa.ForeignKeyConstraint(
            ["user_id"],
            ["users.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    # Unique prefix indexes sized for IPv6 (16 bytes) and IPv4 (4 bytes).
    op.create_index("user_ip_16", "bans", ["user_ip"], unique=True, mysql_length=16)
    op.create_index("user_ip_4", "bans", ["user_ip"], unique=True, mysql_length=4)
def downgrade():
    """Drop the ``bans`` table, removing its indexes first."""
    op.drop_index("user_ip_4", table_name="bans")
    op.drop_index("user_ip_16", table_name="bans")
    op.drop_table("bans")
|
email | alert_rendering | from apps.alerts.incident_appearance.renderers.constants import DEFAULT_BACKUP_TITLE
from apps.alerts.incident_appearance.templaters.alert_templater import AlertTemplater
from common.utils import convert_md_to_html, str_or_backup
from django.template.loader import render_to_string
from emoji.core import emojize
class AlertEmailTemplater(AlertTemplater):
    """Templater that renders an alert's title and message for email."""

    RENDER_FOR_EMAIL = "email"

    def _render_for(self):
        return self.RENDER_FOR_EMAIL

    def _postformat(self, templated_alert):
        # Re-render Slack-flavoured markup in both fields as plain text.
        templated_alert.title = self._slack_format_for_email(templated_alert.title)
        templated_alert.message = self._slack_format_for_email(
            templated_alert.message
        )
        return templated_alert

    def _slack_format_for_email(self, data):
        formatter = self.slack_formatter
        formatter.hyperlink_mention_format = "{title} - {url}"
        return formatter.format(data)
def build_subject_and_message(alert_group, emails_left):
    """Build the subject line and HTML body for an alert-group email.

    Args:
        alert_group: alert group being notified about; its first alert is
            rendered through AlertEmailTemplater.
        emails_left: number of emails the organization may still send; a
            limit warning is included when it is 20 or below.

    Returns:
        (subject, content) tuple of strings.
    """
    alert = alert_group.alerts.first()
    templated_alert = AlertEmailTemplater(alert).render()
    title_fallback = (
        f"#{alert_group.inside_organization_number} "
        f"{DEFAULT_BACKUP_TITLE} via {alert_group.channel.verbal_name}"
    )
    # Default templates are the same as web templates, which are in Markdown
    # format, so convert a non-empty message to HTML. (The previous nested
    # `if templated_alert.message` conditional was always true here.)
    message = templated_alert.message
    if message:
        message = convert_md_to_html(message)
    content = render_to_string(
        "email_notification.html",
        {
            "url": alert_group.slack_permalink or alert_group.web_link,
            "title": str_or_backup(templated_alert.title, title_fallback),
            "message": str_or_backup(
                message, ""
            ),  # not render message at all if smth goes wrong
            "organization": alert_group.channel.organization.org_title,
            "integration": emojize(alert_group.channel.short_name, language="alias"),
            "limit_notification": emails_left <= 20,
            "emails_left": emails_left,
        },
    )
    title = str_or_backup(templated_alert.title, title_fallback)
    # Subjects must be single-line.
    subject = f"[{title}] You are invited to check an alert group".replace("\n", "")
    return subject, content
|
core | region_similarity_calculator | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Region Similarity Calculators for BoxLists.
Region Similarity Calculators compare a pairwise measure of similarity
between the boxes in two BoxLists.
"""
from abc import ABCMeta, abstractmethod
import tensorflow as tf
from app.object_detection.core import box_list_ops
class RegionSimilarityCalculator(metaclass=ABCMeta):
    """Abstract base class for region similarity calculators.

    Subclasses implement ``_compare`` to produce a pairwise similarity
    matrix between the boxes of two BoxLists.
    """

    # NOTE: the original code set ``__metaclass__ = ABCMeta`` — the Python 2
    # spelling, which is a silent no-op under Python 3, so @abstractmethod was
    # never enforced. Declaring the metaclass in the class statement restores
    # the intended abstract behavior (instantiating without _compare raises
    # TypeError).

    def compare(self, boxlist1, boxlist2, scope=None):
        """Computes matrix of pairwise similarity between BoxLists.

        This op (to be overriden) computes a measure of pairwise similarity between
        the boxes in the given BoxLists. Higher values indicate more similarity.

        Note that this method simply measures similarity and does not explicitly
        perform a matching.

        Args:
          boxlist1: BoxList holding N boxes.
          boxlist2: BoxList holding M boxes.
          scope: Op scope name. Defaults to 'Compare' if None.

        Returns:
          a (float32) tensor of shape [N, M] with pairwise similarity score.
        """
        with tf.name_scope(scope, "Compare", [boxlist1, boxlist2]) as scope:
            return self._compare(boxlist1, boxlist2)

    @abstractmethod
    def _compare(self, boxlist1, boxlist2):
        """Subclass hook: return the [N, M] pairwise similarity matrix."""
        pass
class IouSimilarity(RegionSimilarityCalculator):
    """Similarity calculator scoring box pairs by Intersection over Union.

    Pairwise similarity between two BoxLists is the IOU of each box pair.
    """

    def _compare(self, boxlist1, boxlist2):
        """Return the [N, M] pairwise IOU matrix for the two BoxLists."""
        pairwise_iou = box_list_ops.iou(boxlist1, boxlist2)
        return pairwise_iou
class NegSqDistSimilarity(RegionSimilarityCalculator):
    """Similarity calculator based on negated squared distance.

    Pairwise similarity is the squared distance between box pairs,
    multiplied by -1 so that closer boxes score higher.
    """

    def _compare(self, boxlist1, boxlist2):
        """Return the [N, M] matrix of negated pairwise squared distances."""
        squared_distances = box_list_ops.sq_dist(boxlist1, boxlist2)
        return -1 * squared_distances
class IoaSimilarity(RegionSimilarityCalculator):
    """Similarity calculator scoring box pairs by Intersection over Area.

    Pairwise similarity is each pairwise intersection divided by the area
    of the corresponding box in the second BoxList.
    """

    def _compare(self, boxlist1, boxlist2):
        """Return the [N, M] pairwise IOA matrix for the two BoxLists."""
        pairwise_ioa = box_list_ops.ioa(boxlist1, boxlist2)
        return pairwise_ioa
|
readers | read_epub | # This file is a part of Lector, a Qt based ebook reader
# Copyright (C) 2017-2019 BasioMeusPuga
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# TODO
# See if inserting chapters not in the toc.ncx can be avoided
# Account for stylesheets... eventually
import collections
import logging
import os
import zipfile
from urllib.parse import unquote
import xmltodict
from bs4 import BeautifulSoup
from PyQt5 import QtGui
# Module-level logger, named after this module so it can be filtered.
logger = logging.getLogger(__name__)
class EPUB:
    """Reader for EPUB archives.

    Opens the zip archive once on construction, then exposes methods to
    parse the package references (.opf), the table of contents, the
    chapter content and the book metadata (including the cover image).
    """

    def __init__(self, book_filename, temp_dir):
        # Path of the .epub archive and the directory where extracted
        # artifacts (the cover image) are written by generate_content().
        self.book_filename = book_filename
        self.temp_dir = temp_dir
        self.zip_file = None
        self.file_list = None
        self.opf_dict = None
        self.cover_image_name = None
        # chapter file -> {anchor_id: html fragment, "top_level": remainder}
        self.split_chapters = {}
        self.metadata = None
        # List of [level, title, src]; src later replaced by chapter HTML.
        self.content = []
        self.generate_references()

    def generate_references(self):
        """Open the archive and parse the package (.opf) document."""
        self.zip_file = zipfile.ZipFile(self.book_filename, mode="r", allowZip64=True)
        self.file_list = self.zip_file.namelist()
        # Book structure relies on parsing the .opf file
        # in the book. Now that might be the usual content.opf
        # or package.opf or it might be named after your favorite
        # eldritch abomination. The point is we have to check
        # the container.xml
        container = self.find_file("container.xml")
        if container:
            container_xml = self.zip_file.read(container)
            container_dict = xmltodict.parse(container_xml)
            packagefile = container_dict["container"]["rootfiles"]["rootfile"][
                "@full-path"
            ]
        else:
            # NOTE(review): if none of these names matches either,
            # `packagefile` stays False and the read below raises —
            # confirm whether such malformed books need graceful handling.
            presumptive_names = ("content.opf", "package.opf", "volume.opf")
            for i in presumptive_names:
                packagefile = self.find_file(i)
                if packagefile:
                    logger.info("Using presumptive package file: " + self.book_filename)
                    break
        packagefile_data = self.zip_file.read(packagefile)
        self.opf_dict = xmltodict.parse(packagefile_data)

    def find_file(self, filename):
        """Return the archive path matching *filename*, or False if absent."""
        # Get rid of special characters
        filename = unquote(filename)
        # First, look for the file in the root of the book
        if filename in self.file_list:
            return filename
        # Then search for it elsewhere
        else:
            file_basename = os.path.basename(filename)
            for i in self.file_list:
                if os.path.basename(i) == file_basename:
                    return i
        # If the file isn't found
        logger.warning(filename + " not found in " + self.book_filename)
        return False

    def generate_toc(self):
        """Populate self.content with [level, title, src] ToC entries."""

        def find_alternative_toc():
            # Scan the manifest for anything that looks like a ToC item.
            toc_filename = None
            toc_filename_alternative = None
            manifest = self.opf_dict["package"]["manifest"]["item"]
            for i in manifest:
                # Behold the burning hoops we're jumping through
                if i["@id"] == "ncx":
                    toc_filename = i["@href"]
                if ("ncx" in i["@id"]) or ("toc" in i["@id"]):
                    toc_filename_alternative = i["@href"]
                if toc_filename and toc_filename_alternative:
                    break
            if not toc_filename:
                if not toc_filename_alternative:
                    # NOTE(review): returns None here; find_file(None) below
                    # would raise inside unquote() — confirm the intended
                    # behavior for books without any ToC at all.
                    logger.warning("No ToC found for: " + self.book_filename)
                else:
                    toc_filename = toc_filename_alternative
                    logger.info("Using alternate ToC for: " + self.book_filename)
            return toc_filename

        # Find the toc.ncx file from the manifest
        # EPUBs will name literally anything, anything so try
        # a less stringent approach if the first one doesn't work
        # The idea is to prioritize 'toc.ncx' since this should work
        # for the vast majority of books
        toc_filename = "toc.ncx"
        does_toc_exist = self.find_file(toc_filename)
        if not does_toc_exist:
            toc_filename = find_alternative_toc()
        tocfile = self.find_file(toc_filename)
        tocfile_data = self.zip_file.read(tocfile)
        toc_dict = xmltodict.parse(tocfile_data)

        def recursor(level, nav_node):
            # Flatten nested navPoint structures into self.content.
            if isinstance(nav_node, list):
                these_contents = [
                    [level + 1, i["navLabel"]["text"], i["content"]["@src"]]
                    for i in nav_node
                ]
                self.content.extend(these_contents)
                return
            if "navPoint" in nav_node.keys():
                recursor(level, nav_node["navPoint"])
            else:
                self.content.append(
                    [
                        level + 1,
                        nav_node["navLabel"]["text"],
                        nav_node["content"]["@src"],
                    ]
                )

        navpoints = toc_dict["ncx"]["navMap"]["navPoint"]
        for top_level_nav in navpoints:
            # Just one chapter
            if isinstance(top_level_nav, str):
                self.content.append(
                    [1, navpoints["navLabel"]["text"], navpoints["content"]["@src"]]
                )
                break
            # Multiple chapters
            self.content.append(
                [1, top_level_nav["navLabel"]["text"], top_level_nav["content"]["@src"]]
            )
            if "navPoint" in top_level_nav.keys():
                recursor(1, top_level_nav)

    def get_chapter_content(self, chapter_file):
        """Return the raw HTML of *chapter_file*, None if visually blank,
        or a parse-error marker string when the file cannot be located."""
        this_file = self.find_file(chapter_file)
        if this_file:
            chapter_content = self.zip_file.read(this_file).decode()
            # Generate a None return for a blank chapter
            # These will be removed from the contents later
            contentDocument = QtGui.QTextDocument(None)
            contentDocument.setHtml(chapter_content)
            contentText = contentDocument.toPlainText().replace("\n", "")
            if contentText == "":
                chapter_content = None
            return chapter_content
        else:
            return "Possible parse error: " + chapter_file

    def parse_split_chapters(self, chapters_with_split_content):
        """Cut files shared by several ToC entries into per-anchor fragments,
        stored in self.split_chapters."""
        # For split chapters, get the whole chapter first, then split
        # between ids using their anchors, then "heal" the resultant text
        # by creating a BeautifulSoup object. Write its str to the content
        for i in chapters_with_split_content.items():
            chapter_file = i[0]
            self.split_chapters[chapter_file] = {}
            chapter_content = self.get_chapter_content(chapter_file)
            soup = BeautifulSoup(chapter_content, "lxml")
            split_anchors = i[1]
            # Iterate anchors back-to-front so each split peels the tail
            # off the remaining markup.
            for this_anchor in reversed(split_anchors):
                this_tag = soup.find(attrs={"id": lambda x: x == this_anchor})
                markup_split = str(soup).split(str(this_tag))
                soup = BeautifulSoup(markup_split[0], "lxml")
                # If the tag is None, it probably means the content is overlapping
                # Skipping the insert is the way forward
                if this_tag:
                    this_markup = BeautifulSoup(
                        str(this_tag).strip() + markup_split[1], "lxml"
                    )
                    self.split_chapters[chapter_file][this_anchor] = str(this_markup)
            # Remaining markup is assigned here
            self.split_chapters[chapter_file]["top_level"] = str(soup)

    def generate_content(self):
        """Resolve every ToC entry to its chapter HTML and insert the cover."""
        # Find all the chapters mentioned in the opf spine
        # These are simply ids that correspond to the actual item
        # as mentioned in the manifest - which is a comprehensive
        # list of files
        try:
            # Multiple chapters
            chapters_in_spine = [
                i["@idref"] for i in self.opf_dict["package"]["spine"]["itemref"]
            ]
        except TypeError:
            # Single chapter - Large xml
            chapters_in_spine = [self.opf_dict["package"]["spine"]["itemref"]["@idref"]]
        # Next, find items and ids from the manifest
        # This might error out in case there's only one item in
        # the manifest. Remember that for later.
        chapters_from_manifest = {
            i["@id"]: i["@href"] for i in self.opf_dict["package"]["manifest"]["item"]
        }
        # Finally, check which items are supposed to be in the spine
        # on the basis of the id and change the toc accordingly
        spine_final = []
        for i in chapters_in_spine:
            try:
                spine_final.append(chapters_from_manifest.pop(i))
            except KeyError:
                pass
        # Insert spine files missing from the ToC right after the previous
        # spine file's position in the ToC.
        toc_chapters = [unquote(i[2].split("#")[0]) for i in self.content]
        for i in spine_final:
            if not i in toc_chapters:
                spine_index = spine_final.index(i)
                if spine_index == 0:  # Or chapter insertion circles back to the end
                    previous_chapter_toc_index = -1
                else:
                    previous_chapter = spine_final[spine_final.index(i) - 1]
                    previous_chapter_toc_index = toc_chapters.index(previous_chapter)
                toc_chapters.insert(previous_chapter_toc_index + 1, i)
                self.content.insert(previous_chapter_toc_index + 1, [1, None, i])
        # Parse split chapters as below
        # They can be picked up during the iteration through the toc
        chapters_with_split_content = {}
        for i in self.content:
            if "#" in i[2]:
                this_split = i[2].split("#")
                chapter = this_split[0]
                anchor = this_split[1]
                try:
                    chapters_with_split_content[chapter].append(anchor)
                except KeyError:
                    chapters_with_split_content[chapter] = []
                    chapters_with_split_content[chapter].append(anchor)
        self.parse_split_chapters(chapters_with_split_content)
        # Now we iterate over the ToC as presented in the toc.ncx
        # and add chapters to the content list
        # In case a split chapter is encountered, get its content
        # from the split_chapters dictionary
        # What could possibly go wrong?
        toc_copy = self.content[:]
        # Put the book into the book
        for count, i in enumerate(toc_copy):
            chapter_file = i[2]
            # Get split content according to its corresponding id attribute
            if "#" in chapter_file:
                this_split = chapter_file.split("#")
                chapter_file_proper = this_split[0]
                this_anchor = this_split[1]
                try:
                    chapter_content = self.split_chapters[chapter_file_proper][
                        this_anchor
                    ]
                except KeyError:
                    chapter_content = "Parse Error"
                    error_string = (
                        f"Error parsing {self.book_filename}: {chapter_file_proper}"
                    )
                    logger.error(error_string)
            # Get content that remained at the end of the pillaging above
            elif chapter_file in self.split_chapters.keys():
                try:
                    chapter_content = self.split_chapters[chapter_file]["top_level"]
                except KeyError:
                    chapter_content = "Parse Error"
                    error_string = f"Error parsing {self.book_filename}: {chapter_file}"
                    logger.error(error_string)
            # Vanilla non split chapters
            else:
                chapter_content = self.get_chapter_content(chapter_file)
            self.content[count][2] = chapter_content
        # Cleanup content by removing null chapters
        unnamed_chapter_title = 1
        content_copy = []
        for i in self.content:
            if i[2]:
                chapter_title = i[1]
                if not chapter_title:
                    chapter_title = unnamed_chapter_title
                content_copy.append((i[0], str(chapter_title), i[2]))
                unnamed_chapter_title += 1
        self.content = content_copy
        # Get cover image and put it in its place
        # I imagine this involves saying nasty things to it
        # There's no point shifting this to the parser
        # The performance increase is negligible
        cover_image = self.generate_book_cover()
        if cover_image:
            cover_path = (
                os.path.join(self.temp_dir, os.path.basename(self.book_filename))
                + " - cover"
            )
            with open(cover_path, "wb") as cover_temp:
                cover_temp.write(cover_image)
            # This is probably stupid, but I can't stand the idea of
            # having to look at two book covers
            cover_replacement_conditions = (
                self.cover_image_name.lower() + ".jpg" in self.content[0][2].lower(),
                self.cover_image_name.lower() + ".png" in self.content[0][2].lower(),
                "cover" in self.content[0][1].lower(),
            )
            if True in cover_replacement_conditions:
                logger.info(
                    f"Replacing cover {cover_replacement_conditions}: {self.book_filename}"
                )
                self.content[0] = (
                    1,
                    "Cover",
                    f'<center><img src="{cover_path}" alt="Cover"></center>',
                )
            else:
                logger.info("Adding cover: " + self.book_filename)
                self.content.insert(
                    0,
                    (
                        1,
                        "Cover",
                        f'<center><img src="{cover_path}" alt="Cover"></center>',
                    ),
                )

    def generate_metadata(self):
        """Populate self.metadata with a Metadata named tuple
        (title, author, year, isbn, tags, cover)."""
        book_metadata = self.opf_dict["package"]["metadata"]

        def flattener(this_object):
            # Dublin Core fields may arrive as a dict, a list, or a plain
            # string — reduce all of them to one string value.
            if isinstance(this_object, collections.OrderedDict):
                return this_object["#text"]
            if isinstance(this_object, list):
                if isinstance(this_object[0], collections.OrderedDict):
                    return this_object[0]["#text"]
                else:
                    return this_object[0]
            if isinstance(this_object, str):
                return this_object

        # There are no exception types specified below
        # This is on purpose and makes me long for the days
        # of simpler, happier things.
        # Book title
        try:
            title = flattener(book_metadata["dc:title"])
        except:
            logger.warning("Title not found: " + self.book_filename)
            title = os.path.splitext(os.path.basename(self.book_filename))[0]
        # Book author
        try:
            author = flattener(book_metadata["dc:creator"])
        except:
            logger.warning("Author not found: " + self.book_filename)
            author = "Unknown"
        # Book year
        try:
            year = int(flattener(book_metadata["dc:date"])[:4])
        except:
            logger.warning("Year not found: " + self.book_filename)
            year = 9999
        # Book isbn
        # Both one and multiple schema
        isbn = None
        try:
            scheme = book_metadata["dc:identifier"]["@opf:scheme"].lower()
            if scheme.lower() == "isbn":
                isbn = book_metadata["dc:identifier"]["#text"]
        except (TypeError, KeyError):
            try:
                for i in book_metadata["dc:identifier"]:
                    if i["@opf:scheme"].lower() == "isbn":
                        isbn = i["#text"]
                        break
            except:
                logger.warning("ISBN not found: " + self.book_filename)
        # Book tags
        try:
            tags = book_metadata["dc:subject"]
            if isinstance(tags, str):
                tags = [tags]
        except:
            tags = []
        # Book cover
        cover = self.generate_book_cover()
        # Named tuple? Named tuple.
        Metadata = collections.namedtuple(
            "Metadata", ["title", "author", "year", "isbn", "tags", "cover"]
        )
        self.metadata = Metadata(title, author, year, isbn, tags, cover)

    def generate_book_cover(self):
        """Return the cover image bytes, or None when nothing suitable exists.

        Also sets self.cover_image_name as a side effect.
        """
        # This is separate because the book cover needs to
        # be found and extracted both during addition / reading
        book_cover = None
        try:
            cover_image = [
                i["@href"]
                for i in self.opf_dict["package"]["manifest"]["item"]
                if i["@media-type"].split("/")[0] == "image" and "cover" in i["@id"]
            ][0]
            book_cover = self.zip_file.read(self.find_file(cover_image))
        except:
            logger.warning("Cover not found in opf: " + self.book_filename)
        # Find book cover the hard way
        if not book_cover:
            # Fallback heuristic: the largest image in the archive.
            biggest_image_size = 0
            cover_image = None
            for j in self.zip_file.filelist:
                if os.path.splitext(j.filename)[1] in [".jpg", ".jpeg", ".png", ".gif"]:
                    if j.file_size > biggest_image_size:
                        cover_image = j.filename
                        biggest_image_size = j.file_size
            if cover_image:
                book_cover = self.zip_file.read(self.find_file(cover_image))
        if not book_cover:
            self.cover_image_name = ""
            logger.warning("Cover not found: " + self.book_filename)
        else:
            self.cover_image_name = os.path.splitext(os.path.basename(cover_image))[0]
        return book_cover
|
plugins | unique_prices_test | __copyright__ = "Copyright (C) 2014-2016 Martin Blais"
__license__ = "GNU GPLv2"
import unittest
from beancount import loader
from beancount.parser import cmptest
from beancount.plugins import implicit_prices, unique_prices
class TestValidateAmbiguousPrices(cmptest.TestCase):
    """Tests for the unique_prices plugin.

    NOTE: the method docstrings below are not documentation — they are the
    ledger input parsed by @loader.load_doc() — so they must stay exactly
    as written.
    """

    @loader.load_doc()
    def test_validate_unique_prices__different(self, entries, errors, options_map):
        """
        2000-01-01 price HOOL 500.00 USD
        2000-01-01 price HOOL 500.01 USD
        """
        # Two different prices for the same date/currency must be flagged.
        self.assertEqual([], errors)
        _, valid_errors = unique_prices.validate_unique_prices(entries, options_map)
        self.assertEqual([unique_prices.UniquePricesError], list(map(type, valid_errors)))
        self.assertRegex(valid_errors[0].message, "Disagreeing price")

    @loader.load_doc()
    def test_validate_unique_prices__same(self, entries, errors, options_map):
        """
        2000-01-01 price HOOL 500.00 USD
        2000-01-01 price HOOL 500.00 USD
        """
        # Identical duplicate prices are allowed — no error expected.
        self.assertEqual([], errors)
        _, valid_errors = unique_prices.validate_unique_prices(entries, options_map)
        self.assertEqual([], valid_errors)

    @loader.load_doc()
    def test_validate_unique_prices__from_costs(self, entries, errors, options_map):
        """
        2014-01-01 open Income:Misc
        2014-01-01 open Assets:Account1
        2014-01-01 open Liabilities:Account1

        2014-01-15 *
          Income:Misc           -201 USD
          Assets:Account1          1 HOUSE {100 USD}
          Liabilities:Account1     1 HOUSE {101 USD}
        """
        # Prices synthesized from costs by implicit_prices must also be
        # checked for disagreement.
        self.assertEqual([], errors)
        entries, errors = implicit_prices.add_implicit_prices(entries, options_map)
        self.assertEqual([], errors)
        new_entries, valid_errors = unique_prices.validate_unique_prices(
            entries, options_map
        )
        self.assertGreater(len(new_entries), 0)
        self.assertEqual([unique_prices.UniquePricesError], list(map(type, valid_errors)))
        self.assertRegex(valid_errors[0].message, "Disagreeing price ent")
# Allow running this test module directly with `python <module>`.
if __name__ == "__main__":
    unittest.main()
|
plugins | menu_qrcode | # -*- coding: utf-8 -*-
"""
A menu plugin showing QR-Code for bitmessage address in modal dialog.
"""
import urllib
import qrcode
from pybitmessage.tr import _translate
from PyQt4 import QtCore, QtGui
# http://stackoverflow.com/questions/20452486
class Image(qrcode.image.base.BaseImage):  # pylint: disable=abstract-method
    """qrcode image factory that renders into a QImage via QPainter."""

    def __init__(self, border, width, box_size):
        # pylint: disable=super-init-not-called
        # border/width/box_size are the attribute names the qrcode
        # library's image factory protocol expects — keep them as-is.
        self.border = border
        self.width = width
        self.box_size = box_size
        side = (width + border * 2) * box_size
        self._image = QtGui.QImage(side, side, QtGui.QImage.Format_RGB16)
        self._image.fill(QtCore.Qt.white)

    def pixmap(self):
        """Return the rendered code as a QPixmap."""
        return QtGui.QPixmap.fromImage(self._image)

    def drawrect(self, row, col):
        """Paint the QR module at (row, col) as a black square."""
        left = (col + self.border) * self.box_size
        top = (row + self.border) * self.box_size
        painter = QtGui.QPainter(self._image)
        painter.fillRect(left, top, self.box_size, self.box_size, QtCore.Qt.black)
class QRCodeDialog(QtGui.QDialog):
    """Modal dialog showing a QR-code with its source text underneath."""

    def __init__(self, parent):
        super(QRCodeDialog, self).__init__(parent)
        self.image = QtGui.QLabel(self)
        self.label = QtGui.QLabel(self)
        # Bold caption under the code.
        bold_font = QtGui.QFont()
        bold_font.setBold(True)
        bold_font.setWeight(75)
        self.label.setFont(bold_font)
        self.label.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
        button_box = QtGui.QDialogButtonBox(self)
        button_box.setOrientation(QtCore.Qt.Horizontal)
        button_box.setStandardButtons(QtGui.QDialogButtonBox.Ok)
        button_box.accepted.connect(self.accept)
        dialog_layout = QtGui.QVBoxLayout(self)
        for child in (self.image, self.label, button_box):
            dialog_layout.addWidget(child)
        self.retranslateUi()

    def retranslateUi(self):
        """A conventional Qt Designer method for dynamic l10n"""
        self.setWindowTitle(_translate("QRCodeDialog", "QR-code"))

    def render(self, text):
        """Draw the QR-code for *text* and display *text* below it."""
        code_pixmap = qrcode.make(text, image_factory=Image).pixmap()
        self.image.setPixmap(code_pixmap)
        self.label.setText(text)
        self.label.setToolTip(text)
        self.label.setFixedWidth(code_pixmap.width())
        self.setFixedSize(QtGui.QWidget.sizeHint(self))
def connect_plugin(form):
    """Plugin entry point"""

    def on_action_ShowQR():
        """A slot for popup menu action"""
        # Lazily create one dialog per main window and reuse it.
        try:
            dialog = form.qrcode_dialog
        except AttributeError:
            form.qrcode_dialog = dialog = QRCodeDialog(form)
        account = form.getContactSelected()
        # Duck-typed label lookup: prefer the private accessor, fall back
        # to the public one, and bail out if neither exists.
        try:
            label = account._getLabel()  # pylint: disable=protected-access
        except AttributeError:
            try:
                label = account.getLabel()
            except AttributeError:
                return
        uri = "bitmessage:%s" % account.address
        if label != account.address:
            uri += "?" + urllib.urlencode({"label": label.encode("utf-8")})
        dialog.render(uri)
        dialog.exec_()

    return on_action_ShowQR, _translate("MainWindow", "Show QR-code")
|
femobjects | material_reinforced | # ***************************************************************************
# * Copyright (c) 2019 Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM reinforced material"
__author__ = "Bernd Hahnebach"
__url__ = "https://www.freecad.org"
## @package material_reinforced
# \ingroup FEM
# \brief reinforced object
from . import base_fempythonobject
class MaterialReinforced(base_fempythonobject.BaseFemPythonObject):
    """
    The MaterialReinforced object

    FEM document object describing a composite material: a matrix material
    plus a reinforcement material (e.g. reinforced concrete).
    """

    Type = "Fem::MaterialReinforced"

    def __init__(self, obj):
        super(MaterialReinforced, self).__init__(obj)
        # Geometry the material is assigned to.
        obj.addProperty(
            "App::PropertyLinkSubList",
            "References",
            "Material",
            "List of material shapes",
        )
        # Key/value map holding the reinforcement material card.
        obj.addProperty(
            "App::PropertyMap",
            "Reinforcement",
            "Composites",
            "Reinforcement material properties",
        )
        obj.addProperty(
            "App::PropertyEnumeration",
            "Category",
            "Material",
            "Matrix material properties",
        )
        # FreeCAD enumeration idiom: assigning a list defines the set of
        # allowed values; the subsequent string assignment selects the
        # current value. Both lines are intentional.
        obj.Category = ["Solid"]
        obj.Category = "Solid"
|
ui | ui_cdlookup | # -*- coding: utf-8 -*-
# Automatically generated - don't edit.
# Use `python setup.py build_ui` to update it.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """Generated UI class for the CD lookup dialog.

    Auto-generated — regenerate with `python setup.py build_ui` rather
    than editing by hand.
    """

    def setupUi(self, Dialog):
        """Create and lay out all widgets of *Dialog*."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(720, 320)
        self.vboxlayout = QtWidgets.QVBoxLayout(Dialog)
        self.vboxlayout.setContentsMargins(9, 9, 9, 9)
        self.vboxlayout.setSpacing(6)
        self.vboxlayout.setObjectName("vboxlayout")
        # Stacked view: page 0 shows matching releases, page 1 the
        # "no results" message with a submit button.
        self.results_view = QtWidgets.QStackedWidget(Dialog)
        self.results_view.setObjectName("results_view")
        self.results_page = QtWidgets.QWidget()
        self.results_page.setObjectName("results_page")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.results_page)
        self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.label = QtWidgets.QLabel(self.results_page)
        self.label.setObjectName("label")
        self.verticalLayout_4.addWidget(self.label)
        self.release_list = QtWidgets.QTreeWidget(self.results_page)
        self.release_list.setObjectName("release_list")
        self.release_list.headerItem().setText(0, "1")
        self.verticalLayout_4.addWidget(self.release_list)
        self.results_view.addWidget(self.results_page)
        self.no_results_page = QtWidgets.QWidget()
        self.no_results_page.setObjectName("no_results_page")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.no_results_page)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        spacerItem = QtWidgets.QSpacerItem(
            20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
        )
        self.verticalLayout_3.addItem(spacerItem)
        self.no_results_label = QtWidgets.QLabel(self.no_results_page)
        self.no_results_label.setStyleSheet("margin-bottom: 9px;")
        self.no_results_label.setObjectName("no_results_label")
        self.verticalLayout_3.addWidget(
            self.no_results_label, 0, QtCore.Qt.AlignHCenter
        )
        self.submit_button = QtWidgets.QToolButton(self.no_results_page)
        self.submit_button.setStyleSheet("")
        icon = QtGui.QIcon.fromTheme("media-optical")
        self.submit_button.setIcon(icon)
        self.submit_button.setIconSize(QtCore.QSize(128, 128))
        self.submit_button.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.submit_button.setObjectName("submit_button")
        self.verticalLayout_3.addWidget(self.submit_button, 0, QtCore.Qt.AlignHCenter)
        spacerItem1 = QtWidgets.QSpacerItem(
            20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
        )
        self.verticalLayout_3.addItem(spacerItem1)
        self.results_view.addWidget(self.no_results_page)
        self.vboxlayout.addWidget(self.results_view)
        self.hboxlayout = QtWidgets.QHBoxLayout()
        self.hboxlayout.setContentsMargins(0, 0, 0, 0)
        self.hboxlayout.setSpacing(6)
        self.hboxlayout.setObjectName("hboxlayout")
        spacerItem2 = QtWidgets.QSpacerItem(
            111, 31, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
        )
        self.hboxlayout.addItem(spacerItem2)
        self.ok_button = QtWidgets.QPushButton(Dialog)
        self.ok_button.setEnabled(False)
        self.ok_button.setObjectName("ok_button")
        self.hboxlayout.addWidget(self.ok_button)
        self.lookup_button = QtWidgets.QPushButton(Dialog)
        self.lookup_button.setObjectName("lookup_button")
        self.hboxlayout.addWidget(self.lookup_button)
        self.cancel_button = QtWidgets.QPushButton(Dialog)
        self.cancel_button.setObjectName("cancel_button")
        self.hboxlayout.addWidget(self.cancel_button)
        self.retranslateUi(Dialog)
        self.results_view.setCurrentIndex(0)
        self.ok_button.clicked.connect(Dialog.accept)  # type: ignore
        self.cancel_button.clicked.connect(Dialog.reject)  # type: ignore
        QtCore.QMetaObject.connectSlotsByName(Dialog)
        Dialog.setTabOrder(self.release_list, self.submit_button)
        Dialog.setTabOrder(self.submit_button, self.ok_button)
        Dialog.setTabOrder(self.ok_button, self.lookup_button)
        Dialog.setTabOrder(self.lookup_button, self.cancel_button)

    def retranslateUi(self, Dialog):
        """Set all translatable strings on the widgets."""
        # NOTE(review): strings use the gettext builtin `_`, so the
        # `_translate` alias below appears unused — presumably the build_ui
        # post-processing step rewrites this; confirm before touching.
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_("CD Lookup"))
        self.label.setText(_("The following releases on MusicBrainz match the CD:"))
        self.no_results_label.setText(_("No matching releases found for this disc."))
        self.submit_button.setText(_("Submit disc ID"))
        self.ok_button.setText(_("&Load into Picard"))
        self.lookup_button.setText(_("&Submit disc ID"))
        self.cancel_button.setText(_("&Cancel"))
|
Http | ClusterPrinterMaterialStation | # Copyright (c) 2019 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Any, Dict, List, Optional, Union

from ..BaseModel import BaseModel
from .ClusterPrinterMaterialStationSlot import ClusterPrinterMaterialStationSlot
class ClusterPrinterMaterialStation(BaseModel):
    """Class representing the data of a Material Station in the cluster."""

    def __init__(
        self,
        status: str,
        supported: bool = False,
        # PEP 484 forbids implicit Optional: a parameter defaulting to None
        # must be annotated Optional[...] explicitly.
        material_slots: Optional[
            List[Union[ClusterPrinterMaterialStationSlot, Dict[str, Any]]]
        ] = None,
        **kwargs,
    ) -> None:
        """Creates a new Material Station status.

        :param status: The status of the material station.
        :param supported: Whether the material station is supported on this machine or not.
        :param material_slots: The active slots configurations of this material station.
        """
        self.status = status
        self.supported = supported
        # Convert raw dicts into slot models; default to an empty list when
        # the printer reports no material station slots.
        self.material_slots = (
            self.parseModels(ClusterPrinterMaterialStationSlot, material_slots)
            if material_slots
            else []
        )  # type: List[ClusterPrinterMaterialStationSlot]
        super().__init__(**kwargs)
|
migrations | 0099_plugin_attachment | # Generated by Django 3.0.7 on 2020-11-04 14:54
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the PluginAttachment model (files uploaded for plugin configs).

    Auto-generated by Django; do not restyle historical migrations.
    """

    dependencies = [
        ("posthog", "0098_events_property_usage"),
    ]

    operations = [
        migrations.CreateModel(
            name="PluginAttachment",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("key", models.CharField(max_length=200)),
                ("content_type", models.CharField(max_length=200)),
                ("file_name", models.CharField(max_length=200)),
                ("file_size", models.IntegerField()),
                # File bytes stored directly in the database.
                ("contents", models.BinaryField()),
                (
                    "plugin_config",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="posthog.PluginConfig",
                    ),
                ),
                (
                    "team",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="posthog.Team",
                    ),
                ),
            ],
        ),
    ]
|
gdist | appdata | # Copyright 2013-2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
AppData Specification: https://www.freedesktop.org/software/appstream/docs/
"""
import os
from distutils.dep_util import newer
from .gettextutil import merge_file
from .util import Command
class build_appdata(Command):
    """Build .appdata.xml files

    Move .appdata.xml files to the appropriate location in the build tree.
    If there is a .appdata.xml.in file, process it with gettext.
    """

    description = "build .appdata.xml files"
    user_options = []

    def initialize_options(self):
        self.build_base = None
        self.appdata = None
        self.po_build_dir = None

    def finalize_options(self):
        self.appdata = self.distribution.appdata
        self.set_undefined_options("build", ("build_base", "build_base"))
        self.set_undefined_options("build_po", ("po_build_dir", "po_build_dir"))

    def run(self):
        self.run_command("build_po")
        basepath = os.path.join(self.build_base, "share", "metainfo")
        self.mkpath(basepath)
        for appdata in self.appdata:
            # Always target share/metainfo/<basename>. The previous code
            # joined the *full* relative path in the no-gettext branch,
            # which disagreed with the gettext branch above it and with
            # where install_appdata later looks for the file.
            fullpath = os.path.join(basepath, os.path.basename(appdata))
            if os.path.exists(appdata + ".in"):
                # Only re-merge translations when the template changed.
                if newer(appdata + ".in", fullpath):
                    merge_file(self.po_build_dir, "xml", appdata + ".in", fullpath)
            else:
                self.copy_file(appdata, fullpath)
class install_appdata(Command):
    """Install .appdata.xml files

    Copy the .appdata.xml files from the build tree to their final
    location under $prefix/share/metainfo.
    """

    description = "install .appdata.xml files"
    user_options = []

    def initialize_options(self):
        self.install_dir = None
        self.skip_build = None
        self.appdata = None
        self.build_base = None
        self.outfiles = []

    def finalize_options(self):
        self.set_undefined_options("build", ("build_base", "build_base"))
        self.set_undefined_options(
            "install", ("install_data", "install_dir"), ("skip_build", "skip_build")
        )
        self.set_undefined_options("build_appdata", ("appdata", "appdata"))

    def get_outputs(self):
        """Files written by this command (for install manifests)."""
        return self.outfiles

    def run(self):
        if not self.skip_build:
            self.run_command("build_appdata")
        dest_dir = os.path.join(self.install_dir, "share", "metainfo")
        src_dir = os.path.join(self.build_base, "share", "metainfo")
        created = self.mkpath(dest_dir)
        self.outfiles.extend(created or [])
        for entry in self.appdata:
            name = os.path.basename(entry)
            (installed, _) = self.copy_file(
                os.path.join(src_dir, name), os.path.join(dest_dir, name)
            )
            self.outfiles.append(installed)


__all__ = ["build_appdata", "install_appdata"]
|
engines | wikipedia | # SPDX-License-Identifier: AGPL-3.0-or-later
"""
Wikipedia (Web)
"""
from json import loads
from urllib.parse import quote
from lxml.html import fromstring
from searx.raise_for_httperror import raise_for_httperror
from searx.utils import match_language, searx_useragent
# about
# Engine metadata displayed/used by searx.
about = {
    "website": "https://www.wikipedia.org/",
    "wikidata_id": "Q52",
    "official_api_documentation": "https://en.wikipedia.org/api/",
    "use_official_api": True,
    "require_api_key": False,
    "results": "JSON",
}

# search-url
search_url = "https://{language}.wikipedia.org/api/rest_v1/page/summary/{title}"
supported_languages_url = "https://meta.wikimedia.org/wiki/List_of_Wikipedias"
# Regional variants that share one wikipedia subdomain; request() selects the
# variant through the Accept-Language header instead of the URL.
language_variants = {"zh": ("zh-cn", "zh-hk", "zh-mo", "zh-my", "zh-sg", "zh-tw")}
# set language in base_url
def url_lang(lang):
    """Map a searx language code to the wikipedia subdomain language,
    falling back to English for "all" or unknown languages."""
    base = lang.split("-")[0]
    known = base in supported_languages or base in language_aliases
    if base == "all" or not known:
        return "en"
    return match_language(lang, supported_languages, language_aliases).split("-")[0]
# do search-request
def request(query, params):
    """Build the REST v1 page-summary request for the query."""
    # Article titles are capitalized on Wikipedia; the summary endpoint is
    # case sensitive, so title-case all-lowercase queries.
    title = query.title() if query.islower() else query
    language = url_lang(params["language"])
    params["url"] = search_url.format(title=quote(title), language=language)

    requested = params["language"].lower()
    # Regional variants (e.g. zh-tw) are selected via Accept-Language.
    if requested in language_variants.get(language, []):
        params["headers"]["Accept-Language"] = requested
    params["headers"]["User-Agent"] = searx_useragent()
    # 404/400 are handled in response(); don't treat them as errors here.
    params["raise_for_httperror"] = False
    params["soft_max_redirects"] = 2
    return params
# get response from search-request
def response(resp):
if resp.status_code == 404:
return []
if resp.status_code == 400:
try:
api_result = loads(resp.text)
except:
pass
else:
if (
api_result["type"]
== "https://mediawiki.org/wiki/HyperSwitch/errors/bad_request"
and api_result["detail"] == "title-invalid-characters"
):
return []
raise_for_httperror(resp)
results = []
api_result = loads(resp.text)
# skip disambiguation pages
if api_result.get("type") != "standard":
return []
title = api_result["title"]
wikipedia_link = api_result["content_urls"]["desktop"]["page"]
results.append({"url": wikipedia_link, "title": title})
results.append(
{
"infobox": title,
"id": wikipedia_link,
"content": api_result.get("extract", ""),
"img_src": api_result.get("thumbnail", {}).get("source"),
"urls": [{"title": "Wikipedia", "url": wikipedia_link}],
}
)
return results
# get supported languages from their site
def _fetch_supported_languages(resp):
    """Parse the "List of Wikipedias" page into {code: {name, english_name}}.

    Languages with fewer than 100 articles are skipped.
    """
    supported_languages = {}
    dom = fromstring(resp.text)
    tables = dom.xpath('//table[contains(@class,"sortable")]')
    for table in tables:
        # exclude header row
        trs = table.xpath(".//tr")[1:]
        for tr in trs:
            td = tr.xpath("./td")
            # NOTE(review): column indexes (1=english name, 2=local name,
            # 3=language code, 4=article count) mirror the current page
            # layout -- verify against the live page if parsing breaks.
            code = td[3].xpath("./a")[0].text
            name = td[2].xpath("./a")[0].text
            english_name = td[1].xpath("./a")[0].text
            articles = int(td[4].xpath("./a/b")[0].text.replace(",", ""))
            # exclude languages with too few articles
            if articles >= 100:
                supported_languages[code] = {"name": name, "english_name": english_name}
    return supported_languages
|
downloaders | TenluaVn | # -*- coding: utf-8 -*-
import json
import random
import re
from ..base.simple_downloader import SimpleDownloader
def gen_r():
    """Return "0." followed by 16 random digits, used as the ``r``
    argument in tenlua.vn API requests."""
    digits = []
    for _ in range(16):
        digits.append(random.choice("0123456789"))
    return "0." + "".join(digits)
class TenluaVn(SimpleDownloader):
    """Downloader plugin for tenlua.vn using its JSON batch API."""

    __name__ = "TenluaVn"
    __type__ = "downloader"
    __version__ = "0.04"
    __status__ = "testing"

    __pattern__ = r"https?://(?:www\.)?tenlua\.vn(?!/folder)/.+?/(?P<ID>[0-9a-f]+)/"
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        ("fallback", "bool", "Fallback to free download if premium fails", True),
        ("chk_filesize", "bool", "Check file size", True),
        ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
    ]

    __description__ = """Tenlua.vn downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]

    API_URL = "https://api2.tenlua.vn/"

    def api_request(self, method, **kwargs):
        # The API takes a JSON list with one command object; the session id
        # (if any) travels in the query string, not the body.
        kwargs["a"] = method
        sid = kwargs.pop("sid", None)
        return json.loads(
            self.load(
                self.API_URL,
                get={"sid": sid} if sid is not None else {},
                post=json.dumps([kwargs]),
            )
        )

    def api_info(self, url):
        """Query file name/size/status for the file id embedded in *url*."""
        file_id = re.match(self.__pattern__, url).group("ID")
        file_info = self.api_request(
            "filemanager_builddownload_getinfo", n=file_id, r=gen_r()
        )[0]
        # type == "none" means the file is gone (status 1 = offline).
        if file_info["type"] == "none":
            return {"status": 1}
        else:
            return {
                "name": file_info["n"],
                "size": file_info["real_size"],
                "status": 2,
                "tenlua": {
                    "link": file_info["dlink"],
                    "password": bool(file_info["passwd"]),
                },
            }

    def handle_free(self, pyfile):
        self.handle_download()

    def handle_premium(self, pyfile):
        sid = self.account.info["data"]["sid"]
        self.handle_download(sid)

    def handle_download(self, sid=None):
        """Resolve the download link (premium when *sid* is given), checking
        the file password through the API if one is required."""
        if self.info["tenlua"]["password"]:
            password = self.get_password()
            if password:
                file_id = self.info["pattern"]["ID"]
                args = dict(n=file_id, p=password, r=gen_r())
                if sid is not None:
                    args["sid"] = sid
                # NOTE(review): api_request() returns the parsed batch
                # response and api_info() indexes it with [0], but this call
                # does not -- confirm the response shape for this method.
                password_status = self.api_request(
                    "filemanager_builddownload_checkpassword", **args
                )
                if password_status["status"] == "0":
                    self.fail(self._("Wrong password"))
                else:
                    url = password_status["url"]
            else:
                self.fail(self._("Download is password protected"))
        else:
            url = self.info["tenlua"]["link"]

        # Anonymous (free) downloads are forced to wait before starting.
        if sid is None:
            self.wait(30)

        self.link = url
|
rest | base_api_test | import json
from json import JSONDecodeError
from typing import Dict, Optional
from aiohttp import ClientSession
from tribler.core.components.restapi.rest import get_param
from tribler.core.utilities.path_util import Path
from tribler.core.version import version_id
def path_to_str(obj):
    """Recursively convert Path objects inside *obj* to plain strings.

    Dicts and lists are rebuilt with converted keys/values; any other
    value is returned unchanged.
    """
    if isinstance(obj, dict):
        return {path_to_str(key): path_to_str(value) for key, value in obj.items()}
    if isinstance(obj, list):
        return [path_to_str(item) for item in obj]
    return str(obj) if isinstance(obj, Path) else obj
async def do_real_request(
    port,
    endpoint,
    expected_code=200,
    expected_json=None,
    request_type="GET",
    post_data=None,
    headers=None,
    json_response=True,
):
    """Issue a real HTTP request to a locally running REST API and assert on
    the response status (and optionally its JSON body).

    *endpoint* may be a path relative to http://localhost:<port>/ or a full
    http(s) URL. Dict/list *post_data* is serialized to JSON (Paths become
    strings). Returns the decoded JSON body, or raw bytes when
    *json_response* is false.
    """
    body = post_data or {}
    if isinstance(body, (dict, list)):
        body = json.dumps(path_to_str(body))

    if endpoint.startswith(("http://", "https://")):
        url = endpoint
    else:
        url = f"http://localhost:{port}/{endpoint}"
    request_headers = headers or {"User-Agent": "Tribler " + version_id}

    async with ClientSession() as session:
        async with session.request(
            request_type, url, data=body, headers=request_headers, ssl=False
        ) as raw_response:
            status = raw_response.status
            if json_response:
                result = await raw_response.json(content_type=None)
            else:
                result = await raw_response.read()
            assert status == expected_code, result
            if result is not None and expected_json is not None:
                assert expected_json == result
            return result
async def do_request(
    test_client,
    url,
    expected_code=200,
    expected_json=None,
    request_type="GET",
    post_data=None,
    headers=None,
    json_response=True,
    params: Optional[Dict] = None,
):
    """Perform a request through an aiohttp test client and assert on the
    response status (and optionally its JSON body).

    Dict/list *post_data* is serialized to JSON (Paths become strings).
    Returns the decoded JSON body (None if the body is not valid JSON), or
    raw bytes when *json_response* is false.
    """
    post_data = post_data or {}
    data = (
        json.dumps(path_to_str(post_data))
        if isinstance(post_data, (dict, list))
        else post_data
    )
    headers = headers or {"User-Agent": "Tribler " + version_id}

    async with test_client.request(
        request_type, url, data=data, headers=headers, ssl=False, params=params
    ) as response:
        status = response.status
        try:
            response = (
                await response.json(content_type=None)
                if json_response
                else await response.read()
            )
        except JSONDecodeError:
            response = None

        # On an unexpected 500, print the server-side error to help debugging.
        if status == 500 and expected_code != 500:
            # BUG FIX: `response` may be None here (body was not valid JSON)
            # or may lack an "error" key; the old code raised TypeError in
            # that case, hiding the actual failure.
            error = response.get("error") if isinstance(response, dict) else None
            if isinstance(error, dict) and "message" in error:
                print(error["message"])
            elif error is not None:
                print(error)

        assert status == expected_code, response
        if response is not None and expected_json is not None:
            assert response == expected_json
        return response
def test_get_parameters():
    """
    Test the get_param helper: present keys yield a value, absent keys None.
    """
    params = {"abc": [3]}
    assert get_param(params, "abc") is not None
    assert get_param(params, "abcd") is None
|
PyObjCTest | test_cgeventsource | from PyObjCTools.TestSupport import *
from Quartz.CoreGraphics import *
# Python 2/3 compatibility: `long` does not exist on Python 3 (all integers
# are `int`), so alias it for the isinstance checks below.
try:
    long
except NameError:
    long = int
class TestCGEventSource(TestCase):
    """Exercises the CGEventSource bindings from Quartz.CoreGraphics,
    checking wrapped return types and round-tripped values."""

    def testTypes(self):
        # CGEventSourceRef must be exposed as a CoreFoundation type.
        self.assertIsCFType(CGEventSourceRef)

    def testFunctions(self):
        self.assertIsInstance(CGEventSourceGetTypeID(), (int, long))

        # 0 is passed as the event-source state throughout this test.
        src = CGEventSourceCreate(0)
        self.assertIsInstance(src, CGEventSourceRef)

        v = CGEventSourceGetKeyboardType(src)
        self.assertIsInstance(v, (int, long))
        CGEventSourceSetKeyboardType(src, v)

        # Setter/getter round trip for pixels per line.
        CGEventSourceSetPixelsPerLine(src, 23)
        v = CGEventSourceGetPixelsPerLine(src)
        self.assertIsInstance(v, float)
        self.assertEqual(v, 23)

        v = CGEventSourceGetSourceStateID(src)
        self.assertIsInstance(v, (int, long))

        # These C functions return a C bool, which must map to Python bool.
        self.assertResultHasType(CGEventSourceButtonState, objc._C_BOOL)
        v = CGEventSourceButtonState(0, 0)
        self.assertIsInstance(v, bool)

        self.assertResultHasType(CGEventSourceKeyState, objc._C_BOOL)
        v = CGEventSourceKeyState(0, 64)
        self.assertIsInstance(v, bool)

        v = CGEventSourceFlagsState(0)
        self.assertIsInstance(v, (int, long))

        v = CGEventSourceSecondsSinceLastEventType(0, kCGEventLeftMouseDown)
        self.assertIsInstance(v, float)

        v = CGEventSourceCounterForEventType(0, kCGEventLeftMouseDown)
        self.assertIsInstance(v, (int, long))

        # 64-bit user data must round trip unchanged.
        CGEventSourceSetUserData(src, 0xABBCCDD00112233)
        v = CGEventSourceGetUserData(src)
        self.assertIsInstance(v, (int, long))
        self.assertEqual(v, 0xABBCCDD00112233)

        CGEventSourceSetLocalEventsFilterDuringSuppressionState(
            src,
            kCGEventFlagMaskControl | kCGEventFlagMaskCommand,
            kCGEventSuppressionStateRemoteMouseDrag,
        )
        m = CGEventSourceGetLocalEventsFilterDuringSuppressionState(
            src, kCGEventSuppressionStateRemoteMouseDrag
        )
        self.assertIsInstance(m, (int, long))

        # Setter/getter round trip for the suppression interval.
        CGEventSourceSetLocalEventsSuppressionInterval(src, 1.5)
        v = CGEventSourceGetLocalEventsSuppressionInterval(src)
        self.assertEqual(v, 1.5)
# Allow running this test module directly.
if __name__ == "__main__":
    main()
|
accounts | UploadgigCom | # -*- coding: utf-8 -*-
import json
import re
import time
from pyload.core.datatypes.pyfile import PyFile
from ..anticaptchas.ReCaptcha import ReCaptcha
from ..base.account import BaseAccount
from ..helpers import parse_html_form
class UploadgigCom(BaseAccount):
    """Account plugin for uploadgig.com: scrapes account state from the
    account page and performs (optionally ReCaptcha-protected) login."""

    __name__ = "UploadgigCom"
    __type__ = "account"
    __version__ = "0.07"
    __status__ = "testing"

    __description__ = """UploadgigCom account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]

    # Markers/regexes used to scrape state out of the account HTML pages.
    LOGIN_SKIP_PATTERN = r"You are currently logged in."
    PREMIUM_PATTERN = (
        r'<dt>Premium download:</dt>\s*<dd class="text-success">Active</dd>'
    )
    VALID_UNTIL_PATTERN = r"<dt>Package expire date:</dt>\s*<dd>([\d/]+)"
    TRAFFIC_LEFT_PATTERN = r"<dt>Daily traffic usage:</dt>\s*<dd>(?P<S1>[\d.,]+) (?:(?P<U1>[\w^_]+) )?/ (?P<S2>[\d.,]+) (?P<U2>[\w^_]+)"

    def grab_info(self, user, password, data):
        """Scrape premium flag, remaining daily traffic and expiry date."""
        html = self.load("https://uploadgig.com/user/my_account")

        premium = re.search(self.PREMIUM_PATTERN, html) is not None

        m = re.search(self.TRAFFIC_LEFT_PATTERN, html)
        if m is None:
            trafficleft = None
        else:
            # Remaining = daily quota (S2/U2) minus used (S1/U1); the used
            # amount may omit its unit, in which case the quota unit applies.
            trafficleft = self.parse_traffic(
                m.group("S2"), m.group("U2")
            ) - self.parse_traffic(m.group("S1"), m.group("U1") or m.group("U2"))

        m = re.search(self.VALID_UNTIL_PATTERN, html)
        if m is None:
            validuntil = None
        else:
            validuntil = time.mktime(time.strptime(m.group(1), "%Y/%m/%d"))

        return {
            "premium": premium,
            "trafficleft": trafficleft,
            "validuntil": validuntil,
        }

    def signin(self, user, password, data):
        """Log in via the login form, solving ReCaptcha when presented."""
        html = self.load("https://uploadgig.com/login/form")

        # Session cookie still valid -> nothing to do.
        if self.LOGIN_SKIP_PATTERN in html:
            self.skip_login()

        url, inputs = parse_html_form('id="login_form"', html)
        if inputs is None:
            self.fail_login("Login form not found")

        inputs["email"] = user
        inputs["pass"] = password

        if '<div class="row" id="parent_captcha_container">' in html:
            # dummy pyfile
            # ReCaptcha() needs a PyFile; accounts have none, so fabricate a
            # placeholder pointing at the site itself.
            pyfile = PyFile(
                self.pyload.files,
                -1,
                "https://uploadgig.com",
                "https://uploadgig.com",
                0,
                0,
                "",
                self.classname,
                -1,
                -1,
            )
            pyfile.plugin = self

            recaptcha = ReCaptcha(pyfile)
            captcha_key = recaptcha.detect_key(html)
            if captcha_key:
                self.captcha = recaptcha
                response = recaptcha.challenge(captcha_key, html)
                inputs["g-recaptcha-response"] = response
            else:
                self.log_error(self._("ReCaptcha key not found"))
                self.fail_login(self._("ReCaptcha key not found"))

        # The login endpoint answers with JSON; state "1" means success.
        html = self.load(url, post=inputs)
        json_data = json.loads(html)
        if json_data.get("state") != "1":
            self.log_error(json_data["msg"])
            self.fail_login()

    @property
    def logged(self):
        """
        Checks if user is still logged in
        """
        if not self.user:
            return False

        self.sync()
        # Logged out when the login is too old or the session cookie
        # ("fs_secure") is gone.
        if (
            self.info["login"]["timestamp"] == 0
            or self.timeout != -1
            and self.info["login"]["timestamp"] + self.timeout < time.time()
            or self.req
            and not self.req.cj.parse_cookie("fs_secure")
        ):
            self.log_debug("Reached login timeout for user `%s`" % self.user)
            return False
        else:
            return True

    """
    @NOTE: below are methods
    necessary for captcha to work with account plugins
    """

    def check_status(self):
        pass

    def retry_captcha(self, attempts=10, wait=1, msg="Max captcha retries reached"):
        # Account plugins cannot retry; mark the captcha invalid and abort.
        self.captcha.invalid()
        self.fail_login(msg=self._("Invalid captcha"))
|
posthog | middleware | import time
from ipaddress import ip_address, ip_network
from typing import Any, Callable, List, Optional, cast
import structlog
from corsheaders.middleware import CorsMiddleware
from django.conf import settings
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.exceptions import MiddlewareNotUsed
from django.db import connection
from django.db.models import QuerySet
from django.http import HttpRequest, HttpResponse
from django.middleware.csrf import CsrfViewMiddleware
from django.urls import resolve
from django.utils.cache import add_never_cache_headers
from django_prometheus.middleware import (
Metrics,
PrometheusAfterMiddleware,
PrometheusBeforeMiddleware,
)
from posthog.api.capture import get_event
from posthog.api.decide import get_decide
from posthog.clickhouse.client.execute import clickhouse_query_counter
from posthog.clickhouse.query_tagging import QueryCounter, reset_query_tags, tag_queries
from posthog.cloud_utils import is_cloud
from posthog.exceptions import generate_exception_response
from posthog.metrics import LABEL_TEAM_ID
from posthog.models import Action, Cohort, Dashboard, FeatureFlag, Insight, Team, User
from posthog.rate_limit import DecideRateThrottle
from posthog.settings import SITE_URL
from posthog.settings.statsd import STATSD_HOST
from posthog.user_permissions import UserPermissions
from rest_framework import status
from statshog.defaults.django import statsd
from .auth import PersonalAPIKeyAuthentication
from .utils_cors import cors_response
# Endpoints that must stay reachable regardless of IP allow-listing, since
# they are hit by tracking clients rather than logged-in users.
ALWAYS_ALLOWED_ENDPOINTS = [
    "decide",
    "engage",
    "track",
    "capture",
    "batch",
    "e",
    "s",
    "static",
    "_health",
]

# Options applied to the ph_current_* cookies set for the posthog.com site
# (see PostHogTokenCookieMiddleware).
default_cookie_options = {
    "max_age": 365 * 24 * 60 * 60,  # one year
    "expires": None,
    "path": "/",
    "domain": "posthog.com",
    "secure": True,
    "samesite": "Strict",
}

# First path segments treated as API traffic, which never gets those cookies.
cookie_api_paths_to_ignore = {"e", "s", "capture", "batch", "decide", "api", "track"}
class AllowIPMiddleware:
    """Rejects requests whose client IP is outside settings.ALLOWED_IP_BLOCKS,
    except for the always-allowed (ingestion/static/health) endpoints."""

    trusted_proxies: List[str] = []

    def __init__(self, get_response):
        if not settings.ALLOWED_IP_BLOCKS:
            # this will make Django skip this middleware for all future requests
            raise MiddlewareNotUsed()

        self.ip_blocks = settings.ALLOWED_IP_BLOCKS
        if settings.TRUSTED_PROXIES:
            self.trusted_proxies = [
                item.strip() for item in settings.TRUSTED_PROXIES.split(",")
            ]
        self.get_response = get_response

    def get_forwarded_for(self, request: HttpRequest):
        # X-Forwarded-For is a comma-separated chain: client, proxy1, proxy2, ...
        forwarded_for = request.META.get("HTTP_X_FORWARDED_FOR")
        if forwarded_for is not None:
            return [ip.strip() for ip in forwarded_for.split(",")]
        else:
            return []

    def extract_client_ip(self, request: HttpRequest):
        """Return the effective client IP, or None when the forwarding chain
        contains an untrusted hop (the header cannot be believed then)."""
        client_ip = request.META["REMOTE_ADDR"]
        if getattr(settings, "USE_X_FORWARDED_HOST", False):
            forwarded_for = self.get_forwarded_for(request)
            if forwarded_for:
                closest_proxy = client_ip
                client_ip = forwarded_for.pop(0)
                if settings.TRUST_ALL_PROXIES:
                    return client_ip
                # Every remaining hop must be a trusted proxy.
                proxies = [closest_proxy] + forwarded_for
                for proxy in proxies:
                    if proxy not in self.trusted_proxies:
                        return None
        return client_ip

    def __call__(self, request: HttpRequest):
        # NOTE(review): the view is executed before the IP check; only the
        # response is withheld for disallowed IPs.
        response: HttpResponse = self.get_response(request)
        if request.path.split("/")[1] in ALWAYS_ALLOWED_ENDPOINTS:
            return response
        ip = self.extract_client_ip(request)
        if ip and any(
            ip_address(ip) in ip_network(block, strict=False)
            for block in self.ip_blocks
        ):
            return response
        return HttpResponse(
            "Your IP is not allowed. Check your ALLOWED_IP_BLOCKS settings. If you are behind a proxy, you need to set TRUSTED_PROXIES. See https://posthog.com/docs/deployment/running-behind-proxy",
            status=403,
        )
class CsrfOrKeyViewMiddleware(CsrfViewMiddleware):
    """Middleware accepting requests that either contain a valid CSRF token or a personal API key."""

    def process_view(self, request, callback, callback_args, callback_kwargs):
        result = super().process_view(
            request, callback, callback_args, callback_kwargs
        )  # None if request accepted
        # if super().process_view did not find a valid CSRF token, try looking for a personal API key
        if (
            result is not None
            and PersonalAPIKeyAuthentication.find_key_with_source(request) is not None
        ):
            return self._accept(request)
        return result

    def _accept(self, request):
        # Mirror CsrfViewMiddleware._accept: mark the request as checked.
        request.csrf_processing_done = True
        return None
# Work around cloudflare by default caching csv files
class CsvNeverCacheMiddleware:
    """Adds never-cache headers to responses for paths ending in "csv"."""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        response = self.get_response(request)
        if not request.path.endswith("csv"):
            return response
        add_never_cache_headers(response)
        return response
class AutoProjectMiddleware:
    """Automatic switching of the user's current project to that of the item being accessed if possible.

    Sometimes you get sent a link to PostHog that points to an item from a different project than the one you currently
    are in. With this middleware, if you have access to the target project, you are seamlessly switched to it,
    instead of seeing a 404 error.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request: HttpRequest):
        if request.user.is_authenticated:
            target_queryset = self.get_target_queryset(request)
            if target_queryset is not None:
                self.switch_team_if_needed_and_possible(request, target_queryset)
        response = self.get_response(request)
        return response

    def get_target_queryset(self, request: HttpRequest) -> Optional[QuerySet]:
        """Map the URL's first two path segments to a queryset for the
        referenced item, or None when the path is not item-scoped."""
        path_parts = request.path.strip("/").split("/")
        # Sync the paths with urls.ts!
        if len(path_parts) >= 2:
            if path_parts[0] == "dashboard":
                dashboard_id = path_parts[1]
                if dashboard_id.isnumeric():
                    return Dashboard.objects.filter(deleted=False, id=dashboard_id)
            elif path_parts[0] == "insights":
                insight_short_id = path_parts[1]
                return Insight.objects.filter(deleted=False, short_id=insight_short_id)
            elif path_parts[0] == "feature_flags":
                feature_flag_id = path_parts[1]
                if feature_flag_id.isnumeric():
                    return FeatureFlag.objects.filter(deleted=False, id=feature_flag_id)
            elif path_parts[0] == "action":
                action_id = path_parts[1]
                if action_id.isnumeric():
                    return Action.objects.filter(deleted=False, id=action_id)
            elif path_parts[0] == "cohorts":
                cohort_id = path_parts[1]
                if cohort_id.isnumeric():
                    return Cohort.objects.filter(deleted=False, id=cohort_id)
        return None

    def switch_team_if_needed_and_possible(
        self, request: HttpRequest, target_queryset: QuerySet
    ):
        """If the item is not in the user's current team but they may access
        its team, switch their current team/organization to it."""
        user = cast(User, request.user)
        current_team = user.team
        if (
            current_team is not None
            and not target_queryset.filter(team=current_team).exists()
        ):
            actual_item = target_queryset.only("team").select_related("team").first()
            if actual_item is not None:
                actual_item_team = cast(Team, actual_item.team)
                user_permissions = UserPermissions(user)
                # :KLUDGE: This is more inefficient than needed, doing several expensive lookups
                #   However this should be a rare operation!
                if (
                    user_permissions.team(actual_item_team).effective_membership_level
                    is not None
                ):
                    user.current_team = actual_item_team
                    user.team = user.current_team  # Update cached property
                    user.current_organization_id = actual_item_team.organization_id
                    user.save()
                    # Information for POSTHOG_APP_CONTEXT
                    request.switched_team = current_team.id  # type: ignore
class CHQueries:
    """Tags ClickHouse/Postgres queries issued during a request with request
    metadata, and counts API responses per route/status."""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request: HttpRequest):
        """Install monkey-patch on demand.

        If monkey-patch has not been run in for this process (assuming multiple preforked processes),
        then do it now.
        """
        route = resolve(request.path)
        route_id = f"{route.route} ({route.func.__name__})"
        user = cast(User, request.user)
        # Attach request context to every query issued while handling it.
        tag_queries(
            user_id=user.pk,
            kind="request",
            id=request.path,
            route_id=route.route,
            client_query_id=self._get_param(request, "client_query_id"),
            session_id=self._get_param(request, "session_id"),
            container_hostname=settings.CONTAINER_HOSTNAME,
            http_referer=request.META.get("HTTP_REFERER"),
            http_user_agent=request.META.get("HTTP_USER_AGENT"),
        )
        if hasattr(user, "current_team_id") and user.current_team_id:
            tag_queries(team_id=user.current_team_id)
        try:
            response: HttpResponse = self.get_response(request)
            # Count API responses per route/status; capture endpoints are
            # metered elsewhere.
            if "api/" in request.path and "capture" not in request.path:
                statsd.incr(
                    "http_api_request_response",
                    tags={"id": route_id, "status_code": response.status_code},
                )
            return response
        finally:
            # Always clear tags so they can't leak into the next request
            # handled by this worker.
            reset_query_tags()

    def _get_param(self, request: HttpRequest, name: str):
        # Query string takes precedence over the form body.
        if name in request.GET:
            return request.GET[name]
        if name in request.POST:
            return request.POST[name]
        return None
class QueryTimeCountingMiddleware:
    """Adds a Server-Timing header (total django time plus Postgres and
    ClickHouse query time) to allow-listed API responses when
    CAPTURE_TIME_TO_SEE_DATA is enabled."""

    ALLOW_LIST_ROUTES = [
        "dashboard",
        "insight",
        "property_definitions",
        "properties",
        "person",
    ]

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request: HttpRequest):
        should_time = (
            settings.CAPTURE_TIME_TO_SEE_DATA
            and "api" in request.path
            and any(route in request.path for route in self.ALLOW_LIST_ROUTES)
        )
        if not should_time:
            return self.get_response(request)

        pg_counter = QueryCounter()
        ch_counter = QueryCounter()
        started = time.perf_counter()
        with connection.execute_wrapper(pg_counter), clickhouse_query_counter(
            ch_counter
        ):
            response: HttpResponse = self.get_response(request)
        response.headers["Server-Timing"] = self._construct_header(
            django=time.perf_counter() - started,
            pg=pg_counter.query_time_ms,
            ch=ch_counter.query_time_ms,
        )
        return response

    def _construct_header(self, **kwargs):
        parts = [f"{name};dur={round(value)}" for name, value in kwargs.items()]
        return ", ".join(parts)
def shortcircuitmiddleware(f):
    """view decorator, whose sole purpose is to 'rename' the function to
    '_shortcircuitmiddleware'"""

    # The wrapper's NAME is the contract here -- other code recognizes
    # decorated views by the function name, so do not use functools.wraps.
    def _shortcircuitmiddleware(*args, **kwargs):
        return f(*args, **kwargs)

    return _shortcircuitmiddleware
class ShortCircuitMiddleware:
    """Serves /decide directly (with its own rate limiting), bypassing the
    rest of the middleware stack for that hot endpoint."""

    def __init__(self, get_response):
        self.get_response = get_response
        self.decide_throttler = DecideRateThrottle(
            replenish_rate=settings.DECIDE_BUCKET_REPLENISH_RATE,
            bucket_capacity=settings.DECIDE_BUCKET_CAPACITY,
        )

    def __call__(self, request: HttpRequest):
        if request.path == "/decide/" or request.path == "/decide":
            try:
                # :KLUDGE: Manually tag ClickHouse queries as CHMiddleware is skipped
                tag_queries(
                    kind="request",
                    id=request.path,
                    route_id=resolve(request.path).route,
                    container_hostname=settings.CONTAINER_HOSTNAME,
                    http_referer=request.META.get("HTTP_REFERER"),
                    http_user_agent=request.META.get("HTTP_USER_AGENT"),
                )
                if self.decide_throttler.allow_request(request, None):
                    return get_decide(request)
                else:
                    return cors_response(
                        request,
                        generate_exception_response(
                            "decide",
                            f"Rate limit exceeded ",
                            code="rate_limit_exceeded",
                            status_code=status.HTTP_429_TOO_MANY_REQUESTS,
                        ),
                    )
            finally:
                # Clear tags even when get_decide raises.
                reset_query_tags()
        response: HttpResponse = self.get_response(request)
        return response
class CaptureMiddleware:
    """
    Middleware to serve up capture responses. We specifically want to avoid
    doing any unnecessary work in these endpoints as they are hit very
    frequently, and we want to provide the best availability possible, which
    translates to keeping dependencies to a minimum.
    """

    def __init__(self, get_response):
        self.get_response = get_response

        middlewares: List[Any] = []
        # based on how we're using these middlewares, only middlewares that
        # have a process_request and process_response attribute can be valid here.
        # Or, middlewares that inherit from `middleware.util.deprecation.MiddlewareMixin` which
        # reconciles the old style middleware with the new style middleware.
        for middleware_class in (
            CorsMiddleware,
            PrometheusAfterMiddlewareWithTeamIds,
        ):
            try:
                # Some middlewares raise MiddlewareNotUsed if they are not
                # needed. In this case we want to avoid the default middlewares
                # being used.
                middlewares.append(middleware_class(get_response=None))
            except MiddlewareNotUsed:
                pass

        # List of middlewares we want to run, that would've been shortcircuited otherwise
        self.CAPTURE_MIDDLEWARE = middlewares

        if STATSD_HOST is not None:
            # import here to avoid log-spew about failure to connect to statsd,
            # as this connection is created on import
            from django_statsd.middleware import StatsdMiddlewareTimer

            self.CAPTURE_MIDDLEWARE.append(StatsdMiddlewareTimer())

    def __call__(self, request: HttpRequest):
        # Only intercept the event-ingestion endpoints; anything else falls
        # through to the regular middleware stack at the bottom.
        if request.path in (
            "/e",
            "/e/",
            "/s",
            "/s/",
            "/track",
            "/track/",
            "/capture",
            "/capture/",
            "/batch",
            "/batch/",
            "/engage/",
            "/engage",
        ):
            try:
                # :KLUDGE: Manually tag ClickHouse queries as CHMiddleware is skipped
                tag_queries(
                    kind="request",
                    id=request.path,
                    route_id=resolve(request.path).route,
                    container_hostname=settings.CONTAINER_HOSTNAME,
                    http_referer=request.META.get("HTTP_REFERER"),
                    http_user_agent=request.META.get("HTTP_USER_AGENT"),
                )
                for middleware in self.CAPTURE_MIDDLEWARE:
                    middleware.process_request(request)

                # call process_view for PrometheusAfterMiddleware to get the right metrics in place
                # simulate how django prepares the url
                resolver_match = resolve(request.path)
                request.resolver_match = resolver_match
                for middleware in self.CAPTURE_MIDDLEWARE:
                    middleware.process_view(
                        request,
                        resolver_match.func,
                        resolver_match.args,
                        resolver_match.kwargs,
                    )

                response: HttpResponse = get_event(request)

                # Response phase runs in reverse order, mirroring Django's own
                # middleware stack.
                for middleware in self.CAPTURE_MIDDLEWARE[::-1]:
                    middleware.process_response(request, response)

                return response
            finally:
                reset_query_tags()

        response = self.get_response(request)
        return response
def per_request_logging_context_middleware(
    get_response: Callable[[HttpRequest], HttpResponse]
) -> Callable[[HttpRequest], HttpResponse]:
    """
    Bind extra per-request metadata into the structlog context.

    django-structlog already supplies defaults such as request_id and
    user_id (see
    https://django-structlog.readthedocs.io/en/latest/getting_started.html#extending-request-log-metadata);
    here we additionally record the Host and X-Forwarded-For headers so we
    can see requests on cloud that do not use the app.posthog.com host
    header -- important while rolling out CloudFront, since NGINX alone
    cannot map a host header back to a team_id.
    """

    def middleware(request: HttpRequest) -> HttpResponse:
        headers = request.META
        structlog.contextvars.bind_contextvars(
            host=headers.get("HTTP_HOST", ""),
            x_forwarded_for=headers.get("HTTP_X_FORWARDED_FOR", ""),
        )
        return get_response(request)

    return middleware
def user_logging_context_middleware(
    get_response: Callable[[HttpRequest], HttpResponse]
) -> Callable[[HttpRequest], HttpResponse]:
    """
    Bind the authenticated user's team_id into the structlog context.

    Must be installed after authentication middleware, since it reads
    request.user.
    """

    def middleware(request: HttpRequest) -> HttpResponse:
        user = request.user
        if user.is_authenticated:
            structlog.contextvars.bind_contextvars(team_id=user.current_team_id)
        return get_response(request)

    return middleware
# django-prometheus HTTP metrics that additionally get a team_id label (see
# CustomPrometheusMetrics / PrometheusAfterMiddlewareWithTeamIds below).
PROMETHEUS_EXTENDED_METRICS = [
    "django_http_requests_total_by_view_transport_method",
    "django_http_responses_total_by_status_view_method",
    "django_http_requests_latency_seconds_by_view_method",
]
class CustomPrometheusMetrics(Metrics):
    """Metrics registry that adds a team_id label to the extended HTTP
    metrics (see PROMETHEUS_EXTENDED_METRICS)."""

    def register_metric(self, metric_cls, name, documentation, labelnames=(), **kwargs):
        if name in PROMETHEUS_EXTENDED_METRICS:
            # BUG FIX: previously mutated `labelnames` in place, which raises
            # AttributeError when the default empty tuple is used (tuples have
            # no .extend) and silently mutates a caller-owned list otherwise.
            # Build a fresh list instead.
            labelnames = [*labelnames, LABEL_TEAM_ID]
        return super().register_metric(
            metric_cls, name, documentation, labelnames=labelnames, **kwargs
        )
class PrometheusBeforeMiddlewareWithTeamIds(PrometheusBeforeMiddleware):
    # Request-phase Prometheus middleware wired to the registry that knows
    # about the extra team_id label.
    metrics_cls = CustomPrometheusMetrics
class PrometheusAfterMiddlewareWithTeamIds(PrometheusAfterMiddleware):
    """Response-phase Prometheus middleware that fills in the extra team_id
    label on the extended metrics."""

    metrics_cls = CustomPrometheusMetrics

    def label_metric(self, metric, request, response=None, **labels):
        new_labels = labels
        if metric._name in PROMETHEUS_EXTENDED_METRICS:
            team_id = None
            if (
                request
                and getattr(request, "user", None)
                and request.user.is_authenticated
            ):
                # NOTE(review): assumes request.resolver_match is populated by
                # the time metrics are labelled -- confirm for requests that
                # never resolved to a view.
                if request.resolver_match.kwargs.get("parent_lookup_team_id"):
                    team_id = request.resolver_match.kwargs["parent_lookup_team_id"]
                    # "@current" is an alias for the user's active team.
                    if team_id == "@current":
                        if hasattr(request.user, "current_team_id"):
                            team_id = request.user.current_team_id
                        else:
                            team_id = None
            new_labels = {LABEL_TEAM_ID: team_id}
            new_labels.update(labels)
        return super().label_metric(metric, request, response=response, **new_labels)
class PostHogTokenCookieMiddleware(SessionMiddleware):
    """
    Adds two secure cookies to enable auto-filling the current project token on the docs.
    """

    def process_response(self, request, response):
        response = super().process_response(request, response)

        # These cookies are only meaningful on PostHog Cloud (posthog.com).
        if not is_cloud():
            return response

        # skip adding the cookie on API requests
        split_request_path = request.path.split("/")
        # BUG FIX: the previous guard was `len(split_request_path)`, which is
        # always truthy (str.split never returns an empty list), and indexing
        # [1] would IndexError for a path without a "/" prefix. Require the
        # second segment to actually exist.
        if (
            len(split_request_path) >= 2
            and split_request_path[1] in cookie_api_paths_to_ignore
        ):
            return response

        if request.path.startswith("/logout"):
            # clears the cookies that were previously set, except for ph_current_instance as that is used for the website login button
            response.delete_cookie(
                "ph_current_project_token", domain=default_cookie_options["domain"]
            )
            response.delete_cookie(
                "ph_current_project_name", domain=default_cookie_options["domain"]
            )

        if request.user and request.user.is_authenticated and request.user.team:
            response.set_cookie(
                key="ph_current_project_token",
                value=request.user.team.api_token,
                max_age=365 * 24 * 60 * 60,
                expires=default_cookie_options["expires"],
                path=default_cookie_options["path"],
                domain=default_cookie_options["domain"],
                secure=default_cookie_options["secure"],
                samesite=default_cookie_options["samesite"],
            )
            response.set_cookie(
                key="ph_current_project_name",  # clarify which project is active (orgs can have multiple projects)
                value=request.user.team.name.encode("utf-8").decode("latin-1"),
                max_age=365 * 24 * 60 * 60,
                expires=default_cookie_options["expires"],
                path=default_cookie_options["path"],
                domain=default_cookie_options["domain"],
                secure=default_cookie_options["secure"],
                samesite=default_cookie_options["samesite"],
            )
            response.set_cookie(
                key="ph_current_instance",
                value=SITE_URL,
                max_age=365 * 24 * 60 * 60,
                expires=default_cookie_options["expires"],
                path=default_cookie_options["path"],
                domain=default_cookie_options["domain"],
                secure=default_cookie_options["secure"],
                samesite=default_cookie_options["samesite"],
            )

        return response
|
migrations | 0006_auto_20230512_0902 | # Generated by Django 3.2.19 on 2023-05-12 09:02
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add per-user "info notification" sound/volume settings.

    All new fields are nullable and have defaults, so no data migration is
    required for existing MobileAppUserSettings rows.
    """

    dependencies = [
        (
            "mobile_app",
            "0005_mobileappusersettings_important_notification_volume_override",
        ),
    ]

    operations = [
        migrations.AddField(
            model_name="mobileappusersettings",
            name="info_notification_sound_name",
            field=models.CharField(default="default_sound", max_length=100, null=True),
        ),
        migrations.AddField(
            model_name="mobileappusersettings",
            name="info_notification_volume",
            # Volume is a fraction constrained to [0.0, 1.0].
            field=models.FloatField(
                default=0.8,
                null=True,
                validators=[
                    django.core.validators.MinValueValidator(0.0),
                    django.core.validators.MaxValueValidator(1.0),
                ],
            ),
        ),
        migrations.AddField(
            model_name="mobileappusersettings",
            name="info_notification_volume_override",
            field=models.BooleanField(default=False, null=True),
        ),
        migrations.AddField(
            model_name="mobileappusersettings",
            name="info_notification_volume_type",
            field=models.CharField(
                choices=[("constant", "Constant"), ("intensifying", "Intensifying")],
                default="constant",
                max_length=50,
                null=True,
            ),
        ),
    ]
|
hogql | base | import re
from dataclasses import dataclass, field
from typing import Literal, Optional
from posthog.hogql.constants import ConstantDataType
from posthog.hogql.errors import NotImplementedException
# Given a string like "CorrectHorseBS", match the "H" and "B", so that we can convert this to "correct_horse_bs"
# (zero-width match before an uppercase letter that is neither at the start
# nor preceded by another uppercase letter)
camel_case_pattern = re.compile(r"(?<!^)(?<![A-Z])(?=[A-Z])")
@dataclass(kw_only=True)
class AST:
start: Optional[int] = field(default=None)
end: Optional[int] = field(default=None)
def accept(self, visitor):
camel_case_name = camel_case_pattern.sub("_", self.__class__.__name__).lower()
method_name = f"visit_{camel_case_name}"
if hasattr(visitor, method_name):
visit = getattr(visitor, method_name)
return visit(self)
if hasattr(visitor, "visit_unknown"):
return visitor.visit_unknown(self)
raise NotImplementedException(f"Visitor has no method {method_name}")
@dataclass(kw_only=True)
class Type(AST):
    """Base class for resolved HogQL types attached to AST nodes."""

    def get_child(self, name: str) -> "Type":
        # Subclasses with named children must override this.
        raise NotImplementedException("Type.get_child not overridden")

    def has_child(self, name: str) -> bool:
        # NOTE(review): the base get_child raises rather than returning None,
        # so has_child is only meaningful on subclasses that override it.
        return self.get_child(name) is not None

    def resolve_constant_type(self) -> Optional["ConstantType"]:
        # Default: the concrete constant type cannot be determined.
        return UnknownType()
@dataclass(kw_only=True)
class Expr(AST):
    """Base class for all HogQL expressions."""

    # Resolved type of the expression, if known; None until resolution.
    type: Optional[Type] = field(default=None)
@dataclass(kw_only=True)
class CTE(Expr):
    """A common table expression."""

    name: str
    expr: Expr
    # Whether the CTE is an inlined column "WITH 1 AS a" or a subquery "WITH a AS (SELECT 1)"
    cte_type: Literal["column", "subquery"]
@dataclass(kw_only=True)
class ConstantType(Type):
    """Type of a constant value, identified by its data_type tag."""

    data_type: ConstantDataType

    def resolve_constant_type(self) -> "ConstantType":
        # A constant type resolves to itself.
        return self

    def print_type(self) -> str:
        # Subclasses must provide a printable type name.
        raise NotImplementedException("ConstantType.print_type not implemented")
@dataclass(kw_only=True)
class UnknownType(ConstantType):
    """Placeholder used when a constant's type cannot be determined."""

    # init=False: always "unknown", not settable via the constructor.
    data_type: ConstantDataType = field(default="unknown", init=False)

    def print_type(self) -> str:
        return "Unknown"
|
scenarios | scenario_step | import importlib
import logging
import typing
from apps.slack.alert_group_slack_service import AlertGroupSlackService
from apps.slack.client import SlackClient
if typing.TYPE_CHECKING:
from apps.slack.models import SlackTeamIdentity, SlackUserIdentity
from apps.slack.types import EventPayload
from apps.user_management.models import Organization, User
logger = logging.getLogger(__name__)
class ScenarioStep(object):
def __init__(
self,
slack_team_identity: "SlackTeamIdentity",
organization: typing.Optional["Organization"] = None,
user: typing.Optional["User"] = None,
):
self._slack_client = SlackClient(slack_team_identity)
self.slack_team_identity = slack_team_identity
self.organization = organization
self.user = user
self.alert_group_slack_service = AlertGroupSlackService(
slack_team_identity, self._slack_client
)
def process_scenario(
self,
slack_user_identity: "SlackUserIdentity",
slack_team_identity: "SlackTeamIdentity",
payload: "EventPayload",
) -> None:
pass
@classmethod
def routing_uid(cls) -> str:
return cls.__name__
@classmethod
def get_step(cls, scenario: str, step: str) -> "ScenarioStep":
"""
This is a dynamic Step loader to avoid circular dependencies in scenario files
"""
# Just in case circular dependencies will be an issue again, this may help:
# https://stackoverflow.com/posts/36442015/revisions
try:
module = importlib.import_module("apps.slack.scenarios." + scenario)
return getattr(module, step)
except ImportError as e:
raise Exception(
"Check import spelling! Scenario: {}, Step:{}, Error: {}".format(
scenario, step, e
)
)
def open_warning_window(
self, payload: "EventPayload", warning_text: str, title: str | None = None
) -> None:
if title is None:
title = ":warning: Warning"
view = {
"type": "modal",
"callback_id": "warning",
"title": {
"type": "plain_text",
"text": title,
},
"close": {
"type": "plain_text",
"text": "Ok",
"emoji": True,
},
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": warning_text,
},
},
],
}
self._slack_client.views_open(trigger_id=payload["trigger_id"], view=view)
|
extractor | syfy | from __future__ import unicode_literals
from ..utils import smuggle_url, update_url_query
from .adobepass import AdobePassIE
class SyfyIE(AdobePassIE):
    """Extractor for syfy.com video pages (ThePlatform-hosted, with optional
    AdobePass TV-everywhere authentication)."""

    _VALID_URL = r"https?://(?:www\.)?syfy\.com/(?:[^/]+/)?videos/(?P<id>[^/?#]+)"
    _TESTS = [
        {
            "url": "http://www.syfy.com/theinternetruinedmylife/videos/the-internet-ruined-my-life-season-1-trailer",
            "info_dict": {
                "id": "2968097",
                "ext": "mp4",
                "title": "The Internet Ruined My Life: Season 1 Trailer",
                "description": "One tweet, one post, one click, can destroy everything.",
                "uploader": "NBCU-MPAT",
                "upload_date": "20170113",
                "timestamp": 1484345640,
            },
            "params": {
                # m3u8 download
                "skip_download": True,
            },
            "add_ie": ["ThePlatform"],
        }
    ]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Video metadata lives in the page's Drupal settings JSON blob.
        syfy_mpx = list(
            self._parse_json(
                self._search_regex(
                    r"jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);",
                    webpage,
                    "drupal settings",
                ),
                display_id,
            )["syfy"]["syfy_mpx"].values()
        )[0]
        video_id = syfy_mpx["mpxGUID"]
        title = syfy_mpx["episodeTitle"]
        query = {
            "mbr": "true",
            "manifest": "m3u",
        }
        # Entitled content requires an AdobePass auth token on the query.
        if syfy_mpx.get("entitlement") == "auth":
            resource = self._get_mvpd_resource(
                "syfy", title, video_id, syfy_mpx.get("mpxRating", "TV-14")
            )
            query["auth"] = self._extract_mvpd_auth(url, video_id, "syfy", resource)
        # Delegate the actual download to the ThePlatform extractor.
        return {
            "_type": "url_transparent",
            "ie_key": "ThePlatform",
            "url": smuggle_url(
                update_url_query(
                    self._proto_relative_url(syfy_mpx["releaseURL"]), query
                ),
                {"force_smil_url": True},
            ),
            "title": title,
            "id": video_id,
            "display_id": display_id,
        }
|
examples | chirp_channelize | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import sys
import time
import numpy
from gnuradio import blocks, filter, gr
from gnuradio.fft import window
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write(
"Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n"
)
sys.exit(1)
class pfb_top_block(gr.top_block):
    """Flowgraph: a VCO-driven chirp is channelized into M channels by a
    polyphase filterbank; vector sinks capture the input and every output."""

    def __init__(self):
        gr.top_block.__init__(self)

        self._N = 200000  # number of samples to use
        self._fs = 9000  # initial sampling rate
        self._M = 9  # Number of channels to channelize

        # Create a set of taps for the PFB channelizer
        self._taps = filter.firdes.low_pass_2(
            1, self._fs, 500, 20, attenuation_dB=10, window=window.WIN_BLACKMAN_hARRIS
        )

        # Calculate the number of taps per channel for our own information
        tpc = numpy.ceil(float(len(self._taps)) / float(self._M))
        print("Number of taps: ", len(self._taps))
        print("Number of channels: ", self._M)
        print("Taps per channel: ", tpc)

        # VCO control source: repeating sinusoid (repeated chirps) or a
        # single linear ramp (one chirp).
        repeated = True
        if repeated:
            self.vco_input = analog.sig_source_f(
                self._fs, analog.GR_SIN_WAVE, 0.25, 110
            )
        else:
            amp = 100
            data = numpy.arange(0, amp, amp / float(self._N))
            self.vco_input = blocks.vector_source_f(data, False)

        # Build a VCO controlled by either the sinusoid or single chirp tone
        # Then convert this to a complex signal
        self.vco = blocks.vco_f(self._fs, 225, 1)
        self.f2c = blocks.float_to_complex()

        # Limit the run to N samples.
        self.head = blocks.head(gr.sizeof_gr_complex, self._N)

        # Construct the channelizer filter
        self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps)

        # Construct a vector sink for the input signal to the channelizer
        self.snk_i = blocks.vector_sink_c()

        # Connect the blocks
        self.connect(self.vco_input, self.vco, self.f2c)
        self.connect(self.f2c, self.head, self.pfb)
        self.connect(self.f2c, self.snk_i)

        # Create a vector sink for each of M output channels of the filter and connect it
        self.snks = list()
        for i in range(self._M):
            self.snks.append(blocks.vector_sink_c())
            self.connect((self.pfb, i), self.snks[i])
def main():
    """Run the channelizer flowgraph, then plot the input and each output
    channel in both the frequency (PSD) and time domains."""
    tstart = time.time()
    tb = pfb_top_block()
    tb.run()
    tend = time.time()
    print("Run time: %f" % (tend - tstart))

    fig_in = pylab.figure(1, figsize=(16, 9), facecolor="w")
    fig1 = pylab.figure(2, figsize=(16, 9), facecolor="w")
    fig2 = pylab.figure(3, figsize=(16, 9), facecolor="w")
    fig3 = pylab.figure(4, figsize=(16, 9), facecolor="w")

    Ns = 650
    Ne = 20000
    fftlen = 8192
    winfunc = numpy.blackman
    fs = tb._fs

    # Plot the input signal on its own figure
    d = tb.snk_i.data()[Ns:Ne]
    spin_f = fig_in.add_subplot(2, 1, 1)
    X, freq = mlab.psd(
        d,
        NFFT=fftlen,
        # BUGFIX: noverlap must be an int; "fftlen / 4" is a float on Python 3.
        noverlap=fftlen // 4,
        Fs=fs,
        window=lambda w: w * winfunc(fftlen),
        scale_by_freq=True,
    )
    X_in = 10.0 * numpy.log10(abs(numpy.fft.fftshift(X)))
    f_in = numpy.arange(-fs / 2.0, fs / 2.0, fs / float(X_in.size))
    pin_f = spin_f.plot(f_in, X_in, "b")
    spin_f.set_xlim([min(f_in), max(f_in) + 1])
    spin_f.set_ylim([-200.0, 50.0])
    spin_f.set_title("Input Signal", weight="bold")
    spin_f.set_xlabel("Frequency (Hz)")
    spin_f.set_ylabel("Power (dBW)")

    Ts = 1.0 / fs
    Tmax = len(d) * Ts
    t_in = numpy.arange(0, Tmax, Ts)
    x_in = numpy.array(d)
    spin_t = fig_in.add_subplot(2, 1, 2)
    pin_t = spin_t.plot(t_in, x_in.real, "b")
    pin_t = spin_t.plot(t_in, x_in.imag, "r")
    spin_t.set_xlabel("Time (s)")
    spin_t.set_ylabel("Amplitude")

    # Arrange the per-channel subplots in a near-square grid.
    Ncols = int(numpy.floor(numpy.sqrt(tb._M)))
    Nrows = int(numpy.floor(tb._M / Ncols))
    if tb._M % Ncols != 0:
        Nrows += 1

    # Plot each of the channels outputs. Frequencies on Figure 2 and
    # time signals on Figure 3
    fs_o = tb._fs / tb._M
    Ts_o = 1.0 / fs_o
    Tmax_o = len(d) * Ts_o
    for i in range(len(tb.snks)):
        # remove issues with the transients at the beginning
        # also remove some corruption at the end of the stream
        # this is a bug, probably due to the corner cases
        d = tb.snks[i].data()[Ns:Ne]
        sp1_f = fig1.add_subplot(Nrows, Ncols, 1 + i)
        X, freq = mlab.psd(
            d,
            NFFT=fftlen,
            noverlap=fftlen // 4,  # BUGFIX: int, as above
            Fs=fs_o,
            window=lambda w: w * winfunc(fftlen),
            scale_by_freq=True,
        )
        X_o = 10.0 * numpy.log10(abs(X))
        f_o = freq
        p2_f = sp1_f.plot(f_o, X_o, "b")
        sp1_f.set_xlim([min(f_o), max(f_o) + 1])
        sp1_f.set_ylim([-200.0, 50.0])
        sp1_f.set_title(("Channel %d" % i), weight="bold")
        sp1_f.set_xlabel("Frequency (Hz)")
        sp1_f.set_ylabel("Power (dBW)")

        x_o = numpy.array(d)
        t_o = numpy.arange(0, Tmax_o, Ts_o)
        sp2_o = fig2.add_subplot(Nrows, Ncols, 1 + i)
        p2_o = sp2_o.plot(t_o, x_o.real, "b")
        p2_o = sp2_o.plot(t_o, x_o.imag, "r")
        sp2_o.set_xlim([min(t_o), max(t_o) + 1])
        sp2_o.set_ylim([-2, 2])
        sp2_o.set_title(("Channel %d" % i), weight="bold")
        sp2_o.set_xlabel("Time (s)")
        sp2_o.set_ylabel("Amplitude")

        # Overlay every channel's real part on one shared axis.
        sp3 = fig3.add_subplot(1, 1, 1)
        p3 = sp3.plot(t_o, x_o.real)
        sp3.set_xlim([min(t_o), max(t_o) + 1])
        sp3.set_ylim([-2, 2])
        sp3.set_title("All Channels")
        sp3.set_xlabel("Time (s)")
        sp3.set_ylabel("Amplitude")

    pylab.show()
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Allow a clean exit on Ctrl-C without a traceback.
        pass
|
uic-files | bookmark_manager_dialog_ui | # Form implementation generated from reading ui file './forms/bookmark_manager_dialog.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from . import main_window_view_rc
class Ui_Bookmark_Dialog(object):
    """pyuic5-generated UI for the bookmark manager dialog.

    Do not hand-edit behavior here — regenerate from
    forms/bookmark_manager_dialog.ui instead (changes will be lost).
    """

    def setupUi(self, Bookmark_Dialog):
        """Build the widget tree, layouts and buttons of the dialog."""
        Bookmark_Dialog.setObjectName("Bookmark_Dialog")
        Bookmark_Dialog.setWindowModality(QtCore.Qt.ApplicationModal)
        Bookmark_Dialog.resize(776, 441)
        sizePolicy = QtWidgets.QSizePolicy(
            QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred
        )
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(Bookmark_Dialog.sizePolicy().hasHeightForWidth())
        Bookmark_Dialog.setSizePolicy(sizePolicy)
        icon = QtGui.QIcon()
        icon.addPixmap(
            QtGui.QPixmap(":/others/pynocchio.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off
        )
        Bookmark_Dialog.setWindowIcon(icon)
        Bookmark_Dialog.setLocale(
            QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates)
        )
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(Bookmark_Dialog)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Table listing the saved bookmarks (sortable, row-selection).
        self.bookmark_table = QtWidgets.QTableView(Bookmark_Dialog)
        sizePolicy = QtWidgets.QSizePolicy(
            QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Expanding
        )
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(
            self.bookmark_table.sizePolicy().hasHeightForWidth()
        )
        self.bookmark_table.setSizePolicy(sizePolicy)
        self.bookmark_table.setAutoFillBackground(False)
        self.bookmark_table.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.bookmark_table.setAutoScroll(True)
        self.bookmark_table.setAutoScrollMargin(9)
        self.bookmark_table.setEditTriggers(
            QtWidgets.QAbstractItemView.AnyKeyPressed
            | QtWidgets.QAbstractItemView.EditKeyPressed
            | QtWidgets.QAbstractItemView.SelectedClicked
        )
        self.bookmark_table.setTabKeyNavigation(True)
        self.bookmark_table.setProperty("showDropIndicator", False)
        self.bookmark_table.setDragDropOverwriteMode(False)
        self.bookmark_table.setAlternatingRowColors(True)
        self.bookmark_table.setSelectionMode(
            QtWidgets.QAbstractItemView.ContiguousSelection
        )
        self.bookmark_table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.bookmark_table.setShowGrid(True)
        self.bookmark_table.setSortingEnabled(True)
        self.bookmark_table.setWordWrap(True)
        self.bookmark_table.setObjectName("bookmark_table")
        self.horizontalLayout.addWidget(self.bookmark_table)
        # Right-hand column: page preview image between two spacers.
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        spacerItem = QtWidgets.QSpacerItem(
            20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
        )
        self.verticalLayout_2.addItem(spacerItem)
        self.page_image_label = QtWidgets.QLabel(Bookmark_Dialog)
        self.page_image_label.setText("")
        self.page_image_label.setPixmap(QtGui.QPixmap(":/icons/pynocchio_icon.png"))
        self.page_image_label.setScaledContents(True)
        self.page_image_label.setAlignment(QtCore.Qt.AlignCenter)
        self.page_image_label.setObjectName("page_image_label")
        self.verticalLayout_2.addWidget(self.page_image_label)
        self.page_preview_label = QtWidgets.QLabel(Bookmark_Dialog)
        self.page_preview_label.setEnabled(True)
        font = QtGui.QFont()
        font.setItalic(True)
        self.page_preview_label.setFont(font)
        self.page_preview_label.setLocale(
            QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates)
        )
        self.page_preview_label.setAlignment(QtCore.Qt.AlignCenter)
        self.page_preview_label.setObjectName("page_preview_label")
        self.verticalLayout_2.addWidget(self.page_preview_label)
        spacerItem1 = QtWidgets.QSpacerItem(
            20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding
        )
        self.verticalLayout_2.addItem(spacerItem1)
        self.horizontalLayout.addLayout(self.verticalLayout_2)
        self.verticalLayout_3.addLayout(self.horizontalLayout)
        # Read-only path display below the table.
        self.line_edit_path = QtWidgets.QLineEdit(Bookmark_Dialog)
        self.line_edit_path.setReadOnly(True)
        self.line_edit_path.setObjectName("line_edit_path")
        self.verticalLayout_3.addWidget(self.line_edit_path)
        # Bottom button row: Remove | spacer | Cancel | Load.
        self.grid_layout = QtWidgets.QGridLayout()
        self.grid_layout.setObjectName("grid_layout")
        self.button_remove = QtWidgets.QPushButton(Bookmark_Dialog)
        self.button_remove.setEnabled(False)
        self.button_remove.setFocusPolicy(QtCore.Qt.NoFocus)
        self.button_remove.setLocale(
            QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates)
        )
        icon1 = QtGui.QIcon()
        icon1.addPixmap(
            QtGui.QPixmap(":/icons/icons/edit-delete.png"),
            QtGui.QIcon.Normal,
            QtGui.QIcon.Off,
        )
        self.button_remove.setIcon(icon1)
        self.button_remove.setDefault(False)
        self.button_remove.setObjectName("button_remove")
        self.grid_layout.addWidget(self.button_remove, 1, 1, 1, 1)
        spacerItem2 = QtWidgets.QSpacerItem(
            40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum
        )
        self.grid_layout.addItem(spacerItem2, 1, 2, 1, 1)
        self.button_cancel = QtWidgets.QPushButton(Bookmark_Dialog)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(
            QtGui.QPixmap(
                ":/icons/elementary3-icon-theme/actions/48/dialog-cancel.svg"
            ),
            QtGui.QIcon.Normal,
            QtGui.QIcon.Off,
        )
        self.button_cancel.setIcon(icon2)
        self.button_cancel.setObjectName("button_cancel")
        self.grid_layout.addWidget(self.button_cancel, 1, 3, 1, 1)
        self.button_load = QtWidgets.QPushButton(Bookmark_Dialog)
        self.button_load.setEnabled(False)
        icon3 = QtGui.QIcon()
        icon3.addPixmap(
            QtGui.QPixmap(":/icons/icons/archive-extract.png"),
            QtGui.QIcon.Normal,
            QtGui.QIcon.Off,
        )
        self.button_load.setIcon(icon3)
        self.button_load.setDefault(True)
        self.button_load.setObjectName("button_load")
        self.grid_layout.addWidget(self.button_load, 1, 4, 1, 1)
        self.verticalLayout_3.addLayout(self.grid_layout)
        self.line_edit_path.raise_()

        self.retranslateUi(Bookmark_Dialog)
        # Cancel simply closes the dialog; other buttons are wired elsewhere.
        self.button_cancel.clicked.connect(Bookmark_Dialog.close)
        QtCore.QMetaObject.connectSlotsByName(Bookmark_Dialog)

    def retranslateUi(self, Bookmark_Dialog):
        """Apply translatable texts to the dialog's widgets."""
        _translate = QtCore.QCoreApplication.translate
        Bookmark_Dialog.setWindowTitle(
            _translate("Bookmark_Dialog", "Bookmark manager")
        )
        self.page_preview_label.setText(_translate("Bookmark_Dialog", "Page Preview"))
        self.button_remove.setText(_translate("Bookmark_Dialog", "Remove"))
        self.button_cancel.setText(_translate("Bookmark_Dialog", "Cancel"))
        self.button_load.setText(_translate("Bookmark_Dialog", "Load"))
|
utils | bytestringio | #!usr/bin/Python
# -*- coding:utf-8 -*-
#仿照StringIO创建的类二进制文件内存读写对象
import os
import sys
from calibre.constants import preferred_encoding
def _complain_ifclosed(closed):
if closed:
raise ValueError, "I/O operation on closed file"
class byteStringIO:
    """In-memory binary file-like object, modeled on StringIO.

    Writes are collected in ``buflist`` and merged into ``buf`` lazily
    (on seek/read/getvalue) to avoid quadratic concatenation.
    """

    def __init__(self, buf=''):
        # NOTE: ``buf`` is accepted for StringIO interface compatibility but
        # ignored, preserving the original behavior.
        self.buf = None      # merged byte buffer (None until first merge)
        self.buflist = []    # chunks written since the last merge
        self.len = 0
        self.closed = False
        self.pos = 0

    def _merge_buflist(self):
        """Fold any pending chunks from buflist into buf."""
        for chunk in self.buflist:
            if not self.buf:
                self.buf = chunk
            else:
                self.buf += chunk
        self.buflist = []

    def __iter__(self):
        return self

    def next(self):
        """Iterate one byte at a time (Python 2 iterator protocol)."""
        _complain_ifclosed(self.closed)
        r = self.read(1)
        if not r:
            raise StopIteration
        return r

    __next__ = next  # Python 3 iterator protocol (backward-compatible alias)

    def close(self):
        """Mark the object closed and release its buffers."""
        if not self.closed:
            self.closed = True
            del self.buflist, self.buf

    def seek(self, pos, mode=0):
        """Move the position; mode 0=absolute, 1=relative, 2=from end."""
        _complain_ifclosed(self.closed)
        self._merge_buflist()
        if mode == 1:
            pos += self.pos
        elif mode == 2:
            pos += self.len
        self.pos = max(0, pos)

    def tell(self):
        """Return the file's current position."""
        _complain_ifclosed(self.closed)
        return self.pos

    def read(self, n=-1):
        """Read up to n bytes (everything remaining if n is None/negative).

        BUGFIX: removed the stray ``if not s: return`` line — ``s`` was never
        defined here (copy-paste from write()) and raised NameError on every
        call.
        """
        _complain_ifclosed(self.closed)
        self._merge_buflist()
        if n is None or n < 0:
            newpos = self.len
        else:
            newpos = min(self.pos + n, self.len)
        # Empty object: buf is still None, so return an empty byte string.
        r = b'' if self.buf is None else self.buf[self.pos:newpos]
        self.pos = newpos
        return r

    def write(self, s):
        """Write bytes (or text, encoded) at the current position."""
        _complain_ifclosed(self.closed)
        if not s:
            return
        # Accept text by encoding it; bytes/bytearray pass through unchanged.
        # (Replaces the Python-2-only ``isinstance(s, unicode)`` check with an
        # equivalent test that also works on Python 3.)
        if not isinstance(s, (bytes, bytearray)):
            s = s.encode(preferred_encoding)
        s = bytearray(s)
        spos = self.pos
        slen = self.len
        if spos == slen:
            # Fast path: appending at the end.
            self.buflist.append(s)
            self.len = self.pos = spos + len(s)
            return
        if spos > slen:
            # Sparse write past the end: zero-fill the gap (bytes, not str).
            self.buflist.append(bytearray(b'\0' * (spos - slen)))
            slen = spos
        newpos = spos + len(s)
        if spos < slen:
            # Overwrite in the middle: merge first, then splice around s.
            self._merge_buflist()
            self.buflist = [self.buf[:spos], s, self.buf[newpos:]]
            self.buf = None
            if newpos > slen:
                slen = newpos
        else:
            self.buflist.append(s)
            slen = newpos
        self.len = slen
        self.pos = newpos

    def flush(self):
        """No-op (data lives in memory); kept for file-interface parity."""
        _complain_ifclosed(self.closed)

    def getvalue(self):
        """Return the full merged contents written so far."""
        _complain_ifclosed(self.closed)
        self._merge_buflist()
        return self.buf
|
nyaa | template_utils | import functools
import os.path
import re
from datetime import datetime
from email.utils import formatdate
import flask
from nyaa.backend import get_category_id_map
from nyaa.torrents import create_magnet
from werkzeug.urls import url_encode
app = flask.current_app
# Blueprint used purely to register the template globals/filters below.
bp = flask.Blueprint("template-utils", __name__)

_static_cache = {}  # For static_cachebuster
# ######################## CONTEXT PROCESSORS ########################
# For processing ES links
@bp.app_context_processor
def create_magnet_from_es_torrent():
    """Expose create_magnet to templates as "create_magnet_from_es_torrent";
    ES hits are duck-compatible with torrent rows, so it works unchanged."""
    return {"create_magnet_from_es_torrent": create_magnet}
# ######################### TEMPLATE GLOBALS #########################
# Direct reference to flask's url_for, used by the cached wrapper below and
# as its fallback.
flask_url_for = flask.url_for


@functools.lru_cache(maxsize=1024 * 4)
def _caching_url_for(endpoint, **values):
    # Memoized url_for; only callable with hashable args (see caching_url_for).
    return flask_url_for(endpoint, **values)
@bp.app_template_global()
def caching_url_for(*args, **kwargs):
    """url_for with an LRU cache, falling back to the plain flask url_for
    when the arguments are unhashable."""
    try:
        # lru_cache requires the arguments to be hashable.
        # Majority of the time, they are! But there are some small edge-cases,
        # like our copypasted pagination, parameters can be lists.
        # Attempt caching first:
        return _caching_url_for(*args, **kwargs)
    except TypeError:
        # Then fall back to the original url_for.
        # We could convert the lists to tuples, but the savings are marginal.
        return flask_url_for(*args, **kwargs)
@bp.app_template_global()
def static_cachebuster(filename):
    """Adds a ?t=<mtime> cachebuster to the given path, if the file exists.
    Results are cached in memory and persist until app restart!"""
    # Instead of timestamps, we could use commit hashes (we already load it in __init__)
    # But that'd mean every static resource would get cache busted. This lets unchanged items
    # stay in the cache.
    if app.debug:
        # Do not bust cache on debug (helps debugging)
        return flask.url_for("static", filename=filename)

    # Compute and memoize the file mtime on first use (None if missing).
    if filename not in _static_cache:
        file_path = os.path.join(app.static_folder, filename)
        mtime = int(os.path.getmtime(file_path)) if os.path.exists(file_path) else None
        _static_cache[filename] = mtime

    return flask.url_for("static", filename=filename, t=_static_cache[filename])
@bp.app_template_global()
def modify_query(**new_values):
    """Return the current request path with its query string updated by
    new_values; the pagination argument "p" is always dropped."""
    query_args = flask.request.args.copy()
    query_args.pop("p", None)
    for key, value in new_values.items():
        query_args[key] = value
    return f"{flask.request.path}?{url_encode(query_args)}"
@bp.app_template_global()
def filter_truthy(input_list):
    """Drop falsy items from a list; used by search_results.html because
    Jinja2 lacks list comprehensions."""
    return list(filter(None, input_list))
@bp.app_template_global()
def category_name(cat_id):
    """Given a category id (eg. 1_2), returns a category name (eg. Anime - English-translated)"""
    name_parts = get_category_id_map().get(cat_id, ["???"])
    return " - ".join(name_parts)
# ######################### TEMPLATE FILTERS #########################
@bp.app_template_filter("utc_time")
def get_utc_timestamp(datetime_str):
    """Returns a UTC POSIX timestamp, as seconds"""
    epoch = datetime.utcfromtimestamp(0)
    parsed = datetime.strptime(datetime_str, "%Y-%m-%dT%H:%M:%S")
    return int((parsed - epoch).total_seconds())
@bp.app_template_filter("utc_timestamp")
def get_utc_timestamp_seconds(datetime_instance):
    """Returns a UTC POSIX timestamp, as seconds"""
    delta = datetime_instance - datetime.utcfromtimestamp(0)
    return int(delta.total_seconds())
@bp.app_template_filter("display_time")
def get_display_time(datetime_str):
    """Reformat an ISO-ish timestamp string as "YYYY-MM-DD HH:MM"."""
    parsed = datetime.strptime(datetime_str, "%Y-%m-%dT%H:%M:%S")
    return parsed.strftime("%Y-%m-%d %H:%M")
@bp.app_template_filter("rfc822")
def _jinja2_filter_rfc822(date, fmt=None):
    # Format a datetime as an RFC 822 date string.
    # ``fmt`` is unused; kept for filter-signature compatibility.
    return formatdate(date.timestamp())
@bp.app_template_filter("rfc822_es")
def _jinja2_filter_rfc822_es(datestr, fmt=None):
    """Parse an ES-style datetime string and format it as an RFC 822 date."""
    parsed = datetime.strptime(datestr, "%Y-%m-%dT%H:%M:%S")
    return formatdate(parsed.timestamp())
@bp.app_template_filter()
def timesince(dt, default="just now"):
    """
    Returns string representing "time since" e.g.
    3 minutes ago, 5 hours ago etc.
    Date and time (UTC) are returned if older than 1 day.
    """
    delta = datetime.utcnow() - dt

    # Anything older than a day gets an absolute timestamp instead.
    if delta.days >= 1:
        return dt.strftime("%Y-%m-%d %H:%M UTC")

    buckets = (
        (delta.days, "day", "days"),
        (delta.seconds / 3600, "hour", "hours"),
        (delta.seconds / 60, "minute", "minutes"),
        (delta.seconds, "second", "seconds"),
    )
    for amount, singular, plural in buckets:
        if amount >= 1:
            unit = singular if int(amount) == 1 else plural
            return "%d %s ago" % (amount, unit)
    return default
@bp.app_template_filter()
def regex_replace(s, find, replace):
    """A non-optimal implementation of a regex filter"""
    # Recompiles the pattern on every call; fine for template-scale use.
    return re.sub(find, replace, s)
|
models | schedule_export_auth_token | import typing
from apps.auth_token import constants, crypto
from apps.auth_token.models.base_auth_token import BaseAuthToken
from apps.schedules.models import OnCallSchedule
from apps.user_management.models import Organization, User
from django.db import models
class ScheduleExportAuthToken(BaseAuthToken):
    """Auth token granting access to a schedule export.

    ``schedule`` is nullable, so a token may exist without being bound to a
    specific schedule.
    """

    objects: models.Manager["ScheduleExportAuthToken"]

    class Meta:
        unique_together = ("user", "organization", "schedule")

    user = models.ForeignKey(
        to=User,
        null=False,
        blank=False,
        related_name="schedule_export_token",
        on_delete=models.CASCADE,
    )
    organization = models.ForeignKey(
        to=Organization,
        null=False,
        blank=False,
        related_name="schedule_export_token",
        on_delete=models.CASCADE,
    )
    # Nullable: the token may be created without a specific schedule.
    schedule = models.ForeignKey(
        to=OnCallSchedule,
        null=True,
        blank=True,
        related_name="schedule_export_token",
        on_delete=models.CASCADE,
    )
    active = models.BooleanField(default=True)

    @classmethod
    def create_auth_token(
        cls,
        user: User,
        organization: Organization,
        schedule: typing.Optional[OnCallSchedule] = None,
    ) -> typing.Tuple["ScheduleExportAuthToken", str]:
        """Create a token row and return (instance, plaintext token string).

        Only the hash (digest) and a short prefix (token_key) are persisted;
        the full token string is available to the caller exactly once.
        """
        token_string = crypto.generate_schedule_token_string()
        digest = crypto.hash_token_string(token_string)
        instance = cls.objects.create(
            token_key=token_string[: constants.TOKEN_KEY_LENGTH],
            digest=digest,
            user=user,
            organization=organization,
            schedule=schedule,
        )
        return instance, token_string

    # Insight logs
    @property
    def insight_logs_type_verbal(self):
        return "schedule_export_token"

    @property
    def insight_logs_verbal(self):
        # BUGFIX: ``schedule`` is nullable — guard against AttributeError for
        # tokens that are not bound to a specific schedule.
        if self.schedule is None:
            return "Schedule export token"
        return f"Schedule export token for {self.schedule.insight_logs_verbal}"

    @property
    def insight_logs_serialized(self):
        # Schedule export tokens are not modifiable, return empty dict to implement InsightLoggable interface
        return {}

    @property
    def insight_logs_metadata(self):
        return {}
|
extractor | ninegag | from __future__ import unicode_literals
from ..utils import (
ExtractorError,
determine_ext,
int_or_none,
try_get,
unescapeHTML,
url_or_none,
)
from .common import InfoExtractor
class NineGagIE(InfoExtractor):
    """Extractor for 9gag.com posts; only "Animated" (video) posts have
    downloadable media."""

    IE_NAME = "9gag"
    _VALID_URL = r"https?://(?:www\.)?9gag\.com/gag/(?P<id>[^/?&#]+)"
    _TESTS = [
        {
            "url": "https://9gag.com/gag/ae5Ag7B",
            "info_dict": {
                "id": "ae5Ag7B",
                "ext": "mp4",
                "title": "Capybara Agility Training",
                "upload_date": "20191108",
                "timestamp": 1573237208,
                "categories": ["Awesome"],
                "tags": ["Weimaraner", "American Pit Bull Terrier"],
                "duration": 44,
                "like_count": int,
                "dislike_count": int,
                "comment_count": int,
            },
        },
        {
            # HTML escaped title
            "url": "https://9gag.com/gag/av5nvyb",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        post_id = self._match_id(url)
        # Post metadata comes from 9gag's JSON API.
        post = self._download_json(
            "https://9gag.com/v1/post", post_id, query={"id": post_id}
        )["data"]["post"]
        if post.get("type") != "Animated":
            raise ExtractorError(
                "The given url does not contain a video", expected=True
            )
        title = unescapeHTML(post["title"])
        duration = None
        formats = []
        thumbnails = []
        # The "images" dict mixes still thumbnails (jpg/png) and video
        # renditions (webm/mp4); sort each entry into the matching list.
        for key, image in (post.get("images") or {}).items():
            image_url = url_or_none(image.get("url"))
            if not image_url:
                continue
            ext = determine_ext(image_url)
            image_id = key.strip("image")
            common = {
                "url": image_url,
                "width": int_or_none(image.get("width")),
                "height": int_or_none(image.get("height")),
            }
            if ext in ("jpg", "png"):
                # Still image: also pick up the webp variant if present.
                webp_url = image.get("webpUrl")
                if webp_url:
                    t = common.copy()
                    t.update(
                        {
                            "id": image_id + "-webp",
                            "url": webp_url,
                        }
                    )
                    thumbnails.append(t)
                common.update(
                    {
                        "id": image_id,
                        "ext": ext,
                    }
                )
                thumbnails.append(common)
            elif ext in ("webm", "mp4"):
                if not duration:
                    duration = int_or_none(image.get("duration"))
                common["acodec"] = "none" if image.get("hasAudio") == 0 else None
                # Codec-specific renditions are exposed via "<codec>Url" keys.
                for vcodec in ("vp8", "vp9", "h265"):
                    c_url = image.get(vcodec + "Url")
                    if not c_url:
                        continue
                    c_f = common.copy()
                    c_f.update(
                        {
                            "format_id": image_id + "-" + vcodec,
                            "url": c_url,
                            "vcodec": vcodec,
                        }
                    )
                    formats.append(c_f)
                common.update(
                    {
                        "ext": ext,
                        "format_id": image_id,
                    }
                )
                formats.append(common)
        self._sort_formats(formats)

        section = try_get(post, lambda x: x["postSection"]["name"])

        tags = None
        post_tags = post.get("tags")
        if post_tags:
            tags = []
            for tag in post_tags:
                tag_key = tag.get("key")
                if not tag_key:
                    continue
                tags.append(tag_key)

        # Counters are exposed as "<name>Count" keys in the API payload.
        get_count = lambda x: int_or_none(post.get(x + "Count"))

        return {
            "id": post_id,
            "title": title,
            "timestamp": int_or_none(post.get("creationTs")),
            "duration": duration,
            "formats": formats,
            "thumbnails": thumbnails,
            "like_count": get_count("upVote"),
            "dislike_count": get_count("downVote"),
            "comment_count": get_count("comments"),
            "age_limit": 18 if post.get("nsfw") == 1 else None,
            "categories": [section] if section else None,
            "tags": tags,
        }
|
tunnel | tunnel_component | from ipv8.dht.provider import DHTCommunityProvider
from ipv8.messaging.anonymization.community import TunnelSettings
from tribler.core.components.bandwidth_accounting.bandwidth_accounting_component import (
BandwidthAccountingComponent,
)
from tribler.core.components.component import Component
from tribler.core.components.ipv8.ipv8_component import INFINITE, Ipv8Component
from tribler.core.components.libtorrent.libtorrent_component import LibtorrentComponent
from tribler.core.components.socks_servers.socks_servers_component import (
SocksServersComponent,
)
from tribler.core.components.tunnel.community.discovery import GoldenRatioStrategy
from tribler.core.components.tunnel.community.tunnel_community import (
TriblerTunnelCommunity,
TriblerTunnelTestnetCommunity,
)
class TunnelsComponent(Component):
    """Tribler component that builds and registers the tunnel community on IPv8."""

    community: TriblerTunnelCommunity = None
    _ipv8_component: Ipv8Component = None

    async def run(self):
        """Resolve collaborating components and start the tunnel community.

        IPv8 is required; bandwidth accounting, libtorrent and SOCKS servers
        are optional (None is passed on when a component is absent).
        """
        await super().run()
        self._ipv8_component = await self.require_component(Ipv8Component)
        dht_discovery_community = self._ipv8_component.dht_discovery_community

        bandwidth_component = await self.get_component(BandwidthAccountingComponent)
        bandwidth_community = (
            bandwidth_component.community if bandwidth_component else None
        )
        download_component = await self.get_component(LibtorrentComponent)
        download_manager = (
            download_component.download_manager if download_component else None
        )
        socks_servers_component = await self.get_component(SocksServersComponent)
        socks_servers = (
            socks_servers_component.socks_servers if socks_servers_component else None
        )

        settings = TunnelSettings()
        config = self.session.config
        settings.min_circuits = config.tunnel_community.min_circuits
        settings.max_circuits = config.tunnel_community.max_circuits

        # Testnet flag (global or community-specific) selects the testnet class.
        if config.general.testnet or config.tunnel_community.testnet:
            tunnel_cls = TriblerTunnelTestnetCommunity
        else:
            tunnel_cls = TriblerTunnelCommunity

        provider = (
            DHTCommunityProvider(dht_discovery_community, config.ipv8.port)
            if dht_discovery_community
            else None
        )
        exitnode_cache = config.state_dir / "exitnode_cache.dat"

        # TODO: decouple bandwidth community and dlmgr to initiate later
        self.community = tunnel_cls(
            self._ipv8_component.peer,
            self._ipv8_component.ipv8.endpoint,
            self._ipv8_component.ipv8.network,
            socks_servers=socks_servers,
            config=config.tunnel_community,
            notifier=self.session.notifier,
            dlmgr=download_manager,
            bandwidth_community=bandwidth_community,
            dht_provider=provider,
            exitnode_cache=exitnode_cache,
            settings=settings,
        )

        self._ipv8_component.initialise_community_by_default(self.community)
        self._ipv8_component.ipv8.add_strategy(
            self.community, GoldenRatioStrategy(self.community), INFINITE
        )

    async def shutdown(self):
        """Unload the community from IPv8 when the component shuts down."""
        await super().shutdown()
        if self._ipv8_component and self.community:
            await self._ipv8_component.unload_community(self.community)
|
model | package_relationship | # encoding: utf-8
from __future__ import annotations
from typing import Any, Optional, cast
import ckan.model.core as core
import ckan.model.domain_object as domain_object
import ckan.model.meta as meta
import ckan.model.package as _package
import ckan.model.types as _types
from ckan.model import package as _package
from ckan.types import Query
from sqlalchemy import Column, ForeignKey, Table, orm, types
from typing_extensions import Self
# i18n only works when this is run as part of pylons,
# which isn't the case for paster commands.
try:
    from ckan.common import _

    # Probe call: raises when i18n is not configured (e.g. outside a request).
    _()
except Exception:
    # Narrowed from a bare `except:`, which would also swallow SystemExit and
    # KeyboardInterrupt. ImportError (CKAN unavailable) and the probe-call
    # failure are both Exception subclasses, so behavior is preserved.
    def _(*args: Any, **kwargs: Any) -> str:
        """Identity fallback for gettext: return the message untranslated."""
        return args[0]
__all__ = ["PackageRelationship", "package_relationship_table"]
# SQLAlchemy table backing PackageRelationship: a directed, typed edge
# between two packages (subject -> object).
package_relationship_table = Table(
    "package_relationship",
    meta.metadata,
    Column("id", types.UnicodeText, primary_key=True, default=_types.make_uuid),
    # the two endpoints of the relationship
    Column("subject_package_id", types.UnicodeText, ForeignKey("package.id")),
    Column("object_package_id", types.UnicodeText, ForeignKey("package.id")),
    # always stored as the "forward" type name, e.g. "child_of"
    Column("type", types.UnicodeText),
    Column("comment", types.UnicodeText),
    Column("state", types.UnicodeText, default=core.State.ACTIVE),
)
class PackageRelationship(core.StatefulObjectMixin, domain_object.DomainObject):
    """A directed, typed link between two packages.

    The rule with PackageRelationships is that they are stored in the model
    always as the "forward" relationship - i.e. "child_of" but never
    as "parent_of". However, the model functions provide the relationships
    from both packages in the relationship and the type is swapped from
    forward to reverse accordingly, for meaningful display to the user."""

    id: str
    subject_package_id: str
    object_package_id: str
    type: str
    comment: str
    state: str
    object: _package.Package
    subject: _package.Package
    all_types: Optional[list[str]]
    fwd_types: Optional[list[str]]
    rev_types: Optional[list[str]]
    # List of (type, corresponding_reverse_type)
    # e.g. (A "depends_on" B, B has a "dependency_of" A)
    # don't forget to add specs to Solr's schema.xml
    types: list[tuple[str, str]] = [
        ("depends_on", "dependency_of"),
        ("derives_from", "has_derivation"),
        ("links_to", "linked_from"),
        ("child_of", "parent_of"),
    ]
    types_printable: list[tuple[str, str]] = [
        (_("depends on %s"), _("is a dependency of %s")),
        (_("derives from %s"), _("has derivation %s")),
        (_("links to %s"), _("is linked from %s")),
        (_("is a child of %s"), _("is a parent of %s")),
    ]
    inferred_types_printable: dict[str, str] = {"sibling": _("has sibling %s")}

    def __repr__(self):
        # a leading "*" marks relationships that are not in the ACTIVE state
        marker = "*" if cast(str, self.active) != core.State.ACTIVE else ""
        return "<%sPackageRelationship %s %s %s>" % (
            marker,
            self.subject.name,
            self.type,
            self.object.name,
        )

    def as_dict(
        self, package: Optional[_package.Package] = None, ref_package_by: str = "id"
    ) -> dict[str, str]:
        """Return full relationship info as a dict, optionally from the
        point of view of the given package.

        e.g. {'subject':u'annakarenina',
              'type':u'depends_on',
              'object':u'warandpeace',
              'comment':u'Since 1843'}"""
        subject_pkg = self.subject
        object_pkg = self.object
        rel_type = self.type
        if package and package == object_pkg:
            # viewed from the object side: swap the ends and reverse the type
            subject_pkg = self.object
            object_pkg = self.subject
            rel_type = self.forward_to_reverse_type(self.type)
        return {
            "subject": getattr(subject_pkg, ref_package_by),
            "type": rel_type,
            "object": getattr(object_pkg, ref_package_by),
            "comment": self.comment,
        }

    def as_tuple(self, package: _package.Package) -> tuple[str, _package.Package]:
        """Return (type, other_package) from the point of view of `package`.

        e.g. rel.as_tuple(warandpeace) gives (u'depends_on', annakarenina)
        meaning warandpeace depends_on annakarenina."""
        assert isinstance(package, _package.Package), package
        if self.subject == package:
            return (self.type, self.object)
        if self.object == package:
            return (self.forward_to_reverse_type(self.type), self.subject)
        # FIXME do we want a more specific error
        raise Exception(
            "Package %s is not in this relationship: %s" % (package, self)
        )

    @classmethod
    def by_subject(cls, package: _package.Package) -> Query[Self]:
        """Query relationships whose subject end is `package`."""
        query = meta.Session.query(cls)
        return query.filter(cls.subject_package_id == package.id)

    @classmethod
    def by_object(cls, package: _package.Package) -> Query[Self]:
        """Query relationships whose object end is `package`."""
        query = meta.Session.query(cls)
        return query.filter(cls.object_package_id == package.id)

    @classmethod
    def get_forward_types(cls) -> list[str]:
        """Return (and lazily cache) the forward type names."""
        if not hasattr(cls, "fwd_types"):
            cls.fwd_types = [pair[0] for pair in cls.types]
        assert cls.fwd_types is not None
        return cls.fwd_types

    @classmethod
    def get_reverse_types(cls) -> list[str]:
        """Return (and lazily cache) the reverse type names."""
        if not hasattr(cls, "rev_types"):
            cls.rev_types = [pair[1] for pair in cls.types]
        assert cls.rev_types is not None
        return cls.rev_types

    @classmethod
    def get_all_types(cls) -> list[str]:
        """Return (and lazily cache) forward and reverse names, pairwise interleaved."""
        if not hasattr(cls, "all_types"):
            cls.all_types = [name for pair in cls.types for name in pair]
        assert cls.all_types is not None
        return cls.all_types

    @classmethod
    def reverse_to_forward_type(cls, reverse_type: str) -> str:
        """Map a reverse type name to its forward counterpart."""
        for forward, reverse in cls.types:
            if reverse == reverse_type:
                return forward
        assert False, f"Relationship {reverse_type} is not registered"

    @classmethod
    def forward_to_reverse_type(cls, forward_type: str) -> str:
        """Map a forward type name to its reverse counterpart."""
        for forward, reverse in cls.types:
            if forward == forward_type:
                return reverse
        assert False, f"Relationship {forward_type} is not registered"

    @classmethod
    def reverse_type(cls, forward_or_reverse_type: str) -> str:
        """Map a type name to its opposite, whichever direction it is."""
        for forward, reverse in cls.types:
            if forward == forward_or_reverse_type:
                return reverse
            if reverse == forward_or_reverse_type:
                return forward
        assert False, f"Relationship {forward_or_reverse_type} is not registered"

    @classmethod
    def make_type_printable(cls, type_: str) -> str:
        """Return the translatable display string for a (forward or reverse) type."""
        for pair, printable_pair in zip(cls.types, cls.types_printable):
            for name, printable in zip(pair, printable_pair):
                if type_ == name:
                    return printable
        raise TypeError(type_)
# Map PackageRelationship onto its table; the two relations resolve the
# endpoint packages and add reverse collections on Package.
meta.mapper(
    PackageRelationship,
    package_relationship_table,
    properties={
        "subject": orm.relation(
            _package.Package,
            primaryjoin=package_relationship_table.c["subject_package_id"]
            == _package.Package.id,
            backref="relationships_as_subject",
        ),
        "object": orm.relation(
            _package.Package,
            primaryjoin=package_relationship_table.c["object_package_id"]
            == _package.Package.id,
            backref="relationships_as_object",
        ),
    },
)
|
versions | 325b88f66322_ | """empty message
Revision ID: 325b88f66322
Revises: dc8c1be855c
Create Date: 2015-01-10 21:28:10.493601
"""
# revision identifiers, used by Alembic.
revision = "325b88f66322"
down_revision = "dc8c1be855c"
import sqlalchemy as sa
from alembic import op
def upgrade():
    """Create the ``users`` table and link ``forms.owner_id`` to it."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "users",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("email", sa.String(length=50), nullable=True),
        sa.Column("password", sa.String(length=100), nullable=True),
        sa.Column("upgraded", sa.Boolean(), nullable=True),
        sa.Column("registered_on", sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index(op.f("ix_users_email"), "users", ["email"], unique=True)
    op.add_column("forms", sa.Column("owner_id", sa.Integer(), nullable=True))
    # NOTE(review): the FK is created with name=None, so the backend picks a
    # name; downgrade() then cannot drop it by name. Consider giving it an
    # explicit name (and using the same name in downgrade) — TODO confirm.
    op.create_foreign_key(None, "forms", "users", ["owner_id"], ["id"])
    ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the FK and owner_id column, then the users table."""
    ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): Alembic's drop_constraint requires a constraint name;
    # passing None raises at runtime, so this downgrade likely fails until
    # the real FK name is supplied — TODO confirm against the target DB.
    op.drop_constraint(None, "forms", type_="foreignkey")
    op.drop_column("forms", "owner_id")
    op.drop_index(op.f("ix_users_email"), table_name="users")
    op.drop_table("users")
    ### end Alembic commands ###
|
models | custom_on_call_shift | import copy
import datetime
import itertools
import logging
import typing
from calendar import monthrange
from uuid import uuid4
import pytz
from apps.schedules.tasks import (
drop_cached_ical_task,
refresh_ical_final_schedule,
schedule_notify_about_empty_shifts_in_schedule,
schedule_notify_about_gaps_in_schedule,
)
from apps.user_management.models import User
from common.public_primary_keys import (
generate_public_primary_key,
increase_public_primary_key_length,
)
from dateutil import relativedelta
from django.conf import settings
from django.core.validators import MinLengthValidator
from django.db import models, transaction
from django.db.models import JSONField
from django.forms.models import model_to_dict
from django.utils import timezone
from django.utils.functional import cached_property
from icalendar.cal import Event
from recurring_ical_events import UnfoldableCalendar
if typing.TYPE_CHECKING:
from apps.schedules.models import OnCallSchedule
from django.db.models.manager import RelatedManager
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def generate_public_primary_key_for_custom_oncall_shift():
    """Generate a unique public primary key with the "O" (on-call shift) prefix.

    On a collision the key length is grown until an unused value is found.
    """
    prefix = "O"
    candidate = generate_public_primary_key(prefix)
    attempt = 0
    while CustomOnCallShift.objects.filter(public_primary_key=candidate).exists():
        candidate = increase_public_primary_key_length(
            failure_counter=attempt,
            prefix=prefix,
            model_name="CustomOnCallShift",
        )
        attempt += 1
    return candidate
class CustomOnCallShift(models.Model):
    """A single on-call shift definition (rotation, override or one-off event).

    Shifts are rendered to iCal events (see ``convert_to_ical``), which the
    schedule code then unfolds into concrete on-call periods.
    """

    # reverse side of the `updated_shift` OneToOneField (set via related_name)
    parent_shift: typing.Optional["CustomOnCallShift"]
    schedules: "RelatedManager['OnCallSchedule']"
    (
        FREQUENCY_DAILY,
        FREQUENCY_WEEKLY,
        FREQUENCY_MONTHLY,
        FREQUENCY_HOURLY,
    ) = range(4)
    FREQUENCY_CHOICES = (
        (FREQUENCY_HOURLY, "Hourly"),
        (FREQUENCY_DAILY, "Daily"),
        (FREQUENCY_WEEKLY, "Weekly"),
        (FREQUENCY_MONTHLY, "Monthly"),
    )
    # frequency labels used by the public API
    PUBLIC_FREQUENCY_CHOICES_MAP = {
        FREQUENCY_HOURLY: "hourly",
        FREQUENCY_DAILY: "daily",
        FREQUENCY_WEEKLY: "weekly",
        FREQUENCY_MONTHLY: "monthly",
    }
    # frequency labels used by the web UI
    WEB_FREQUENCY_CHOICES_MAP = {
        FREQUENCY_HOURLY: "hours",
        FREQUENCY_DAILY: "days",
        FREQUENCY_WEEKLY: "weeks",
        FREQUENCY_MONTHLY: "months",
    }
    (
        TYPE_SINGLE_EVENT,
        TYPE_RECURRENT_EVENT,
        TYPE_ROLLING_USERS_EVENT,
        TYPE_OVERRIDE,
    ) = range(4)
    TYPE_CHOICES = (
        (TYPE_SINGLE_EVENT, "Single event"),
        (TYPE_RECURRENT_EVENT, "Recurrent event"),
        (TYPE_ROLLING_USERS_EVENT, "Rolling users"),
        (TYPE_OVERRIDE, "Override"),
    )
    PUBLIC_TYPE_CHOICES_MAP = {
        TYPE_SINGLE_EVENT: "single_event",
        TYPE_RECURRENT_EVENT: "recurrent_event",
        TYPE_ROLLING_USERS_EVENT: "rolling_users",
        TYPE_OVERRIDE: "override",
    }
    # shift types that can be created/edited from the web UI
    WEB_TYPES = (
        TYPE_ROLLING_USERS_EVENT,
        TYPE_OVERRIDE,
    )
    (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
    WEEKDAY_CHOICES = (
        (MONDAY, "Monday"),
        (TUESDAY, "Tuesday"),
        (WEDNESDAY, "Wednesday"),
        (THURSDAY, "Thursday"),
        (FRIDAY, "Friday"),
        (SATURDAY, "Saturday"),
        (SUNDAY, "Sunday"),
    )
    # weekday index -> iCal two-letter day code
    ICAL_WEEKDAY_MAP = {
        MONDAY: "MO",
        TUESDAY: "TU",
        WEDNESDAY: "WE",
        THURSDAY: "TH",
        FRIDAY: "FR",
        SATURDAY: "SA",
        SUNDAY: "SU",
    }
    ICAL_WEEKDAY_REVERSE_MAP = {v: k for k, v in ICAL_WEEKDAY_MAP.items()}
    WEB_WEEKDAY_MAP = {
        "MO": "Monday",
        "TU": "Tuesday",
        "WE": "Wednesday",
        "TH": "Thursday",
        "FR": "Friday",
        "SA": "Saturday",
        "SU": "Sunday",
    }
    (
        SOURCE_WEB,
        SOURCE_API,
        SOURCE_SLACK,
        SOURCE_TERRAFORM,
    ) = range(4)
    SOURCE_CHOICES = (
        (SOURCE_WEB, "web"),
        (SOURCE_API, "api"),
        (SOURCE_SLACK, "slack"),
        (SOURCE_TERRAFORM, "terraform"),
    )
    public_primary_key = models.CharField(
        max_length=20,
        validators=[MinLengthValidator(settings.PUBLIC_PRIMARY_KEY_MIN_LENGTH + 1)],
        unique=True,
        default=generate_public_primary_key_for_custom_oncall_shift,
    )
    organization = models.ForeignKey(
        "user_management.Organization",
        on_delete=models.CASCADE,
        related_name="custom_on_call_shifts",
    )
    team = models.ForeignKey(
        "user_management.Team",
        on_delete=models.SET_NULL,
        related_name="custom_on_call_shifts",
        null=True,
        default=None,
    )
    # only set for shifts belonging to a web-created schedule
    schedule = models.ForeignKey(
        "schedules.OnCallSchedule",
        on_delete=models.CASCADE,
        related_name="custom_shifts",
        null=True,
        default=None,
    )
    name = models.CharField(max_length=200, null=True, default=None)
    time_zone = models.CharField(max_length=100, null=True, default=None)
    source = models.IntegerField(choices=SOURCE_CHOICES, default=SOURCE_API)
    users = models.ManyToManyField(
        "user_management.User"
    )  # users in single and recurrent events
    rolling_users = JSONField(
        null=True, default=None
    )  # [{user.pk: user.public_primary_key, ...},...]
    start_rotation_from_user_index = models.PositiveIntegerField(
        null=True, default=None
    )
    uuid = models.UUIDField(default=uuid4)  # event uuid
    type = models.IntegerField(
        choices=TYPE_CHOICES
    )  # "rolling_users", "recurrent_event", "single_event", "override"
    start = models.DateTimeField()  # event start datetime
    duration = models.DurationField()  # event duration (timedelta)
    rotation_start = (
        models.DateTimeField()
    )  # used for calculation users rotation and rotation start date
    frequency = models.IntegerField(choices=FREQUENCY_CHOICES, null=True, default=None)
    priority_level = models.IntegerField(default=0)
    interval = models.IntegerField(
        default=None, null=True
    )  # every n days/months - ical format
    until = models.DateTimeField(
        default=None, null=True
    )  # if set, when recurrence ends
    # week_start in ical format
    week_start = models.IntegerField(
        choices=WEEKDAY_CHOICES, default=SUNDAY
    )  # for weekly events
    by_day = JSONField(
        default=None, null=True
    )  # [] BYDAY - (MO, TU); (1MO, -2TU) - for monthly and weekly freq - ical format
    by_month = JSONField(
        default=None, null=True
    )  # [] BYMONTH - what months (1, 2, 3, ...) - ical format
    by_monthday = JSONField(
        default=None, null=True
    )  # [] BYMONTHDAY - what days of month (1, 2, -3) - ical format
    # newer version of this shift, forming a chain of rotation updates
    updated_shift = models.OneToOneField(
        "schedules.CustomOnCallShift",
        on_delete=models.SET_NULL,
        default=None,
        null=True,
        related_name="parent_shift",
    )
    def delete(self, *args, **kwargs):
        """Delete the shift, soft-deleting (capping ``until``) when it has started.

        ``kwargs`` may carry ``force=True`` to bypass the soft-delete path.
        Every affected schedule gets its cached iCal dropped and re-checked.
        """
        schedules_to_update = list(self.schedules.all())
        if self.schedule:
            schedules_to_update.append(self.schedule)
        force = kwargs.pop("force", False)
        # do soft delete for started shifts that were created for web schedule
        if self.schedule and self.event_is_started and not force:
            updated_until = timezone.now().replace(microsecond=0)
            if self.until is not None and updated_until >= self.until:
                # event is already finished
                return
            self.until = updated_until
            update_fields = ["until"]
            if self.type == self.TYPE_OVERRIDE:
                # since it is a single-time event, update override duration
                delta = self.until - self.start
                if delta < self.duration:
                    self.duration = delta
                    update_fields += ["duration"]
            self.save(update_fields=update_fields)
        elif self.schedule:
            # for web schedule shifts to be hard-deleted, update the rotation updated_shift links
            previous_shift = self.schedule.custom_shifts.filter(
                updated_shift=self
            ).first()
            super().delete(*args, **kwargs)
            if previous_shift:
                previous_shift.updated_shift = self.updated_shift
                previous_shift.save(update_fields=["updated_shift"])
        else:
            super().delete(*args, **kwargs)
        for schedule in schedules_to_update:
            self.start_drop_ical_and_check_schedule_tasks(schedule)
    @property
    def repr_settings_for_client_side_logging(self) -> str:
        """Human-readable summary of the shift settings for client-side logs.

        Example of execution:
        name: Demo recurrent event, team: example, source: terraform, type: Recurrent event, users: Alex,
        start: 2020-09-10T16:00:00+00:00, duration: 3:00:00, priority level: 0, frequency: Weekly, interval: 2,
        week start: 6, by day: ['MO', 'WE', 'FR'], by month: None, by monthday: None
        """
        if self.type == CustomOnCallShift.TYPE_ROLLING_USERS_EVENT:
            users_verbal = "empty"
            if self.rolling_users is not None:
                # render each rotation group as "[user, user, ...]"
                users_verbal = ""
                for users_dict in self.rolling_users:
                    users = self.organization.users.filter(
                        public_primary_key__in=users_dict.values()
                    )
                    users_verbal += f"[{', '.join([user.username for user in users])}]"
            users_verbal = f"rolling users: {users_verbal}"
        else:
            users = self.users.all()
            users_verbal = (
                f"{', '.join([user.username for user in users]) if users else 'empty'}"
            )
        result = (
            f"name: {self.name}, team: {self.team.name if self.team else 'No team'},"
            f"{f' time_zone: {self.time_zone},' if self.time_zone else ''} "
            f"source: {self.get_source_display()}, type: {self.get_type_display()}, users: {users_verbal}, "
            f"start: {self.start.isoformat()}, duration: {self.duration}, priority level: {self.priority_level}"
        )
        # recurrence details only make sense for recurring shift types
        if self.type not in (
            CustomOnCallShift.TYPE_SINGLE_EVENT,
            CustomOnCallShift.TYPE_OVERRIDE,
        ):
            result += (
                f", frequency: {self.get_frequency_display()}, interval: {self.interval}, "
                f"week start: {self.week_start}, by day: {self.by_day}, by month: {self.by_month}, "
                f"by monthday: {self.by_monthday}, rotation start: {self.rotation_start.isoformat()}, "
                f"until: {self.until.isoformat() if self.until else None}"
            )
        return result
@property
def event_is_started(self):
return bool(self.rotation_start <= timezone.now())
@property
def event_is_finished(self):
if self.frequency is not None:
is_finished = bool(self.until <= timezone.now()) if self.until else False
else:
is_finished = bool(self.start + self.duration <= timezone.now())
return is_finished
    def _daily_by_day_to_ical(self, time_zone, start, users_queue):
        """Create ical weekly shifts to distribute user groups combining daily + by_day.
        e.g.
        by_day: [WED, FRI]
        users_queue: [user_group_1, user_group_2, user_group_3]
        will result in the following ical shift rules:
        user_group_1, weekly WED interval 3
        user_group_2, weekly FRI interval 3
        user_group_3, weekly WED interval 3
        user_group_1, weekly FRI interval 3
        user_group_2, weekly WED interval 3
        user_group_3, weekly FRI interval 3

        Returns the concatenated iCal VEVENT strings.
        """
        result = ""
        # keep tracking of (users, day) combinations, and starting dates for each
        combinations = []
        starting_dates = []
        # we may need to iterate several times over users until we get a seen combination
        # use the group index as reference since user groups could repeat in the queue
        cycle_user_groups = itertools.cycle(range(len(users_queue)))
        orig_start = last_start = start
        all_rotations_checked = False
        # we need to go through each individual day
        day_by_day_rrule = copy.deepcopy(self.event_ical_rules)
        day_by_day_rrule["interval"] = 1
        for user_group_id in cycle_user_groups:
            for i in range(self.interval):
                if not start:  # means that rotation ends before next event starts
                    all_rotations_checked = True
                    break
                last_start = start
                day = CustomOnCallShift.ICAL_WEEKDAY_MAP[start.weekday()]
                # double-check day is valid (when until is set, we may get unexpected days)
                if day in self.by_day:
                    if (user_group_id, day, i) in combinations:
                        # seen combination: every rotation slot has been assigned
                        all_rotations_checked = True
                        break
                    starting_dates.append(start)
                    combinations.append((user_group_id, day, i))
                # get next event date following the original rule
                event_ical = self.generate_ical(
                    start, 1, None, 1, time_zone, custom_rrule=day_by_day_rrule
                )
                start = self.get_rotation_date(
                    event_ical, get_next_date=True, interval=1
                )
            if all_rotations_checked:
                break
        week_interval = 1
        if orig_start and last_start:
            # number of weeks used to cover all combinations
            week_interval = ((last_start - orig_start).days // 7) or 1
        counter = 1
        for (user_group_id, day, _), start in zip(combinations, starting_dates):
            users = users_queue[user_group_id]
            for user_counter, user in enumerate(users, start=1):
                # setup weekly events, for each user group/day combinations,
                # setting the right interval and the corresponding day
                custom_rrule = copy.deepcopy(self.event_ical_rules)
                custom_rrule["freq"] = ["WEEKLY"]
                custom_rrule["interval"] = [week_interval]
                custom_rrule["byday"] = [day]
                custom_event_ical = self.generate_ical(
                    start,
                    user_counter,
                    user,
                    counter,
                    time_zone,
                    custom_rrule=custom_rrule,
                )
                result += custom_event_ical
            counter += 1
        return result
    def convert_to_ical(self, time_zone="UTC", allow_empty_users=False):
        """Render this shift as one or more iCal VEVENT strings.

        Rolling-user/override shifts produce one event per user per rotation;
        other types produce one event per assigned user. Returns "" when the
        shift has no users and ``allow_empty_users`` is False.
        """
        result = ""
        # use shift time_zone if it exists, otherwise use schedule or default time_zone
        time_zone = self.time_zone if self.time_zone is not None else time_zone
        # rolling_users shift converts to several ical events
        if self.type in (
            CustomOnCallShift.TYPE_ROLLING_USERS_EVENT,
            CustomOnCallShift.TYPE_OVERRIDE,
        ):
            # generate initial iCal for counting rotation start date
            event_ical = self.generate_ical(self.start)
            rotations_created = 0
            all_rotation_checked = False
            users_queue = self.get_rolling_users()
            if not users_queue and not allow_empty_users:
                return result
            if not users_queue and allow_empty_users:
                # placeholder group so a (user-less) event is still produced
                users_queue = [[None]]
            if self.frequency is None:
                users_queue = users_queue[:1]
            # Get the date of the current rotation
            if self.start == self.rotation_start or self.frequency is None:
                start = self.start
            else:
                start = self.get_rotation_date(event_ical)
            # Make sure we respect the selected days if any when defining start date
            if self.frequency is not None and self.by_day and start is not None:
                start_day = CustomOnCallShift.ICAL_WEEKDAY_MAP[start.weekday()]
                if start_day not in self.by_day:
                    # when calculating first start date, make sure to sort days using week_start
                    sorted_days = [
                        i % 7 for i in range(self.week_start, self.week_start + 7)
                    ]
                    selected_days = [
                        CustomOnCallShift.ICAL_WEEKDAY_REVERSE_MAP[d]
                        for d in self.by_day
                    ]
                    expected_start_day = [d for d in sorted_days if d in selected_days][
                        0
                    ]
                    delta = (expected_start_day - start.weekday()) % 7
                    start = start + datetime.timedelta(days=delta)
            if self.frequency == CustomOnCallShift.FREQUENCY_DAILY and self.by_day:
                # daily + by_day needs a special weekly expansion
                result = self._daily_by_day_to_ical(time_zone, start, users_queue)
                all_rotation_checked = True
            while not all_rotation_checked:
                for counter, users in enumerate(users_queue, start=1):
                    if not start:  # means that rotation ends before next event starts
                        all_rotation_checked = True
                        break
                    elif (
                        self.source == CustomOnCallShift.SOURCE_WEB
                        and start + self.duration > self.rotation_start
                    ) or start >= self.rotation_start:
                        # event has already started, generate iCal for each user
                        for user_counter, user in enumerate(users, start=1):
                            event_ical = self.generate_ical(
                                start, user_counter, user, counter, time_zone
                            )
                            result += event_ical
                        rotations_created += 1
                    else:  # generate default iCal to calculate the date for the next rotation
                        event_ical = self.generate_ical(start)
                    if rotations_created == len(
                        users_queue
                    ):  # means that we generated iCal for every user group
                        all_rotation_checked = True
                        break
                    # Use the flag 'get_next_date' to get the date of the next rotation
                    start = self.get_rotation_date(event_ical, get_next_date=True)
        else:
            for user_counter, user in enumerate(self.users.all(), start=1):
                result += self.generate_ical(
                    self.start, user_counter, user, time_zone=time_zone
                )
        return result
def generate_ical(
self,
start,
user_counter=0,
user=None,
counter=1,
time_zone="UTC",
custom_rrule=None,
):
event = Event()
event[
"uid"
] = f"oncall-{self.uuid}-PK{self.public_primary_key}-U{user_counter}-E{counter}-S{self.source}"
if user:
event.add("summary", self.get_summary_with_user_for_ical(user))
event.add("dtstart", self.convert_dt_to_schedule_timezone(start, time_zone))
dtend = start + self.duration
if self.until:
dtend = min(dtend, self.until)
event.add("dtend", self.convert_dt_to_schedule_timezone(dtend, time_zone))
event.add("dtstamp", self.rotation_start)
if custom_rrule:
event.add("rrule", custom_rrule)
elif self.event_ical_rules:
event.add("rrule", self.event_ical_rules)
try:
event_in_ical = event.to_ical().decode("utf-8")
except ValueError as e:
logger.warning(f"Cannot convert event with pk {self.pk} to ical: {str(e)}")
event_in_ical = ""
return event_in_ical
def get_summary_with_user_for_ical(self, user: User) -> str:
summary = ""
if self.priority_level > 0:
summary += f"[L{self.priority_level}] "
summary += f"{user.username} "
return summary
    def get_rotation_date(self, event_ical, get_next_date=False, interval=None):
        """Get date of the next event (for rolling_users shifts).

        ``event_ical`` is a rendered VEVENT for the current rotation;
        ``get_next_date=True`` advances past the current occurrence;
        ``interval`` overrides the shift interval. Returns None when the
        computed date falls after ``self.until``.
        """
        ONE_DAY = 1
        ONE_HOUR = 1

        def add_months(year, month, months_add):
            """
            Utility method for month calculation. E.g. (2022, 12) + 1 month = (2023, 1)
            """
            dt = datetime.datetime.min.replace(
                year=year, month=month
            ) + relativedelta.relativedelta(months=months_add)
            return dt.year, dt.month

        current_event = Event.from_ical(event_ical)
        # take shift interval, not event interval. For rolling_users shift it is not the same.
        if interval is None:
            interval = self.interval or 1
        if "rrule" in current_event:
            # when triggering shift previews, there could be no rrule information yet
            # (e.g. initial empty weekly rotation has no rrule set)
            current_event["rrule"]["INTERVAL"] = interval
        current_event_start = current_event["DTSTART"].dt
        next_event_start = current_event_start
        # Calculate the minimum start date for the next event based on rotation frequency. We don't need to do this
        # for the first rotation, because in this case the min start date will be the same as the current event date.
        if get_next_date:
            if self.frequency == CustomOnCallShift.FREQUENCY_HOURLY:
                next_event_start = current_event_start + datetime.timedelta(
                    hours=ONE_HOUR
                )
            elif self.frequency == CustomOnCallShift.FREQUENCY_DAILY:
                next_event_start = current_event_start + datetime.timedelta(
                    days=ONE_DAY
                )
            elif self.frequency == CustomOnCallShift.FREQUENCY_WEEKLY:
                DAYS_IN_A_WEEK = 7
                # count days before the next week starts
                days_for_next_event = (
                    DAYS_IN_A_WEEK - current_event_start.weekday() + self.week_start
                )
                if days_for_next_event > DAYS_IN_A_WEEK:
                    days_for_next_event = days_for_next_event % DAYS_IN_A_WEEK
                # count next event start date with respect to event interval
                next_event_start = current_event_start + datetime.timedelta(
                    days=days_for_next_event + DAYS_IN_A_WEEK * (interval - 1)
                )
            elif self.frequency == CustomOnCallShift.FREQUENCY_MONTHLY:
                DAYS_IN_A_MONTH = monthrange(
                    current_event_start.year, current_event_start.month
                )[1]
                # count days before the next month starts
                days_for_next_event = (
                    DAYS_IN_A_MONTH - current_event_start.day + ONE_DAY
                )
                # count next event start date with respect to event interval
                for i in range(1, interval):
                    year, month = add_months(
                        current_event_start.year, current_event_start.month, i
                    )
                    next_month_days = monthrange(year, month)[1]
                    days_for_next_event += next_month_days
                next_event_start = current_event_start + datetime.timedelta(
                    days=days_for_next_event
                )
        end_date = None
        # get the period for calculating the current rotation end date for long events with frequency weekly and monthly
        if self.frequency == CustomOnCallShift.FREQUENCY_WEEKLY:
            DAYS_IN_A_WEEK = 7
            days_diff = 0
            # get the last day of the week with respect to the week_start
            if next_event_start.weekday() != self.week_start:
                days_diff = (
                    DAYS_IN_A_WEEK + next_event_start.weekday() - self.week_start
                )
                days_diff %= DAYS_IN_A_WEEK
            end_date = next_event_start + datetime.timedelta(
                days=DAYS_IN_A_WEEK - days_diff - ONE_DAY
            )
        elif self.frequency == CustomOnCallShift.FREQUENCY_MONTHLY:
            # get the last day of the month
            current_day_number = next_event_start.day
            number_of_days = monthrange(next_event_start.year, next_event_start.month)[
                1
            ]
            days_diff = number_of_days - current_day_number
            end_date = next_event_start + datetime.timedelta(days=days_diff)
        next_event = None
        # repetitions generate the next event shift according with the recurrence rules
        repetitions = UnfoldableCalendar(current_event).RepeatedEvent(
            current_event, next_event_start.replace(microsecond=0)
        )
        for event in repetitions.__iter__():
            if (
                end_date
            ):  # end_date exists for long events with frequency weekly and monthly
                if end_date >= event.start >= next_event_start:
                    if (
                        self.source == CustomOnCallShift.SOURCE_WEB
                        and event.stop > self.rotation_start
                    ) or event.start >= self.rotation_start:
                        next_event = event
                        break
                elif end_date < event.start:
                    # past the window: no occurrence falls inside this rotation
                    break
            else:
                if event.start >= next_event_start:
                    next_event = event
                    break
        next_event_dt = next_event.start if next_event is not None else next_event_start
        if self.until and next_event_dt > self.until:
            return
        return next_event_dt
def get_last_event_date(self, date):
"""Get start date of the last event before the chosen date"""
assert (
date >= self.start
), "Chosen date should be later or equal to initial event start date"
event_ical = self.generate_ical(self.start)
initial_event = Event.from_ical(event_ical)
# take shift interval, not event interval. For rolling_users shift it is not the same.
interval = self.interval or 1
if "rrule" in initial_event:
# means that shift has frequency
initial_event["rrule"]["INTERVAL"] = interval
initial_event_start = initial_event["DTSTART"].dt
last_event = None
# repetitions generate the next event shift according with the recurrence rules
repetitions = UnfoldableCalendar(initial_event).RepeatedEvent(
initial_event, initial_event_start.replace(microsecond=0)
)
for event in repetitions.__iter__():
if event.start > date:
break
last_event = event
last_event_dt = last_event.start if last_event else initial_event_start
return last_event_dt
@cached_property
def event_ical_rules(self):
# e.g. {'freq': ['WEEKLY'], 'interval': [2], 'byday': ['MO', 'WE', 'FR'], 'wkst': ['SU']}
rules = {}
if self.frequency is not None:
rules["freq"] = [self.get_frequency_display().upper()]
if self.event_interval is not None:
rules["interval"] = [self.event_interval]
if self.by_day:
rules["byday"] = self.by_day
if self.by_month is not None:
rules["bymonth"] = self.by_month
if self.by_monthday is not None:
rules["bymonthday"] = self.by_monthday
if self.week_start is not None:
rules["wkst"] = CustomOnCallShift.ICAL_WEEKDAY_MAP[self.week_start]
if self.until is not None:
# RRULE UNTIL values must be specified in UTC when DTSTART is timezone-aware
rules["until"] = self.convert_dt_to_schedule_timezone(self.until, "UTC")
return rules
@cached_property
def event_interval(self):
if self.type == CustomOnCallShift.TYPE_ROLLING_USERS_EVENT:
if self.rolling_users:
if self.interval is not None:
return self.interval * len(self.rolling_users)
else:
return len(self.rolling_users)
return self.interval
def convert_dt_to_schedule_timezone(self, dt, time_zone):
start_naive = dt.replace(tzinfo=None)
if time_zone and time_zone.lower() == "etc/utc":
# dateutil rrule breaks if Etc/UTC is given
time_zone = "UTC"
return pytz.timezone(time_zone).localize(start_naive, is_dst=None)
def get_rolling_users(self):
from apps.user_management.models import User
all_users_pks = set()
users_queue = []
if self.rolling_users is not None:
# get all users pks from rolling_users field
for users_dict in self.rolling_users:
all_users_pks.update(users_dict.keys())
users = User.objects.filter(pk__in=all_users_pks)
# generate users_queue list with user objects
if self.start_rotation_from_user_index is not None:
rolling_users = (
self.rolling_users[self.start_rotation_from_user_index :]
+ self.rolling_users[: self.start_rotation_from_user_index]
)
else:
rolling_users = self.rolling_users
for users_dict in rolling_users:
users_list = list(users.filter(pk__in=users_dict.keys()))
if users_list:
users_queue.append(users_list)
return users_queue
def add_rolling_users(self, rolling_users_list):
result = []
for users in rolling_users_list:
result.append({user.pk: user.public_primary_key for user in users})
self.rolling_users = result
self.save(update_fields=["rolling_users"])
    def get_rotation_user_index(self, date):
        """Return the index (into rolling_users) of the group on call at `date`.

        Counts completed rotations between the rotation start and `date`,
        modulo the number of groups. Returns 0 for non-rotating shifts.
        """
        START_ROTATION_INDEX = 0
        result = START_ROTATION_INDEX
        if not self.rolling_users or self.frequency is None:
            return START_ROTATION_INDEX
        # generate initial iCal for counting rotation start date
        event_ical = self.generate_ical(self.start, user_counter=0)
        # Get the date of the current rotation
        if self.start == self.rotation_start:
            start = self.start
        else:
            start = self.get_rotation_date(event_ical)
        if not start or start >= date:
            return START_ROTATION_INDEX
        # count how many times the rotation was triggered before the selected date
        # NOTE(review): `start or start < date` is always true here (start is
        # truthy on entry); the loop relies on the inner break to terminate —
        # `and` was probably intended. Confirm before changing.
        while start or start < date:
            start = self.get_rotation_date(event_ical, get_next_date=True)
            if not start or start >= date:
                break
            event_ical = self.generate_ical(start, user_counter=0)
            result += 1
        result %= len(self.rolling_users)
        return result
def refresh_schedule(self):
if not self.schedule:
# only trigger sync-refresh for web-created shifts
return
schedule = self.schedule.get_real_instance()
schedule.refresh_ical_file()
refresh_ical_final_schedule.apply_async((schedule.pk,))
def start_drop_ical_and_check_schedule_tasks(self, schedule):
drop_cached_ical_task.apply_async((schedule.pk,))
schedule_notify_about_empty_shifts_in_schedule.apply_async((schedule.pk,))
schedule_notify_about_gaps_in_schedule.apply_async((schedule.pk,))
@cached_property
def last_updated_shift(self):
    """Follow the ``updated_shift`` chain to its newest link.

    Returns None when this shift has never been updated.  Caching is safe
    because updates only ever append new links to the chain.
    """
    current = self.updated_shift
    while current is not None and current.updated_shift is not None:
        current = current.updated_shift
    return current
def create_or_update_last_shift(self, data):
    """Apply ``data`` to this rotation, versioning it instead of editing in place.

    If the latest version has already started, a new shift is created and
    chained to it via ``updated_shift`` (the old one is terminated with
    ``until``); otherwise the not-yet-started latest version is mutated in
    place.  Returns the shift instance holding the new values.
    """
    now = timezone.now().replace(microsecond=0)
    # rotation start date cannot be earlier than now
    data["rotation_start"] = max(data["rotation_start"], now)
    # prepare dict with params of existing instance with last updates and remove unique and m2m fields from it
    shift_to_update = self.last_updated_shift or self
    instance_data = model_to_dict(shift_to_update)
    fields_to_remove = [
        "id",
        "public_primary_key",
        "uuid",
        "users",
        "updated_shift",
    ]
    for field in fields_to_remove:
        instance_data.pop(field)
    instance_data.update(data)
    instance_data["schedule"] = self.schedule
    instance_data["team"] = self.team
    # set new event start date to keep rotation index
    if instance_data["start"] == self.start:
        instance_data["start"] = self.get_last_event_date(now)
    # calculate rotation index to keep user rotation order
    start_rotation_from_user_index = self.get_rotation_user_index(now) + (
        self.start_rotation_from_user_index or 0
    )
    # wrap around when the combined index runs past the number of groups
    if start_rotation_from_user_index >= len(instance_data["rolling_users"]):
        start_rotation_from_user_index = 0
    instance_data["start_rotation_from_user_index"] = start_rotation_from_user_index
    if self.last_updated_shift is None or self.last_updated_shift.event_is_started:
        # create new shift: atomically terminate the old version and link the new one
        with transaction.atomic():
            shift = CustomOnCallShift(**instance_data)
            shift.save()
            shift_to_update.until = data["rotation_start"]
            shift_to_update.updated_shift = shift
            shift_to_update.save(update_fields=["until", "updated_shift"])
    else:
        # the latest version hasn't started yet: update it in place
        shift = self.last_updated_shift
        for key in instance_data:
            setattr(shift, key, instance_data[key])
        shift.save(update_fields=list(instance_data))
    return shift
# Insight logs
@property
def insight_logs_type_verbal(self):
    """Resource type identifier used in insight log entries."""
    return "oncall_shift"
@property
def insight_logs_verbal(self):
    """Human-readable identifier of this shift for insight logs."""
    return self.name
@property
def insight_logs_serialized(self):
    """Serialized snapshot of this shift for insight log entries."""
    if self.type == CustomOnCallShift.TYPE_ROLLING_USERS_EVENT:
        users_verbal = []
        if self.rolling_users is not None:
            for group in self.rolling_users:
                group_users = self.organization.users.filter(
                    public_primary_key__in=group.values()
                )
                users_verbal.extend(u.username for u in group_users)
    else:
        users_verbal = [u.username for u in self.users.all()]
    result = {
        "name": self.name,
        "source": self.get_source_display(),
        "type": self.get_type_display(),
        "users": users_verbal,
        "start": self.start.isoformat(),
        "duration": self.duration.seconds,
        "priority_level": self.priority_level,
    }
    # recurrence settings only apply to non-single, non-override shifts
    is_recurring = self.type not in (
        CustomOnCallShift.TYPE_SINGLE_EVENT,
        CustomOnCallShift.TYPE_OVERRIDE,
    )
    if is_recurring:
        result["frequency"] = self.get_frequency_display()
        result["interval"] = self.interval
        result["week_start"] = self.week_start
        result["by_day"] = self.by_day
        result["by_month"] = self.by_month
        result["by_monthday"] = self.by_monthday
        result["rotation_start"] = self.rotation_start.isoformat()
        if self.until:
            result["until"] = self.until.isoformat()
    if self.team:
        result["team"] = self.team.name
        result["team_id"] = self.team.public_primary_key
    else:
        result["team"] = "General"
    if self.time_zone:
        result["time_zone"] = self.time_zone
    return result
@property
def insight_logs_metadata(self):
    """Contextual metadata (team / schedule) attached to insight log entries."""
    result = {}
    team = self.team
    if team:
        result["team"] = team.name
        result["team_id"] = team.public_primary_key
    else:
        result["team"] = "General"
    schedule = self.schedule
    if schedule:
        result["schedule"] = schedule.insight_logs_verbal
        result["schedule_id"] = schedule.public_primary_key
    return result
|
quodlibet | cli | # Copyright 2014,2016 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import json
import os
from quodlibet import C_, _
from quodlibet.remote import Remote, RemoteError
from quodlibet.util.dprint import print_, print_e
from senf import fsn2text, uri2fsn
def exit_(status=None, notify_startup=False):
    """Abort startup before any mainloop starts by raising SystemExit.

    Pass ``notify_startup=True`` whenever QL may have been launched from
    the desktop file, so the startup-notification protocol is completed
    (stopping the launcher's busy feedback) before exiting.
    """
    if notify_startup:
        import gi

        gi.require_version("Gdk", "3.0")
        from gi.repository import Gdk

        Gdk.notify_startup_complete()
    raise SystemExit(status)
def is_running():
    """Return True if another Quod Libet instance appears to be running."""
    return Remote.remote_exists()
def control(command, arg=None, ignore_error=False):
    """Send *command* (optionally with *arg*) to the running instance and exit.

    Any response from the remote instance is printed to stdout.  This
    function does not return, except when *ignore_error* is true and the
    command could not be delivered.
    """
    if not is_running():
        if ignore_error:
            return
        exit_(
            _("Quod Libet is not running (add '--run' to start it)"),
            notify_startup=True,
        )
        return
    message = command if arg is None else command + " " + arg
    try:
        response = Remote.send_message(message)
    except RemoteError as e:
        if ignore_error:
            return
        exit_(str(e), notify_startup=True)
    else:
        if response is not None:
            print_(response, end="", flush=True)
        exit_(notify_startup=True)
def process_arguments(argv):
    """Parse Quod Libet's command line and dispatch remote-control commands.

    Each recognized option is validated, translated into a message for a
    running instance and sent via :func:`control` (exiting on success), or
    recorded as a startup action for this process.

    Returns ``(actions, cmds_todo)``:
        actions -- startup flags for this process ("run", "start-playing",
            "start-hidden", "no-plugins")
        cmds_todo -- the remote command tuples that were issued
    """
    from quodlibet import const, util
    from quodlibet.util.path import uri_is_valid

    actions = []
    # commands that take no argument
    controls = [
        "next",
        "previous",
        "play",
        "pause",
        "play-pause",
        "stop",
        "hide-window",
        "show-window",
        "toggle-window",
        "focus",
        "quit",
        "unfilter",
        "refresh",
        "force-previous",
    ]
    # commands that take an argument
    controls_opt = [
        "seek",
        "repeat",
        "query",
        "volume",
        "filter",
        "rating",
        "set-browser",
        "open-browser",
        "shuffle",
        "queue",
        "stop-after",
        "random",
        "repeat-type",
        "shuffle-type",
        "add-location",
    ]
    options = util.OptionParser(
        "Quod Libet", const.VERSION, _("a music library and player"), _("[option]")
    )
    options.add("print-playing", help=_("Print the playing song and exit"))
    options.add("start-playing", help=_("Begin playing immediately"))
    options.add("start-hidden", help=_("Don't show any windows on start"))
    for opt, help_text in [
        ("next", _("Jump to next song")),
        ("previous", _("Jump to previous song or restart if near the beginning")),
        ("force-previous", _("Jump to previous song")),
        ("play", _("Start playback")),
        ("pause", _("Pause playback")),
        ("play-pause", _("Toggle play/pause mode")),
        ("stop", _("Stop playback")),
        ("volume-up", _("Turn up volume")),
        ("volume-down", _("Turn down volume")),
        ("rating-up", _("Increase rating of playing song by one star")),
        ("rating-down", _("Decrease rating of playing song by one star")),
        ("status", _("Print player status")),
        ("hide-window", _("Hide main window")),
        ("show-window", _("Show main window")),
        ("toggle-window", _("Toggle main window visibility")),
        ("focus", _("Focus the running player")),
        ("unfilter", _("Remove active browser filters")),
        ("refresh", _("Refresh and rescan library")),
        ("list-browsers", _("List available browsers")),
        ("print-playlist", _("Print the current playlist")),
        ("print-queue", _("Print the contents of the queue")),
        ("print-query-text", _("Print the active text query")),
        ("no-plugins", _("Start without plugins")),
        ("run", _("Start Quod Libet if it isn't running")),
        ("quit", _("Exit Quod Libet")),
    ]:
        options.add(opt, help=help_text)
    for opt, help_text, arg_desc in [
        ("seek", _("Seek within the playing song"), _("[+|-][HH:]MM:SS")),
        ("shuffle", _("Set or toggle shuffle mode"), "0|1|t"),
        ("shuffle-type", _("Set shuffle mode type"), "random|weighted|off"),
        ("repeat", _("Turn repeat off, on, or toggle it"), "0|1|t"),
        ("repeat-type", _("Set repeat mode type"), "current|all|one|off"),
        ("volume", _("Set the volume"), "[+|-]0..100"),
        ("query", _("Search your audio library"), _("query")),
        ("play-file", _("Play a file"), C_("command", "filename")),
        ("rating", _("Set rating of playing song"), "[+|-]0.0..1.0"),
        ("set-browser", _("Set the current browser"), "BrowserName"),
        ("stop-after", _("Stop after the playing song"), "0|1|t"),
        ("open-browser", _("Open a new browser"), "BrowserName"),
        ("queue", _("Show or hide the queue"), "on|off|t"),
        ("random", _("Filter on a random value"), C_("command", "tag")),
        ("filter", _("Filter on a tag value"), _("tag=value")),
        (
            "enqueue",
            _("Enqueue a file or query"),
            "%s|%s" % (C_("command", "filename"), _("query")),
        ),
        (
            "enqueue-files",
            _("Enqueue comma-separated files"),
            "%s[,%s..]" % (_("filename"), _("filename")),
        ),
        ("print-query", _("Print filenames of results of query to stdout"), _("query")),
        (
            "unqueue",
            _("Unqueue a file or query"),
            "%s|%s" % (C_("command", "filename"), _("query")),
        ),
        ("add-location", _("Add a file or directory to the library"), _("location")),
        ("with-pattern", _("Set template for --print-* commands"), _("pattern")),
    ]:
        options.add(opt, help=help_text, arg=arg_desc)
    # session-management / X11 options accepted but unused
    options.add("sm-config-prefix", arg="dummy")
    options.add("sm-client-id", arg="prefix")
    options.add("screen", arg="dummy")

    def is_vol(value):
        # a bare "+"/"-" (step) or any float-ish string
        if len(value) == 1 and value[0] in "+-":
            return True
        return is_float(value)

    def is_rate(value):
        # same grammar as volume: bare "+"/"-" or a float
        if len(value) == 1 and value[0] in "+-":
            return True
        return is_float(value)

    def is_time(value):
        # [+|-][HH:]MM:SS style offsets, up to three ":"-separated parts
        if value[0] not in "+-0123456789":
            return False
        elif value[0] in "+-":
            value = value[1:]
        parts = value.split(":")
        if len(parts) > 3:
            return False
        else:
            return not (False in [p.isdigit() for p in parts])

    def is_float(value):
        try:
            float(value)
        except ValueError:
            return False
        else:
            return True

    validators = {
        "shuffle": ["0", "1", "t", "on", "off", "toggle"].__contains__,
        "shuffle-type": ["random", "weighted", "off", "0"].__contains__,
        "repeat": ["0", "1", "t", "on", "off", "toggle"].__contains__,
        "repeat-type": ["current", "all", "one", "off", "0"].__contains__,
        "volume": is_vol,
        "seek": is_time,
        "rating": is_rate,
        "stop-after": ["0", "1", "t"].__contains__,
    }
    cmds_todo = []

    def queue(*args):
        cmds_todo.append(args)

    # XXX: to make startup work in case the desktop file isn't passed
    # a file path/uri
    if argv[-1] == "--play-file":
        argv = argv[:-1]
    opts, args = options.parse(argv[1:])
    for command, arg in opts.items():
        if command in controls:
            queue(command)
        elif command in controls_opt:
            if command in validators and not validators[command](arg):
                print_e(_("Invalid argument for '%s'.") % command)
                print_e(_("Try %s --help.") % fsn2text(argv[0]))
                exit_(True, notify_startup=True)
            else:
                queue(command, arg)
        elif command == "status":
            queue("status")
        elif command == "print-playlist":
            queue("dump-playlist", opts.get("with-pattern"))
        elif command == "print-queue":
            queue("dump-queue", opts.get("with-pattern"))
        elif command == "list-browsers":
            queue("dump-browsers")
        elif command == "volume-up":
            queue("volume +")
        elif command == "volume-down":
            queue("volume -")
        elif command == "rating-up":
            queue("rating +")
        elif command == "rating-down":
            queue("rating -")
        elif command == "enqueue" or command == "unqueue":
            try:
                filename = uri2fsn(arg)
            except ValueError:
                filename = arg
            queue(command, filename)
        elif command == "enqueue-files":
            queue(command, arg)
        elif command == "play-file":
            if uri_is_valid(arg) and arg.startswith("quodlibet://"):
                # TODO: allow handling of URIs without --play-file
                queue("uri-received", arg)
            else:
                try:
                    filename = uri2fsn(arg)
                except ValueError:
                    filename = arg
                # BUGFIX: expand/absolutize the *converted* filename; using
                # the raw argument here discarded the URI -> path conversion
                # above, breaking file:// URIs.
                filename = os.path.abspath(os.path.expanduser(filename))
                queue("play-file", filename)
        elif command == "add-location":
            try:
                path = uri2fsn(arg)
            except ValueError:
                path = arg
            # BUGFIX: same as play-file -- keep the converted path
            path = os.path.abspath(os.path.expanduser(path))
            queue("add-location", path)
        elif command == "print-playing":
            try:
                queue("print-playing", args[0])
            except IndexError:
                queue("print-playing", opts.get("with-pattern"))
        elif command == "print-query":
            pattern = opts.get("with-pattern")
            queue(command, json.dumps({"query": arg, "pattern": pattern}))
        elif command == "print-query-text":
            queue(command)
        elif command == "start-playing":
            actions.append(command)
        elif command == "start-hidden":
            actions.append(command)
        elif command == "no-plugins":
            actions.append(command)
        elif command == "run":
            actions.append(command)
    if cmds_todo:
        for cmd in cmds_todo:
            control(*cmd, ignore_error="run" in actions)
    else:
        # this will exit if it succeeds
        control("focus", ignore_error=True)
    return actions, cmds_todo
|
picard | i18n | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2012 Frederik “Freso” S. Olesen
# Copyright (C) 2013-2014, 2018-2022 Laurent Monin
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2017-2022 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import builtins
import gettext
import locale
import os
from picard.const.sys import IS_MACOS, IS_WIN
from PyQt5.QtCore import QLocale
builtins.__dict__["N_"] = lambda a: a
_logger = None
def set_locale_from_env():
    """Set the process locale from the environment, falling back to 'C'.

    ``locale.setlocale(locale.LC_ALL, '')`` can fail for a misconfigured
    environment (e.g. ``LANG=buggy`` raises ``locale.Error``); in that
    case the 'C' locale is set instead.

    Returns the locale string actually set, i.e. ``LANG[.ENCODING]``.
    """
    try:
        env_locale = locale.setlocale(locale.LC_ALL, "")
    except locale.Error:
        # default to 'C' locale if it couldn't be set from env
        env_locale = locale.setlocale(locale.LC_ALL, "C")
    _logger("Setting locale from env: %r", env_locale)
    return env_locale
if IS_WIN:
    from ctypes import windll

    def _get_default_locale():
        """Return the Windows UI language as a POSIX locale name, or None."""
        try:
            return locale.windows_locale[windll.kernel32.GetUserDefaultUILanguage()]
        except KeyError:
            return None

elif IS_MACOS:
    import Foundation

    def _get_default_locale():
        """Return the user's preferred macOS UI language as a locale name."""
        defaults = Foundation.NSUserDefaults.standardUserDefaults()
        return defaults.objectForKey_("AppleLanguages")[0].replace("-", "_")

else:

    def _get_default_locale():
        """Other platforms have no API default; rely on environment variables."""
        return None
def _try_encodings():
"""Generate encodings to try, starting with preferred encoding if possible"""
preferred_encoding = locale.getpreferredencoding()
if preferred_encoding != "UTF-8":
yield preferred_encoding
yield from ("UTF-8", None)
def _try_locales(language):
    """Yield locale strings for *language*: preferred encoding, UTF-8,
    and finally the bare language code with no encoding suffix."""
    for enc in _try_encodings():
        if enc:
            yield locale.normalize(language + "." + enc)
        else:
            yield language
def _load_translation(domain, localedir, language):
    """Load the gettext catalog for *domain*/*language*.

    Falls back to a no-op NullTranslations when the catalog is missing
    (translation is then the identity function).
    """
    try:
        _logger(
            "Loading gettext translation for %s, localedir=%r, language=%r",
            domain,
            localedir,
            language,
        )
        return gettext.translation(domain, localedir, languages=[language])
    except OSError as e:
        # catalog not found / unreadable: log and translate nothing
        _logger(e)
        return gettext.NullTranslations()
def _log_lang_env_vars():
    """Log the locale-related environment variables (LANG, LANGUAGE, LC_*)."""
    keys = ["LANG", "LANGUAGE"]
    keys.extend(sorted(k for k in os.environ if k.startswith("LC_")))
    pairs = ["%s=%s" % (k, os.environ[k]) for k in keys if k in os.environ]
    _logger("Env vars: %s", " ".join(pairs))
def setup_gettext(localedir, ui_language=None, logger=None):
    """Setup locales, load translations, install gettext functions.

    localedir -- directory containing the compiled gettext catalogs
    ui_language -- explicit UI language override; when None the language
        is taken from the platform default / environment
    logger -- optional callable for diagnostic output (called like
        ``logging.debug``); when falsy, logging is a no-op
    """
    global _logger
    if not logger:
        _logger = lambda *a, **b: None  # noqa: E731
    else:
        _logger = logger
    if ui_language:
        _logger("UI language: %r", ui_language)
        try_locales = list(_try_locales(ui_language))
    else:
        _logger("UI language: system")
        _log_lang_env_vars()
        try_locales = []
        default_locale = _get_default_locale()
        if default_locale:
            try_locales.append(default_locale)
    _logger("Trying locales: %r", try_locales)
    # set the first locale candidate the C library accepts
    current_locale = None
    for loc in try_locales:
        try:
            current_locale = locale.setlocale(locale.LC_ALL, loc)
            _logger("Set locale to: %r", current_locale)
            break
        except locale.Error:
            _logger("Failed to set locale: %r", loc)
    if ui_language:
        # UI locale may differ from env, those have to match files in po/
        current_locale = ui_language
    if current_locale is None:
        current_locale = set_locale_from_env()
    _logger("Using locale: %r", current_locale)
    QLocale.setDefault(QLocale(current_locale))
    # load the main catalog plus the three auxiliary domains
    trans = _load_translation("picard", localedir, language=current_locale)
    trans_attributes = _load_translation(
        "picard-attributes", localedir, language=current_locale
    )
    trans_constants = _load_translation(
        "picard-constants", localedir, language=current_locale
    )
    trans_countries = _load_translation(
        "picard-countries", localedir, language=current_locale
    )
    # install _ and ngettext as builtins; expose the auxiliary domains too
    trans.install(["ngettext"])
    builtins.__dict__["gettext_attributes"] = trans_attributes.gettext
    builtins.__dict__["gettext_constants"] = trans_constants.gettext
    builtins.__dict__["gettext_countries"] = trans_countries.gettext
    if hasattr(trans_attributes, "pgettext"):
        builtins.__dict__["pgettext_attributes"] = trans_attributes.pgettext
    else:
        # Python < 3.8 gettext has no pgettext; emulate it via the msgctxt
        # separator convention (see gettext_ctxt below)
        def pgettext(context, message):
            return gettext_ctxt(trans_attributes.gettext, message, context)

        builtins.__dict__["pgettext_attributes"] = pgettext
    _logger("_ = %r", _)
    _logger("N_ = %r", N_)
    _logger("ngettext = %r", ngettext)
    _logger("gettext_countries = %r", gettext_countries)
    _logger("gettext_attributes = %r", gettext_attributes)
    _logger("pgettext_attributes = %r", pgettext_attributes)
# Workaround for po files with msgctxt which isn't supported by Python < 3.8
# gettext
# msgctxt are used within attributes.po, and gettext is failing to translate
# strings due to that
# This workaround is a hack until we get proper msgctxt support
_CONTEXT_SEPARATOR = "\x04"


def gettext_ctxt(gettext_, message, context=None):
    """Translate *message* under *context* using the msgctxt key convention.

    The catalog stores contextual entries keyed as "<context>\\x04<message>";
    when no translation exists, gettext returns the key unchanged (separator
    still present), in which case the untranslated *message* is returned.
    """
    if context is None:
        return gettext_(message)
    translated = gettext_("%s%s%s" % (context, _CONTEXT_SEPARATOR, message))
    return message if _CONTEXT_SEPARATOR in translated else translated
|
books | nfzm | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from base import BaseFeedBook, URLOpener, string_of_tag
from bs4 import BeautifulSoup
def getBook():
    """Entry point used by the framework to obtain the book class."""
    return NFZM
class NFZM(BaseFeedBook):
    """Southern Weekly feed book: logs in and scrapes the current issue."""
    title = "南方周末"
    description = "在这里读懂中国 | 每周五更新 | 需要登录"
    __author__ = "mcfloundinho"
    language = "zh-cn"
    feed_encoding = "utf-8"
    page_encoding = "utf-8"
    mastheadfile = "mh_nfzm.gif"
    coverfile = "cv_nfzm.jpg"
    # weekly publication, delivered on Fridays only
    deliver_days = ["Friday"]
    # content is behind a subscriber login (self.account / self.password)
    needs_subscription = True
    def ParseFeedUrls(self):
        """Log in, scrape the issue's table of contents, and return a list
        of (section, title, url, content_soup) tuples for each article."""
        login_url = "http://passport.infzm.com/passport/login"
        content_url = "http://www.infzm.com/enews/infzm"
        urls = []
        opener = URLOpener(self.host, timeout=60)
        login_form = {"loginname": self.account, "password": self.password}
        login_response = opener.open(login_url, data=login_form)
        # opener.SaveCookies(login_response.header_msg.getheaders('Set-Cookie'))
        result = opener.open(content_url)
        content = result.content.decode(self.feed_encoding)
        soup = BeautifulSoup(content, "lxml")
        # section headers are rendered as <h2>, in page order
        sec_titles = []
        for sec_name in soup.find_all("h2"):
            sec_titles.append(sec_name.get_text())
        # the issue's top story (dl.topnews) is attributed to the first section
        for top_news in soup.find_all("dl", {"class": "topnews"}):
            url = top_news.a["href"]
            feed_content = opener.open(url).content.decode(self.feed_encoding)
            feed_soup = BeautifulSoup(feed_content, "lxml")
            urls.append(
                (
                    sec_titles[0],
                    top_news.a["title"],
                    url,
                    feed_soup.find(id="articleContent"),
                )
            )
        # one ul.relnews article list per section, assumed to follow the
        # same order as the <h2> headers
        # NOTE(review): if the page has more ul.relnews lists than <h2>
        # headers this raises IndexError -- confirm against the live markup.
        sec_count = 0
        for sec_content in soup.find_all("ul", {"class": "relnews"}):
            for a in sec_content.find_all("a"):
                url = a["href"]
                feed_content = opener.open(url).content.decode(self.feed_encoding)
                feed_soup = BeautifulSoup(feed_content, "lxml")
                urls.append(
                    (
                        sec_titles[sec_count],
                        a["title"],
                        url,
                        feed_soup.find(id="articleContent"),
                    )
                )
            sec_count += 1
        return urls
|
saveddata | fighterAbility | # ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
from eos.utils.stats import DmgTypes
from logbook import Logger
from sqlalchemy.orm import reconstructor
pyfalog = Logger(__name__)
class FighterAbility:
    """A single activatable ability of a fighter squadron.

    Wraps the dogma effect that implements the ability and derives combat
    statistics (volley, DPS, cycle time, reload time) from the owning
    fighter's modified attributes.
    """
    # We aren't able to get data on the charges that can be stored with fighters. So we hardcode that data here, keyed
    # with the fighter squadron role
    NUM_SHOTS_MAPPING = {
        1: 0,  # Superiority fighter / Attack
        2: 12,  # Light fighter / Attack
        4: 6,  # Heavy fighter / Heavy attack
        5: 3,  # Heavy fighter / Long range attack
    }
    # Same as above
    REARM_TIME_MAPPING = {
        1: 0,  # Superiority fighter / Attack
        2: 4000,  # Light fighter / Attack
        4: 6000,  # Heavy fighter / Heavy attack
        5: 20000,  # Heavy fighter / Long range attack
    }

    def __init__(self, effect):
        """Initialize from the program"""
        self.__effect = effect
        self.effectID = effect.ID if effect is not None else None
        self.active = False
        self.build()

    @reconstructor
    def init(self):
        """Initialize from the database"""
        self.__effect = None
        if self.effectID:
            # re-resolve the effect object from the fighter item's effects
            self.__effect = next(
                (
                    x
                    for x in self.fighter.item.effects.values()
                    if x.ID == self.effectID
                ),
                None,
            )
        if self.__effect is None:
            pyfalog.error("Effect (id: {0}) does not exist", self.effectID)
            return
        self.build()

    def build(self):
        # shared setup hook for both init paths; currently nothing to do
        pass

    @property
    def effect(self):
        return self.__effect

    @property
    def name(self):
        # prefer the display name, fall back to the internal effect name
        return self.__effect.getattr("displayName") or self.__effect.name

    @property
    def attrPrefix(self):
        # attribute-name prefix for this ability (e.g. "fighterAbilityAttackMissile")
        return self.__effect.getattr("prefix")

    @property
    def dealsDamage(self):
        # the ability deals damage when it has a damage multiplier attribute
        # or the fighter carries a charge
        attr = "{}DamageMultiplier".format(self.attrPrefix)
        return (
            attr in self.fighter.itemModifiedAttributes
            or self.fighter.charge is not None
        )

    @property
    def grouped(self):
        # is the ability applied per fighter (webs, returns False), or as a group (MWD, returned True)
        return self.__effect.getattr("grouped")

    @property
    def hasCharges(self):
        return self.__effect.getattr("hasCharges")

    @property
    def reloadTime(self):
        # reload time (ms) assuming a fully spent clip
        return self.getReloadTime()

    def getReloadTime(self, spentShots=None):
        """Return the time (ms) to refuel and rearm after firing ``spentShots``.

        With no argument a full clip is assumed.
        """
        # NOTE(review): max() floors the spent count at a full clip, so a
        # smaller ``spentShots`` has no effect -- confirm whether min()
        # (cap at clip size) was intended here.
        if spentShots is not None:
            spentShots = max(self.numShots, spentShots)
        else:
            spentShots = self.numShots
        # rearm time per shot only applies to charge-based abilities
        rearm_time = (
            self.REARM_TIME_MAPPING[
                self.fighter.getModifiedItemAttr("fighterSquadronRole")
            ]
            or 0
            if self.hasCharges
            else 0
        )
        return (
            self.fighter.getModifiedItemAttr("fighterRefuelingTime")
            + rearm_time * spentShots
        )

    @property
    def numShots(self):
        # clip size for charge-based abilities, 0 otherwise
        return (
            self.NUM_SHOTS_MAPPING[
                self.fighter.getModifiedItemAttr("fighterSquadronRole")
            ]
            or 0
            if self.hasCharges
            else 0
        )

    @property
    def cycleTime(self):
        # activation duration, taken from the "<prefix>Duration" attribute
        speed = self.fighter.getModifiedItemAttr("{}Duration".format(self.attrPrefix))
        return speed

    def getVolley(self, targetProfile=None):
        """Return one volley's damage as DmgTypes, optionally resisted.

        ``targetProfile`` attributes (emAmount, thermalAmount, ...) are
        treated as resistance fractions; missing attributes count as 0.
        """
        if not self.dealsDamage or not self.active:
            return DmgTypes(0, 0, 0, 0)
        if self.attrPrefix == "fighterAbilityLaunchBomb":
            # bombs take their damage from the charge, not the fighter item
            em = self.fighter.getModifiedChargeAttr("emDamage", 0)
            therm = self.fighter.getModifiedChargeAttr("thermalDamage", 0)
            kin = self.fighter.getModifiedChargeAttr("kineticDamage", 0)
            exp = self.fighter.getModifiedChargeAttr("explosiveDamage", 0)
        else:
            em = self.fighter.getModifiedItemAttr(
                "{}DamageEM".format(self.attrPrefix), 0
            )
            therm = self.fighter.getModifiedItemAttr(
                "{}DamageTherm".format(self.attrPrefix), 0
            )
            kin = self.fighter.getModifiedItemAttr(
                "{}DamageKin".format(self.attrPrefix), 0
            )
            exp = self.fighter.getModifiedItemAttr(
                "{}DamageExp".format(self.attrPrefix), 0
            )
        # scale by squadron size and the ability's damage multiplier
        dmgMult = self.fighter.amount * self.fighter.getModifiedItemAttr(
            "{}DamageMultiplier".format(self.attrPrefix), 1
        )
        volley = DmgTypes(
            em=em * dmgMult * (1 - getattr(targetProfile, "emAmount", 0)),
            thermal=therm * dmgMult * (1 - getattr(targetProfile, "thermalAmount", 0)),
            kinetic=kin * dmgMult * (1 - getattr(targetProfile, "kineticAmount", 0)),
            explosive=exp
            * dmgMult
            * (1 - getattr(targetProfile, "explosiveAmount", 0)),
        )
        return volley

    def getDps(self, targetProfile=None, cycleTimeOverride=None):
        """Return sustained DPS as DmgTypes (volley scaled by cycle time)."""
        volley = self.getVolley(targetProfile=targetProfile)
        if not volley:
            return DmgTypes(0, 0, 0, 0)
        cycleTime = (
            cycleTimeOverride if cycleTimeOverride is not None else self.cycleTime
        )
        # cycle time is in milliseconds; convert to damage per second
        dpsFactor = 1 / (cycleTime / 1000)
        dps = DmgTypes(
            em=volley.em * dpsFactor,
            thermal=volley.thermal * dpsFactor,
            kinetic=volley.kinetic * dpsFactor,
            explosive=volley.explosive * dpsFactor,
        )
        return dps

    def clear(self):
        # no cached state to invalidate; kept for interface parity
        pass
|
KindleUnpack | mobi_pagemap | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
from __future__ import absolute_import, division, print_function, unicode_literals
from .compatibility_utils import PY2, unicode_str
if PY2:
range = xrange
import re
import struct
# note: struct pack, unpack, unpack_from all require bytestring format
# data all the way up to at least python 2.7.5, python 3 okay with bytestring
# note: re requites the pattern to be the exact same type as the data to be searched in python3
# but u"" is not allowed for the pattern itself only b""
# Greedy value table shared by int_to_roman and roman_to_int, largest first.
_TABLE = [
    ("m", 1000),
    ("cm", 900),
    ("d", 500),
    ("cd", 400),
    ("c", 100),
    ("xc", 90),
    ("l", 50),
    ("xl", 40),
    ("x", 10),
    ("ix", 9),
    ("v", 5),
    ("iv", 4),
    ("i", 1),
]


def int_to_roman(i):
    """Convert a positive integer to a lowercase Roman numeral string.

    Values <= 0 yield the empty string.
    """
    chunks = []
    remaining = i
    for numeral, magnitude in _TABLE:
        while magnitude <= remaining:
            remaining -= magnitude
            chunks.append(numeral)
    return "".join(chunks)
def roman_to_int(s):
    """Parse a lowercase Roman numeral string into an integer.

    Consumes the string greedily from the left using _TABLE; trailing
    unrecognized characters are ignored.
    """
    total = 0
    remaining = s
    for numeral, magnitude in _TABLE:
        while remaining.startswith(numeral):
            total += magnitude
            remaining = remaining[len(numeral):]
    return total
# Matches one "(...)" tuple in a page-map spec string.
_pattern = r"""\(([^\)]*)\)"""
_tup_pattern = re.compile(_pattern, re.IGNORECASE)


def _parseNames(numpages, data):
    """Parse a page-map spec into per-page name labels.

    The spec is a sequence of "(startpos,type,value)" tuples where type is
    'a' (arabic numbering), 'r' (roman numbering) or 'c' (custom
    '|'-separated labels).  Each tuple (re)numbers pages from startpos
    (1-based) to the last page; later tuples override earlier ones.

    Returns (pagenames, pageMap): the list of labels (None where unnamed)
    and the normalized "(...),(...)" spec string.
    """
    data = unicode_str(data)
    pagenames = [None] * numpages
    pageMap = ""
    for m in re.finditer(_tup_pattern, data):
        tup = m.group(1)
        if pageMap != "":
            pageMap += ","
        pageMap += "(" + tup + ")"
        spos, nametype, svalue = tup.split(",")
        if nametype == "a" or nametype == "r":
            svalue = int(svalue)
        spos = int(spos)
        for i in range(spos - 1, numpages):
            if nametype == "r":
                pname = int_to_roman(svalue)
                svalue += 1
            elif nametype == "a":
                pname = "%s" % svalue
                svalue += 1
            elif nametype == "c":
                sp = svalue.find("|")
                if sp == -1:
                    pname = svalue
                else:
                    pname = svalue[0:sp]
                    svalue = svalue[sp + 1 :]
            else:
                # BUGFIX: previously fell through with `pname` unbound and
                # raised NameError at the assignment below; skip the rest of
                # this tuple instead.
                print("Error: unknown page numbering type", nametype)
                break
            pagenames[i] = pname
    return pagenames, pageMap
class PageMapProcessor:
    """Parse a Mobi page-map record into page names and offsets, and emit
    page-map.xml (KF8/EPUB) or APNX (Kindle sidecar) output."""

    def __init__(self, mh, data):
        self.mh = mh
        self.data = data
        self.pagenames = []
        self.pageoffsets = []
        self.pageMap = ""
        self.pm_len = 0
        self.pm_nn = 0
        # NOTE(review): looks like a typo -- the rest of the class uses
        # `pm_bits` (assigned below); `pn_bits` is never read.
        self.pn_bits = 0
        self.pmoff = None
        self.pmstr = ""
        print("Extracting Page Map Information")
        # 4-byte big-endian revision-string length at offset 0x10
        (rev_len,) = struct.unpack_from(b">L", self.data, 0x10)
        # skip over header, revision string length data, and revision string
        ptr = 0x14 + rev_len
        pm_1, self.pm_len, self.pm_nn, self.pm_bits = struct.unpack_from(
            b">4H", self.data, ptr
        )
        # print(pm_1, self.pm_len, self.pm_nn, self.pm_bits)
        # name-spec string, followed by the offset table
        self.pmstr = self.data[ptr + 8 : ptr + 8 + self.pm_len]
        self.pmoff = self.data[ptr + 8 + self.pm_len :]
        # offsets are 32-bit unless pm_bits says 16
        offsize = b">L"
        offwidth = 4
        if self.pm_bits == 16:
            offsize = b">H"
            offwidth = 2
        ptr = 0
        for i in range(self.pm_nn):
            (od,) = struct.unpack_from(offsize, self.pmoff, ptr)
            ptr += offwidth
            self.pageoffsets.append(od)
        self.pagenames, self.pageMap = _parseNames(self.pm_nn, self.pmstr)

    def getPageMap(self):
        """Return the normalized "(...),(...)" page-map spec string."""
        return self.pageMap

    def getNames(self):
        """Return the per-page name labels (None where unnamed)."""
        return self.pagenames

    def getOffsets(self):
        """Return the raw page offsets from the page-map record."""
        return self.pageoffsets

    # page-map.xml will be unicode but encoded to utf-8 immediately before being written to a file
    def generateKF8PageMapXML(self, k8proc):
        """Build page-map.xml for a KF8 book, mapping each named page's
        offset to a file (and id, when present) via *k8proc*."""
        pagemapxml = '<page-map xmlns="http://www.idpf.org/2007/opf">\n'
        for i in range(len(self.pagenames)):
            pos = self.pageoffsets[i]
            name = self.pagenames[i]
            if name is not None and name != "":
                [pn, dir, filename, skelpos, skelend, aidtext] = k8proc.getSkelInfo(pos)
                idtext = unicode_str(k8proc.getPageIDTag(pos))
                linktgt = unicode_str(filename)
                if idtext != "":
                    linktgt += "#" + idtext
                pagemapxml += '<page name="%s" href="%s/%s" />\n' % (name, dir, linktgt)
        pagemapxml += "</page-map>\n"
        return pagemapxml

    def generateAPNX(self, apnx_meta):
        """Serialize the page map as a Kindle APNX sidecar and return bytes."""
        # the JSON content header differs between KF8 and older formats
        if apnx_meta["format"] == "MOBI_8":
            content_header = (
                '{"contentGuid":"%(contentGuid)s","asin":"%(asin)s","cdeType":"%(cdeType)s","format":"%(format)s","fileRevisionId":"1","acr":"%(acr)s"}'
                % apnx_meta
            )
        else:
            content_header = (
                '{"contentGuid":"%(contentGuid)s","asin":"%(asin)s","cdeType":"%(cdeType)s","fileRevisionId":"1"}'
                % apnx_meta
            )
        content_header = content_header.encode("utf-8")
        page_header = '{"asin":"%(asin)s","pageMap":"%(pageMap)s"}' % apnx_meta
        page_header = page_header.encode("utf-8")
        # fixed-size binary header, then the two JSON headers, then offsets
        apnx = struct.pack(b">H", 1) + struct.pack(b">H", 1)
        apnx += struct.pack(b">I", 12 + len(content_header))
        apnx += struct.pack(b">I", len(content_header))
        apnx += content_header
        apnx += struct.pack(b">H", 1)
        apnx += struct.pack(b">H", len(page_header))
        apnx += struct.pack(b">H", self.pm_nn)
        apnx += struct.pack(b">H", 32)
        apnx += page_header
        for page in self.pageoffsets:
            apnx += struct.pack(b">L", page)
        return apnx
|
Todo | InfoWindowController | from Cocoa import *
from ToDoDocument import *
# Tags identifying the info-panel subviews swapped into the window.
NOTIFY_TAG = 0
RESCHEDULE_TAG = 1
NOTES_TAG = 2
# Row indices of the notify-length radio matrix.
NotifyLengthNone = 0
NotifyLengthQuarter = 1
NotifyLengthHour = 2
NotifyLengthDay = 3
NotifyLengthOther = 4
# Module-level singleton managed by sharedInfoWindowController().
_sharedInfoWindowController = None
class InfoWindowController (NSWindowController):
    """Controller for the shared To Do "Info" inspector window (PyObjC)."""
    # Interface Builder outlets, connected from ToDoInfoWindow.nib.
    dummyView = objc.IBOutlet()
    infoDate = objc.IBOutlet()
    infoItem = objc.IBOutlet()
    infoNotes = objc.IBOutlet()
    infoNotifyAMPM = objc.IBOutlet()
    infoNotifyHour = objc.IBOutlet()
    infoNotifyMinute = objc.IBOutlet()
    infoNotifyOtherHours = objc.IBOutlet()
    infoNotifySwitchMatrix = objc.IBOutlet()
    infoPopUp = objc.IBOutlet()
    infoSchedComplete = objc.IBOutlet()
    infoSchedDate = objc.IBOutlet()
    infoSchedMatrix = objc.IBOutlet()
    infoWindowViews = objc.IBOutlet()
    notesView = objc.IBOutlet()
    notifyView = objc.IBOutlet()
    reschedView = objc.IBOutlet()
    # the document currently being inspected
    __slots__ = ('_inspectingDocument', )
@objc.IBAction
def switchClicked_(self, sender):
    """Action for the info window's switch controls.

    Dispatches on which control sent the action: the AM/PM matrix updates
    the item's due time, the notify-length matrix its notification lead
    time, and the complete button marks the item done.  Afterwards the
    window is refreshed and the document is told the item changed.
    """
    dueSecs = 0
    idx = 0
    theItem = self._inspectingDocument.selectedItem()
    if theItem is None:
        return
    if sender is self.infoNotifyAMPM:
        if self.infoNotifyHour.intValue():
            pmFlag = (self.infoNotifyAMPM.selectedRow() == 1)
            dueSecs = ConvertTimeToSeconds(
                self.infoNotifyHour.intValue(),
                self.infoNotifyMinute.intValue(),
                pmFlag)
            theItem.setSecsUntilDue_(dueSecs)
    elif sender is self.infoNotifySwitchMatrix:
        idx = self.infoNotifySwitchMatrix.selectedRow()
        if not theItem:
            pass
        elif idx == NotifyLengthNone:
            theItem.setSecsUntilNotify_(0)
        elif idx == NotifyLengthQuarter:
            theItem.setSecsUntilNotify_(SECS_IN_HOUR/4)
        elif idx == NotifyLengthHour:
            theItem.setSecsUntilNotify_(SECS_IN_HOUR)
        elif idx == NotifyLengthDay:
            theItem.setSecsUntilNotify_(SECS_IN_DAY)
        elif idx == NotifyLengthOther:
            # BUGFIX: was `infoNotifyOtherHours.intValue()` (missing self.),
            # which raised NameError when the "other" row was selected.
            theItem.setSecsUntilNotify_(
                self.infoNotifyOtherHours.intValue() *
                SECS_IN_HOUR)
        else:
            NSLog("Error in selectedRow")
    elif sender is self.infoSchedComplete:
        if theItem:
            theItem.setStatus_(COMPLETE)
    elif sender is self.infoSchedMatrix:
        # left as an exercise in the objective-C code
        pass
    self.updateInfoWindow()
    self._inspectingDocument.selectedItemModified()
def textDidChange_(self, notification):
    """Delegate callback: live-sync the notes text view into the model.

    BUGFIX: previously called ``selectItemModified`` (nonexistent typo);
    every other handler in this class calls ``selectedItemModified``.
    """
    if notification.object() is self.infoNotes:
        self._inspectingDocument.selectedItem().setNotes_(self.infoNotes.string())
        self._inspectingDocument.selectedItemModified()
def textDidEndEditing_(self, notification):
if notification.object() is self.infoNotes:
self._inspectingDocument.selectedItem().setNotes_(infoNotes.string())
self._inspectingDocument.selectedItemModified()
def controlTextDidEndEditing_(self, notification):
#print "controlTextDidEndEditing:", notification.description()
dueSecs = 0
theItem = self._inspectingDocument.selectedItem()
if theItem is None:
return
if (notification.object() is self.infoNotifyHour) or \
(notification.object() is self.infoNotifyMinute):
dueSecs = ConvertTimeToSeconds(
self.infoNotifyHour.intValue(),
self.infoNotifyMinute.intValue(),
self.infoNotifyAMPM.cellAtRow_column_(1,0).state())
#print "New dueSecs: ", dueSecs
#theItem.setSecsUntilDue_(dueSecs)
elif notification.object() is self.infoNotifyOtherHours:
if self.infoNotifySwitchMatrix.selectedRow() == NotifyLengthOther:
theItem.setSecsUntilNotify_(self.infoNotifyOtherHours.intValue() * SECS_IN_HOUR)
else:
return
elif notification.object() is self.infoSchedDate:
# Left as an exercise
pass
self._inspectingDocument.selectedItemModified()
@classmethod
def sharedInfoWindowController(self):
global _sharedInfoWindowController
if not _sharedInfoWindowController:
_sharedInfoWindowController = InfoWindowController.alloc().init()
return _sharedInfoWindowController
def init(self):
# XXX: Not sure if the native code works correctly if the return value
# from super != self.
self = self.initWithWindowNibName_("ToDoInfoWindow")
if self:
self.setWindowFrameAutosaveName_("Info")
return self
def dump_outlets(self):
print 'dummyView', self.dummyView
print 'infoDate', self.infoDate
print 'infoItem', self.infoItem
print 'infoNotes', self.infoNotes
print 'infoNotifyAMPM', self.infoNotifyAMPM
print 'infoNotifyHour', self.infoNotifyHour
print 'infoNotifyMinute', self.infoNotifyMinute
print 'infoNotifyOtherHours', self.infoNotifyOtherHours
print 'infoNotifySwitchMatrix', self.infoNotifySwitchMatrix
print 'infoPopUp', self.infoPopUp
print 'infoSchedComplet', self.infoSchedComplete
print 'infoSchedDate', self.infoSchedDate
print 'infoSchedMatrix', self.infoSchedMatrix
print 'infoWindowViews', self.infoWindowViews
print 'notesView', self.notesView
print 'notifyView', self.notifyView
print 'reschedView', self.reschedView
def windowDidLoad(self):
NSWindowController.windowDidLoad(self)
# XXX: The calls to retain may not be necessary.
self.notifyView.retain()
self.notifyView.removeFromSuperview()
self.reschedView.retain()
self.reschedView.removeFromSuperview()
self.notesView.retain()
self.notesView.removeFromSuperview()
self.infoWindowViews = None
self.infoNotes.setDelegate_(self)
self.swapInfoWindowView_(self)
self.setMainWindow_(NSApp().mainWindow())
self.updateInfoWindow()
NSNotificationCenter.defaultCenter().addObserver_selector_name_object_(
self, "mainWindowChanged:",
NSWindowDidBecomeMainNotification,
None)
NSNotificationCenter.defaultCenter().addObserver_selector_name_object_(
self, "mainWindowResigned:",
NSWindowDidResignMainNotification,
None)
NSNotificationCenter.defaultCenter().addObserver_selector_name_object_(
self, "selectedItemChanged:",
ToDoItemChangedNotification,
None)
def __del__(self): # dealloc
NSNotificationCenter.defaultCenter().removeObserver_(self)
# Cannot to this
NSWindowController.dealloc(self)
def updateInfoWindow(self):
minute = 0
hour = 0
selected = self.infoPopUp.selectedItem().tag()
selectedItem = self._inspectingDocument.selectedItem()
if isinstance(selectedItem, ToDoItem):
self.infoItem.setStringValue_(selectedItem.itemName())
self.infoDate.setStringValue_(
selectedItem.day().descriptionWithCalendarFormat_timeZone_locale_("%a, %b %d %Y", NSTimeZone.localTimeZone(), None))
if selected == NOTIFY_TAG:
dueSecs = selectedItem.secsUntilDue()
hour, minutes, pmFlag = ConvertSecondsToTime(dueSecs)
self.infoNotifyAMPM.cellAtRow_column_(0, 0).setState_(not pmFlag)
self.infoNotifyAMPM.cellAtRow_column_(1, 0).setState_(pmFlag)
self.infoNotifyHour.setIntValue_(hour)
self.infoNotifyMinute.setIntValue_(minute)
notifySecs = selectedItem.secsUntilNotify()
clearButtonMatrix(self.infoNotifySwitchMatrix)
if notifySecs == 0:
self.infoNotifySwitchMatrix.cellAtRow_column_(NotifyLengthNone, 0).setState_(NSOnState)
elif notifySecs == SECS_IN_HOUR / 4:
self.infoNotifySwitchMatrix.cellAtRow_column_(NotifyLengthQuarter, 0).setState_(NSOnState)
elif notifySecs == SECS_IN_HOUR:
self.infoNotifySwitchMatrix.cellAtRow_column_(NotifyLengthHour, 0).setState_(NSOnState)
elif notifySecs == SECS_IN_DAY:
self.infoNotifySwitchMatrix.cellAtRow_column_(NotifyLengthDay, 0).setState_(NSOnState)
else:
self.infoNotifySwitchMatrix.cellAtRow_column_(NotifyLengthOther, 0).setState_(NSOnState)
self.infoNotifyOtherHours.setIntValue_(notifySecs / SECS_IN_HOUR)
elif selected == RESCHEDULE_TAG:
# left as an exercise
pass
elif selected == NOTES_TAG:
self.infoNotes.setString_(selectedItem.notes())
else:
self.infoItem.setStringValue_("")
self.infoDate.setStringValue_("")
self.infoNotifyHour.setStringValue_("")
self.infoNotifyMinute.setStringValue_("")
self.infoNotifyAMPM.cellAtRow_column_(0, 0).setState_(NSOnState)
self.infoNotifyAMPM.cellAtRow_column_(1, 0).setState_(NSOffState)
clearButtonMatrix(self.infoNotifySwitchMatrix)
self.infoNotifySwitchMatrix.cellAtRow_column_(NotifyLengthNone, 0).setState_(NSOnState)
self.infoNotifyOtherHours.setStringValue_("")
self.infoNotes.setString_("")
def setMainWindow_(self, mainWindow):
if not mainWindow:
return
controller = mainWindow.windowController()
if isinstance(controller.document(), ToDoDocument):
self._inspectingDocument = controller.document()
else:
self._inspectingDocument = None
self.updateInfoWindow()
def mainWindowChanged_(self, notification):
self.setMainWindow_(notification.object())
def mainWindowResigned_(self, notification):
self.setMainWindow_(None)
@objc.IBAction
def swapInfoWindowView_(self, sender):
selected = self.infoPopUp.selectedItem().tag()
if selected == NOTIFY_TAG:
newView = self.notifyView
elif selected == RESCHEDULE_TAG:
newView = self.reschedView
elif selected == NOTES_TAG:
newView = self.notesView
if self.dummyView.contentView() != newView:
self.dummyView.setContentView_(newView)
def selectedItemChanged_(self, notification):
self.updateInfoWindow()
def clearButtonMatrix(matrix):
    """Switch off every cell in the first column of an NSMatrix-like object."""
    numRows, _numCols = matrix.getNumberOfRows_columns_()
    for row in range(numRows):
        buttonCell = matrix.cellAtRow_column_(row, 0)
        if not buttonCell:
            continue
        buttonCell.setState_(False)
|
fitCommands | helpers | import math
import eos.db
import wx
from eos.const import FittingModuleState
from eos.saveddata.booster import Booster
from eos.saveddata.cargo import Cargo
from eos.saveddata.drone import Drone
from eos.saveddata.fighter import Fighter
from eos.saveddata.implant import Implant
from eos.saveddata.module import Module
from logbook import Logger
from service.market import Market
from utils.repr import makeReprStr
pyfalog = Logger(__name__)
class InternalCommandHistory:
    """Private undo/redo history backed by a wx.CommandProcessor.

    Used to run a group of commands as one logical operation and to roll
    the whole group back if something goes wrong.
    """

    def __init__(self):
        self.__processor = wx.CommandProcessor()

    def submit(self, command):
        """Execute and record a single command; return its success flag."""
        return self.__processor.Submit(command)

    def submitBatch(self, *commands):
        """Execute commands in order; undo the batch on the first failure.

        Returns True only when every command succeeded.
        """
        for command in commands:
            if self.__processor.Submit(command):
                continue
            # Roll back the part of the batch that already went through,
            # newest first.
            for recorded in reversed(self.__processor.Commands):
                if recorded in commands:
                    self.__processor.Undo()
            return False
        return True

    def undoAll(self):
        """Undo every recorded command, newest first.

        If any undo fails, already-undone commands are re-applied (stopping
        at the first redo failure). The history is cleared either way;
        returns True only when everything was undone.
        """
        undone = []
        for command in reversed(self.__processor.Commands):
            if not command.Undo():
                for redoTarget in reversed(undone):
                    if not redoTarget.Do():
                        break
                self.__processor.ClearCommands()
                return False
            undone.append(command)
        self.__processor.ClearCommands()
        return True

    def __len__(self):
        return len(self.__processor.Commands)
class ModuleInfo:
    """Plain-data snapshot of a fitted module, used by fit commands for undo/redo."""
    def __init__(
        self,
        itemID,
        baseItemID=None,
        mutaplasmidID=None,
        mutations=None,
        chargeID=None,
        state=None,
        spoolType=None,
        spoolAmount=None,
        rahPattern=None,
    ):
        # baseItemID + mutaplasmidID + mutations are only meaningful for
        # mutated ("abyssal") modules.
        self.itemID = itemID
        self.baseItemID = baseItemID
        self.mutaplasmidID = mutaplasmidID
        self.mutations = mutations
        self.chargeID = chargeID
        self.state = state
        self.spoolType = spoolType
        self.spoolAmount = spoolAmount
        self.rahPattern = rahPattern
    @classmethod
    def fromModule(cls, mod, unmutate=False):
        """Capture *mod* into an info object; None stays None.

        With unmutate=True a mutated module is recorded as its base item,
        dropping the mutaplasmid reference and attribute mutations.
        """
        if mod is None:
            return None
        if unmutate and mod.isMutated:
            info = cls(
                itemID=mod.baseItemID,
                baseItemID=None,
                mutaplasmidID=None,
                mutations={},
                chargeID=mod.chargeID,
                state=mod.state,
                spoolType=mod.spoolType,
                spoolAmount=mod.spoolAmount,
                rahPattern=mod.rahPatternOverride,
            )
        else:
            info = cls(
                itemID=mod.itemID,
                baseItemID=mod.baseItemID,
                mutaplasmidID=mod.mutaplasmidID,
                mutations={m.attrID: m.value for m in mod.mutators.values()},
                chargeID=mod.chargeID,
                state=mod.state,
                spoolType=mod.spoolType,
                spoolAmount=mod.spoolAmount,
                rahPattern=mod.rahPatternOverride,
            )
        return info
    def toModule(self, fallbackState=None):
        """Rebuild a Module from this snapshot.

        Returns None when the item (or its charge) cannot be resolved.
        *fallbackState* is used only when no state was recorded.
        """
        mkt = Market.getInstance()
        item = mkt.getItem(self.itemID, eager=("attributes", "group.category"))
        # Mutated modules need both the base item and the mutaplasmid.
        if self.baseItemID and self.mutaplasmidID:
            baseItem = mkt.getItem(
                self.baseItemID, eager=("attributes", "group.category")
            )
            mutaplasmid = eos.db.getDynamicItem(self.mutaplasmidID)
        else:
            baseItem = None
            mutaplasmid = None
        try:
            mod = Module(item, baseItem=baseItem, mutaplasmid=mutaplasmid)
        except ValueError:
            pyfalog.warning("Invalid item: {}".format(self.itemID))
            return None
        if self.mutations is not None:
            # Only set values for mutators the rebuilt module actually has.
            for attrID, mutator in mod.mutators.items():
                if attrID in self.mutations:
                    mutator.value = self.mutations[attrID]
        if self.spoolType is not None and self.spoolAmount is not None:
            mod.spoolType = self.spoolType
            mod.spoolAmount = self.spoolAmount
        mod.rahPatternOverride = self.rahPattern
        if self.state is not None:
            # Clamp to the highest state the module supports if the recorded
            # state is no longer valid for it.
            if mod.isValidState(self.state):
                mod.state = self.state
            else:
                mod.state = mod.getMaxState(proposedState=self.state)
        elif fallbackState is not None:
            if mod.isValidState(fallbackState):
                mod.state = fallbackState
        if self.chargeID is not None:
            charge = mkt.getItem(self.chargeID, eager=("attributes",))
            if charge is None:
                # An unresolvable charge aborts the whole rebuild.
                pyfalog.warning("Cannot set charge {}".format(self.chargeID))
                return None
            mod.charge = charge
        return mod
    def __eq__(self, other):
        # Field-by-field equality; defining __eq__ makes instances unhashable,
        # which is fine for command bookkeeping.
        if not isinstance(other, ModuleInfo):
            return False
        return all(
            (
                self.itemID == other.itemID,
                self.baseItemID == other.baseItemID,
                self.mutaplasmidID == other.mutaplasmidID,
                self.mutations == other.mutations,
                self.chargeID == other.chargeID,
                self.state == other.state,
                self.spoolType == other.spoolType,
                self.spoolAmount == other.spoolAmount,
                self.rahPattern == other.rahPattern,
            )
        )
    def __repr__(self):
        return makeReprStr(
            self,
            [
                "itemID",
                "baseItemID",
                "mutaplasmidID",
                "mutations",
                "chargeID",
                "state",
                "spoolType",
                "spoolAmount",
                "rahPattern",
            ],
        )
class DroneInfo:
    """Plain-data snapshot of a drone stack, used by fit commands for undo/redo."""
    def __init__(
        self,
        amount,
        amountActive,
        itemID,
        baseItemID=None,
        mutaplasmidID=None,
        mutations=None,
    ):
        # baseItemID + mutaplasmidID + mutations are only meaningful for
        # mutated drones.
        self.itemID = itemID
        self.baseItemID = baseItemID
        self.mutaplasmidID = mutaplasmidID
        self.mutations = mutations
        self.amount = amount
        self.amountActive = amountActive
    @classmethod
    def fromDrone(cls, drone):
        """Capture *drone* into an info object; None stays None."""
        if drone is None:
            return None
        info = cls(
            itemID=drone.itemID,
            amount=drone.amount,
            amountActive=drone.amountActive,
            baseItemID=drone.baseItemID,
            mutaplasmidID=drone.mutaplasmidID,
            mutations={m.attrID: m.value for m in drone.mutators.values()},
        )
        return info
    def toDrone(self):
        """Rebuild a Drone from this snapshot, or None for an invalid item."""
        mkt = Market.getInstance()
        item = mkt.getItem(self.itemID, eager=("attributes", "group.category"))
        # Mutated drones need both the base item and the mutaplasmid.
        if self.baseItemID and self.mutaplasmidID:
            baseItem = mkt.getItem(
                self.baseItemID, eager=("attributes", "group.category")
            )
            mutaplasmid = eos.db.getDynamicItem(self.mutaplasmidID)
        else:
            baseItem = None
            mutaplasmid = None
        try:
            drone = Drone(item, baseItem=baseItem, mutaplasmid=mutaplasmid)
        except ValueError:
            pyfalog.warning("Invalid item: {}".format(self.itemID))
            return None
        if self.mutations is not None:
            # Only set values for mutators the rebuilt drone actually has.
            for attrID, mutator in drone.mutators.items():
                if attrID in self.mutations:
                    mutator.value = self.mutations[attrID]
        drone.amount = self.amount
        drone.amountActive = self.amountActive
        return drone
    def __repr__(self):
        return makeReprStr(
            self,
            [
                "itemID",
                "amount",
                "amountActive",
                "baseItemID",
                "mutaplasmidID",
                "mutations",
            ],
        )
class FighterInfo:
    """Plain-data snapshot of a fighter squadron, used for undo/redo."""

    def __init__(self, itemID, amount=None, state=None, abilities=None):
        self.itemID = itemID
        self.amount = amount
        self.state = state
        self.abilities = abilities

    @classmethod
    def fromFighter(cls, fighter):
        """Capture *fighter* into an info object; None stays None."""
        if fighter is None:
            return None
        return cls(
            itemID=fighter.itemID,
            amount=fighter.amount,
            state=fighter.active,
            abilities={fa.effectID: fa.active for fa in fighter.abilities},
        )

    def toFighter(self):
        """Rebuild a Fighter from this snapshot, or None for an invalid item."""
        item = Market.getInstance().getItem(
            self.itemID, eager=("attributes", "group.category")
        )
        try:
            fighter = Fighter(item)
        except ValueError:
            pyfalog.warning("Invalid item: {}".format(self.itemID))
            return None
        if self.amount is not None:
            fighter.amount = self.amount
        if self.state is not None:
            fighter.active = self.state
        if self.abilities is not None:
            # Only touch abilities we recorded; unknown ones keep their state.
            for ability in fighter.abilities:
                ability.active = self.abilities.get(ability.effectID, ability.active)
        return fighter

    def __repr__(self):
        return makeReprStr(self, ["itemID", "amount", "state", "abilities"])
class ImplantInfo:
    """Plain-data snapshot of an implant, used for undo/redo."""

    def __init__(self, itemID, state=None):
        self.itemID = itemID
        self.state = state

    @classmethod
    def fromImplant(cls, implant):
        """Capture *implant* into an info object; None stays None."""
        if implant is None:
            return None
        return cls(itemID=implant.itemID, state=implant.active)

    def toImplant(self):
        """Rebuild an Implant from this snapshot, or None for an invalid item."""
        item = Market.getInstance().getItem(
            self.itemID, eager=("attributes", "group.category")
        )
        try:
            implant = Implant(item)
        except ValueError:
            pyfalog.warning("Invalid item: {}".format(self.itemID))
            return None
        if self.state is not None:
            implant.active = self.state
        return implant

    def __repr__(self):
        return makeReprStr(self, ["itemID", "state"])
class BoosterInfo:
    """Plain-data snapshot of a booster, used by fit commands for undo/redo."""
    def __init__(self, itemID, state=None, sideEffects=None):
        self.itemID = itemID
        self.state = state
        # Mapping of side-effect effectID -> active flag.
        self.sideEffects = sideEffects
    @classmethod
    def fromBooster(cls, booster):
        """Capture *booster* into an info object; None stays None."""
        if booster is None:
            return None
        info = cls(
            itemID=booster.itemID,
            state=booster.active,
            sideEffects={se.effectID: se.active for se in booster.sideEffects},
        )
        return info
    def toBooster(self):
        """Rebuild a Booster from this snapshot, or None for an invalid item."""
        item = Market.getInstance().getItem(
            self.itemID, eager=("attributes", "group.category")
        )
        try:
            booster = Booster(item)
        except ValueError:
            pyfalog.warning("Invalid item: {}".format(self.itemID))
            return None
        if self.state is not None:
            booster.active = self.state
        if self.sideEffects is not None:
            # Only touch side effects we recorded; others keep their state.
            for sideEffect in booster.sideEffects:
                sideEffect.active = self.sideEffects.get(
                    sideEffect.effectID, sideEffect.active
                )
        return booster
    def __repr__(self):
        return makeReprStr(self, ["itemID", "state", "sideEffects"])
class CargoInfo:
    """Plain-data snapshot of a cargo stack (item + amount)."""

    def __init__(self, itemID, amount):
        self.itemID = itemID
        self.amount = amount

    @classmethod
    def fromCargo(cls, cargo):
        """Capture *cargo* into an info object; None stays None."""
        if cargo is None:
            return None
        return cls(itemID=cargo.itemID, amount=cargo.amount)

    def toCargo(self):
        """Rebuild a Cargo object from this snapshot."""
        cargo = Cargo(Market.getInstance().getItem(self.itemID))
        cargo.amount = self.amount
        return cargo

    def __repr__(self):
        return makeReprStr(self, ["itemID", "amount"])
def activeStateLimit(itemIdentity):
    """Highest fitting state allowed for the given item.

    Items carrying any of the effects below are capped at ONLINE (they must
    not be force-activated from the fitting window); everything else may be
    set ACTIVE.
    """
    cappedEffects = frozenset((
        "moduleBonusAssaultDamageControl",
        "moduleBonusIndustrialInvulnerability",
        "microJumpDrive",
        "microJumpPortalDrive",
        "emergencyHullEnergizer",
        "cynosuralGeneration",
        "jumpPortalGeneration",
        "jumpPortalGenerationBO",
        "cloneJumpAccepting",
        "cloakingWarpSafe",
        "cloakingPrototype",
        "cloaking",
        "massEntanglerEffect5",
        "electronicAttributeModifyOnline",
        "targetPassively",
        "cargoScan",
        "shipScan",
        "surveyScan",
        "targetSpectrumBreakerBonus",
        "interdictionNullifierBonus",
        "warpCoreStabilizerActive",
        "industrialItemCompression",
    ))
    item = Market.getInstance().getItem(itemIdentity)
    if cappedEffects.intersection(item.effects):
        return FittingModuleState.ONLINE
    return FittingModuleState.ACTIVE
def droneStackLimit(fit, itemIdentity):
    """Maximum size of one drone stack of the given type for *fit*.

    The hard cap is the hull's max active drones (but never below 5); a
    positive per-item release limit lowers it further.
    """
    item = Market.getInstance().getItem(itemIdentity)
    hardLimit = max(5, fit.extraAttributes["maxActiveDrones"])
    releaseLimit = fit.getReleaseLimitForDrone(item)
    if releaseLimit > 0:
        return min(hardLimit, releaseLimit)
    return hardLimit
def restoreCheckedStates(fit, stateInfo, ignoreModPoss=()):
    """Re-apply previously saved module/drone states onto *fit*.

    *stateInfo* is either None (nothing to restore) or a 3-tuple of
    position-keyed mappings: local module states, projected module states
    and projected drone active amounts. Positions listed in
    *ignoreModPoss* are skipped for local modules only.
    """
    if stateInfo is None:
        return
    modStates, projModStates, projDroneAmounts = stateInfo
    for position, state in modStates.items():
        if position in ignoreModPoss:
            continue
        fit.modules[position].state = state
    for position, state in projModStates.items():
        fit.projectedModules[position].state = state
    for position, amountActive in projDroneAmounts.items():
        fit.projectedDrones[position].amountActive = amountActive
def restoreRemovedDummies(fit, dummyInfo):
    """Re-insert dummy slots that were removed, e.g. by a subsystem change.

    *dummyInfo* maps position -> slot type, or is None when nothing was
    removed. Insertions run in ascending position order so earlier inserts
    do not shift the later target positions.
    """
    if dummyInfo is None:
        return
    for position in sorted(dummyInfo):
        fit.modules.insert(position, Module.buildEmpty(dummyInfo[position]))
def getSimilarModPositions(mods, mainMod):
    """Positions in *mods* whose modules are 'similar' to *mainMod*.

    A module counts as similar when it is the main module itself, shares
    its item ID, or shares its group, market group and effect set.
    Empty slots are never included.
    """
    sMkt = Market.getInstance()
    refGroupID = getattr(sMkt.getGroupByItem(mainMod.item), "ID", None)
    refMktGroupID = getattr(sMkt.getMarketGroupByItem(mainMod.item), "ID", None)
    refEffects = set(getattr(mainMod.item, "effects", ()))

    def isSimilar(mod):
        # The selected module always counts.
        if mod is mainMod:
            return True
        if mod.itemID is None:
            return False
        if mod.itemID == mainMod.itemID:
            return True
        groupID = getattr(sMkt.getGroupByItem(mod.item), "ID", None)
        mktGroupID = getattr(sMkt.getMarketGroupByItem(mod.item), "ID", None)
        effects = set(getattr(mod.item, "effects", ()))
        return (
            groupID is not None
            and groupID == refGroupID
            and mktGroupID is not None
            and mktGroupID == refMktGroupID
            and effects == refEffects
        )

    return [
        position
        for position, mod in enumerate(mods)
        if not mod.isEmpty and isSimilar(mod)
    ]
def getSimilarFighters(fighters, mainFighter):
    """Fighters from *fighters* that are 'similar' to *mainFighter*.

    A fighter counts as similar when it is the main fighter itself, shares
    its item ID, or shares its group and has an identical, non-empty
    ability set.
    """
    sMkt = Market.getInstance()
    refGroupID = getattr(sMkt.getGroupByItem(mainFighter.item), "ID", None)
    refAbilityIDs = set(a.effectID for a in mainFighter.abilities)

    def isSimilar(fighter):
        # The selected fighter always counts.
        if fighter is mainFighter:
            return True
        if fighter.itemID is None:
            return False
        if fighter.itemID == mainFighter.itemID:
            return True
        groupID = getattr(sMkt.getGroupByItem(fighter.item), "ID", None)
        abilityIDs = set(a.effectID for a in fighter.abilities)
        return (
            groupID is not None
            and groupID == refGroupID
            and len(abilityIDs) > 0
            and abilityIDs == refAbilityIDs
        )

    return [fighter for fighter in fighters if isSimilar(fighter)]
|
display | navigation | # -*- coding: utf-8 -*-
"""
flaskbb.display.navigation
~~~~~~~~~~~~~~~~~~~~~~~~~~
Helpers to create navigation elements in FlaskBB Templates
:copyright: (c) 2018 the FlaskBB Team
:license: BSD, see LICENSE for more details
"""
from abc import ABC, abstractmethod, abstractproperty
from enum import Enum

import attr
__all__ = (
"NavigationContentType",
"NavigationLink",
"NavigationExternalLink",
"NavigationHeader",
"NavigationDivider",
)
class NavigationContentType(Enum):
    """
    Content type enum for navigation items.
    Templates use it to decide how each navigation entry is rendered.
    """
    # Internal FlaskBB link (endpoint-based).
    link = 0
    # Link to a resource outside this FlaskBB instance.
    external_link = 1
    # Non-clickable section heading text.
    header = 2
    # Visual separator between groups of items.
    divider = 3
class NavigationItem(ABC):
    """
    Abstract NavigationItem class. Not meant for use but provides the common
    interface for navigation items.
    """

    # Modernized: ``abc.abstractproperty`` is deprecated; the stacked
    # ``@property`` + ``@abstractmethod`` form is equivalent. Subclasses
    # satisfy it with a plain class attribute, which ABCMeta accepts.
    @property
    @abstractmethod
    def content_type(self):
        """The NavigationContentType of this item."""
@attr.s(eq=True, order=True, hash=True, repr=True, frozen=True, slots=True)
class NavigationLink(NavigationItem):
    """
    Representation of an internal FlaskBB navigation link::
        NavigationLink(
            endpoint="user.profile",
            name="{}'s Profile".format(user.username),
            icon="fa fa-home",
            active=False, # default
            urlforkwargs={"username": user.username}
        )
    """
    # Flask endpoint name (presumably resolved via url_for — see example).
    endpoint = attr.ib()
    # Human-readable link text.
    name = attr.ib()
    # Optional CSS icon classes.
    icon = attr.ib(default="")
    # Whether the entry is rendered as the currently-active one.
    active = attr.ib(default=False)
    # Extra keyword arguments for URL building.
    urlforkwargs = attr.ib(factory=dict)
    # Class-level discriminator, not an attrs field.
    content_type = NavigationContentType.link
@attr.s(eq=True, order=True, hash=True, repr=True, frozen=True, slots=True)
class NavigationExternalLink(NavigationItem):
    """
    Representation of an external navigation link::
        NavigationExternalLink(
            uri="mailto:{}".format(user.email),
            name="Email {}".format(user.username),
            icon="fa fa-at"
        )
    """
    # Full target URI (any scheme, e.g. mailto:, https:).
    uri = attr.ib()
    # Human-readable link text.
    name = attr.ib()
    # Optional CSS icon classes.
    icon = attr.ib(default="")
    # Class-level discriminator, not an attrs field.
    content_type = NavigationContentType.external_link
@attr.s(eq=True, order=True, hash=True, repr=True, frozen=True, slots=True)
class NavigationHeader(NavigationItem):
    """
    Representation of header text shown in a navigation bar::
        NavigationHeader(
            text="A header",
            icon="fa fa-exclamation"
        )
    """
    # Heading text to display.
    text = attr.ib()
    # Optional CSS icon classes.
    icon = attr.ib(default="")
    # Class-level discriminator, not an attrs field.
    content_type = NavigationContentType.header
@attr.s(eq=True, order=True, hash=True, repr=True, frozen=True, slots=True)
class NavigationDivider(NavigationItem):
    """
    Representation of a divider in a navigation bar::
        NavigationDivider()
    """
    # Class-level discriminator, not an attrs field; dividers carry no data.
    content_type = NavigationContentType.divider
|
extensions | enqueue_in_mediaplayer | # -*- coding: utf-8 -*-
# Extension script to add a context menu item for enqueueing episodes in a player
# Requirements: gPodder 3.x (or "tres" branch newer than 2011-06-08)
# (c) 2011-06-08 Thomas Perl <thp.io/about>
# Released under the same license terms as gPodder itself.
import functools
import logging
import gpodder
from gpodder import util
logger = logging.getLogger(__name__)
_ = gpodder.gettext
__title__ = _("Enqueue/Resume in media players")
__description__ = _(
"Add a context menu item for enqueueing/resuming playback of episodes in installed media players"
)
__authors__ = "Thomas Perl <thp@gpodder.org>, Bernd Schlapsi <brot@gmx.info>"
__doc__ = "https://gpodder.github.io/docs/extensions/enqueueinmediaplayer.html"
__payment__ = "https://flattr.com/submit/auto?user_id=BerndSch&url=http://wiki.gpodder.org/wiki/Extensions/EnqueueInMediaplayer"
__category__ = "interface"
__only_for__ = "gtk"
# Default extension settings, overridable via gPodder's extension config.
DefaultConfig = {
    "enqueue_after_download": False,  # Set to True to enqueue an episode right after downloading
    "default_player": "",  # Set to the player to be used for auto-enqueueing (otherwise pick first installed)
}
class Player(object):
    """Base class describing an external media player gPodder can enqueue into.

    Subclasses decide how the player is detected (``is_installed``) and how
    files are handed to it (``open_files``).
    """

    def __init__(self, slug, application, command):
        self.slug = slug
        self.application = application
        # Context-menu label, e.g. "Enqueue in/VLC".
        self.title = "/".join((_("Enqueue in"), application))
        self.command = command
        # Set to the gPodder UI object once available, so enqueueing can
        # refresh the episode list icons.
        self.gpodder = None

    def is_installed(self):
        """Return True when the player is available on this system."""
        # BUG FIX: ``NotImplemented`` is a constant, not an exception class —
        # raising it fails with "NotImplemented is not callable". Use
        # NotImplementedError instead (same for open_files below).
        raise NotImplementedError("Must be implemented by subclass")

    def open_files(self, filenames):
        """Hand *filenames* to the player for playback/enqueueing."""
        raise NotImplementedError("Must be implemented by subclass")

    def enqueue_episodes(self, episodes, config=None):
        """Enqueue *episodes* in this player and mark them as played."""
        filenames = [episode.get_playback_url(config=config) for episode in episodes]
        self.open_files(filenames)
        for episode in episodes:
            episode.playback_mark()
            if self.gpodder is not None:
                self.gpodder.update_episode_list_icons(selected=True)
class FreeDesktopPlayer(Player):
    """Player launched via an executable found on the user's $PATH."""

    def is_installed(self):
        """Installed when the command's executable resolves on $PATH."""
        executable = self.command[0]
        return util.find_command(executable) is not None

    def open_files(self, filenames):
        """Append *filenames* to the base command line and spawn the player."""
        util.Popen(self.command + filenames)
class Win32Player(Player):
    """Windows media player resolved through a registry "shell command" key."""
    def is_installed(self):
        """Return True on win32 when the registry key resolves to a command.

        Side effect: on success, ``self.command`` is replaced by the command
        line read from the registry (consumed later by ``open_files``).
        """
        if not gpodder.ui.win32:
            return False
        # Imported lazily: the gtkui module is only importable on win32 setups.
        from gpodder.gtkui.desktopfile import win32_read_registry_key
        try:
            self.command = win32_read_registry_key(self.command)
            return True
        except Exception as e:
            logger.warning("Win32 player not found: %s (%s)", self.command, e)
            return False
    def open_files(self, filenames):
        """Launch one player process per formatted desktop command line."""
        for cmd in util.format_desktop_command(self.command, filenames):
            util.Popen(cmd, close_fds=True)
class MPRISResumer(FreeDesktopPlayer):
    """
    Resume episode playback at the saved position via the player's
    MPRIS2 D-Bus interface (OpenUri + SetPosition).
    """
    OBJECT_PLAYER = "/org/mpris/MediaPlayer2"
    OBJECT_DBUS = "/org/freedesktop/DBus"
    INTERFACE_PLAYER = "org.mpris.MediaPlayer2.Player"
    INTERFACE_PROPS = "org.freedesktop.DBus.Properties"
    SIGNAL_PROP_CHANGE = "PropertiesChanged"
    NAME_DBUS = "org.freedesktop.DBus"
    def __init__(self, slug, application, command, bus_name):
        super(MPRISResumer, self).__init__(slug, application, command)
        # Replace the inherited "Enqueue in/..." label.
        self.title = "/".join((_("Resume in"), application))
        self.bus_name = bus_name
        self.player = None
        self.position_us = None  # resume position in microseconds
        self.url = None  # file URI we asked the player to open
    def is_installed(self):
        """MPRIS resuming is POSIX-only; otherwise same check as the base."""
        if gpodder.ui.win32:
            return False
        return util.find_command(self.command[0]) is not None
    def enqueue_episodes(self, episodes, config=None):
        """Resume the first episode at its saved position; mark all played.

        NOTE(review): only episodes[0] is actually opened — the context menu
        gates this on exactly one episode being selected.
        """
        self.do_enqueue(
            episodes[0].get_playback_url(config=config), episodes[0].current_position
        )
        for episode in episodes:
            episode.playback_mark()
            if self.gpodder is not None:
                self.gpodder.update_episode_list_icons(selected=True)
    def init_dbus(self):
        """Connect to the running player; False when it is not on the bus.

        Side effect: subscribes on_prop_change to PropertiesChanged.
        """
        bus = gpodder.dbus_session_bus
        if not bus.name_has_owner(self.bus_name):
            logger.debug("MPRISResumer %s is not there...", self.bus_name)
            return False
        self.player = bus.get_object(self.bus_name, self.OBJECT_PLAYER)
        self.signal_match = self.player.connect_to_signal(
            self.SIGNAL_PROP_CHANGE,
            self.on_prop_change,
            dbus_interface=self.INTERFACE_PROPS,
        )
        return True
    def enqueue_when_ready(self, filename, pos):
        """Defer do_enqueue until the player's bus name gains an owner."""
        def name_owner_changed(name, old_owner, new_owner):
            logger.debug(
                'name_owner_changed "%s" "%s" "%s"', name, old_owner, new_owner
            )
            if name == self.bus_name:
                logger.debug("MPRISResumer player %s is there", name)
                # One-shot: unsubscribe, then enqueue from the main loop.
                cancel.remove()
                util.idle_add(lambda: self.do_enqueue(filename, pos))
        bus = gpodder.dbus_session_bus
        obj = bus.get_object(self.NAME_DBUS, self.OBJECT_DBUS)
        cancel = obj.connect_to_signal(
            "NameOwnerChanged", name_owner_changed, dbus_interface=self.NAME_DBUS
        )
    def do_enqueue(self, filename, pos):
        """Open *filename* in the player and remember *pos* (seconds).

        The actual seek happens later in on_prop_change, once the player
        reports the new track's metadata.
        """
        def on_reply():
            logger.debug("MPRISResumer opened %s", self.url)
        def on_error(exception):
            logger.error("MPRISResumer error %s", repr(exception))
            self.signal_match.remove()
        # NOTE(review): self.url is only set for absolute paths; a non-local
        # playback URL would leave it as None — confirm callers always pass
        # local filenames here.
        if filename.startswith("/"):
            try:
                import pathlib
                self.url = pathlib.Path(filename).as_uri()
            except ImportError:
                self.url = "file://" + filename
        self.position_us = pos * 1000 * 1000  # pos in microseconds
        if self.init_dbus():
            # async to not freeze the ui waiting for the application to answer
            self.player.OpenUri(
                self.url,
                dbus_interface=self.INTERFACE_PLAYER,
                reply_handler=on_reply,
                error_handler=on_error,
            )
        else:
            # Player not running yet: launch it and retry once it appears.
            self.enqueue_when_ready(filename, pos)
            logger.debug("MPRISResumer launching player %s", self.application)
            super(MPRISResumer, self).open_files([])
    def on_prop_change(self, interface, props, invalidated_props):
        """PropertiesChanged handler: seek once our URI becomes current."""
        def on_reply():
            pass
        def on_error(exception):
            logger.error("MPRISResumer SetPosition error %s", repr(exception))
            self.signal_match.remove()
        metadata = props.get("Metadata", {})
        url = metadata.get("xesam:url")
        track_id = metadata.get("mpris:trackid")
        if url is not None and track_id is not None:
            if url == self.url:
                logger.info(
                    "Enqueue %s setting track %s position=%d",
                    url,
                    track_id,
                    self.position_us,
                )
                self.player.SetPosition(
                    str(track_id),
                    self.position_us,
                    dbus_interface=self.INTERFACE_PLAYER,
                    reply_handler=on_reply,
                    error_handler=on_error,
                )
            else:
                # Some other track started playing; stop listening.
                logger.debug("Changed but wrong url: %s, giving up", url)
                self.signal_match.remove()
# Candidate players for the plain "Enqueue in ..." menu entries. Order
# matters: the first *installed* entry is the auto-enqueue fallback when no
# default player is configured (see gPodderExtension.on_episode_downloaded).
PLAYERS = [
    # Amarok, http://amarok.kde.org/
    FreeDesktopPlayer("amarok", "Amarok", ["amarok", "--play", "--append"]),
    # VLC, http://videolan.org/
    FreeDesktopPlayer(
        "vlc", "VLC", ["vlc", "--started-from-file", "--playlist-enqueue"]
    ),
    # Totem, https://live.gnome.org/Totem
    FreeDesktopPlayer("totem", "Totem", ["totem", "--enqueue"]),
    # DeaDBeeF, http://deadbeef.sourceforge.net/
    FreeDesktopPlayer("deadbeef", "DeaDBeeF", ["deadbeef", "--queue"]),
    # gmusicbrowser, http://gmusicbrowser.org/
    FreeDesktopPlayer("gmusicbrowser", "gmusicbrowser", ["gmusicbrowser", "-enqueue"]),
    # Audacious, http://audacious-media-player.org/
    FreeDesktopPlayer("audacious", "Audacious", ["audacious", "--enqueue"]),
    # Clementine, http://www.clementine-player.org/
    FreeDesktopPlayer("clementine", "Clementine", ["clementine", "--append"]),
    # Strawberry, https://www.strawberrymusicplayer.org/
    FreeDesktopPlayer("strawberry", "Strawberry", ["strawberry", "--append"]),
    # Parole, http://docs.xfce.org/apps/parole/start
    FreeDesktopPlayer("parole", "Parole", ["parole", "-a"]),
    # Winamp 2.x, http://www.oldversion.com/windows/winamp/
    Win32Player(
        "winamp", "Winamp", r"HKEY_CLASSES_ROOT\Winamp.File\shell\Enqueue\command"
    ),
    # VLC media player, http://videolan.org/vlc/
    Win32Player(
        "vlc", "VLC", r"HKEY_CLASSES_ROOT\VLC.mp3\shell\AddToPlaylistVLC\command"
    ),
    # foobar2000, http://www.foobar2000.org/
    Win32Player(
        "foobar2000",
        "foobar2000",
        r"HKEY_CLASSES_ROOT\foobar2000.MP3\shell\enqueue\command",
    ),
]
# MPRIS2-capable players offered as "Resume in ..." entries (single episode
# with a saved playback position only; requires a working D-Bus session).
RESUMERS = [
    # doesn't play on my system, but the track is appended.
    MPRISResumer(
        "amarok", "Amarok", ["amarok", "--play"], "org.mpris.MediaPlayer2.amarok"
    ),
    MPRISResumer(
        "vlc", "VLC", ["vlc", "--started-from-file"], "org.mpris.MediaPlayer2.vlc"
    ),
    # totem mpris2 plugin is broken for me: it raises AttributeError:
    #  File "/usr/lib/totem/plugins/dbus/dbusservice.py", line 329, in OpenUri
    #    self.totem.add_to_playlist_and_play (uri)
    # MPRISResumer('totem', 'Totem', ['totem'], 'org.mpris.MediaPlayer2.totem'),
    # with https://github.com/Serranya/deadbeef-mpris2-plugin
    MPRISResumer(
        "resume in deadbeef",
        "DeaDBeeF",
        ["deadbeef"],
        "org.mpris.MediaPlayer2.DeaDBeeF",
    ),
    # the gPodder Downloads directory must be in gmusicbrowser's library
    MPRISResumer(
        "resume in gmusicbrowser",
        "gmusicbrowser",
        ["gmusicbrowser"],
        "org.mpris.MediaPlayer2.gmusicbrowser",
    ),
    # Audacious doesn't implement MPRIS2.OpenUri
    # MPRISResumer('audacious', 'resume in Audacious', ['audacious', '--enqueue'], 'org.mpris.MediaPlayer2.audacious'),
    # beware: clementine never exits on my system (even when launched from cmdline)
    # so the zombie clementine process will get all the bus messages and never answer
    # resulting in freezes and timeouts!
    MPRISResumer(
        "clementine", "Clementine", ["clementine"], "org.mpris.MediaPlayer2.clementine"
    ),
    # just enable the plugin
    MPRISResumer("parole", "Parole", ["parole"], "org.mpris.MediaPlayer2.parole"),
]
class gPodderExtension:
    """gPodder extension entry point: wires the players into the UI hooks."""
    def __init__(self, container):
        self.container = container
        self.config = container.config
        self.gpodder_config = self.container.manager.core.config
        # Only display media players that can be found at extension load time
        # (note: Win32Player.is_installed also rewrites its command here).
        self.players = [player for player in PLAYERS if player.is_installed()]
        self.resumers = [r for r in RESUMERS if r.is_installed()]
    def on_ui_object_available(self, name, ui_object):
        """Hook: hand the gtk UI object to every player for icon refreshes."""
        if name == "gpodder-gtk":
            for p in self.players + self.resumers:
                p.gpodder = ui_object
    def on_episodes_context_menu(self, episodes):
        """Hook: build (label, callback) menu entries for *episodes*.

        Returns None when none of the episodes has a downloaded file.
        """
        if not any(e.file_exists() for e in episodes):
            return None
        ret = [
            (p.title, functools.partial(p.enqueue_episodes, config=self.gpodder_config))
            for p in self.players
        ]
        # needs dbus, doesn't handle more than 1 episode
        # and no point in using DBus when episode is not played.
        if (
            not hasattr(gpodder.dbus_session_bus, "fake")
            and len(episodes) == 1
            and episodes[0].current_position > 0
        ):
            ret.extend(
                [
                    (
                        p.title,
                        functools.partial(
                            p.enqueue_episodes, config=self.gpodder_config
                        ),
                    )
                    for p in self.resumers
                ]
            )
        return ret
    def on_episode_downloaded(self, episode):
        """Hook: optionally auto-enqueue a freshly downloaded episode.

        Uses the configured default player, falling back to the first
        installed one when no default is set.
        """
        if self.config.enqueue_after_download:
            if not self.config.default_player and len(self.players):
                player = self.players[0]
                logger.info(
                    "Picking first installed player: %s (%s)",
                    player.slug,
                    player.application,
                )
            else:
                # Look the configured slug up among the installed players.
                player = next(
                    (
                        player
                        for player in self.players
                        if self.config.default_player == player.slug
                    ),
                    None,
                )
                if player is None:
                    logger.info(
                        "No player set, use one of: %r",
                        [player.slug for player in self.players],
                    )
                    return
            logger.info("Enqueueing downloaded file in %s", player.application)
            player.enqueue_episodes([episode])
|
books | ZhihuDaily | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = "@ohdarling88"
__version__ = "0.1.2"
import json
import urllib
from base import BaseFeedBook
from config import SHARE_FUCK_GFW_SRV
from lib.urlopener import URLOpener
# 知乎屏蔽了 GAE 访问 API,所以需要一个中转服务器来获取知乎日报的 Feed 内容
# nodejs 版本可以参考:https://github.com/ohdarling/ZhihuDailyForwarder
# 搭建完中转服务器后,将 feeds 中的 http://news.at.zhihu.com/api/1.1/news/latest 替换为实际的中转服务器地址
# 因为KindleEar作者不想安装node.js本地开发环境,部署node.js不成功,
# 因此自己写了一个python版本的:http://github.com/cdhigh/forwarder
# http://forwarder.ap01.aws.af.cm为KindleEar作者搭建的转发器
def getBook():
    # Module entry point: the KindleEar loader calls getBook() to obtain
    # the book class implemented by this file.
    return ZhihuDaily
class ZhihuDaily(BaseFeedBook):
    """Zhihu Daily book for KindleEar.

    The Zhihu Daily API blocks GAE, so every request is routed through a
    forwarding proxy (``SHARE_FUCK_GFW_SRV`` from config).  Content updates
    during the day; delivering around 20:00 or 23:00 is recommended.
    """

    title = "知乎日报"
    description = "知乎日报的内容是动态更新的,建议在晚 8 点或 23 点进行投递。此外,知乎日报 API 需要转发服务器,参见 https://github.com/ohdarling/ZhihuDailyForwarder"
    network_timeout = 60
    language = "zh-cn"
    feed_encoding = "utf-8"
    page_encoding = "utf-8"
    mastheadfile = "mh_zhihudaily.gif"
    coverfile = "cv_zhihudaily.jpg"
    fulltext_by_readability = False
    fulltext_by_instapaper = False
    keep_only_tags = [
        dict(name="h1", attrs={"class": "headline-title"}),
        dict(name="div", attrs={"class": "question"}),
    ]
    remove_tags = []
    remove_ids = []
    remove_classes = ["view-more", "avatar"]
    remove_attrs = []
    extra_css = """
.question-title {font-size:1.1em;font-weight:normal;text-decoration:underline;color:#606060;}
.meta {font-size:0.9em;color:#808080;}
"""
    # http_forwarder = 'http://forwarder.ap01.aws.af.cm/?k=xzSlE&t=60&u=%s'
    feeds = [
        ("今日头条", "http://news.at.zhihu.com/api/1.2/news/latest"),
    ]
    # (API key in the feed JSON, section title shown on the Kindle)
    partitions = [
        ("top_stories", "今日头条"),
        ("news", "今日热闻"),
    ]

    def url4forwarder(self, url):
        """Return *url* wrapped so it is fetched through the forwarding proxy."""
        return SHARE_FUCK_GFW_SRV % urllib.quote(url)

    def ParseFeedUrls(self):
        """return list like [(section,title,url,desc),..]"""
        articles = []
        seen = set()
        feed_url = self.url4forwarder(self.feeds[0][1])
        opener = URLOpener(self.host, timeout=self.timeout)
        resp = opener.open(feed_url)
        if resp.status_code != 200 or not resp.content:
            self.log.warn(
                "fetch rss failed(%s):%s"
                % (URLOpener.CodeMap(resp.status_code), feed_url)
            )
            return articles

        feed = json.loads(resp.content.decode(self.feed_encoding))
        for partition, section in self.partitions:
            for item in feed[partition]:
                share_url = item["share_url"]
                if share_url in seen:
                    self.log.info("duplicated, skipped %s" % share_url)
                    continue
                seen.add(share_url)
                # Article pages must go through the forwarder as well.
                articles.append(
                    (section, item["title"], self.url4forwarder(share_url), None)
                )
        return articles
# def fetcharticle(self, url, opener, decoder):
# result = opener.open(self.url4forwarder(url))
# status_code, content = result.status_code, result.content
# if status_code != 200 or not content:
# self.log.warn('fetch article failed(%s):%s.' % (URLOpener.CodeMap(status_code),url))
# return None
#
# if self.page_encoding:
# return content.decode(self.page_encoding)
# else:
# return decoder.decode(content,url,result.headers)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.