text
stringlengths 0
2.53M
|
---|
# -*- test-case-name: twisted.words.test -*-
# Copyright (c) 2001-2005 Twisted Matrix Laboratories.
# See LICENSE for details.
from zope.interface import Interface, Attribute, implements
class IProtocolPlugin(Interface):
"""Interface for plugins providing an interface to a Words service"""
name = Attribute(
"A single word describing what kind of interface this is (eg, irc or web)"
)
def getFactory(realm, portal):
"""Retrieve a C{twisted.internet.interfaces.IServerFactory} provider
@param realm: An object providing C{twisted.cred.portal.IRealm} and
C{IChatService}, with which service information should be looked up.
@param portal: An object providing C{twisted.cred.portal.IPortal},
through which logins should be performed.
"""
class IGroup(Interface):
name = Attribute("A short string, unique among groups.")
def add(user):
"""Include the given user in this group.
@type user: L{IUser}
"""
def remove(user, reason=None):
"""Remove the given user from this group.
@type user: L{IUser}
@type reason: C{unicode}
"""
def size():
"""Return the number of participants in this group.
@rtype: L{twisted.internet.defer.Deferred}
@return: A Deferred which fires with an C{int} representing the the
number of participants in this group.
"""
def receive(sender, recipient, message):
"""
Broadcast the given message from the given sender to other
users in group.
The message is not re-transmitted to the sender.
@param sender: L{IUser}
@type recipient: L{IGroup}
@param recipient: This is probably a wart. Maybe it will be removed
in the future. For now, it should be the group object the message
is being delivered to.
@param message: C{dict}
@rtype: L{twisted.internet.defer.Deferred}
@return: A Deferred which fires with None when delivery has been
attempted for all users.
"""
def setMetadata(meta):
"""Change the metadata associated with this group.
@type meta: C{dict}
"""
def iterusers():
"""Return an iterator of all users in this group."""
class IChatClient(Interface):
"""Interface through which IChatService interacts with clients."""
name = Attribute(
"A short string, unique among users. This will be set by the L{IChatService} at login time."
)
def receive(sender, recipient, message):
"""
Callback notifying this user of the given message sent by the
given user.
This will be invoked whenever another user sends a message to a
group this user is participating in, or whenever another user sends
a message directly to this user. In the former case, C{recipient}
will be the group to which the message was sent; in the latter, it
will be the same object as the user who is receiving the message.
@type sender: L{IUser}
@type recipient: L{IUser} or L{IGroup}
@type message: C{dict}
@rtype: L{twisted.internet.defer.Deferred}
@return: A Deferred which fires when the message has been delivered,
or which fails in some way. If the Deferred fails and the message
was directed at a group, this user will be removed from that group.
"""
def groupMetaUpdate(group, meta):
"""
Callback notifying this user that the metadata for the given
group has changed.
@type group: L{IGroup}
@type meta: C{dict}
@rtype: L{twisted.internet.defer.Deferred}
"""
def userJoined(group, user):
"""
Callback notifying this user that the given user has joined
the given group.
@type group: L{IGroup}
@type user: L{IUser}
@rtype: L{twisted.internet.defer.Deferred}
"""
def userLeft(group, user, reason=None):
"""
Callback notifying this user that the given user has left the
given group for the given reason.
@type group: L{IGroup}
@type user: L{IUser}
@type reason: C{unicode}
@rtype: L{twisted.internet.defer.Deferred}
"""
class IUser(Interface):
"""Interface through which clients interact with IChatService."""
realm = Attribute(
"A reference to the Realm to which this user belongs. Set if and only if the user is logged in."
)
mind = Attribute(
"A reference to the mind which logged in to this user. Set if and only if the user is logged in."
)
name = Attribute("A short string, unique among users.")
lastMessage = Attribute(
"A POSIX timestamp indicating the time of the last message received from this user."
)
signOn = Attribute(
"A POSIX timestamp indicating this user's most recent sign on time."
)
def loggedIn(realm, mind):
"""Invoked by the associated L{IChatService} when login occurs.
@param realm: The L{IChatService} through which login is occurring.
@param mind: The mind object used for cred login.
"""
def send(recipient, message):
"""Send the given message to the given user or group.
@type recipient: Either L{IUser} or L{IGroup}
@type message: C{dict}
"""
def join(group):
"""Attempt to join the given group.
@type group: L{IGroup}
@rtype: L{twisted.internet.defer.Deferred}
"""
def leave(group):
"""Discontinue participation in the given group.
@type group: L{IGroup}
@rtype: L{twisted.internet.defer.Deferred}
"""
def itergroups():
"""
Return an iterator of all groups of which this user is a
member.
"""
class IChatService(Interface):
name = Attribute("A short string identifying this chat service (eg, a hostname)")
createGroupOnRequest = Attribute(
"A boolean indicating whether L{getGroup} should implicitly "
"create groups which are requested but which do not yet exist."
)
createUserOnRequest = Attribute(
"A boolean indicating whether L{getUser} should implicitly "
"create users which are requested but which do not yet exist."
)
def itergroups():
"""Return all groups available on this service.
@rtype: C{twisted.internet.defer.Deferred}
@return: A Deferred which fires with a list of C{IGroup} providers.
"""
def getGroup(name):
"""Retrieve the group by the given name.
@type name: C{str}
@rtype: L{twisted.internet.defer.Deferred}
@return: A Deferred which fires with the group with the given
name if one exists (or if one is created due to the setting of
L{createGroupOnRequest}, or which fails with
L{twisted.words.ewords.NoSuchGroup} if no such group exists.
"""
def createGroup(name):
"""Create a new group with the given name.
@type name: C{str}
@rtype: L{twisted.internet.defer.Deferred}
@return: A Deferred which fires with the created group, or
with fails with L{twisted.words.ewords.DuplicateGroup} if a
group by that name exists already.
"""
def lookupGroup(name):
"""Retrieve a group by name.
Unlike C{getGroup}, this will never implicitly create a group.
@type name: C{str}
@rtype: L{twisted.internet.defer.Deferred}
@return: A Deferred which fires with the group by the given
name, or which fails with L{twisted.words.ewords.NoSuchGroup}.
"""
def getUser(name):
"""Retrieve the user by the given name.
@type name: C{str}
@rtype: L{twisted.internet.defer.Deferred}
@return: A Deferred which fires with the user with the given
name if one exists (or if one is created due to the setting of
L{createUserOnRequest}, or which fails with
L{twisted.words.ewords.NoSuchUser} if no such user exists.
"""
def createUser(name):
"""Create a new user with the given name.
@type name: C{str}
@rtype: L{twisted.internet.defer.Deferred}
@return: A Deferred which fires with the created user, or
with fails with L{twisted.words.ewords.DuplicateUser} if a
user by that name exists already.
"""
__all__ = [
"IChatInterface",
"IGroup",
"IChatClient",
"IUser",
"IChatService",
]
|
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.protocols.jabber.jid}.
"""
from twisted.trial import unittest
from twisted.words.protocols.jabber import jid
class JIDParsingTest(unittest.TestCase):
def test_parse(self):
"""
Test different forms of JIDs.
"""
# Basic forms
self.assertEqual(jid.parse("user@host/resource"), ("user", "host", "resource"))
self.assertEqual(jid.parse("user@host"), ("user", "host", None))
self.assertEqual(jid.parse("host"), (None, "host", None))
self.assertEqual(jid.parse("host/resource"), (None, "host", "resource"))
# More interesting forms
self.assertEqual(jid.parse("foo/bar@baz"), (None, "foo", "bar@baz"))
self.assertEqual(jid.parse("boo@foo/bar@baz"), ("boo", "foo", "bar@baz"))
self.assertEqual(jid.parse("boo@foo/bar/baz"), ("boo", "foo", "bar/baz"))
self.assertEqual(jid.parse("boo/foo@bar@baz"), (None, "boo", "foo@bar@baz"))
self.assertEqual(jid.parse("boo/foo/bar"), (None, "boo", "foo/bar"))
self.assertEqual(jid.parse("boo//foo"), (None, "boo", "/foo"))
def test_noHost(self):
"""
Test for failure on no host part.
"""
self.assertRaises(jid.InvalidFormat, jid.parse, "user@")
def test_doubleAt(self):
"""
Test for failure on double @ signs.
This should fail because @ is not a valid character for the host
part of the JID.
"""
self.assertRaises(jid.InvalidFormat, jid.parse, "user@@host")
def test_multipleAt(self):
"""
Test for failure on two @ signs.
This should fail because @ is not a valid character for the host
part of the JID.
"""
self.assertRaises(jid.InvalidFormat, jid.parse, "user@host@host")
# Basic tests for case mapping. These are fallback tests for the
# prepping done in twisted.words.protocols.jabber.xmpp_stringprep
def test_prepCaseMapUser(self):
"""
Test case mapping of the user part of the JID.
"""
self.assertEqual(
jid.prep("UsEr", "host", "resource"), ("user", "host", "resource")
)
def test_prepCaseMapHost(self):
"""
Test case mapping of the host part of the JID.
"""
self.assertEqual(
jid.prep("user", "hoST", "resource"), ("user", "host", "resource")
)
def test_prepNoCaseMapResource(self):
"""
Test no case mapping of the resourcce part of the JID.
"""
self.assertEqual(
jid.prep("user", "hoST", "resource"), ("user", "host", "resource")
)
self.assertNotEqual(
jid.prep("user", "host", "Resource"), ("user", "host", "resource")
)
class JIDTest(unittest.TestCase):
def test_noneArguments(self):
"""
Test that using no arguments raises an exception.
"""
self.assertRaises(RuntimeError, jid.JID)
def test_attributes(self):
"""
Test that the attributes correspond with the JID parts.
"""
j = jid.JID("user@host/resource")
self.assertEqual(j.user, "user")
self.assertEqual(j.host, "host")
self.assertEqual(j.resource, "resource")
def test_userhost(self):
"""
Test the extraction of the bare JID.
"""
j = jid.JID("user@host/resource")
self.assertEqual("user@host", j.userhost())
def test_userhostOnlyHost(self):
"""
Test the extraction of the bare JID of the full form host/resource.
"""
j = jid.JID("host/resource")
self.assertEqual("host", j.userhost())
def test_userhostJID(self):
"""
Test getting a JID object of the bare JID.
"""
j1 = jid.JID("user@host/resource")
j2 = jid.internJID("user@host")
self.assertIdentical(j2, j1.userhostJID())
def test_userhostJIDNoResource(self):
"""
Test getting a JID object of the bare JID when there was no resource.
"""
j = jid.JID("user@host")
self.assertIdentical(j, j.userhostJID())
def test_fullHost(self):
"""
Test giving a string representation of the JID with only a host part.
"""
j = jid.JID(tuple=(None, "host", None))
self.assertEqual("host", j.full())
def test_fullHostResource(self):
"""
Test giving a string representation of the JID with host, resource.
"""
j = jid.JID(tuple=(None, "host", "resource"))
self.assertEqual("host/resource", j.full())
def test_fullUserHost(self):
"""
Test giving a string representation of the JID with user, host.
"""
j = jid.JID(tuple=("user", "host", None))
self.assertEqual("user@host", j.full())
def test_fullAll(self):
"""
Test giving a string representation of the JID.
"""
j = jid.JID(tuple=("user", "host", "resource"))
self.assertEqual("user@host/resource", j.full())
def test_equality(self):
"""
Test JID equality.
"""
j1 = jid.JID("user@host/resource")
j2 = jid.JID("user@host/resource")
self.assertNotIdentical(j1, j2)
self.assertEqual(j1, j2)
def test_equalityWithNonJIDs(self):
"""
Test JID equality.
"""
j = jid.JID("user@host/resource")
self.assertFalse(j == "user@host/resource")
def test_inequality(self):
"""
Test JID inequality.
"""
j1 = jid.JID("user1@host/resource")
j2 = jid.JID("user2@host/resource")
self.assertNotEqual(j1, j2)
def test_inequalityWithNonJIDs(self):
"""
Test JID equality.
"""
j = jid.JID("user@host/resource")
self.assertNotEqual(j, "user@host/resource")
def test_hashable(self):
"""
Test JID hashability.
"""
j1 = jid.JID("user@host/resource")
j2 = jid.JID("user@host/resource")
self.assertEqual(hash(j1), hash(j2))
def test_unicode(self):
"""
Test unicode representation of JIDs.
"""
j = jid.JID(tuple=("user", "host", "resource"))
self.assertEqual("user@host/resource", str(j))
def test_repr(self):
"""
Test representation of JID objects.
"""
j = jid.JID(tuple=("user", "host", "resource"))
self.assertEqual("JID(u'user@host/resource')", repr(j))
class InternJIDTest(unittest.TestCase):
def test_identity(self):
"""
Test that two interned JIDs yield the same object.
"""
j1 = jid.internJID("user@host")
j2 = jid.internJID("user@host")
self.assertIdentical(j1, j2)
|
"Class advice.\n\nThis module was adapted from 'protocols.advice', part of the Python\nEnterprise Application Kit (PEAK). Please notify the PEAK authors\n(pje@telecommunity.com and tsarna@sarna.org) if bugs are found or\nZope-specific changes are required, so that the PEAK version of this module\ncan be kept in sync.\n\nPEAK is a Python application framework that interoperates with (but does\nnot require) Zope 3 and Twisted. It provides tools for manipulating UML\nmodels, object-relational persistence, aspect-oriented programming, and more.\nVisit the PEAK home page at http://peak.telecommunity.com for more information.\n\n$Id: advice.py 110699 2010-04-09 08:16:17Z regebro $\n"
from types import FunctionType
try:
from types import ClassType
__python3 = False
except ImportError:
__python3 = True
import sys
def getFrameInfo(frame):
'Return (kind,module,locals,globals) for a frame\n\n \'kind\' is one of "exec", "module", "class", "function call", or "unknown".\n '
f_locals = frame.f_locals
f_globals = frame.f_globals
sameNamespace = (f_locals is f_globals)
hasModule = ('__module__' in f_locals)
hasName = ('__name__' in f_globals)
sameName = (hasModule and hasName)
sameName = (sameName and (f_globals['__name__'] == f_locals['__module__']))
module = ((hasName and (f_globals['__name__'])) or None)
namespaceIsModule = (module and (module.__dict__ is f_globals))
if (not namespaceIsModule):
kind = 'exec'
elif (sameNamespace and (not hasModule)):
kind = 'module'
elif (sameName and (not sameNamespace)):
kind = 'class'
elif (not sameNamespace):
kind = 'function call'
else:
kind = 'unknown'
return (kind, module, f_locals, f_globals)
def addClassAdvisor(callback, depth=2):
'Set up \'callback\' to be passed the containing class upon creation\n\n This function is designed to be called by an "advising" function executed\n in a class suite. The "advising" function supplies a callback that it\n wishes to have executed when the containing class is created. The\n callback will be given one argument: the newly created containing class.\n The return value of the callback will be used in place of the class, so\n the callback should return the input if it does not wish to replace the\n class.\n\n The optional \'depth\' argument to this function determines the number of\n frames between this function and the targeted class suite. \'depth\'\n defaults to 2, since this skips this function\'s frame and one calling\n function frame. If you use this function from a function called directly\n in the class suite, the default will be correct, otherwise you will need\n to determine the correct depth yourself.\n\n This function works by installing a special class factory function in\n place of the \'__metaclass__\' of the containing class. Therefore, only\n callbacks *after* the last \'__metaclass__\' assignment in the containing\n class will be executed. Be sure that classes using "advising" functions\n declare any \'__metaclass__\' *first*, to ensure all callbacks are run.'
frame = (depth)
(kind, module, caller_locals, caller_globals) = (frame)
previousMetaclass = ('__metaclass__')
if __python3:
defaultMetaclass = ('__metaclass__', type)
else:
defaultMetaclass = ('__metaclass__', ClassType)
def advise(name, bases, cdict):
if ('__metaclass__' in cdict):
del cdict['__metaclass__']
if (previousMetaclass is None):
if bases:
meta = (bases)
else:
meta = defaultMetaclass
elif (previousMetaclass):
meta = previousMetaclass
else:
meta = (bases, previousMetaclass)
newClass = (name, bases, cdict)
return (newClass)
advise.previousMetaclass = previousMetaclass
advise.callback = callback
caller_locals['__metaclass__'] = advise
def isClassAdvisor(ob):
"True if 'ob' is a class advisor function"
return ((ob, FunctionType) and (ob, 'previousMetaclass'))
def determineMetaclass(bases, explicit_mc=None):
'Determine metaclass from 1+ bases and optional explicit __metaclass__'
meta = [(b, '__class__', (b)) for b in bases]
if (explicit_mc is not None):
(explicit_mc)
if ((meta) == 1):
return meta[0]
candidates = (meta)
if (not candidates):
if (not (not __python3)):
raise ()
return ClassType
elif ((candidates) > 1):
raise ('Incompatible metatypes', bases)
return candidates[0]
def minimalBases(classes):
'Reduce a list of base classes to its ordered minimum equivalent'
if (not __python3):
classes = [c for c in classes if (c is not ClassType)]
candidates = []
for m in classes:
for n in classes:
if ((n, m) and (m is not n)):
break
else:
if (m in candidates):
(m)
(m)
return candidates |
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test interface sorting
$Id: test_sorting.py 110699 2010-04-09 08:16:17Z regebro $
"""
from unittest import TestCase, TestSuite, main, makeSuite
from zope.interface import Interface
class I1(Interface):
pass
class I2(I1):
pass
class I3(I1):
pass
class I4(Interface):
pass
class I5(I4):
pass
class I6(I2):
pass
class Test(TestCase):
def test(self):
l = [I1, I3, I5, I6, I4, I2]
l.sort()
self.assertEqual(l, [I1, I2, I3, I4, I5, I6])
def test_w_None(self):
l = [I1, None, I3, I5, I6, I4, I2]
l.sort()
self.assertEqual(l, [I1, I2, I3, I4, I5, I6, None])
def test_suite():
return TestSuite((makeSuite(Test),))
if __name__ == "__main__":
main(defaultTest="test_suite")
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the methods to import mail via Google Apps Email Migration API.
MigrationService: Provides methods to import mail.
"""
__author__ = ("google-apps-apis@googlegroups.com", "pti@google.com (Prashant Tiwari)")
import base64
import threading
import time
from atom.service import deprecation
from gdata.apps import migration
from gdata.apps.migration import MailEntryProperties
import gdata.apps.service
import gdata.service
API_VER = "2.0"
class MigrationService(gdata.apps.service.AppsService):
"""Client for the EMAPI migration service. Use either ImportMail to import
one message at a time, or AddMailEntry and ImportMultipleMails to import a
bunch of messages at a time.
"""
def __init__(
self,
email=None,
password=None,
domain=None,
source=None,
server="apps-apis.google.com",
additional_headers=None,
):
gdata.apps.service.AppsService.__init__(
self,
email=email,
password=password,
domain=domain,
source=source,
server=server,
additional_headers=additional_headers,
)
self.mail_batch = migration.BatchMailEventFeed()
self.mail_entries = []
self.exceptions = 0
def _BaseURL(self):
return "/a/feeds/migration/%s/%s" % (API_VER, self.domain)
def ImportMail(self, user_name, mail_message, mail_item_properties, mail_labels):
"""Imports a single mail message.
Args:
user_name: The username to import messages to.
mail_message: An RFC822 format email message.
mail_item_properties: A list of Gmail properties to apply to the message.
mail_labels: A list of labels to apply to the message.
Returns:
A MailEntry representing the successfully imported message.
Raises:
AppsForYourDomainException: An error occurred importing the message.
"""
uri = "%s/%s/mail" % (self._BaseURL(), user_name)
mail_entry = migration.MailEntry()
mail_entry.rfc822_msg = migration.Rfc822Msg(
text=(base64.b64encode(mail_message))
)
mail_entry.rfc822_msg.encoding = "base64"
mail_entry.mail_item_property = [
migration.MailItemProperty(value=x) for x in mail_item_properties
]
mail_entry.label = [migration.Label(label_name=x) for x in mail_labels]
try:
return migration.MailEntryFromString(str(self.Post(mail_entry, uri)))
except gdata.service.RequestError as e:
# Store the number of failed imports when importing several at a time
self.exceptions += 1
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
def AddBatchEntry(self, mail_message, mail_item_properties, mail_labels):
"""Adds a message to the current batch that you later will submit.
Deprecated, use AddMailEntry instead
Args:
mail_message: An RFC822 format email message.
mail_item_properties: A list of Gmail properties to apply to the message.
mail_labels: A list of labels to apply to the message.
Returns:
The length of the MailEntry representing the message.
"""
deprecation("calling deprecated method AddBatchEntry")
mail_entry = migration.BatchMailEntry()
mail_entry.rfc822_msg = migration.Rfc822Msg(
text=(base64.b64encode(mail_message))
)
mail_entry.rfc822_msg.encoding = "base64"
mail_entry.mail_item_property = [
migration.MailItemProperty(value=x) for x in mail_item_properties
]
mail_entry.label = [migration.Label(label_name=x) for x in mail_labels]
self.mail_batch.AddBatchEntry(mail_entry)
return len(str(mail_entry))
def SubmitBatch(self, user_name):
"""Sends all the mail items you have added to the batch to the server.
Deprecated, use ImportMultipleMails instead
Args:
user_name: The username to import messages to.
Returns:
An HTTPResponse from the web service call.
Raises:
AppsForYourDomainException: An error occurred importing the batch.
"""
deprecation("calling deprecated method SubmitBatch")
uri = "%s/%s/mail/batch" % (self._BaseURL(), user_name)
try:
self.result = self.Post(
self.mail_batch, uri, converter=migration.BatchMailEventFeedFromString
)
except gdata.service.RequestError as e:
raise gdata.apps.service.AppsForYourDomainException(e.args[0])
self.mail_batch = migration.BatchMailEventFeed()
return self.result
def AddMailEntry(
self, mail_message, mail_item_properties=None, mail_labels=None, identifier=None
):
"""Prepares a list of mail messages to import using ImportMultipleMails.
Args:
mail_message: An RFC822 format email message as a string.
mail_item_properties: List of Gmail properties to apply to the
message.
mail_labels: List of Gmail labels to apply to the message.
identifier: The optional file identifier string
Returns:
The number of email messages to be imported.
"""
mail_entry_properties = MailEntryProperties(
mail_message=mail_message,
mail_item_properties=mail_item_properties,
mail_labels=mail_labels,
identifier=identifier,
)
self.mail_entries.append(mail_entry_properties)
return len(self.mail_entries)
def ImportMultipleMails(self, user_name, threads_per_batch=20):
"""Launches separate threads to import every message added by AddMailEntry.
Args:
user_name: The user account name to import messages to.
threads_per_batch: Number of messages to import at a time.
Returns:
The number of email messages that were successfully migrated.
Raises:
Exception: An error occurred while importing mails.
"""
num_entries = len(self.mail_entries)
if not num_entries:
return 0
threads = []
for mail_entry_properties in self.mail_entries:
t = threading.Thread(
name=mail_entry_properties.identifier,
target=self.ImportMail,
args=(
user_name,
mail_entry_properties.mail_message,
mail_entry_properties.mail_item_properties,
mail_entry_properties.mail_labels,
),
)
threads.append(t)
try:
# Determine the number of batches needed with threads_per_batch in each
batches = num_entries / threads_per_batch + (
0 if num_entries % threads_per_batch == 0 else 1
)
batch_min = 0
# Start the threads, one batch at a time
for batch in range(batches):
batch_max = (
(batch + 1) * threads_per_batch
if (batch + 1) * threads_per_batch < num_entries
else num_entries
)
for i in range(batch_min, batch_max):
threads[i].start()
time.sleep(1)
for i in range(batch_min, batch_max):
threads[i].join()
batch_min = batch_max
self.mail_entries = []
except Exception as e:
raise Exception(e.args[0])
else:
return num_entries - self.exceptions
|
"""Class for post-handshake certificate checking."""
from .utils.cryptomath import hashAndBase64
from .X509 import X509
from .X509CertChain import X509CertChain
from .errors import *
class Checker:
"""This class is passed to a handshake function to check the other
party's certificate chain.
If a handshake function completes successfully, but the Checker
judges the other party's certificate chain to be missing or
inadequate, a subclass of
L{tlslite.errors.TLSAuthenticationError} will be raised.
Currently, the Checker can check either an X.509 or a cryptoID
chain (for the latter, cryptoIDlib must be installed).
"""
def __init__(
self,
cryptoID=None,
protocol=None,
x509Fingerprint=None,
x509TrustList=None,
x509CommonName=None,
checkResumedSession=False,
):
"""Create a new Checker instance.
You must pass in one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
@type cryptoID: str
@param cryptoID: A cryptoID which the other party's certificate
chain must match. The cryptoIDlib module must be installed.
Mutually exclusive with all of the 'x509...' arguments.
@type protocol: str
@param protocol: A cryptoID protocol URI which the other
party's certificate chain must match. Requires the 'cryptoID'
argument.
@type x509Fingerprint: str
@param x509Fingerprint: A hex-encoded X.509 end-entity
fingerprint which the other party's end-entity certificate must
match. Mutually exclusive with the 'cryptoID' and
'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed. Mutually exclusive with the 'cryptoID' and
'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type checkResumedSession: bool
@param checkResumedSession: If resumed sessions should be
checked. This defaults to False, on the theory that if the
session was checked once, we don't need to bother
re-checking it.
"""
if cryptoID and (x509Fingerprint or x509TrustList):
raise ValueError()
if x509Fingerprint and x509TrustList:
raise ValueError()
if x509CommonName and not x509TrustList:
raise ValueError()
if protocol and not cryptoID:
raise ValueError()
if cryptoID:
import cryptoIDlib # So we raise an error here
if x509TrustList:
import cryptlib_py # So we raise an error here
self.cryptoID = cryptoID
self.protocol = protocol
self.x509Fingerprint = x509Fingerprint
self.x509TrustList = x509TrustList
self.x509CommonName = x509CommonName
self.checkResumedSession = checkResumedSession
def __call__(self, connection):
"""Check a TLSConnection.
When a Checker is passed to a handshake function, this will
be called at the end of the function.
@type connection: L{tlslite.TLSConnection.TLSConnection}
@param connection: The TLSConnection to examine.
@raise tlslite.errors.TLSAuthenticationError: If the other
party's certificate chain is missing or bad.
"""
if not self.checkResumedSession and connection.resumed:
return
if self.cryptoID or self.x509Fingerprint or self.x509TrustList:
if connection._client:
chain = connection.session.serverCertChain
else:
chain = connection.session.clientCertChain
if self.x509Fingerprint or self.x509TrustList:
if isinstance(chain, X509CertChain):
if self.x509Fingerprint:
if chain.getFingerprint() != self.x509Fingerprint:
raise TLSFingerprintError(
"X.509 fingerprint mismatch: %s, %s"
% (chain.getFingerprint(), self.x509Fingerprint)
)
else: # self.x509TrustList
if not chain.validate(self.x509TrustList):
raise TLSValidationError("X.509 validation failure")
if self.x509CommonName and (
chain.getCommonName() != self.x509CommonName
):
raise TLSAuthorizationError(
"X.509 Common Name mismatch: %s, %s"
% (chain.getCommonName(), self.x509CommonName)
)
elif chain:
raise TLSAuthenticationTypeError()
else:
raise TLSNoAuthenticationError()
elif self.cryptoID:
import cryptoIDlib.CertChain
if isinstance(chain, cryptoIDlib.CertChain.CertChain):
if chain.cryptoID != self.cryptoID:
raise TLSFingerprintError(
"cryptoID mismatch: %s, %s"
% (chain.cryptoID, self.cryptoID)
)
if self.protocol:
if not chain.checkProtocol(self.protocol):
raise TLSAuthorizationError("cryptoID protocol mismatch")
if not chain.validate():
raise TLSValidationError("cryptoID validation failure")
elif chain:
raise TLSAuthenticationTypeError()
else:
raise TLSNoAuthenticationError()
|
#
# (C) Copyright 2003-2011 Jacek Konieczny <jajcus@jajcus.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# pylint: disable-msg=W0221
"""Client-Server stream handling.
Normative reference:
- `RFC 6120 <http://www.ietf.org/rfc/rfc6120.txt>`__
"""
__docformat__ = "restructuredtext en"
from .streambase import StreamBase
from .jid import JID
from .settings import XMPPSettings
from .constants import STANZA_CLIENT_NS
class ClientStream(StreamBase):
"""Handles XMPP-IM c2s stream.
Both client and server side of the connection is supported.
"""
# pylint: disable=R0904
def __init__(self, jid, stanza_route, handlers, settings=None):
"""Initialize the ClientStream object.
:Parameters:
- `jid`: local JID.
- `handlers`: XMPP feature and event handlers
- `settings`: PyXMPP settings for the stream
:Types:
- `jid`: `JID`
- `settings`: `XMPPSettings`
"""
if handlers is None:
handlers = []
if settings is None:
settings = XMPPSettings()
if "resource" not in settings:
settings["resource"] = jid.resource
StreamBase.__init__(self, STANZA_CLIENT_NS, stanza_route, handlers, settings)
self.me = JID(jid.local, jid.domain)
def initiate(self, transport, to=None):
"""Initiate an XMPP connection over the `transport`.
:Parameters:
- `transport`: an XMPP transport instance
- `to`: peer name (defaults to own jid domain part)
"""
if to is None:
to = JID(self.me.domain)
return StreamBase.initiate(self, transport, to)
def receive(self, transport, myname=None):
"""Receive an XMPP connection over the `transport`.
:Parameters:
- `transport`: an XMPP transport instance
- `myname`: local stream endpoint name (defaults to own jid domain
part).
"""
if myname is None:
myname = JID(self.me.domain)
return StreamBase.receive(self, transport, myname)
def fix_out_stanza(self, stanza):
"""Fix outgoing stanza.
On a client clear the sender JID. On a server set the sender
address to the own JID if the address is not set yet."""
StreamBase.fix_out_stanza(self, stanza)
if self.initiator:
if stanza.from_jid:
stanza.from_jid = None
else:
if not stanza.from_jid:
stanza.from_jid = self.me
def fix_in_stanza(self, stanza):
"""Fix an incoming stanza.
Ona server replace the sender address with authorized client JID."""
StreamBase.fix_in_stanza(self, stanza)
if not self.initiator:
if stanza.from_jid != self.peer:
stanza.set_from(self.peer)
# vi: sts=4 et sw=4
|
'DNS resolever with SRV record support.\n\nNormative reference:\n - `RFC 1035 <http://www.ietf.org/rfc/rfc1035.txt>`__\n - `RFC 2782 <http://www.ietf.org/rfc/rfc2782.txt>`__\n'
__docformat__ = 'restructuredtext en'
import socket
import random
import logging
import threading
import queue
from .settings import XMPPSettings
from .interfaces import Resolver
logger = ('pyxmpp2.resolver')
try:
import dns.resolver
import dns.name
import dns.exception
HAVE_DNSPYTHON = True
except ImportError:
HAVE_DNSPYTHON = False
def is_ipv6_available():
'Check if IPv6 is available.\n\n :Return: `True` when an IPv6 socket can be created.\n '
try:
(socket.AF_INET6)
except (socket.error, AttributeError):
return False
return True
def is_ipv4_available():
'Check if IPv4 is available.\n\n :Return: `True` when an IPv4 socket can be created.\n '
try:
(socket.AF_INET)
except socket.error:
return False
return True
def shuffle_srv(records):
'Randomly reorder SRV records using their weights.\n\n :Parameters:\n - `records`: SRV records to shuffle.\n :Types:\n - `records`: sequence of :dns:`dns.rdtypes.IN.SRV`\n\n :return: reordered records.\n :returntype: `list` of :dns:`dns.rdtypes.IN.SRV`'
if (not records):
return []
ret = []
while ((records) > 1):
weight_sum = 0
for rrecord in records:
weight_sum += (rrecord.weight + 0.1)
thres = (() * weight_sum)
weight_sum = 0
for rrecord in records:
weight_sum += (rrecord.weight + 0.1)
if (thres < weight_sum):
(rrecord)
(rrecord)
break
(records[0])
return ret
def reorder_srv(records):
'Reorder SRV records using their priorities and weights.\n\n :Parameters:\n - `records`: SRV records to shuffle.\n :Types:\n - `records`: `list` of :dns:`dns.rdtypes.IN.SRV`\n\n :return: reordered records.\n :returntype: `list` of :dns:`dns.rdtypes.IN.SRV`'
records = (records)
()
ret = []
tmp = []
for rrecord in records:
if ((not tmp) or (rrecord.priority == tmp[0].priority)):
(rrecord)
continue
ret += (tmp)
tmp = [rrecord]
if tmp:
ret += (tmp)
return ret
class ThreadedResolverBase(Resolver):
'Base class for threaded resolvers.\n\n Starts worker threads, each running a blocking resolver implementation\n and communicates with them to provide non-blocking asynchronous API.\n '
def __init__(self, settings=None, max_threads=1):
if settings:
self.settings = settings
else:
self.settings = ()
self.threads = []
self.queue = ()
self.lock = ()
self.max_threads = max_threads
self.last_thread_n = 0
def _make_resolver(self):
'Return the blocking resolver implementation that should be\n used by the resolver threads.\n '
raise NotImplementedError
def stop(self):
'Stop the resolver threads.'
with self.lock:
for dummy in self.threads:
(None)
def _start_thread(self):
'Start a new working thread unless the maximum number of threads\n has been reached or the request queue is empty.\n '
with self.lock:
if (self.threads and ()):
return
if ((self.threads) >= self.max_threads):
return
thread_n = (self.last_thread_n + 1)
self.last_thread_n = thread_n
thread = ()
(thread)
thread.daemon = True
()
def resolve_address(self, hostname, callback, allow_cname=True):
request = ('resolve_address', (hostname, callback, allow_cname))
()
(request)
def resolve_srv(self, domain, service, protocol, callback):
request = ('resolve_srv', (domain, service, protocol, callback))
()
(request)
def _run(self, thread_n):
'The thread function.'
try:
((self, thread_n))
resolver = ()
while True:
request = ()
if (request is None):
break
(method, args) = request
((resolver, method, args))
(*args)
()
((self, thread_n))
finally:
(())
class DumbBlockingResolver(Resolver):
"Simple blocking resolver using only the standard Python library.\n\n This doesn't support SRV lookups!\n\n `resolve_srv` will raise NotImplementedError\n `resolve_address` will block until the lookup completes or fail and then\n call the callback immediately.\n "
def __init__(self, settings=None):
if settings:
self.settings = settings
else:
self.settings = ()
def resolve_srv(self, domain, service, protocol, callback):
raise ('The DumbBlockingResolver cannot resolve SRV records. DNSPython or target hostname explicitely set required')
def resolve_address(self, hostname, callback, allow_cname=True):
'Start looking up an A or AAAA record.\n\n `callback` will be called with a list of (family, address) tuples\n on success. Family is :std:`socket.AF_INET` or :std:`socket.AF_INET6`,\n the address is IPv4 or IPv6 literal. The list will be empty on error.\n\n :Parameters:\n - `hostname`: the host name to look up\n - `callback`: a function to be called with a list of received\n addresses\n - `allow_cname`: `True` if CNAMEs should be followed\n :Types:\n - `hostname`: `str`\n - `callback`: function accepting a single argument\n - `allow_cname`: `bool`\n '
if self.settings['ipv6']:
if self.settings['ipv4']:
family = socket.AF_UNSPEC
else:
family = socket.AF_INET6
elif self.settings['ipv4']:
family = socket.AF_INET
else:
('Neither IPv6 or IPv4 allowed.')
([])
return
try:
ret = (hostname, 0, family, socket.SOCK_STREAM, 0)
except socket.gaierror as err:
((hostname, err))
([])
return
if (family == socket.AF_UNSPEC):
tmp = ret
if self.settings['prefer_ipv6']:
ret = [addr for addr in tmp if (addr[0] == socket.AF_INET6)]
ret += [addr for addr in tmp if (addr[0] == socket.AF_INET)]
else:
ret = [addr for addr in tmp if (addr[0] == socket.AF_INET)]
ret += [addr for addr in tmp if (addr[0] == socket.AF_INET6)]
([(addr[0], addr[4][0]) for addr in ret])
if HAVE_DNSPYTHON:
class BlockingResolver(Resolver):
'Blocking resolver using the DNSPython package.\n\n Both `resolve_srv` and `resolve_address` will block until the\n lookup completes or fail and then call the callback immediately.\n '
def __init__(self, settings=None):
if settings:
self.settings = settings
else:
self.settings = ()
def resolve_srv(self, domain, service, protocol, callback):
'Start looking up an SRV record for `service` at `domain`.\n\n `callback` will be called with a properly sorted list of (hostname,\n port) pairs on success. The list will be empty on error and it will\n contain only (".", 0) when the service is explicitely disabled.\n\n :Parameters:\n - `domain`: domain name to look up\n - `service`: service name e.g. \'xmpp-client\'\n - `protocol`: protocol name, e.g. \'tcp\'\n - `callback`: a function to be called with a list of received\n addresses\n :Types:\n - `domain`: `str`\n - `service`: `str`\n - `protocol`: `str`\n - `callback`: function accepting a single argument\n '
if (domain, str):
domain = ('us-ascii')
domain = (service, protocol, domain)
try:
records = (domain, 'SRV')
except dns.exception.DNSException as err:
((domain, err.__class__.__name__))
([])
return
if (not records):
([])
return
result = []
for record in (records):
hostname = ()
if (hostname in ('.', '')):
continue
((hostname, record.port))
if (not result):
([('.', 0)])
else:
(result)
return
def resolve_address(self, hostname, callback, allow_cname=True):
'Start looking up an A or AAAA record.\n\n `callback` will be called with a list of (family, address) tuples\n (each holiding socket.AF_* and IPv4 or IPv6 address literal) on\n success. The list will be empty on error.\n\n :Parameters:\n - `hostname`: the host name to look up\n - `callback`: a function to be called with a list of received\n addresses\n - `allow_cname`: `True` if CNAMEs should be followed\n :Types:\n - `hostname`: `str`\n - `callback`: function accepting a single argument\n - `allow_cname`: `bool`\n '
if (hostname, str):
hostname = ('us-ascii')
rtypes = []
if self.settings['ipv6']:
(('AAAA', socket.AF_INET6))
if self.settings['ipv4']:
(('A', socket.AF_INET))
if (not self.settings['prefer_ipv6']):
()
exception = None
result = []
for (rtype, rfamily) in rtypes:
try:
try:
records = (hostname, rtype)
except dns.exception.DNSException:
records = ((hostname + '.'), rtype)
except dns.exception.DNSException as err:
exception = err
continue
if ((not allow_cname) and (records.rrset.name != (hostname))):
((hostname))
continue
if records:
for record in records:
((rfamily, ()))
if ((not result) and exception):
((hostname, exception.__class__.__name__))
(result)
class ThreadedResolver(ThreadedResolverBase):
'Threaded resolver implementation using the DNSPython\n :dns:`dns.resolver` module.\n '
def _make_resolver(self):
return (self.settings)
_DEFAULT_RESOLVER = BlockingResolver
else:
_DEFAULT_RESOLVER = DumbBlockingResolver
('dns_resolver')
('ipv4')
('ipv6')
('prefer_ipv6') |
import datetime
import copy
import decimal
import logging
import os
import re
import threading
import time
import warnings
try:
import sqlite3
except ImportError:
sqlite3 = None
try:
import psycopg2
except ImportError:
psycopg2 = None
try:
import MySQLdb as mysql
except ImportError:
mysql = None
__all__ = ['ImproperlyConfigured', 'SqliteDatabase', 'MySQLDatabase', 'PostgresqlDatabase', 'asc', 'desc', 'Count', 'Max', 'Min', 'Sum', 'Q', 'Field', 'CharField', 'TextField', 'DateTimeField', 'BooleanField', 'DecimalField', 'FloatField', 'IntegerField', 'PrimaryKeyField', 'ForeignKeyField', 'DoubleField', 'BigIntegerField', 'Model', 'filter_query', 'annotate_query', 'F', 'R', 'DateField', 'TimeField', 'transaction']
class ImproperlyConfigured(Exception):
pass
if ((sqlite3 is None) and (psycopg2 is None) and (mysql is None)):
raise ('Either sqlite3, psycopg2 or MySQLdb must be installed')
if sqlite3:
(decimal.Decimal, str)
(datetime.date, str)
(datetime.time, str)
('decimal', (lambda v: (v)))
if psycopg2:
import psycopg2.extensions
(psycopg2.extensions.UNICODE)
(psycopg2.extensions.UNICODEARRAY)
DATABASE_NAME = ('PEEWEE_DATABASE', 'peewee.db')
logger = ('peewee.logger')
class BaseAdapter(object):
'\n The various subclasses of `BaseAdapter` provide a bridge between the high-\n level `Database` abstraction and the underlying python libraries like\n psycopg2. It also provides a way to unify the pythonic field types with\n the underlying column types used by the database engine.\n\n The `BaseAdapter` provides two types of mappings:\n - mapping between filter operations and their database equivalents\n - mapping between basic field types and their database column types\n\n The `BaseAdapter` also is the mechanism used by the `Database` class to:\n - handle connections with the database\n - extract information from the database cursor\n '
operations = {'eq': '= %s'}
interpolation = '%s'
sequence_support = False
for_update_support = False
subquery_delete_same_table = True
reserved_tables = []
quote_char = '"'
def get_field_types(self):
field_types = {'integer': 'INTEGER', 'bigint': 'INTEGER', 'float': 'REAL', 'decimal': 'DECIMAL', 'double': 'REAL', 'string': 'VARCHAR', 'text': 'TEXT', 'datetime': 'DATETIME', 'time': 'TIME', 'date': 'DATE', 'primary_key': 'INTEGER', 'primary_key_with_sequence': 'INTEGER', 'foreign_key': 'INTEGER', 'boolean': 'SMALLINT', 'blob': 'BLOB'}
(())
return field_types
def get_field_overrides(self):
return {}
def connect(self, database, **kwargs):
raise NotImplementedError
def close(self, conn):
()
def lookup_cast(self, lookup, value):
'\n When a lookup is being performed as a part of a WHERE clause, provides\n a way to alter the incoming value that is passed to the database driver\n as part of the list of parameters\n '
if (lookup in ('contains', 'icontains')):
return ('%%%s%%' % value)
elif (lookup in ('startswith', 'istartswith')):
return ('%s%%' % value)
return value
def last_insert_id(self, cursor, model):
return cursor.lastrowid
def rows_affected(self, cursor):
return cursor.rowcount
class SqliteAdapter(BaseAdapter):
operations = {'lt': '< %s', 'lte': '<= %s', 'gt': '> %s', 'gte': '>= %s', 'eq': '= %s', 'ne': '!= %s', 'in': 'IN (%s)', 'is': 'IS %s', 'isnull': 'IS NULL', 'between': 'BETWEEN %s AND %s', 'icontains': "LIKE %s ESCAPE '\\'", 'contains': 'GLOB %s', 'istartswith': "LIKE %s ESCAPE '\\'", 'startswith': 'GLOB %s'}
interpolation = '?'
def connect(self, database, **kwargs):
if (not sqlite3):
raise ('sqlite3 must be installed on the system')
return (database)
def lookup_cast(self, lookup, value):
if (lookup == 'contains'):
return ('*%s*' % value)
elif (lookup == 'icontains'):
return ('%%%s%%' % value)
elif (lookup == 'startswith'):
return ('%s*' % value)
elif (lookup == 'istartswith'):
return ('%s%%' % value)
return value
class PostgresqlAdapter(BaseAdapter):
operations = {'lt': '< %s', 'lte': '<= %s', 'gt': '> %s', 'gte': '>= %s', 'eq': '= %s', 'ne': '!= %s', 'in': 'IN (%s)', 'is': 'IS %s', 'isnull': 'IS NULL', 'between': 'BETWEEN %s AND %s', 'icontains': 'ILIKE %s', 'contains': 'LIKE %s', 'istartswith': 'ILIKE %s', 'startswith': 'LIKE %s'}
reserved_tables = ['user']
sequence_support = True
for_update_support = True
def connect(self, database, **kwargs):
if (not psycopg2):
raise ('psycopg2 must be installed on the system')
return ()
def get_field_overrides(self):
return {'primary_key': 'SERIAL', 'primary_key_with_sequence': 'INTEGER', 'datetime': 'TIMESTAMP', 'decimal': 'NUMERIC', 'double': 'DOUBLE PRECISION', 'bigint': 'BIGINT', 'boolean': 'BOOLEAN', 'blob': 'BYTEA'}
def last_insert_id(self, cursor, model):
if model._meta.pk_sequence:
(('SELECT CURRVAL(\'"%s"\')' % model._meta.pk_sequence))
else:
(('SELECT CURRVAL(\'"%s_%s_seq"\')' % (model._meta.db_table, model._meta.pk_col)))
return ()[0]
class MySQLAdapter(BaseAdapter):
operations = {'lt': '< %s', 'lte': '<= %s', 'gt': '> %s', 'gte': '>= %s', 'eq': '= %s', 'ne': '!= %s', 'in': 'IN (%s)', 'is': 'IS %s', 'isnull': 'IS NULL', 'between': 'BETWEEN %s AND %s', 'icontains': 'LIKE %s', 'contains': 'LIKE BINARY %s', 'istartswith': 'LIKE %s', 'startswith': 'LIKE BINARY %s'}
quote_char = '`'
for_update_support = True
subquery_delete_same_table = False
def connect(self, database, **kwargs):
if (not mysql):
raise ('MySQLdb must be installed on the system')
conn_kwargs = {'charset': 'utf8', 'use_unicode': True}
(kwargs)
return ()
def get_field_overrides(self):
return {'primary_key': 'integer AUTO_INCREMENT', 'boolean': 'bool', 'float': 'float', 'double': 'double precision', 'bigint': 'bigint', 'text': 'longtext', 'decimal': 'numeric'}
class Database(object):
'\n A high-level api for working with the supported database engines. `Database`\n provides a wrapper around some of the functions performed by the `Adapter`,\n in addition providing support for:\n - execution of SQL queries\n - creating and dropping tables and indexes\n '
def require_sequence_support(func):
def inner(self, *args, **kwargs):
if (not self.adapter.sequence_support):
raise (('%s adapter does not support sequences' % self.adapter))
return (self, *args)
return inner
def __init__(self, adapter, database, threadlocals=False, autocommit=True, **connect_kwargs):
self.adapter = adapter
(database)
if threadlocals:
self.__local = ()
else:
self.__local = ('DummyLocal', (object,), {})
self._conn_lock = ()
self.autocommit = autocommit
def init(self, database, **connect_kwargs):
self.deferred = (database is None)
self.database = database
self.connect_kwargs = connect_kwargs
def connect(self):
with self._conn_lock:
if self.deferred:
raise ('Error, database not properly initialized before opening connection')
self.__local.conn = (self.database)
self.__local.closed = False
def close(self):
with self._conn_lock:
if self.deferred:
raise ('Error, database not properly initialized before closing connection')
(self.__local.conn)
self.__local.closed = True
def get_conn(self):
if ((not (self.__local, 'closed')) or self.__local.closed):
()
return self.__local.conn
def get_cursor(self):
return ()
def execute(self, sql, params=None):
cursor = ()
res = (sql, (params or ()))
if ():
()
((sql, params))
return cursor
def commit(self):
()
def rollback(self):
()
def set_autocommit(self, autocommit):
self.__local.autocommit = autocommit
def get_autocommit(self):
if (not (self.__local, 'autocommit')):
(self.autocommit)
return self.__local.autocommit
def commit_on_success(self, func):
def inner(*args, **kwargs):
orig = ()
(False)
try:
res = (*args)
()
except:
()
raise
else:
return res
finally:
(orig)
return inner
def last_insert_id(self, cursor, model):
if model._meta.auto_increment:
return (cursor, model)
def rows_affected(self, cursor):
return (cursor)
def quote_name(self, name):
return ((self.adapter.quote_char, name, self.adapter.quote_char))
def column_for_field(self, field):
return (())
def column_for_field_type(self, db_field_type):
try:
return ()[db_field_type]
except KeyError:
raise (('Unknown field type: "%s", valid types are: %s' % db_field_type), ((())))
def field_sql(self, field):
return ('%s %s' % ((field.db_column), ()))
def create_table_query(self, model_class, safe, extra=''):
if (model_class._meta.pk_sequence and self.adapter.sequence_support):
if (not (model_class._meta.pk_sequence)):
(model_class._meta.pk_sequence)
framing = ((safe and 'CREATE TABLE IF NOT EXISTS %s (%s)%s;') or 'CREATE TABLE %s (%s)%s;')
columns = []
for field in ():
((field))
if extra:
extra = (' ' + extra)
table = (model_class._meta.db_table)
return (framing % (table, (columns), extra))
def create_table(self, model_class, safe=False, extra=''):
((model_class, safe, extra))
def create_index_query(self, model_class, field_name, unique):
framing = 'CREATE %(unique)s INDEX %(index)s ON %(table)s(%(field)s);'
if (field_name not in model_class._meta.fields):
raise (('Field %s not on model %s' % (field_name, model_class)))
field_obj = model_class._meta.fields[field_name]
db_table = model_class._meta.db_table
index_name = (('%s_%s' % (db_table, field_obj.db_column)))
unique_expr = (unique, 'UNIQUE', '')
return (framing % {'unique': unique_expr, 'index': index_name, 'table': (db_table), 'field': (field_obj.db_column)})
def create_index(self, model_class, field_name, unique=False):
((model_class, field_name, unique))
def create_foreign_key(self, model_class, field):
return (model_class, field.name, field.unique)
def drop_table(self, model_class, fail_silently=False):
framing = ((fail_silently and 'DROP TABLE IF EXISTS %s;') or 'DROP TABLE %s;')
((framing % (model_class._meta.db_table)))
def add_column_sql(self, model_class, field_name):
field = model_class._meta.fields[field_name]
return ('ALTER TABLE %s ADD COLUMN %s' % ((model_class._meta.db_table), (field)))
def rename_column_sql(self, model_class, field_name, new_name):
field = model_class._meta.fields[field_name]
return ('ALTER TABLE %s RENAME COLUMN %s TO %s' % ((model_class._meta.db_table), (field.db_column), (new_name)))
def drop_column_sql(self, model_class, field_name):
field = model_class._meta.fields[field_name]
return ('ALTER TABLE %s DROP COLUMN %s' % ((model_class._meta.db_table), (field.db_column)))
@require_sequence_support
def create_sequence(self, sequence_name):
return (('CREATE SEQUENCE %s;' % (sequence_name)))
@require_sequence_support
def drop_sequence(self, sequence_name):
return (('DROP SEQUENCE %s;' % (sequence_name)))
def get_indexes_for_table(self, table):
raise NotImplementedError
def get_tables(self):
raise NotImplementedError
def sequence_exists(self, sequence):
raise NotImplementedError
def transaction(self):
return (self)
class SqliteDatabase(Database):
def __init__(self, database, **connect_kwargs):
((), database)
def get_indexes_for_table(self, table):
res = (('PRAGMA index_list(%s);' % (table)))
rows = ([(r[1], (r[2] == 1)) for r in ()])
return rows
def get_tables(self):
res = ('select name from sqlite_master where type="table" order by name')
return [r[0] for r in ()]
def drop_column_sql(self, model_class, field_name):
raise ('Sqlite3 does not have direct support for dropping columns')
def rename_column_sql(self, model_class, field_name, new_name):
raise ('Sqlite3 does not have direct support for renaming columns')
class PostgresqlDatabase(Database):
def __init__(self, database, **connect_kwargs):
((), database)
def get_indexes_for_table(self, table):
res = ('\n SELECT c2.relname, i.indisprimary, i.indisunique\n FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_index i\n WHERE c.relname = %s AND c.oid = i.indrelid AND i.indexrelid = c2.oid\n ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname', (table,))
return ([(r[0], r[1]) for r in ()])
def get_tables(self):
res = ("\n SELECT c.relname\n FROM pg_catalog.pg_class c\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n WHERE c.relkind IN ('r', 'v', '')\n AND n.nspname NOT IN ('pg_catalog', 'pg_toast')\n AND pg_catalog.pg_table_is_visible(c.oid)\n ORDER BY c.relname")
return [row[0] for row in ()]
def sequence_exists(self, sequence):
res = ("\n SELECT COUNT(*)\n FROM pg_class, pg_namespace\n WHERE relkind='S'\n AND pg_class.relnamespace = pg_namespace.oid\n AND relname=%s", (sequence,))
return (()[0])
def set_search_path(self, *search_path):
path_params = ((['%s'] * (search_path)))
(('SET search_path TO %s' % path_params), search_path)
class MySQLDatabase(Database):
def __init__(self, database, **connect_kwargs):
((), database)
def create_foreign_key(self, model_class, field):
framing = '\n ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s\n FOREIGN KEY (%(field)s) REFERENCES %(to)s(%(to_field)s)%(cascade)s;\n '
db_table = model_class._meta.db_table
constraint = ('fk_%s_%s_%s' % (db_table, field.to._meta.db_table, field.db_column))
query = (framing % {'table': (db_table), 'constraint': (constraint), 'field': (field.db_column), 'to': (field.to._meta.db_table), 'to_field': (field.to._meta.pk_col), 'cascade': (' ON DELETE CASCADE' if field.cascade else '')})
(query)
return (model_class, field)
def rename_column_sql(self, model_class, field_name, new_name):
field = model_class._meta.fields[field_name]
return ('ALTER TABLE %s CHANGE COLUMN %s %s %s' % ((model_class._meta.db_table), (field.db_column), (new_name), ()))
def get_indexes_for_table(self, table):
res = (('SHOW INDEXES IN %s;' % (table)))
rows = ([(r[2], (r[1] == 0)) for r in ()])
return rows
def get_tables(self):
res = ('SHOW TABLES;')
return [r[0] for r in ()]
class transaction(object):
def __init__(self, db):
self.db = db
def __enter__(self):
self._orig = ()
(False)
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type:
()
else:
()
(self._orig)
class QueryResultWrapper(object):
    """
    Provides an iterator over the results of a raw Query, additionally doing
    two things:
    - converts rows from the database into model instances
    - ensures that multiple iterations do not result in multiple queries
    """
    def __init__(self, model, cursor, meta=None):
        self.model = model
        self.cursor = cursor
        self.query_meta = meta or {}
        self.column_meta = self.query_meta.get('columns')
        self.join_meta = self.query_meta.get('graph')
        self.__ct = 0
        self.__idx = 0

        self._result_cache = []
        self._populated = False

    def model_from_rowset(self, model_class, attr_dict):
        instance = model_class()
        for attr, value in attr_dict.items():
            if attr in instance._meta.columns:
                field = instance._meta.columns[attr]
                setattr(instance, field.name, field.python_value(value))
            else:
                setattr(instance, attr, value)
        return instance

    def _row_to_dict(self, row):
        return dict((self.cursor.description[i][0], value)
                    for i, value in enumerate(row))

    def construct_instance(self, row):
        if not self.column_meta:
            # fall back to the cursor description for attribute names
            row_dict = self._row_to_dict(row)
            return self.model_from_rowset(self.model, row_dict)
        else:
            collected_models = {}
            for i, (model, col) in enumerate(self.column_meta):
                value = row[i]

                if isinstance(col, tuple):
                    if len(col) == 3:
                        model = self.model  # special-case aggregates
                        col_name = attr = col[2]
                    else:
                        col_name, attr = col
                else:
                    col_name = attr = col

                if model not in collected_models:
                    collected_models[model] = model()

                instance = collected_models[model]

                if col_name in instance._meta.columns:
                    field = instance._meta.columns[col_name]
                    setattr(instance, field.name, field.python_value(value))
                else:
                    setattr(instance, attr, value)

            return self.follow_joins(self.join_meta, collected_models, self.model)

    def follow_joins(self, joins, collected_models, current):
        inst = collected_models[current]

        if current not in joins:
            return inst

        for joined_model, _, _ in joins[current]:
            if joined_model in collected_models:
                joined_inst = self.follow_joins(joins, collected_models, joined_model)
                fk_field = current._meta.get_related_field_for_model(joined_model)
                if not fk_field:
                    continue

                if not joined_inst.get_pk():
                    joined_inst.set_pk(getattr(inst, fk_field.id_storage))

                setattr(inst, fk_field.name, joined_inst)
                setattr(inst, fk_field.id_storage, joined_inst.get_pk())

        return inst

    def __iter__(self):
        self.__idx = 0

        if not self._populated:
            return self
        else:
            return iter(self._result_cache)

    def first(self):
        try:
            self.__idx = 0
            inst = next(self)
        except StopIteration:
            inst = None
        self.__idx = 0
        return inst

    def fill_cache(self):
        if not self._populated:
            idx = self.__idx
            self.__idx = self.__ct
            for x in self:
                pass
            self.__idx = idx

    def iterate(self):
        row = self.cursor.fetchone()
        if row:
            return self.construct_instance(row)
        else:
            self._populated = True
            raise StopIteration

    def iterator(self):
        while 1:
            yield self.iterate()

    def __next__(self):
        # serve from the result cache when this iteration lags a prior one
        if self.__idx < self.__ct:
            inst = self._result_cache[self.__idx]
            self.__idx += 1
            return inst

        instance = self.iterate()
        instance.prepared()  # model "prepared" hook
        self._result_cache.append(instance)
        self.__ct += 1
        self.__idx += 1
        return instance


class DoesNotExist(Exception):
    pass


def asc(f):
    return (f, 'ASC')


def desc(f):
    return (f, 'DESC')


def Count(f, alias='count'):
    return ('COUNT', f, alias)


def Max(f, alias='max'):
    return ('MAX', f, alias)


def Min(f, alias='min'):
    return ('MIN', f, alias)


def Sum(f, alias='sum'):
    return ('SUM', f, alias)


def returns_clone(func):
    def inner(self, *args, **kwargs):
        clone = self.clone()
        func(clone, *args, **kwargs)
        return clone
    return inner
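
# `returns_clone` is what makes query construction chainable without mutating
# the receiver. A sketch of the behavior (`SomeModel` is illustrative):
#
#     base = SomeModel.select()
#     limited = base.limit(10)      # `base` is cloned, the clone is mutated
#     assert base._limit is None    # the original query is untouched
#     assert limited._limit == 10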
ternary = lambda cond, t, f: (cond and [t] or [f])[0]


class Node(object):
    def __init__(self, connector='AND', children=None):
        self.connector = connector
        self.children = children or []
        self.negated = False

    def connect(self, rhs, connector):
        if isinstance(rhs, Leaf):
            if connector == self.connector:
                self.children.append(rhs)
                return self
            else:
                p = Node(connector)
                p.children = [self, rhs]
                return p
        elif isinstance(rhs, Node):
            p = Node(connector)
            p.children = [self, rhs]
            return p

    def __or__(self, rhs):
        return self.connect(rhs, 'OR')

    def __and__(self, rhs):
        return self.connect(rhs, 'AND')

    def __invert__(self):
        self.negated = not self.negated
        return self

    def __bool__(self):
        return bool(self.children)

    def __unicode__(self):
        query = []
        nodes = []
        for child in self.children:
            if isinstance(child, Q):
                query.append(str(child))
            elif isinstance(child, Node):
                nodes.append('(%s)' % str(child))
        query.extend(nodes)

        connector = ' %s ' % self.connector
        query = connector.join(query)

        if self.negated:
            query = 'NOT %s' % query

        return query


class Leaf(object):
    def __init__(self):
        self.parent = None

    def connect(self, connector):
        if self.parent is None:
            self.parent = Node(connector)
            self.parent.children.append(self)

    def __or__(self, rhs):
        self.connect('OR')
        return self.parent | rhs

    def __and__(self, rhs):
        self.connect('AND')
        return self.parent & rhs

    def __invert__(self):
        self.negated = not self.negated
        return self


class Q(Leaf):
    def __init__(self, _model=None, **kwargs):
        self.model = _model
        self.query = kwargs
        self.negated = False
        super(Q, self).__init__()

    def __unicode__(self):
        bits = ['%s = %s' % (k, v) for k, v in sorted(self.query.items())]
        if len(bits) > 1:
            connector = ' AND '
            expr = '(%s)' % connector.join(bits)
        else:
            expr = bits[0]

        if self.negated:
            expr = 'NOT %s' % expr

        return expr
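
# Q objects combine into Node trees via the operators defined on Leaf/Node.
# For example (field names are illustrative only):
#
#     q = (Q(title='a') | Q(title='b')) & ~Q(published=True)
#
# yields a Node with connector 'AND' whose children are the OR-node and the
# negated Q, rendering roughly as:
#     (title = a OR title = b) AND NOT published = True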
class F(object):
    def __init__(self, field, model=None):
        self.field = field
        self.model = model
        self.op = None
        self.value = None

    def __add__(self, rhs):
        self.op = '+'
        self.value = rhs
        return self

    def __sub__(self, rhs):
        self.op = '-'
        self.value = rhs
        return self


class R(Leaf):
    def __init__(self, *params):
        self.params = params
        super(R, self).__init__()

    def sql_select(self):
        if len(self.params) == 2:
            return self.params
        else:
            raise ValueError('Incorrect number of arguments provided for R() expression')

    def sql_where(self):
        return self.params[0], self.params[1:]


def apply_model(model, item):
    """
    Q() objects take a model, which provides context for the keyword arguments.
    In this way Q() objects can be mixed across models.  The purpose of this
    function is to recurse into a query datastructure and apply the given model
    to all Q() objects that do not have a model explicitly set.
    """
    if isinstance(item, Node):
        for child in item.children:
            apply_model(model, child)
    elif isinstance(item, Q):
        if item.model is None:
            item.model = model


def parseq(model, *args, **kwargs):
    """
    Convert any query into a single Node() object -- used to build up the list
    of where clauses when querying.
    """
    node = Node()

    for piece in args:
        apply_model(model, piece)
        if isinstance(piece, (Q, R, Node)):
            node.children.append(piece)
        else:
            raise TypeError('Unknown object: %s' % piece)

    if kwargs:
        node.children.append(Q(model, **kwargs))

    return node
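
# Sketch: parseq() normalizes mixed positional and keyword filters into one
# Node (model and field names are illustrative):
#
#     node = parseq(SomeModel, Q(a=1) | Q(b=2), c=3)
#
# `node.children` now holds the OR-node plus Q(SomeModel, c=3), and every Q
# has had its model filled in by apply_model().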
def find_models(item):
    """
    Utility function to find models referenced in a query and return a set()
    containing them.  This function is used to generate the list of models that
    are part of a where clause.
    """
    seen = set()
    if isinstance(item, Node):
        for child in item.children:
            seen.update(find_models(child))
    elif isinstance(item, Q):
        seen.add(item.model)
    return seen


class EmptyResultException(Exception):
    pass


class BaseQuery(object):
    query_separator = '__'
    force_alias = False

    def __init__(self, model):
        self.model = model
        self.query_context = model
        self.database = self.model._meta.database
        self.operations = self.database.adapter.operations
        self.interpolation = self.database.adapter.interpolation

        self._dirty = True
        self._where = []
        self._where_models = set()
        self._joins = {}
        self._joined_models = set()
        self._table_alias = {}

    def _clone_dict_graph(self, dg):
        cloned = {}
        for node, edges in dg.items():
            cloned[node] = list(edges)
        return cloned

    def clone_where(self):
        return list(self._where)

    def clone_joins(self):
        return self._clone_dict_graph(self._joins)

    def clone(self):
        raise NotImplementedError

    def qn(self, name):
        return self.database.quote_name(name)

    def lookup_cast(self, lookup, value):
        return self.database.lookup_cast(lookup, value)

    def parse_query_args(self, _model, **query):
        """
        Parse out and normalize clauses in a query.  The query is composed of
        various column+lookup-type/value pairs.  Validates that the lookups
        are valid and returns a list of lookup tuples that have the form:
        (field name, (operation, value))
        """
        model = _model
        parsed = []
        for lhs, rhs in query.items():
            if self.query_separator in lhs:
                lhs, op = lhs.rsplit(self.query_separator, 1)
            else:
                op = 'eq'

            if lhs in model._meta.columns:
                lhs = model._meta.columns[lhs].name

            try:
                field = model._meta.get_field_by_name(lhs)
            except AttributeError:
                field = model._meta.get_related_field_by_name(lhs)
                if field is None:
                    raise

            if isinstance(rhs, R):
                expr, params = rhs.sql_where()
                lookup_value = [field.db_value(o) for o in params]

                combined_expr = self.operations[op] % expr
                operation = combined_expr % tuple(self.interpolation for p in params)
            elif isinstance(rhs, F):
                lookup_value = rhs
                operation = self.operations[op]  # leave as "%s"
            elif op == 'in':
                if isinstance(rhs, SelectQuery):
                    lookup_value = rhs
                    operation = 'IN (%s)'
                else:
                    if not rhs:
                        raise EmptyResultException
                    lookup_value = [field.db_value(o) for o in rhs]
                    operation = self.operations[op] % \
                        ','.join([self.interpolation for v in lookup_value])
            elif op == 'is':
                if rhs is not None:
                    raise ValueError('__is lookups only accept None')
                operation = 'IS NULL'
                lookup_value = []
            elif op == 'isnull':
                operation = 'IS NULL' if rhs else 'IS NOT NULL'
                lookup_value = []
            elif isinstance(rhs, (list, tuple)):
                # e.g. 'between' lookups, but left general for lists/tuples
                lookup_value = [field.db_value(o) for o in rhs]
                operation = self.operations[op] % \
                    tuple(self.interpolation for v in lookup_value)
            else:
                lookup_value = field.db_value(rhs)
                operation = self.operations[op] % self.interpolation

            parsed.append(
                (field.db_column, (operation, self.lookup_cast(op, lookup_value)))
            )

        return parsed
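
    # Sketch of the normalized output described in the docstring above, using
    # illustrative field names:
    #
    #     self.parse_query_args(SomeModel, title='x', id__in=[1, 2])
    #     # -> [('title', ('= ?', 'x')), ('id', ('IN (?,?)', [1, 2]))]
    #
    # (the exact placeholder depends on the adapter's `interpolation`).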
    @returns_clone
    def where(self, *args, **kwargs):
        parsed = parseq(self.query_context, *args, **kwargs)
        if parsed:
            self._where.append(parsed)
            self._where_models.update(find_models(parsed))

    @returns_clone
    def join(self, model, join_type=None, on=None, alias=None):
        if self.query_context._meta.rel_exists(model):
            self._joined_models.add(model)
            self._joins.setdefault(self.query_context, [])
            self._joins[self.query_context].append((model, join_type, on))
            if alias:
                self._table_alias[model] = alias
            self.query_context = model
        else:
            raise AttributeError('No foreign key found between %s and %s' % (
                self.query_context.__name__, model.__name__))

    @returns_clone
    def switch(self, model):
        if model == self.model:
            self.query_context = model
            return

        if model in self._joined_models:
            self.query_context = model
            return
        raise AttributeError('You must JOIN on %s' % model.__name__)

    def use_aliases(self):
        return len(self._joined_models) > 0 or self.force_alias

    def combine_field(self, alias, field_col):
        quoted = self.qn(field_col)
        if alias:
            return '%s.%s' % (alias, quoted)
        return quoted

    def safe_combine(self, model, alias, col):
        if col in model._meta.columns:
            return self.combine_field(alias, col)
        elif col in model._meta.fields:
            return self.combine_field(alias, model._meta.fields[col].db_column)
        return col

    def follow_joins(self, current, alias_map, alias_required, alias_count, seen=None):
        computed = []
        seen = seen or set()

        if current not in self._joins:
            return computed

        for i, (model, join_type, on) in enumerate(self._joins[current]):
            seen.add(model)

            if alias_required:
                if model in self._table_alias:
                    alias_map[model] = self._table_alias[model]
                else:
                    alias_count += 1
                    alias_map[model] = 't%d' % alias_count
            else:
                alias_map[model] = ''

            from_model = current
            field = from_model._meta.get_related_field_for_model(model, on)
            if field:
                left_field = field.db_column
                right_field = model._meta.pk_col
            else:
                field = from_model._meta.get_reverse_related_field_for_model(model, on)
                left_field = from_model._meta.pk_col
                right_field = field.db_column

            if join_type is None:
                if field.null and model not in self._where_models:
                    join_type = 'LEFT OUTER'
                else:
                    join_type = 'INNER'

            computed.append('%s JOIN %s AS %s ON %s = %s' % (
                join_type,
                self.qn(model._meta.db_table),
                alias_map[model],
                self.combine_field(alias_map[from_model], left_field),
                self.combine_field(alias_map[model], right_field),
            ))

            computed.extend(self.follow_joins(model, alias_map, alias_required, alias_count, seen))

        return computed

    def compile_where(self):
        alias_count = 0
        alias_map = {}

        alias_required = self.use_aliases()
        if alias_required:
            if self.model in self._table_alias:
                alias_map[self.model] = self._table_alias[self.model]
            else:
                alias_count += 1
                alias_map[self.model] = 't%d' % alias_count
        else:
            alias_map[self.model] = ''

        computed_joins = self.follow_joins(self.model, alias_map, alias_required, alias_count)

        clauses = [self.parse_node(node, alias_map) for node in self._where]

        return computed_joins, clauses, alias_map

    def flatten_clauses(self, clauses):
        where_with_alias = []
        where_data = []
        for query, data in clauses:
            where_with_alias.append(query)
            where_data.extend(data)
        return where_with_alias, where_data

    def convert_where_to_params(self, where_data):
        flattened = []
        for clause in where_data:
            if isinstance(clause, (tuple, list)):
                flattened.extend(clause)
            else:
                flattened.append(clause)
        return flattened

    def parse_node(self, node, alias_map):
        query = []
        query_data = []

        for child in node.children:
            if isinstance(child, Q):
                parsed, data = self.parse_q(child, alias_map)
                query.append(parsed)
                query_data.extend(data)
            elif isinstance(child, R):
                parsed, data = self.parse_r(child, alias_map)
                query.append(parsed % tuple(self.interpolation for o in data))
                query_data.extend(data)
            elif isinstance(child, Node):
                parsed, data = self.parse_node(child, alias_map)
                query.append('(%s)' % parsed)
                query_data.extend(data)

        connector = ' %s ' % node.connector
        query = connector.join(query)

        if node.negated:
            query = 'NOT (%s)' % query

        return query, query_data

    def parse_q(self, q, alias_map):
        model = q.model or self.model
        query = []
        query_data = []

        parsed = self.parse_query_args(model, **q.query)
        for (name, lookup) in parsed:
            operation, value = lookup
            if isinstance(value, SelectQuery):
                sql, value = self.convert_subquery(value)
                operation = operation % sql

            if isinstance(value, F):
                f_model = value.model or model
                operation = operation % self.parse_f(value, f_model, alias_map)
            else:
                query_data.append(value)

            combined = self.combine_field(alias_map[model], name)
            query.append('%s %s' % (combined, operation))

        if len(query) > 1:
            query = '(%s)' % (' AND '.join(query))
        else:
            query = query[0]

        if q.negated:
            query = 'NOT %s' % query

        return query, query_data

    def parse_f(self, f_object, model, alias_map):
        combined = self.combine_field(alias_map[model], f_object.field)
        if f_object.op is not None:
            combined = '(%s %s %s)' % (combined, f_object.op, f_object.value)

        return combined

    def parse_r(self, r_object, alias_map):
        return r_object.sql_where()

    def convert_subquery(self, subquery):
        orig_query = subquery.query
        if subquery.query == '*':
            subquery.query = subquery.model._meta.pk_name

        subquery.force_alias, orig_alias = True, subquery.force_alias
        sql, data = subquery.sql()
        subquery.query = orig_query
        subquery.force_alias = orig_alias
        return sql, data

    def sorted_models(self, alias_map):
        return [(model, alias) for (model, alias) in
                sorted(alias_map.items(), key=lambda i: i[1])]

    def sql(self):
        raise NotImplementedError

    def execute(self):
        raise NotImplementedError

    def raw_execute(self, query, params):
        return self.database.execute(query, params)


class RawQuery(BaseQuery):
    def __init__(self, model, query, *params):
        self._sql = query
        self._params = list(params)
        super(RawQuery, self).__init__(model)

    def clone(self):
        return RawQuery(self.model, self._sql, *self._params)

    def sql(self):
        return self._sql, self._params

    def execute(self):
        return QueryResultWrapper(self.model, self.raw_execute(*self.sql()))

    def join(self):
        raise AttributeError('Raw queries do not support joining programmatically')

    def where(self):
        raise AttributeError('Raw queries do not support querying programmatically')

    def switch(self):
        raise AttributeError('Raw queries do not support switching contexts')

    def __iter__(self):
        return iter(self.execute())


class SelectQuery(BaseQuery):
    def __init__(self, model, query=None):
        self.query = query or '*'
        self._group_by = []
        self._having = []
        self._order_by = []
        self._limit = None
        self._offset = None
        self._distinct = False
        self._qr = None
        self._for_update = False
        self._naive = False
        super(SelectQuery, self).__init__(model)

    def clone(self):
        query = SelectQuery(self.model, self.query)
        query.query_context = self.query_context
        query._group_by = list(self._group_by)
        query._having = list(self._having)
        query._order_by = list(self._order_by)
        query._limit = self._limit
        query._offset = self._offset
        query._distinct = self._distinct
        query._qr = self._qr
        query._for_update = self._for_update
        query._naive = self._naive
        query._where = self.clone_where()
        query._where_models = set(self._where_models)
        query._joined_models = self._joined_models.copy()
        query._joins = self.clone_joins()
        query._table_alias = dict(self._table_alias)
        return query

    @returns_clone
    def paginate(self, page, paginate_by=20):
        if page > 0:
            page -= 1
        self._limit = paginate_by
        self._offset = page * paginate_by

    @returns_clone
    def limit(self, num_rows):
        self._limit = num_rows

    @returns_clone
    def offset(self, num_rows):
        self._offset = num_rows

    @returns_clone
    def for_update(self, for_update=True):
        self._for_update = for_update

    def count(self):
        if self._distinct or self._group_by:
            return self.wrapped_count()

        clone = self.order_by()
        clone._limit = clone._offset = None

        if clone.use_aliases():
            clone.query = 'COUNT(t1.%s)' % clone.model._meta.pk_col
        else:
            clone.query = 'COUNT(%s)' % clone.model._meta.pk_col

        res = self.database.execute(*clone.sql())

        return (res.fetchone() or [0])[0]

    def wrapped_count(self):
        clone = self.order_by()
        clone._limit = clone._offset = None

        sql, params = clone.sql()
        query = 'SELECT COUNT(1) FROM (%s) AS wrapped_select' % sql

        res = self.database.execute(query, params)
        return res.fetchone()[0]

    @returns_clone
    def group_by(self, *clauses):
        model = self.query_context
        for clause in clauses:
            if isinstance(clause, str):
                fields = (clause,)
            elif isinstance(clause, (list, tuple)):
                fields = clause
            elif issubclass(clause, Model):
                model = clause
                fields = clause._meta.get_field_names()

            self._group_by.append((model, fields))

    @returns_clone
    def having(self, *clauses):
        self._having = clauses

    @returns_clone
    def distinct(self):
        self._distinct = True

    @returns_clone
    def order_by(self, *clauses):
        order_by = []

        for clause in clauses:
            if isinstance(clause, tuple):
                if len(clause) == 3:
                    model, field, ordering = clause
                elif len(clause) == 2:
                    if isinstance(clause[0], str):
                        model = self.query_context
                        field, ordering = clause
                    else:
                        model, field = clause
                        ordering = 'ASC'
                else:
                    raise ValueError('Incorrect arguments passed in order_by clause')
            else:
                model = self.query_context
                field = clause
                ordering = 'ASC'

            order_by.append((model, field, ordering))

        self._order_by = order_by

    def exists(self):
        clone = self.paginate(1, 1)
        clone.query = '(1) AS a'
        curs = self.database.execute(*clone.sql())
        return bool(curs.fetchone())

    def get(self, *args, **kwargs):
        orig_ctx = self.query_context
        self.query_context = self.model
        query = self.where(*args, **kwargs).paginate(1, 1)
        try:
            obj = next(query.execute())
            return obj
        except StopIteration:
            raise self.model.DoesNotExist(
                'instance matching query does not exist:\nSQL: %s\nPARAMS: %s'
                % query.sql())
        finally:
            self.query_context = orig_ctx

    def filter(self, *args, **kwargs):
        return filter_query(self, *args, **kwargs)

    def annotate(self, related_model, aggregation=None):
        return annotate_query(self, related_model, aggregation)

    def aggregate(self, func):
        clone = self.order_by()
        clone.query = [func]
        curs = self.database.execute(*clone.sql())
        return curs.fetchone()[0]

    @returns_clone
    def naive(self, make_naive=True):
        self._naive = make_naive

    def parse_select_query(self, alias_map):
        q = self.query

        if isinstance(q, (list, tuple)):
            q = {self.model: self.query}
        elif isinstance(q, str):
            # convert '*' and primary-key lookups
            if q == '*':
                q = {self.model: self.model._meta.get_field_names()}
            elif q in (self.model._meta.pk_col, self.model._meta.pk_name):
                q = {self.model: [self.model._meta.pk_name]}
            else:
                return q, []

        # by now we should have a dictionary if a valid type was passed in
        if not isinstance(q, dict):
            raise TypeError('Unknown type encountered parsing select query')

        # gather aliases and models
        sorted_models = self.sorted_models(alias_map)

        columns = []
        model_cols = []

        for model, alias in sorted_models:
            if model not in q:
                continue

            if '*' in q[model]:
                idx = q[model].index('*')
                q[model] = q[model][:idx] + model._meta.get_field_names() + q[model][idx + 1:]

            for clause in q[model]:
                if isinstance(clause, R):
                    clause = clause.sql_select()

                if isinstance(clause, tuple):
                    if len(clause) == 3:
                        func, col_name, col_alias = clause
                        column = model._meta.get_column(col_name)
                        columns.append('%s(%s) AS %s' % (
                            func, self.safe_combine(model, alias, column), col_alias))
                        model_cols.append((model, (func, column, col_alias)))
                    elif len(clause) == 2:
                        col_name, col_alias = clause
                        column = model._meta.get_column(col_name)
                        columns.append('%s AS %s' % (
                            self.safe_combine(model, alias, column), col_alias))
                        model_cols.append((model, (column, col_alias)))
                    else:
                        raise ValueError('Clause must be either a 2- or 3-tuple')
                else:
                    column = model._meta.get_column(clause)
                    columns.append(self.safe_combine(model, alias, column))
                    model_cols.append((model, column))

        return ', '.join(columns), model_cols

    def sql_meta(self):
        joins, clauses, alias_map = self.compile_where()
        where, where_data = self.flatten_clauses(clauses)

        table = self.qn(self.model._meta.db_table)

        params = []
        group_by = []
        use_aliases = self.use_aliases()

        if use_aliases:
            table = '%s AS %s' % (table, alias_map[self.model])

        for model, clause in self._group_by:
            if use_aliases:
                alias = alias_map[model]
            else:
                alias = ''

            for field in clause:
                group_by.append(self.safe_combine(model, alias, field))

        parsed_query, model_cols = self.parse_select_query(alias_map)

        query_meta = {
            'columns': model_cols,
            'graph': self._joins,
        }

        if self._distinct:
            sel = 'SELECT DISTINCT'
        else:
            sel = 'SELECT'

        select = '%s %s FROM %s' % (sel, parsed_query, table)
        joins = '\n'.join(joins)
        where = ' AND '.join(where)
        group_by = ', '.join(group_by)
        having = ' AND '.join(self._having)

        order_by = []
        for piece in self._order_by:
            model, field, ordering = piece
            if use_aliases:
                alias = alias_map[model]
            else:
                alias = ''

            order_by.append('%s %s' % (self.safe_combine(model, alias, field), ordering))

        pieces = [select]

        if joins:
            pieces.append(joins)
        if where:
            pieces.append('WHERE %s' % where)
            params.extend(self.convert_where_to_params(where_data))
        if group_by:
            pieces.append('GROUP BY %s' % group_by)
        if having:
            pieces.append('HAVING %s' % having)
        if order_by:
            pieces.append('ORDER BY %s' % ', '.join(order_by))
        if self._limit:
            pieces.append('LIMIT %d' % self._limit)
        if self._offset:
            pieces.append('OFFSET %d' % self._offset)

        if self._for_update and self.database.adapter.for_update_support:
            pieces.append('FOR UPDATE')

        return ' '.join(pieces), params, query_meta

    def sql(self):
        query, params, meta = self.sql_meta()
        return query, params

    def execute(self):
        if self._dirty or not self._qr:
            try:
                sql, params, meta = self.sql_meta()
            except EmptyResultException:
                return []
            else:
                if self._naive:
                    meta = None
                self._qr = QueryResultWrapper(self.model, self.raw_execute(sql, params), meta)
                self._dirty = False
                return self._qr
        else:
            # the query has not changed, so re-use the result wrapper
            return self._qr

    def __iter__(self):
        return iter(self.execute())


class UpdateQuery(BaseQuery):
    def __init__(self, _model, **kwargs):
        self.update_query = kwargs
        super(UpdateQuery, self).__init__(_model)

    def clone(self):
        query = UpdateQuery(self.model, **self.update_query)
        query._where = self.clone_where()
        query._where_models = set(self._where_models)
        query._joined_models = self._joined_models.copy()
        query._joins = self.clone_joins()
        query._table_alias = dict(self._table_alias)
        return query

    def parse_update(self):
        sets = {}
        for k, v in self.update_query.items():
            if k in self.model._meta.columns:
                k = self.model._meta.columns[k].name

            try:
                field = self.model._meta.get_field_by_name(k)
            except AttributeError:
                field = self.model._meta.get_related_field_by_name(k)
                if field is None:
                    raise

            if not isinstance(v, F):
                v = field.db_value(v)

            sets[field.db_column] = v

        return sets

    def sql(self):
        joins, clauses, alias_map = self.compile_where()
        where, where_data = self.flatten_clauses(clauses)
        set_statement = self.parse_update()

        params = []
        update_params = []

        alias = alias_map.get(self.model)

        for k, v in sorted(set_statement.items()):
            if isinstance(v, F):
                value = self.parse_f(v, v.model or self.model, alias_map)
            else:
                params.append(v)
                value = self.interpolation

            update_params.append('%s=%s' % (self.combine_field(alias, k), value))

        update = 'UPDATE %s SET %s' % (
            self.qn(self.model._meta.db_table), ', '.join(update_params))
        where = ' AND '.join(where)

        pieces = [update]

        if where:
            pieces.append('WHERE %s' % where)
            params.extend(self.convert_where_to_params(where_data))

        return ' '.join(pieces), params

    def join(self, *args, **kwargs):
        raise AttributeError('Update queries do not support JOINs in sqlite')

    def execute(self):
        result = self.raw_execute(*self.sql())
        return self.database.rows_affected(result)


class DeleteQuery(BaseQuery):
    def clone(self):
        query = DeleteQuery(self.model)
        query._where = self.clone_where()
        query._where_models = set(self._where_models)
        query._joined_models = self._joined_models.copy()
        query._joins = self.clone_joins()
        query._table_alias = dict(self._table_alias)
        return query

    def sql(self):
        joins, clauses, alias_map = self.compile_where()
        where, where_data = self.flatten_clauses(clauses)

        params = []

        delete = 'DELETE FROM %s' % self.qn(self.model._meta.db_table)
        where = ' AND '.join(where)

        pieces = [delete]

        if where:
            pieces.append('WHERE %s' % where)
            params.extend(self.convert_where_to_params(where_data))

        return ' '.join(pieces), params

    def join(self, *args, **kwargs):
        raise AttributeError('Delete queries do not support JOINs in sqlite')

    def execute(self):
        result = self.raw_execute(*self.sql())
        return self.database.rows_affected(result)


class InsertQuery(BaseQuery):
    def __init__(self, _model, **kwargs):
        self.insert_query = kwargs
        super(InsertQuery, self).__init__(_model)

    def parse_insert(self):
        cols = []
        vals = []
        for k, v in sorted(self.insert_query.items()):
            if k in self.model._meta.columns:
                k = self.model._meta.columns[k].name

            try:
                field = self.model._meta.get_field_by_name(k)
            except AttributeError:
                field = self.model._meta.get_related_field_by_name(k)
                if field is None:
                    raise

            cols.append(self.qn(field.db_column))
            vals.append(field.db_value(v))

        return cols, vals

    def sql(self):
        cols, vals = self.parse_insert()

        insert = 'INSERT INTO %s (%s) VALUES (%s)' % (
            self.qn(self.model._meta.db_table),
            ','.join(cols),
            ','.join(self.interpolation for v in vals))

        return insert, vals

    def where(self, *args, **kwargs):
        raise AttributeError('Insert queries do not support WHERE clauses')

    def join(self, *args, **kwargs):
        raise AttributeError('Insert queries do not support JOINs')

    def execute(self):
        result = self.raw_execute(*self.sql())
        return self.database.last_insert_id(result, self.model)


def model_or_select(m_or_q):
    """
    Return both a model and a select query for the provided model *OR* select
    query.
    """
    if isinstance(m_or_q, BaseQuery):
        return m_or_q.model, m_or_q
    else:
        return m_or_q, m_or_q.select()


def convert_lookup(model, joins, lookup):
    """
    Given a model, a graph of joins, and a lookup, return a tuple containing
    a normalized lookup:

    (model actually being queried, updated graph of joins, normalized lookup)
    """
    operations = model._meta.database.adapter.operations

    pieces = lookup.split('__')
    operation = None

    query_model = model

    if len(pieces) > 1:
        if pieces[-1] in operations:
            operation = pieces.pop()

        lookup = pieces.pop()

        if pieces:
            for piece in pieces:
                # try to find the correct foreign key field to join on
                joined_model = None
                for field in query_model._meta.get_fields():
                    if not isinstance(field, ForeignKeyField):
                        continue

                    if piece in (field.name, field.db_column, field.related_name):
                        joined_model = field.to

                if not joined_model:
                    try:
                        joined_model = query_model._meta.reverse_relations[piece]
                    except KeyError:
                        raise ValueError('Unknown relation: "%s" of "%s"' % (
                            piece, query_model))

                joins.setdefault(query_model, set())
                joins[query_model].add(joined_model)
                query_model = joined_model

    if operation:
        lookup = '%s__%s' % (lookup, operation)

    return query_model, joins, lookup
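
# Sketch: a lookup like 'blog__title__icontains' (names illustrative) walks
# the FK graph from the starting model to its Blog relation, records the
# needed join in `joins`, and returns the related model plus the now-local
# lookup 'title__icontains'.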
def filter_query(model_or_query, *args, **kwargs):
    """
    Provide a django-like interface for executing queries
    """
    model, select_query = model_or_select(model_or_query)

    query = {}  # mapping of models to lists of (lookup, value) pairs
    joins = {}  # a graph of joins needed, fed to convert_lookup()

    # Q() objects can only work on a single table, so construct a new argument
    # list of Q() objects with their lookups converted
    def fix_q(node_or_q, joins):
        if isinstance(node_or_q, Node):
            for child in node_or_q.children:
                fix_q(child, joins)
        elif isinstance(node_or_q, Q):
            new_query = {}
            curr_model = node_or_q.model or model
            for raw_lookup, value in node_or_q.query.items():
                query_model, joins, lookup = convert_lookup(curr_model, joins, raw_lookup)
                new_query[lookup] = value
            node_or_q.model = query_model
            node_or_q.query = new_query

    for node_or_q in args:
        fix_q(node_or_q, joins)

    # iterate over keyword lookups and determine lookups and necessary joins
    for raw_lookup, value in kwargs.items():
        queried_model, joins, lookup = convert_lookup(model, joins, raw_lookup)
        query.setdefault(queried_model, [])
        query[queried_model].append((lookup, value))

    def follow_joins(current, query):
        if current in joins:
            for joined_model in joins[current]:
                query = query.switch(current)
                if joined_model not in query._joined_models:
                    query = query.join(joined_model)
                query = follow_joins(joined_model, query)
        return query
    select_query = follow_joins(model, select_query)

    for node in args:
        select_query = select_query.where(node)

    for model, lookups in query.items():
        qargs, qkwargs = [], {}
        for lookup in lookups:
            if isinstance(lookup, tuple):
                qkwargs[lookup[0]] = lookup[1]
            else:
                qargs.append(lookup)
        select_query = select_query.switch(model).where(*qargs, **qkwargs)

    return select_query
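
# Sketch of the django-like interface (models and fields illustrative):
#
#     filter_query(Entry, Q(title='a') | Q(title='b'), blog__name='x')
#
# joins Entry -> Blog as required by the keyword lookup, applies the Q tree
# on Entry, and returns an ordinary SelectQuery that can be chained further.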
def annotate_query(select_query, related_model, aggregation):
    """
    Perform an aggregation against a related model
    """
    aggregation = aggregation or Count(related_model._meta.pk_name)
    model = select_query.model

    select_query = select_query.switch(model)
    cols = select_query.query

    # ensure the join is there
    if related_model not in select_query._joined_models:
        select_query = select_query.join(related_model).switch(model)

    # normalize the selection and figure out what to group by
    if isinstance(cols, dict):
        selection = cols
        group_by = cols[model]
    elif isinstance(cols, str):
        selection = {model: [cols]}
        if cols == '*':
            group_by = model
        else:
            group_by = [col.strip() for col in cols.split(',')]
    elif isinstance(cols, (list, tuple)):
        selection = {model: cols}
        group_by = cols
    else:
        raise ValueError('Unknown type passed in to select query: "%s"' % type(cols))

    # add the aggregate column for the related model
    selection[related_model] = [aggregation]

    select_query.query = selection
    return select_query.group_by(group_by)
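
# Sketch (models illustrative): annotating blogs with an entry count
#
#     Blog.select().annotate(Entry)                     # default: Count(pk)
#     Blog.select().annotate(Entry, Max('votes', 'max_votes'))
#
# adds the aggregate column for the related model and groups by the
# originating model's selected columns.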
class Column(object):
    db_field = ''
    template = '%(column_type)s'

    def __init__(self, **attributes):
        self.attributes = self.get_attributes()
        self.attributes.update(**attributes)

    def get_attributes(self):
        return {}

    def python_value(self, value):
        return value

    def db_value(self, value):
        return value

    def render(self, db):
        # NOTE: the name of the column-type resolver on the database object
        # was lost in this copy; `column_for_field_type` is inferred.
        params = {'column_type': db.column_for_field_type(self.db_field)}
        params.update(self.attributes)
        return self.template % params


class VarCharColumn(Column):
    db_field = 'string'
    template = '%(column_type)s(%(max_length)d)'

    def get_attributes(self):
        return {'max_length': 255}

    def db_value(self, value):
        value = str(value or '')
        return value[:self.attributes['max_length']]


class TextColumn(Column):
    db_field = 'text'

    def db_value(self, value):
        return value or ''


def format_date_time(value, formats, post_process=None):
    post_process = post_process or (lambda x: x)
    for fmt in formats:
        try:
            return post_process(datetime.datetime.strptime(value, fmt))
        except ValueError:
            pass  # try the next format rather than bailing out
    return value


class DateTimeColumn(Column):
    db_field = 'datetime'

    def get_attributes(self):
        return {
            'formats': [
                '%Y-%m-%d %H:%M:%S.%f',
                '%Y-%m-%d %H:%M:%S',
                '%Y-%m-%d',
            ]
        }

    def python_value(self, value):
        if isinstance(value, str):
            return format_date_time(value, self.attributes['formats'])
        return value


class DateColumn(Column):
    db_field = 'date'

    def get_attributes(self):
        return {
            'formats': [
                '%Y-%m-%d',
                '%Y-%m-%d %H:%M:%S',
                '%Y-%m-%d %H:%M:%S.%f',
            ]
        }

    def python_value(self, value):
        if isinstance(value, str):
            pp = lambda x: x.date()
            return format_date_time(value, self.attributes['formats'], pp)
        elif isinstance(value, datetime.datetime):
            return value.date()
        return value


class TimeColumn(Column):
    db_field = 'time'

    def get_attributes(self):
        return {
            'formats': [
                '%H:%M:%S.%f',
                '%H:%M:%S',
                '%H:%M',
                '%Y-%m-%d %H:%M:%S.%f',
                '%Y-%m-%d %H:%M:%S',
            ]
        }

    def python_value(self, value):
        if isinstance(value, str):
            pp = lambda x: x.time()
            return format_date_time(value, self.attributes['formats'], pp)
        elif isinstance(value, datetime.datetime):
            return value.time()
        return value


class IntegerColumn(Column):
    db_field = 'integer'

    def db_value(self, value):
        return value or 0

    def python_value(self, value):
        if value is not None:
            return int(value)


class BigIntegerColumn(IntegerColumn):
    db_field = 'bigint'


class BooleanColumn(Column):
    db_field = 'boolean'

    def db_value(self, value):
        return bool(value)

    def python_value(self, value):
        return bool(value)


class FloatColumn(Column):
    db_field = 'float'

    def db_value(self, value):
        return value or 0.0

    def python_value(self, value):
        if value is not None:
            return float(value)


class DoubleColumn(FloatColumn):
    db_field = 'double'


class DecimalColumn(Column):
    db_field = 'decimal'
    template = '%(column_type)s(%(max_digits)d, %(decimal_places)d)'

    def get_attributes(self):
        return {'max_digits': 10, 'decimal_places': 5}

    def db_value(self, value):
        return value or decimal.Decimal(0)

    def python_value(self, value):
        if value is not None:
            if isinstance(value, decimal.Decimal):
                return value
            return decimal.Decimal(str(value))


class PrimaryKeyColumn(Column):
    db_field = 'primary_key'


class PrimaryKeySequenceColumn(PrimaryKeyColumn):
    db_field = 'primary_key_with_sequence'


class FieldDescriptor(object):
    def __init__(self, field):
        self.field = field
        self._cache_name = '__%s' % self.field.name

    def __get__(self, instance, instance_type=None):
        if instance:
            return getattr(instance, self._cache_name, None)
        return self.field

    def __set__(self, instance, value):
        setattr(instance, self._cache_name, value)


class Field(object):
    column_class = None
    default = None
    field_template = '%(column)s%(nullable)s'
    _field_counter = 0
    _order = 0

    def __init__(self, null=False, db_index=False, unique=False, verbose_name=None,
                 help_text=None, db_column=None, default=None, *args, **kwargs):
        self.null = null
        self.db_index = db_index
        self.unique = unique
        self.verbose_name = verbose_name
        self.help_text = help_text
        self.db_column = db_column
        self.default = default

        self.attributes = kwargs

        Field._field_counter += 1
        self._order = Field._field_counter

    def add_to_class(self, klass, name):
        self.name = name
        self.model = klass
        self.verbose_name = self.verbose_name or re.sub('_+', ' ', name).title()
        self.db_column = self.db_column or self.name
        self.column = self.get_column()

        setattr(klass, name, FieldDescriptor(self))

    def get_column(self):
        return self.column_class(**self.attributes)

    def render_field_template(self):
        params = {
            'column': self.column.render(self.model._meta.database),
            'nullable': ternary(self.null, '', ' NOT NULL'),
        }
        params.update(self.column.attributes)
        return self.field_template % params

    def db_value(self, value):
        if self.null and value is None:
            return None
        return self.column.db_value(value)

    def python_value(self, value):
        return self.column.python_value(value)

    def lookup_value(self, lookup_type, value):
        return self.db_value(value)

    def class_prepared(self):
        pass


class CharField(Field):
    column_class = VarCharColumn


class TextField(Field):
    column_class = TextColumn


class DateTimeField(Field):
    column_class = DateTimeColumn


class DateField(Field):
    column_class = DateColumn


class TimeField(Field):
    column_class = TimeColumn


class IntegerField(Field):
    column_class = IntegerColumn


class BigIntegerField(IntegerField):
    column_class = BigIntegerColumn


class BooleanField(IntegerField):
    column_class = BooleanColumn


class FloatField(Field):
    column_class = FloatColumn


class DoubleField(Field):
    column_class = DoubleColumn


class DecimalField(Field):
    column_class = DecimalColumn


class PrimaryKeyField(IntegerField):
    column_class = PrimaryKeyColumn
    field_template = '%(column)s NOT NULL PRIMARY KEY%(nextval)s'

    def __init__(self, column_class=None, *args, **kwargs):
        if kwargs.get('null'):
            raise ValueError('Primary keys cannot be nullable')
        if column_class:
            self.column_class = column_class
        if 'nextval' not in kwargs:
            kwargs['nextval'] = ''
        super(PrimaryKeyField, self).__init__(*args, **kwargs)

    def get_column_class(self):
        # if using the default column class and sequences are supported,
        # swap in the sequence-aware column
        if self.column_class == PrimaryKeyColumn:
            if self.model._meta.pk_sequence != None and \
                    self.model._meta.database.adapter.sequence_support:
                self.column_class = PrimaryKeySequenceColumn
        return self.column_class

    def get_column(self):
        return self.get_column_class()(**self.attributes)


class ForeignRelatedObject(object):
    def __init__(self, to, field):
        self.to = to
        self.field = field
        self.field_name = self.field.name
        self.field_column = self.field.id_storage
        self.cache_name = '_cache_%s' % self.field_name

    def __get__(self, instance, instance_type=None):
        if not instance:
            return self.field

        if not getattr(instance, self.cache_name, None):
            id = getattr(instance, self.field_column, 0)
            qr = self.to.select().where(**{self.to._meta.pk_name: id})
            try:
                setattr(instance, self.cache_name, qr.get())
            except self.to.DoesNotExist:
                if not self.field.null:
                    raise
        return getattr(instance, self.cache_name, None)

    def __set__(self, instance, obj):
        if self.field.null and obj is None:
            setattr(instance, self.field_column, None)
            setattr(instance, self.cache_name, None)
        elif not isinstance(obj, Model):
            setattr(instance, self.field_column, obj)
        else:
            if not isinstance(obj, self.to):
                raise TypeError('Cannot assign %s to %s, invalid type' % (
                    obj, self.field.name))
            setattr(instance, self.field_column, obj.get_pk())
            setattr(instance, self.cache_name, obj)


class ReverseForeignRelatedObject(object):
    def __init__(self, related_model, name):
        self.field_name = name
        self.related_model = related_model

    def __get__(self, instance, instance_type=None):
        query = {self.field_name: instance.get_pk()}
        qr = self.related_model.select().where(**query)
        return qr


class ForeignKeyField(IntegerField):
    field_template = '%(column)s%(nullable)s REFERENCES %(to_table)s (%(to_pk)s)%(cascade)s%(extra)s'

    def __init__(self, to, null=False, related_name=None, cascade=False,
                 extra=None, *args, **kwargs):
        self.to = to
        self._related_name = related_name
        self.cascade = cascade
        self.extra = extra

        kwargs.update({
            'cascade': ' ON DELETE CASCADE' if self.cascade else '',
            'extra': self.extra or '',
        })
        super(ForeignKeyField, self).__init__(null=null, *args, **kwargs)

    def add_to_class(self, klass, name):
        self.name = name
        self.model = klass
        self.db_column = self.db_column or self.name + '_id'

        if self.name == self.db_column:
            self.id_storage = self.db_column + '_id'
        else:
            self.id_storage = self.db_column

        if self.to == 'self':
            self.to = self.model

        self.verbose_name = self.verbose_name or re.sub('_', ' ', name).title()

        if self._related_name is not None:
            self.related_name = self._related_name
        else:
            self.related_name = klass._meta.db_table + '_set'

        klass._meta.rel_fields[name] = self.name
        setattr(klass, self.name, ForeignRelatedObject(self.to, self))
        setattr(klass, self.id_storage, None)

        reverse_rel = ReverseForeignRelatedObject(klass, self.name)
        setattr(self.to, self.related_name, reverse_rel)
        self.to._meta.reverse_relations[self.related_name] = klass

    def lookup_value(self, lookup_type, value):
        if isinstance(value, Model):
            return value.get_pk()
        return value or None

    def db_value(self, value):
        if isinstance(value, Model):
            return value.get_pk()
        if self.null and value is None:
            return None
        return self.column.db_value(value)

    def get_column(self):
        to_pk = self.to._meta.get_field_by_name(self.to._meta.pk_name)
        to_col_class = to_pk.get_column_class()
        if to_col_class not in (PrimaryKeyColumn, PrimaryKeySequenceColumn):
            self.column_class = to_pk.get_column_class()
        return self.column_class(**self.attributes)

    def class_prepared(self):
        # we may not know the related model's primary key when add_to_class()
        # runs, so update the attributes once the class has been built
        self.attributes.update({
            'to_table': self.to._meta.db_table,
            'to_pk': self.to._meta.pk_col,
        })
        self.column = self.get_column()


database = SqliteDatabase(DATABASE_NAME)


class BaseModelOptions(object):
    ordering = None
    pk_sequence = None

    def __init__(self, model_class, options=None):
        # configurable options
        options = options or {'database': database}
        for k, v in options.items():
            setattr(self, k, v)

        self.rel_fields = {}
        self.reverse_relations = {}
        self.fields = {}
        self.columns = {}
        self.model_class = model_class

    def get_sorted_fields(self):
        # the primary key sorts first, then declaration order
        return sorted(
            self.fields.items(),
            key=lambda kv: (1 if kv[0] == self.pk_name else 2, kv[1]._order),
        )

    def get_field_names(self):
        return [f[0] for f in self.get_sorted_fields()]

    def get_fields(self):
        return [f[1] for f in self.get_sorted_fields()]

    def get_field_by_name(self, name):
        if name in self.fields:
            return self.fields[name]
        raise AttributeError('Field named %s not found' % name)

    def get_column_names(self):
        return list(self.columns.keys())

    def get_column(self, field_or_col):
        if field_or_col in self.fields:
            return self.fields[field_or_col].db_column
        return field_or_col

    def get_related_field_by_name(self, name):
        if name in self.rel_fields:
            return self.fields[self.rel_fields[name]]

    def get_related_field_for_model(self, model, name=None):
        for field in self.fields.values():
            if isinstance(field, ForeignKeyField) and field.to == model:
                if name is None or name == field.name or name == field.db_column:
                    return field

    def get_reverse_related_field_for_model(self, model, name=None):
        for field in model._meta.fields.values():
            if isinstance(field, ForeignKeyField) and field.to == self.model_class:
                if name is None or name == field.name or name == field.db_column:
                    return field

    def get_field_for_related_name(self, model, related_name):
        for field in model._meta.fields.values():
            if isinstance(field, ForeignKeyField) and field.to == self.model_class:
                if field.related_name == related_name:
                    return field

    def rel_exists(self, model):
        return self.get_related_field_for_model(model) or \
               self.get_reverse_related_field_for_model(model)


class BaseModel(type):
    inheritable_options = ['database', 'ordering', 'pk_sequence']

    def __new__(cls, name, bases, attrs):
        cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)
        if not bases:
            return cls

        attr_dict = {}
        meta = attrs.pop('Meta', None)
        if meta:
            attr_dict = meta.__dict__

        for b in bases:
            base_meta = getattr(b, '_meta', None)
            if not base_meta:
                continue

            for (k, v) in base_meta.__dict__.items():
                if k in cls.inheritable_options and k not in attr_dict:
                    attr_dict[k] = v
                elif k == 'fields':
                    for (field_name, field_obj) in v.items():
                        if isinstance(field_obj, PrimaryKeyField):
                            continue
                        if field_name in cls.__dict__:
                            continue
                        field_copy = copy.deepcopy(field_obj)
                        setattr(cls, field_name, field_copy)

        _meta = BaseModelOptions(cls, attr_dict)

        if not hasattr(_meta, 'db_table'):
            _meta.db_table = re.sub('[^\\w]+', '_', cls.__name__.lower())

        if _meta.db_table in _meta.database.adapter.reserved_tables:
            warnings.warn('Table for %s ("%s") is reserved, please override using Meta.db_table' % (
                cls, _meta.db_table))

        setattr(cls, '_meta', _meta)

        _meta.pk_name = None

        for name, attr in list(cls.__dict__.items()):
            if isinstance(attr, Field):
                attr.add_to_class(cls, name)
                _meta.fields[attr.name] = attr
                _meta.columns[attr.db_column] = attr
                if isinstance(attr, PrimaryKeyField):
                    _meta.pk_name = attr.name

        if _meta.pk_name is None:
            _meta.pk_name = 'id'
            pk = PrimaryKeyField()
            pk.add_to_class(cls, _meta.pk_name)
            _meta.fields[_meta.pk_name] = pk

        _meta.model_name = cls.__name__

        pk_field = _meta.fields[_meta.pk_name]
        pk_col = pk_field.column
        if _meta.pk_sequence and _meta.database.adapter.sequence_support:
            pk_col.attributes['nextval'] = " default nextval('%s')" % _meta.pk_sequence
        _meta.pk_col = pk_field.db_column
        _meta.auto_increment = isinstance(pk_col, PrimaryKeyColumn)

        for field in _meta.fields.values():
            field.class_prepared()

        if hasattr(cls, '__unicode__'):
            setattr(cls, '__repr__',
                    lambda self: '<%s: %r>' % (_meta.model_name, self.__unicode__()))

        exception_class = type('%sDoesNotExist' % _meta.model_name, (DoesNotExist,), {})
        cls.DoesNotExist = exception_class

        return cls


class Model(object, metaclass=BaseModel):
    def __init__(self, *args, **kwargs):
        self.initialize_defaults()

        for k, v in kwargs.items():
            setattr(self, k, v)

    def initialize_defaults(self):
        for field in self._meta.fields.values():
            if field.default is not None:
                if callable(field.default):
                    field_value = field.default()
                else:
                    field_value = field.default
                setattr(self, field.name, field_value)

    def prepared(self):
        # hook for initializing objects after they are populated from the db
        pass

    def __eq__(self, other):
        return other.__class__ == self.__class__ and \
               self.get_pk() and \
               other.get_pk() == self.get_pk()

    def get_field_dict(self):
        field_dict = {}

        for field in self._meta.fields.values():
            if isinstance(field, ForeignKeyField):
                field_dict[field.name] = getattr(self, field.id_storage)
            else:
                field_dict[field.name] = getattr(self, field.name)

        return field_dict

    @classmethod
    def table_exists(cls):
        return cls._meta.db_table in cls._meta.database.get_tables()

    @classmethod
    def create_table(cls, fail_silently=False, extra=''):
        if fail_silently and cls.table_exists():
            return

        cls._meta.database.create_table(cls, extra=extra)

        for field_name, field_obj in cls._meta.fields.items():
            if isinstance(field_obj, ForeignKeyField):
                cls._meta.database.create_foreign_key(cls, field_obj)
            elif field_obj.db_index or field_obj.unique:
                cls._meta.database.create_index(cls, field_obj.name, field_obj.unique)

    @classmethod
    def drop_table(cls, fail_silently=False):
        cls._meta.database.drop_table(cls, fail_silently)

    @classmethod
    def filter(cls, *args, **kwargs):
        return filter_query(cls, *args, **kwargs)

    @classmethod
    def select(cls, query=None):
        select_query = SelectQuery(cls, query)
        if cls._meta.ordering:
            select_query = select_query.order_by(*cls._meta.ordering)
        return select_query

    @classmethod
    def update(cls, **query):
        return UpdateQuery(cls, **query)

    @classmethod
    def insert(cls, **query):
        return InsertQuery(cls, **query)

    @classmethod
    def delete(cls, **query):
        return DeleteQuery(cls, **query)

    @classmethod
    def raw(cls, sql, *params):
        return RawQuery(cls, sql, *params)

    @classmethod
    def create(cls, **query):
        inst = cls(**query)
        inst.save(force_insert=True)
        return inst

    @classmethod
    def get_or_create(cls, **query):
        try:
            inst = cls.get(**query)
        except cls.DoesNotExist:
            inst = cls.create(**query)
        return inst

    @classmethod
    def get(cls, *args, **kwargs):
        return cls.select().get(*args, **kwargs)

    def get_pk(self):
        return getattr(self, self._meta.pk_name, None)

    def set_pk(self, pk):
        pk_field = self._meta.fields[self._meta.pk_name]
        setattr(self, self._meta.pk_name, pk_field.python_value(pk))

    def save(self, force_insert=False):
        field_dict = self.get_field_dict()
        if self.get_pk() and not force_insert:
            field_dict.pop(self._meta.pk_name)
            update = self.update(**field_dict).where(
                **{self._meta.pk_name: self.get_pk()})
            update.execute()
        else:
            if self._meta.auto_increment:
                field_dict.pop(self._meta.pk_name)
            insert = self.insert(**field_dict)
            new_pk = insert.execute()
            if self._meta.auto_increment:
                setattr(self, self._meta.pk_name, new_pk)

    @classmethod
    def collect_models(cls, accum=None):
        # dfs to grab any affected models, so deletes can be issued from the
        # bottom up, using subqueries to obtain the objects to remove
        accum = accum or []
        models = []

        for related_name, rel_model in cls._meta.reverse_relations.items():
            rel_field = cls._meta.get_field_for_related_name(rel_model, related_name)
            coll = [(rel_model, rel_field.name, rel_field.null)] + accum
            if not rel_field.null:
                models.extend(rel_model.collect_models(coll))

            models.append(coll)
        return models

    def collect_queries(self):
        select_queries = []
        nullable_queries = []
        collected_models = self.collect_models()
        if collected_models:
            for model_joins in collected_models:
                depth = len(model_joins)
                base, last, nullable = model_joins[0]
                query = base.select([base._meta.pk_name])
                for model, join, _ in model_joins[1:]:
                    query = query.join(model)
                    last = join
                query = query.where(**{last: self.get_pk()})
                if nullable:
                    nullable_queries.append((query, last, depth))
                else:
                    select_queries.append((query, last, depth))

        return select_queries, nullable_queries

    def delete_instance(self, recursive=False):
        if recursive:
            select_queries, nullable_queries = self.collect_queries()
            for query, fk_field, depth in select_queries:
                model = query.model
                if not self._meta.database.adapter.subquery_delete_same_table:
                    query = [obj.get_pk() for obj in query]
                    if not query:
                        continue
                model.delete().where(
                    **{'%s__in' % model._meta.pk_name: query}).execute()
            for query, fk_field, depth in nullable_queries:
                model = query.model
                if not self._meta.database.adapter.subquery_delete_same_table:
                    query = [obj.get_pk() for obj in query]
                    if not query:
                        continue
                model.update(**{fk_field: None}).where(
                    **{'%s__in' % model._meta.pk_name: query}).execute()

        return self.delete().where(**{self._meta.pk_name: self.get_pk()}).execute()

    def refresh(self, *fields):
        fields = fields or self._meta.get_field_names()
        obj = self.select(fields).get(**{self._meta.pk_name: self.get_pk()})

        for field_name in fields:
            setattr(self, field_name, getattr(obj, field_name))


def find_subclasses(klass, include_self=False):
    accum = []
    for child in klass.__subclasses__():
        accum.extend(find_subclasses(child, True))
    if include_self:
        accum.append(klass)
    return accum


def create_model_tables(models, **create_table_kwargs):
    """Create tables for all given models (in the right order)."""
    for m in sort_models_topologically(models):
        m.create_table(**create_table_kwargs)


def drop_model_tables(models, **drop_table_kwargs):
    """Drop tables for all given models (in the right order)."""
    for m in reversed(sort_models_topologically(models)):
        m.drop_table(**drop_table_kwargs)
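
# Sketch (models illustrative): given Blog <- Entry (Entry has a FK to Blog),
#
#     create_model_tables([Entry, Blog])   # creates Blog first, then Entry
#     drop_model_tables([Entry, Blog])     # drops Entry first, then Blog
#
# so foreign-key constraints are always satisfiable during DDL.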
def sort_models_topologically(models):
    """Sort models topologically so that parents will precede children."""
    models = set(models)
    seen = set()
    ordering = []

    def dfs(model):
        if model in models and model not in seen:
            seen.add(model)
            for child_model in model._meta.reverse_relations.values():
                dfs(child_model)
            ordering.append(model)  # parent will follow descendants

    # order models by name and table initially to guarantee a total ordering
    names = lambda m: (m._meta.model_name, m._meta.db_table)
    for m in sorted(models, key=names, reverse=True):
        dfs(m)
    return list(reversed(ordering))
 |
import re
import time
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = [
"YYYY-?MM-?DD",
"YYYY-0MM?-?DD",
"YYYY-MM",
"YYYY-?OOO",
"YY-?MM-?DD",
"YY-?OOO",
"YYYY",
"-YY-?MM",
"-OOO",
"-YY",
"--MM-?DD",
"--MM",
"---DD",
"CC",
"",
]
_iso8601_re = [
tmpl.replace("YYYY", r"(?P<year>\d{4})")
.replace("YY", r"(?P<year>\d\d)")
.replace("MM", r"(?P<month>[01]\d)")
.replace("DD", r"(?P<day>[0123]\d)")
.replace("OOO", r"(?P<ordinal>[0123]\d\d)")
.replace("CC", r"(?P<century>\d\d$)")
+ r"(T?(?P<hour>\d{2}):(?P<minute>\d{2})"
+ r"(:(?P<second>\d{2}))?"
+ r"(\.(?P<fracsecond>\d+))?"
+ r"(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?"
for tmpl in _iso8601_tmpl
]
try:
del tmpl
except NameError:
pass
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
del regex
except NameError:
pass
def _parse_date_iso8601(dateString):
"""Parse a variety of ISO-8601-compatible formats like 20040105"""
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m:
break
if not m:
return
if m.span() == (0, 0):
return
params = m.groupdict()
ordinal = params.get("ordinal", 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get("year", "--")
if not year or year == "--":
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get("month", "-")
if not month or month == "-":
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get("day", 0)
if not day:
# see above
if ordinal:
day = ordinal
elif (
params.get("century", 0) or params.get("year", 0) or params.get("month", 0)
):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if "century" in params:
year = (int(params["century"]) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ["hour", "minute", "second", "tzhour", "tzmin"]:
if not params.get(field, None):
params[field] = 0
hour = int(params.get("hour", 0))
minute = int(params.get("minute", 0))
second = int(float(params.get("second", 0)))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
daylight_savings_flag = -1
tm = [
year,
month,
day,
hour,
minute,
second,
weekday,
ordinal,
daylight_savings_flag,
]
# ISO 8601 time zone adjustments
tz = params.get("tz")
if tz and tz != "Z":
if tz[0] == "-":
tm[3] += int(params.get("tzhour", 0))
tm[4] += int(params.get("tzmin", 0))
elif tz[0] == "+":
tm[3] -= int(params.get("tzhour", 0))
tm[4] -= int(params.get("tzmin", 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tuple(tm)))
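

# A short usage sketch: the parser returns a time.struct_time in local time.
#
#     _parse_date_iso8601('2004-01-05T12:30:00Z')   # full datetime
#     _parse_date_iso8601('20040105')               # compact date form
#
# Strings that fall through every template yield None.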
|
# -*- coding: utf-8 -*-
#
# retask documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 3 14:56:38 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(os.path.abspath("_themes"))
html_theme_path = ["_themes"]
html_theme = "kr"
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.intersphinx",
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.doctest",
"sphinx.ext.coverage",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "retask"
copyright = "2012-2016, Kushal Das"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# pygments_style = 'flask_theme_support.FlaskyStyle'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
html_sidebars = {
"index": ["sidebarintro.html", "sourcelink.html", "searchbox.html"],
"**": [
"sidebarlogo.html",
"localtoc.html",
"relations.html",
"sourcelink.html",
"searchbox.html",
],
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "retaskdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "retask.tex", "retask Documentation", "Kushal Das", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "retask", "retask Documentation", ["Kushal Das"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"retask",
"retask Documentation",
"Kushal Das",
"retask",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
# -*- coding: utf-8 -*-
from nose.tools import assert_equals
from diylisp.interpreter import interpret
from diylisp.types import Environment
def test_gcd():
    """Tests Greatest Common Divisor (GCD).

    This test is intended to run after you have completed the core of the
    language, just to make sure that everything is holding together.
    """
    program = """
        (define gcd
            (lambda (a b)
                (if (eq b 0)
                    a
                    (gcd b (mod a b)))))
    """
    env = Environment()
    interpret(program, env)

    assert_equals("6", interpret("(gcd 108 30)", env))
    assert_equals("1", interpret("(gcd 17 5)", env))
|
from io import StringIO
from pytest import raises
from tests import TestCase
from flask_storage import MockStorage, MockStorageFile, StorageException, FileNotFoundError
# NOTE: the call targets in this file were lost in extraction; the storage
# method names below (save, open, read, url, exists, delete, new_file) are
# reconstructed from the assertions and may differ from the originals.
class TestMockStorage(TestCase):
    def setup_method(self, method):
        TestCase.setup_method(self, method)
        MockStorage._files = {}

    def test_assigns_folder_on_initialization(self):
        storage = MockStorage('uploads')
        assert storage.folder_name == 'uploads'

    def test_saves_key_value_pair_in_dict(self):
        storage = MockStorage()
        storage.save('key', '')
        assert storage.exists('key')

    def test_save_returns_file_object_on_success(self):
        storage = MockStorage()
        obj = storage.save('key', 'value')
        assert obj.name == 'key'

    def test_save_supports_overwrite(self):
        storage = MockStorage()
        storage.save('key', 'value')
        storage.save('key', 'value 2')
        assert len(MockStorage._files) == 1

    def test_reads_file_object_and_saves_in_dict(self):
        storage = MockStorage()
        io = StringIO()
        io.write('file contents')
        storage.save('key', io)
        assert storage.read('key') == 'file contents'

    def test_returns_file_url(self):
        storage = MockStorage('/uploads')
        storage.save('key', '')
        assert storage.url('key') == '/uploads/key'

    def test_supports_directories_in_file_names(self):
        storage = MockStorage()
        storage.save('some_dir/filename.txt', 'something')
        assert storage.read('some_dir/filename.txt') == 'something'

    def test_open_returns_file_object(self):
        storage = MockStorage()
        storage.save('key', '')
        file_ = storage.open('key')
        assert isinstance(file_, MockStorageFile)

    def test_open_raises_exception_for_unknown_file(self):
        storage = MockStorage()
        with raises(FileNotFoundError):
            storage.open('key')

    def test_delete_raises_exception_for_unknown_file(self):
        storage = MockStorage()
        with raises(FileNotFoundError):
            storage.delete('key')

    def test_new_file(self):
        storage = MockStorage()
        assert isinstance(storage.new_file(), MockStorageFile)

    def test_new_file_supports_prefixes(self):
        storage = MockStorage()
        assert storage.new_file(prefix='pics').prefix == 'pics'


class TestMockStorageFile(TestCase):
    def setup_method(self, method):
        TestCase.setup_method(self, method)
        MockStorage._files = {}
        self.storage = MockStorage('/uploads')

    def test_size_returns_the_associated_file_size(self):
        storage = MockStorage('uploads')
        storage.save('key', '123123')
        file_ = storage.open('key')
        assert file_.size == 6

    def test_read_returns_file_contents(self):
        storage = MockStorage('uploads')
        storage.save('key', '123123')
        file_ = storage.open('key')
        assert file_.read() == '123123'

    def test_supports_file_objects_without_name(self):
        storage = MockStorage('uploads')
        file_ = MockStorageFile(storage)
        assert bool(file_) is False  # a file with no name yet is falsy

    def test_returns_file_url(self):
        storage = MockStorage('/uploads')
        file_ = storage.save('key', '123123')
        assert file_.url == '/uploads/key'

    def test_supports_name_attribute(self):
        storage = MockStorage('uploads')
        file_ = MockStorageFile(storage)
        file_.name = 'some_key'
        assert file_.name == 'some_key'

    def test_rename_throws_error(self):
        storage = MockStorage('uploads')
        file_ = MockStorageFile(storage)
        file_.name = 'some_key'
        with raises(StorageException):
            file_.name = 'some_key2'

    def test_supports_save(self):
        file_ = MockStorageFile(self.storage)
        file_.name = 'some_key'
        # NOTE: how the original populated the contents was lost; passing the
        # contents to save() is an inferred reconstruction.
        file_.save('something')
        assert file_.read() == 'something'

    def test_supports_prefixes(self):
        # NOTE: the prefix keyword argument is inferred from the assertion
        file_ = MockStorageFile(self.storage, prefix='pics')
        file_.name = 'some_key'
        assert file_.name == 'pics/some_key'

    def test_supports_last_modified(self):
        file_ = MockStorageFile(self.storage)
        file_.name = 'some_key'
        file_.last_modified  # accessing the property should not raise

    def test_equality_operator(self):
        file_ = MockStorageFile(self.storage)
        file_.name = 'some_key'
        file2 = MockStorageFile(self.storage)
        file2.name = 'some_key'
        assert file_ == file2
        # NOTE: the original statement here was lost; re-pointing file2 at a
        # differently named file is an inferred reconstruction.
        file2 = MockStorageFile(self.storage)
        file2.name = 'some other key'
        assert file_ != file2

    def test_equality_operator_with_none_values(self):
        file_ = MockStorageFile(self.storage)
        file_.name = 'some_key'
        none = None
        assert not (file_ == none)
        assert file_ != none
 |
import re
from .utils import validator
slug_regex = re.compile(r"^[-a-zA-Z0-9_]+$")
@validator
def slug(value):
    """
    Validate whether or not given value is valid slug.

    Valid slug can contain only alphanumeric characters, hyphens and
    underscores.

    Examples::

        >>> slug('my.slug')
        ValidationFailure(func=slug, args={'value': 'my.slug'})

        >>> slug('my-slug-2134')
        True

    .. versionadded:: 0.6

    :param value: value to validate
    """
    return slug_regex.match(value)
|
from decimal import Decimal
import six
import sqlalchemy as sa
from wtforms_components import SelectField
from tests import ModelFormTestCase
class MultiDict(dict):
    def getlist(self, key):
        return [self[key]]
# NOTE: the keyword arguments to self.init() below were lost in extraction;
# the column types and options are inferred from the test names and may not
# match the original test setup exactly.
class TestSelectFieldDefaultValue(ModelFormTestCase):
    def test_option_selected_by_field_default_value(self):
        choices = [('1', '1'), ('2', '2')]
        self.init(type_=sa.Integer, default='1', info={'choices': choices})
        form = self.form_class(MultiDict({'test_column': '2'}))
        assert '<option selected value="2">2</option>' in str(form.test_column)


class TestSelectFieldCoerce(ModelFormTestCase):
    def test_integer_coerces_values_to_integers(self):
        choices = [('1', '1'), ('2', '2')]
        self.init(type_=sa.Integer, info={'choices': choices})
        form = self.form_class(MultiDict({'test_column': '2'}))
        assert form.test_column.data == 2

    def test_nullable_integer_coerces_values_to_integers(self):
        choices = [('1', '1'), ('2', '2')]
        self.init(type_=sa.Integer, nullable=True, info={'choices': choices})
        form = self.form_class(MultiDict({'test_column': '2'}))
        assert form.test_column.data == 2

    def test_integer_coerces_empty_strings_to_nulls(self):
        choices = [('1', '1'), ('2', '2')]
        self.init(type_=sa.Integer, nullable=True, info={'choices': choices})
        form = self.form_class(MultiDict({'test_column': ''}))
        assert form.test_column.data is None

    def test_big_integer_coerces_values_to_integers(self):
        choices = [('1', '1'), ('2', '2')]
        self.init(type_=sa.BigInteger, info={'choices': choices})
        self.assert_type('test_column', SelectField)
        form = self.form_class(MultiDict({'test_column': '2'}))
        assert form.test_column.data == 2

    def test_small_integer_coerces_values_to_integers(self):
        choices = [('1', '1'), ('2', '2')]
        self.init(type_=sa.SmallInteger, info={'choices': choices})
        form = self.form_class(MultiDict({'test_column': '2'}))
        assert form.test_column.data == 2

    def test_numeric_coerces_values_to_decimals(self):
        choices = [('1.0', '1.0'), ('2.0', '2.0')]
        self.init(type_=sa.Numeric, info={'choices': choices})
        form = self.form_class(MultiDict({'test_column': '2.0'}))
        assert form.test_column.data == Decimal('2.0')

    def test_float_coerces_values_to_floats(self):
        choices = [('1.0', '1.0'), ('2.0', '2.0')]
        self.init(type_=sa.Float, info={'choices': choices})
        form = self.form_class(MultiDict({'test_column': '2.0'}))
        assert form.test_column.data == 2.0

    def test_unicode_coerces_values_to_unicode_strings(self):
        choices = [('1.0', '1.0'), ('2.0', '2.0')]
        self.init(type_=sa.Unicode(255), info={'choices': choices})
        form = self.form_class(MultiDict({'test_column': '2.0'}))
        assert form.test_column.data == '2.0'
        assert isinstance(form.test_column.data, six.text_type)

    def test_unicode_text_coerces_values_to_unicode_strings(self):
        choices = [('1.0', '1.0'), ('2.0', '2.0')]
        self.init(type_=sa.UnicodeText, info={'choices': choices})
        form = self.form_class(MultiDict({'test_column': '2.0'}))
        assert form.test_column.data == '2.0'
        assert isinstance(form.test_column.data, six.text_type)
 |
from datetime import date, datetime, time
from wtforms import Form
from wtforms.validators import DataRequired
from tests import MultiDict, SimpleFieldTestCase
from wtforms_components.fields import SplitDateTimeField
class Obj(object):
test_field = None
# NOTE: the ``init_form`` keyword arguments below were stripped in the
# source; the validator/format/base-form arguments are assumptions that fit
# the test names.
class TestSplitDateTimeField(SimpleFieldTestCase):
    field_class = SplitDateTimeField

    def test_assigns_required_to_date(self):
        form_class = self.init_form(validators=[DataRequired()])
        form = form_class()
        assert str(form.test_field.date) == (
            '<input id="test_field-date" name="test_field-date" '
            'required type="date" value="">'
        )

    def test_renders_date_field(self):
        form_class = self.init_form()
        form = form_class()
        assert str(form.test_field.date) == (
            '<input id="test_field-date" name="test_field-date" '
            'type="date" value="">'
        )

    def test_assigns_required_to_time(self):
        form_class = self.init_form(validators=[DataRequired()])
        form = form_class()
        assert str(form.test_field.time) == (
            '<input id="test_field-time" name="test_field-time" '
            'required type="time" value="">'
        )

    def test_renders_time_field(self):
        form_class = self.init_form()
        form = form_class()
        assert str(form.test_field.time) == (
            '<input id="test_field-time" name="test_field-time" '
            'type="time" value="">'
        )

    def test_processes_values(self):
        form_class = self.init_form()
        form = form_class(MultiDict({
            'test_field-date': '2000-3-2',
            'test_field-time': '19:10',
        }))
        assert form.test_field.data['date'] == date(2000, 3, 2)
        assert form.test_field.data['time'] == time(19, 10)

    def test_populates_object(self):
        form_class = self.init_form()
        form = form_class(MultiDict({
            'test_field-date': '2000-3-2',
            'test_field-time': '19:10',
        }))
        obj = Obj()
        form.populate_obj(obj)
        assert obj.test_field == datetime(2000, 3, 2, 19, 10)

    def test_processes_values_when_format_is_set(self):
        form_class = self.init_form(date_format='%d.%m.%Y',
                                    time_format='%H.%M')
        form = form_class(MultiDict({
            'test_field-date': '2.3.2000',
            'test_field-time': '19.10',
        }))
        assert form.test_field.data['date'] == date(2000, 3, 2)
        assert form.test_field.data['time'] == time(19, 10)

    def test_default_base_form(self):
        form_class = self.init_form()
        form = form_class()
        assert form.test_field.form.__class__.__bases__ == (Form,)

    def test_custom_base_form(self):
        class A(Form):
            pass

        form_class = self.init_form(base_form_class=A)
        form = form_class()
        assert form.test_field.form.__class__.__bases__ == (A,)

    def test_custom_base_form_with_two_instances(self):
        class A(Form):
            pass

        form_class = self.init_form(base_form_class=A)
        form = form_class()
        form2 = form_class()
        assert form.test_field.form.__class__.__bases__ == (A,)
        assert form2.test_field.form.__class__.__bases__ == (A,) |
import six
from pytest import mark
from wtforms import Form, IntegerField, SelectMultipleField, TextField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import IPAddress
sa = None
try:
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
except ImportError:
    # sqlalchemy is optional here; the skipif marker below handles its
    # absence (the bare ``raise`` in the source looks like an artifact).
    pass


# NOTE: call targets and keyword arguments below were stripped in the
# source; the test data values and field arguments are assumptions, and
# ``Form.from_json`` is assumed to be provided by wtforms-json elsewhere in
# the suite.
class TestFieldTypeCoercion(object):
    def test_integer_to_unicode_coercion(self):
        class NetworkForm(Form):
            address = TextField('Address', [IPAddress()])

        network = dict(address=123)
        form = NetworkForm.from_json(network)
        assert not form.validate()

    def test_integer_coercion(self):
        class UserForm(Form):
            age = IntegerField('age')

        network = dict(age=14)
        form = UserForm.from_json(network)
        assert form.validate()


class FooForm(Form):
    # The field arguments were stripped in the source; these are assumed.
    items = SelectMultipleField(coerce=int,
                                choices=[(i, i) for i in range(10)])


@mark.skipif('sa is None or six.PY3')
class TestQuerySelectField(object):
    def setup_method(self, method):
        self.Base = declarative_base()

        class Team(self.Base):
            __tablename__ = 'team'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))

        class Match(self.Base):
            __tablename__ = 'match'
            id = sa.Column(sa.Integer, primary_key=True)
            name = sa.Column(sa.Unicode(255))
            team_id = sa.Column(sa.BigInteger, sa.ForeignKey(Team.id))
            team = sa.orm.relationship(Team)

        self.Team = Team
        self.Match = Match

    def test_integer_coercion(self):
        class MatchForm(Form):
            team = QuerySelectField()  # query arguments were stripped

        data = {'team': 1}
        form = MatchForm.from_json(data)
        assert form.validate()
        assert form.team.data


class TestSelectMultipleField(object):
    def test_from_json(self):
        data = {'items': [1, 3]}
        form = FooForm.from_json(data)
        assert form.validate()
        assert form.items.data |
import logging
from utils import _pprint
from pandas import Series, concat
from scipy.stats import norm
import random
import hashlib
import math
import copy
import numpy as np
from sklearn import cross_validation, ensemble, linear_model
class Selector(object):
def __init__(self, verbose=False):
self.verbose = verbose
def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, _pprint(self.__dict__))
# NOTE: call targets and keyword arguments below were stripped in the
# source; the sklearn/pandas/scipy calls are best-effort reconstructions
# and should be read as assumptions.
class RandomForestSelector(Selector):

    def __init__(self, n=100, thresh=None, min_=True, classifier=True,
                 seed=2345, *args, **kwargs):
        self.n = n
        self.min = min_
        self.thresh = thresh
        self.seed = seed
        self.classifier = classifier
        super(RandomForestSelector, self).__init__(*args, **kwargs)

    def select(self, x, y, n_keep):
        cls = ensemble.RandomForestRegressor
        if self.classifier:
            cls = ensemble.RandomForestClassifier
        # Constructor kwargs were stripped in the source; these are assumed.
        rf = cls(n_estimators=self.n, random_state=self.seed, n_jobs=-1)
        rf.fit(x.values, y.values)
        importances = rf.feature_importances_
        imps = sorted(zip(importances, x.columns), reverse=True)
        for i, x in enumerate(imps):
            imp, f = x
            logging.debug('%d\t%0.4f\t%s' % (i, imp, f))
        if self.thresh:
            imps = [t for t in imps if t[0] > self.thresh]
        return [t[1] for t in imps[:n_keep]]

    def sets_cv(self, x, y):
        totals = [0] * len(x.columns)
        if self.min:
            totals = [1000] * len(x.columns)
        i = 0
        # The cross-validation call was stripped; a KFold split is assumed.
        for train, test in cross_validation.KFold(len(y), 4):
            i += 1
            logging.debug('computing random forest importances, fold %d' % i)
            cls = ensemble.RandomForestRegressor
            if self.classifier:
                cls = ensemble.RandomForestClassifier
            rf = cls(n_estimators=self.n, random_state=self.seed, n_jobs=-1)
            rf.fit(x.values[train], y.values[train])
            importances = rf.feature_importances_
            if self.min:
                # Keep the worst (minimum) importance seen across folds.
                totals = [min(imp, t) for imp, t in zip(importances, totals)]
            else:
                # Accumulate importances across folds.
                totals = [imp + t for imp, t in zip(importances, totals)]
        imps = sorted(zip(totals, x.columns), reverse=True)
        for i, x in enumerate(imps):
            imp, f = x
            logging.debug('%d\t%0.4f\t%s' % (i, imp, f))
        if self.thresh:
            imps = [t for t in imps if t[0] > self.thresh]
        # Nested feature sets of increasing size.
        sets = [[t[1] for t in imps[:i + 1]] for i in range(len(imps))]
        return sets


class StepwiseForwardSelector(Selector):
    def __init__(self, n=100, min_=True):
        self.n = n
        self.min = min_

    def sets(self, x, y):
        lm = linear_model.LinearRegression()
        remaining = x.columns
        curr = []
        logging.info('stepwise forward')
        for i in range(self.n):
            if i % 10 == 0:
                logging.debug('stepwise forward, round %d' % i)
            coefs = []
            for col in remaining:
                cols = curr + [col]
                fcf = 1000000000000.0
                for train, test in cross_validation.KFold(len(y), 4):
                    lm.fit(x[cols].values[train], y.values[train])
                    cf = lm.coef_[-1]
                    if np.isnan(cf) or abs(cf) < 1e-07:
                        cf = 0
                    cf = abs(cf)
                    # Keep the worst (minimum) coefficient across folds.
                    fcf = min(cf, fcf)
                coefs.append(fcf)
            coef, col = max(zip(coefs, remaining))
            logging.debug('adding feature %s (%s)' % (col, coef))
            curr.append(col)
            remaining = remaining.drop([col])
            yield list(curr)


class LassoPathSelector(Selector):

    def sets(self, x, y, n_keep):
        alphas, active, coef_path = linear_model.lars_path(x.values, y.values)
        sets = []
        seen = set()
        logging.debug(coef_path)
        for coefs in coef_path.T:
            cols = [x.columns[i] for i in range(len(coefs))
                    if coefs[i] > 1e-09]
            if len(cols) >= n_keep:
                return cols
        return cols


class BinaryFeatureSelector(Selector):
    """Only for classification and binary(-able) features"""

    def __init__(self, type='bns', *args, **kwargs):
        """type in ('ig', 'bns', 'acc')
        see: jmlr.csail.mit.edu/papers/volume3/forman03a/forman03a.pdf"""
        self.type = type
        super(BinaryFeatureSelector, self).__init__(*args, **kwargs)

    def select(self, x, y, n_keep):
        cnts = y.value_counts()
        logging.info('Computing binary feature scores for %d features...'
                     % len(x.columns))
        if len(cnts) > 2:
            scores = self.round_robin(x, y, n_keep)
        else:
            scores = self.rank(x, y)
            scores = [s[1] for s in scores]
        logging.debug(scores[:200])
        return scores[:n_keep]

    def round_robin(self, x, y, n_keep):
        """Ensures all classes get representative features, not just those
        with strong features"""
        vals = y.unique()
        scores = {}
        for cls in vals:
            scores[cls] = self.rank(x, (y == cls).astype('Int64'))
        keepers = set()
        while len(keepers) < n_keep:
            for cls in vals:
                keepers.add(scores[cls].pop(0)[1])
        return list(keepers)

    def rank(self, x, y):
        cnts = y.value_counts()
        scores = []

        def e(x, y):
            if y < 1e-09:
                yx = 0
                lyx = 0
            else:
                yx = y / (x + y)
                lyx = math.log(y / (x + y))
            if x < 1e-09:
                xy = 0
                lxy = 0
            else:
                xy = x / (x + y)
                lxy = math.log(x / (x + y))
            return -xy * lxy - yx * lyx

        for c in x.columns:
            true_positives = float(np.sum(np.logical_and(x[c], y)))
            false_positives = float(np.sum(np.logical_and(x[c],
                                                          np.logical_not(y))))
            pos = float(cnts[1])
            neg = float(cnts[0])
            n = pos + neg
            if self.type == 'bns':
                tpr = max(0.0005, true_positives / pos)
                fpr = max(0.0005, false_positives / neg)
                tpr = min(0.9995, tpr)
                fpr = min(0.9995, fpr)
                score = abs(norm.ppf(tpr) - norm.ppf(fpr))
            elif self.type == 'acc':
                score = abs(tpr - fpr)
            elif self.type == 'ig':
                score = e(pos, neg) - (
                    ((true_positives + false_positives) / n)
                    * e(true_positives, false_positives)
                    + (1 - (true_positives + false_positives) / n)
                    * e(pos - true_positives, neg - false_positives))
            scores.append((score, c))
        scores.sort(reverse=True)
        return scores


class InformationGainSelector(Selector):
    """Only for binary classification"""

    def sets(self, x, y, n_keep):
        cnts = y.value_counts()
        assert len(cnts) == 2
        logging.info('Computing IG scores...')
        scores = []
        for c in x.columns:
            true_positives = np.sum(np.logical_and(x[c], y))
            false_positives = np.sum(np.logical_and(x[c],
                                                    np.logical_not(y)))
            score = abs(float(true_positives) / cnts[1]
                        - float(false_positives) / cnts[0])
            scores.append((score, c))
        scores.sort(reverse=True)
        return [s[1] for s in scores[:n_keep]] |
'Manual clustering GUI component.'
from collections import OrderedDict
from functools import partial
import logging
import numpy as np
from ._history import GlobalHistory
from ._utils import create_cluster_meta
from .clustering import Clustering
from phy.gui.qt import _show_box
from phy.gui.actions import Actions
from phy.gui.widgets import Table
logger = logging.getLogger(__name__)

# NOTE: call targets and keyword arguments in this module were stripped in
# the source; the receiver and method names below are best-effort
# reconstructions of the phy API and should be read as assumptions.


def _process_ups(ups):
    """This function processes the UpdateInfo instances of the two
    undo stacks (clustering and cluster metadata) and concatenates them
    into a single UpdateInfo instance."""
    if len(ups) == 0:
        return
    elif len(ups) == 1:
        return ups[0]
    elif len(ups) == 2:
        up = ups[0]
        up.update(ups[1])
        return up
    else:
        raise NotImplementedError()


class ClusterView(Table):
    def __init__(self):
        super(ClusterView, self).__init__()
        self.add_styles("""
            table tr[data-good='true'] {
                color: #86D16D;
            }
            """)

    @property
    def state(self):
        return {'sort_by': self.current_sort}

    def set_state(self, state):
        sort_by, order = state.get('sort_by', (None, None))
        if sort_by:
            self.sort_by(sort_by, order)


class ManualClustering(object):
    """Component that brings manual clustering facilities to a GUI:

    * Clustering instance: merge, split, undo, redo
    * ClusterMeta instance: change cluster metadata (e.g. group)
    * Selection
    * Many manual clustering-related actions, snippets, shortcuts, etc.

    Parameters
    ----------

    spike_clusters : ndarray
    spikes_per_cluster : function `cluster_id -> spike_ids`
    cluster_groups : dictionary
    shortcuts : dict
    quality: func
    similarity: func

    GUI events
    ----------

    When this component is attached to a GUI, the GUI emits the following
    events:

    select(cluster_ids)
        when clusters are selected
    cluster(up)
        when a merge or split happens
    request_save(spike_clusters, cluster_groups)
        when a save is requested by the user

    """

    default_shortcuts = {
        'merge': 'g',
        'split': 'k',
        # Group.
        'move_best_to_noise': 'alt+n',
        'move_best_to_mua': 'alt+m',
        'move_best_to_good': 'alt+g',
        'move_similar_to_noise': 'ctrl+n',
        'move_similar_to_mua': 'ctrl+m',
        'move_similar_to_good': 'ctrl+g',
        'move_all_to_noise': 'ctrl+alt+n',
        'move_all_to_mua': 'ctrl+alt+m',
        'move_all_to_good': 'ctrl+alt+g',
        # Wizard.
        'reset': 'ctrl+alt+space',
        'next': 'space',
        'previous': 'shift+space',
        'next_best': 'down',
        'previous_best': 'up',
        # Misc.
        'save': 'Save',
        'show_shortcuts': 'Save',
        'undo': 'Undo',
        'redo': ('ctrl+shift+z', 'ctrl+y'),
    }

    def __init__(self,
                 spike_clusters,
                 spikes_per_cluster,
                 cluster_groups=None,
                 best_channel=None,
                 shortcuts=None,
                 quality=None,
                 similarity=None,
                 new_cluster_id=None,
                 ):
        self.gui = None
        self.quality = quality
        self.similarity = similarity
        self.best_channel = best_channel
        assert hasattr(spikes_per_cluster, '__call__')
        self.spikes_per_cluster = spikes_per_cluster

        # Load default shortcuts, and override them with the user shortcuts.
        self.shortcuts = self.default_shortcuts.copy()
        self.shortcuts.update(shortcuts or {})

        # Create Clustering and ClusterMeta.
        self.clustering = Clustering(spike_clusters,
                                     new_cluster_id=new_cluster_id)
        self.cluster_groups = cluster_groups or {}
        self.cluster_meta = create_cluster_meta(self.cluster_groups)
        self._global_history = GlobalHistory(process_ups=_process_ups)

        self._register_logging()
        self._create_cluster_views()
        self._add_default_columns()

        self._best = None
        self._current_similarity_values = {}

    def _register_logging(self):
        # Log the actions.
        @self.clustering.connect
        def on_cluster(up):
            if up.history:
                logger.info(up.history.title() + ' cluster assign.')
            elif up.description == 'merge':
                logger.info('Merge clusters %s to %s.',
                            ', '.join(map(str, up.deleted)),
                            up.added[0])
            else:
                logger.info('Assigned %s spikes.', len(up.spike_ids))
            if self.gui:
                self.gui.emit('cluster', up)

        @self.cluster_meta.connect  # noqa
        def on_cluster(up):
            for clu in up.metadata_changed:
                self.cluster_groups[clu] = up.metadata_value
            if up.history:
                logger.info(up.history.title() + ' move.')
            else:
                logger.info('Move clusters %s to %s.',
                            ', '.join(map(str, up.metadata_changed)),
                            up.metadata_value)
            if self.gui:
                self.gui.emit('cluster', up)

    def _add_default_columns(self):
        @self.add_column(show=False)
        def n_spikes(cluster_id):
            return len(self.spikes_per_cluster(cluster_id))

        self.add_column(self.best_channel)

        @self.add_column(show=False)
        def skip(cluster_id):
            """Whether to skip that cluster."""
            return (self.cluster_meta.get('group', cluster_id)
                    in ('noise', 'mua'))

        @self.add_column(show=False)
        def good(cluster_id):
            """Good column for color."""
            return self.cluster_meta.get('group', cluster_id) == 'good'

        def similarity(cluster_id):
            return self._current_similarity_values.get(cluster_id, 0)

        if self.similarity:
            self.similarity_view.add_column(similarity)

    def _create_actions(self, gui):
        self.actions = Actions(gui, default_shortcuts=self.shortcuts)

        # Selection.
        self.actions.add(self.select)
        self.actions.separator()

        # Clustering.
        self.actions.add(self.merge)
        self.actions.add(self.split)
        self.actions.separator()

        # Move.
        self.actions.add(self.move)
        for group in ('noise', 'mua', 'good'):
            self.actions.add(partial(self.move_best, group))
            self.actions.add(partial(self.move_similar, group))
            self.actions.add(partial(self.move_all, group))
        self.actions.separator()

        # Others.
        self.actions.add(self.undo)
        self.actions.add(self.redo)
        self.actions.add(self.save)
        self.actions.add(self.reset)

        # Wizard.
        self.actions.add(self.__next__)
        self.actions.add(self.previous)
        self.actions.add(self.next_best)
        self.actions.add(self.previous_best)
        self.actions.separator()

    def _create_cluster_views(self):
        self.cluster_view = ClusterView()
        self.cluster_view.build()

        self.similarity_view = ClusterView()
        self.similarity_view.build()

        @self.cluster_view.connect_
        def on_select(cluster_ids):
            self._emit_select(cluster_ids)
            # Update the similarity view when the cluster view selection
            # changes.
            self._update_similarity_view()

        @self.similarity_view.connect_  # noqa
        def on_select(cluster_ids):
            # Select the clusters from both views.
            cluster_ids = self.cluster_view.selected + cluster_ids
            self._emit_select(cluster_ids)

        # Save the current selection when an action occurs.
        def on_request_undo_state(up):
            return {'selection': (self.cluster_view.selected,
                                  self.similarity_view.selected)}

        self.clustering.connect(on_request_undo_state)
        self.cluster_meta.connect(on_request_undo_state)

        self._update_cluster_view()

    def _update_cluster_view(self):
        """Initialize the cluster view with cluster data."""
        logger.log(5, 'Update the cluster view.')
        cluster_ids = [int(c) for c in self.clustering.cluster_ids]
        self.cluster_view.set_rows(cluster_ids)

    def _update_similarity_view(self):
        """Update the similarity view with matches for the specified
        clusters."""
        if not self.similarity:
            return
        selection = self.cluster_view.selected
        if not len(selection):
            return
        cluster_id = selection[0]
        cluster_ids = self.clustering.cluster_ids
        self._best = cluster_id
        logger.log(5, 'Update the similarity view.')
        # The similarity function returns a list of (cluster, score) pairs.
        similarities = self.similarity(cluster_id)
        clusters_sim = OrderedDict([(int(cl), s) for (cl, s) in similarities])
        # Only keep clusters that still exist.
        clusters = [c for c in list(clusters_sim.keys()) if c in cluster_ids]
        self._current_similarity_values = clusters_sim
        self.similarity_view.set_rows([c for c in clusters
                                       if c not in selection])

    def _emit_select(self, cluster_ids):
        """Choose spikes from the specified clusters and emit the
        `select` event on the GUI."""
        logger.debug('Select clusters: %s.', ', '.join(map(str, cluster_ids)))
        if self.gui:
            self.gui.emit('select', cluster_ids)

    def add_column(self, func=None, name=None, show=True, default=False):
        if func is None:
            return lambda f: self.add_column(f, name=name, show=show,
                                             default=default)
        name = name or func.__name__
        assert name
        self.cluster_view.add_column(func, name=name, show=show)
        self.similarity_view.add_column(func, name=name, show=show)
        if default:
            self.set_default_sort(name)

    def set_default_sort(self, name, sort_dir='desc'):
        assert name
        logger.debug('Set default sort `%s` %s.', name, sort_dir)
        # Set the default sort.
        self.cluster_view.set_default_sort(name, sort_dir)
        # Reset the cluster view.
        self._update_cluster_view()
        # Sort by the default sort.
        self.cluster_view.sort_by(name, sort_dir)

    def on_cluster(self, up):
        """Update the cluster views after clustering actions."""
        similar = self.similarity_view.selected

        # Reinitialize the cluster view if clusters have changed.
        if up.added:
            self._update_cluster_view()

        # Select all new clusters in view 1.
        if up.history == 'undo':
            # Select the clusters that were selected before the undone
            # action.
            clusters_0, clusters_1 = up.undo_state[0]['selection']
            self.cluster_view.select(clusters_0)
            self.similarity_view.select(clusters_1)
        elif up.added:
            if up.description == 'assign':
                added = up.added[::-1]
            else:
                added = up.added
            self.select(added)
            if similar:
                self.similarity_view.next()
        elif up.metadata_changed:
            # Select the next cluster in the similarity view if all moved
            # clusters are in that view.
            if set(up.metadata_changed) <= set(similar):
                selected = self.similarity_view.selected
                self._update_similarity_view()
                self.similarity_view.select(selected)
                self.similarity_view.next()
            # Otherwise, select the next cluster in the cluster view.
            else:
                selected = self.cluster_view.selected
                self._update_cluster_view()
                self.cluster_view.select(selected)
                self.cluster_view.next()
                if similar:
                    self.similarity_view.next()

    def attach(self, gui):
        self.gui = gui

        # Create the actions.
        self._create_actions(gui)

        # Add the cluster view.
        gui.add_view(self.cluster_view)
        # Add the quality column.
        if self.quality:
            self.add_column(self.quality)
        self._update_cluster_view()

        # Add the similarity view.
        if self.similarity:
            gui.add_view(self.similarity_view)

        # Restore the state of the cluster view.
        cv = self.cluster_view
        cv.set_state(gui.state.get_view_state(cv))

        @gui.connect_
        def on_close():
            # Save the state of the cluster view.
            gui.state.update_view_state(cv, cv.state)
            self.actions.remove_all()

        # Update the cluster views and selection when a cluster event occurs.
        gui.connect_(self.on_cluster)
        return self

    def select(self, *cluster_ids):
        """Select a list of clusters."""
        # Clusters may be passed as a list or as separate arguments.
        if cluster_ids and isinstance(cluster_ids[0], (tuple, list)):
            cluster_ids = list(cluster_ids[0]) + list(cluster_ids[1:])
        self.cluster_view.select(cluster_ids)

    @property
    def selected(self):
        return self.cluster_view.selected + self.similarity_view.selected

    def merge(self, cluster_ids=None):
        """Merge the selected clusters."""
        if cluster_ids is None:
            cluster_ids = self.selected
        if len(cluster_ids or []) <= 1:
            return
        self.clustering.merge(cluster_ids)
        self._global_history.action(self.clustering)

    def split(self, spike_ids=None, spike_clusters_rel=0):
        """Split the selected spikes."""
        if spike_ids is None:
            spike_ids = self.gui.emit('request_split')
            spike_ids = np.concatenate(spike_ids).astype(np.int64)
        if len(spike_ids) == 0:
            msg = ('You first need to select spikes in the feature '
                   'view with a few Ctrl+Click around the spikes '
                   'that you want to split.')
            _show_box(msg)  # the original wrapped msg in a second, stripped call
            return
        self.clustering.split(spike_ids)
        self._global_history.action(self.clustering)

    def move(self, cluster_ids, group):
        """Move clusters to a group."""
        if len(cluster_ids) == 0:
            return
        self.cluster_meta.set('group', cluster_ids, group)
        self._global_history.action(self.cluster_meta)

    def move_best(self, group=None):
        """Move all selected best clusters to a group."""
        self.move(self.cluster_view.selected, group)

    def move_similar(self, group=None):
        """Move all selected similar clusters to a group."""
        self.move(self.similarity_view.selected, group)

    def move_all(self, group=None):
        """Move all selected clusters to a group."""
        self.move(self.selected, group)

    def reset(self):
        """Reset the wizard."""
        self._update_cluster_view()
        self.cluster_view.next()

    def next_best(self):
        """Select the next best cluster."""
        self.cluster_view.next()

    def previous_best(self):
        """Select the previous best cluster."""
        self.cluster_view.previous()

    def __next__(self):
        """Select the next cluster."""
        if not self.selected:
            self.cluster_view.next()
        else:
            self.similarity_view.next()

    def previous(self):
        """Select the previous cluster."""
        self.similarity_view.previous()

    def undo(self):
        """Undo the last action."""
        self._global_history.undo()

    def redo(self):
        """Undo the last undone action."""
        self._global_history.redo()

    def save(self):
        """Save the manual clustering back to disk."""
        spike_clusters = self.clustering.spike_clusters
        groups = {c: self.cluster_meta.get('group', c) or 'unsorted'
                  for c in self.clustering.cluster_ids}
        self.gui.emit('request_save', spike_clusters, groups) |
'Tests of array utility functions.'
import os.path as op
import numpy as np
from pytest import raises
from ..array import (_unique, _normalize, _index_of, _in_polygon,
                     _spikes_in_clusters, _spikes_per_cluster,
                     _flatten_per_cluster, _get_data_lim, select_spikes,
                     Selector, chunk_bounds, regular_subset, excerpts,
                     data_chunk, grouped_mean, get_excerpts,
                     _concatenate_virtual_arrays, _range_from_slice,
                     _pad, _get_padded, read_array, write_array)
from phy.utils._types import _as_array
from phy.utils.testing import _assert_equal as ae
from ..mock import artificial_spike_clusters
# NOTE: several call targets and keyword arguments in this module were
# stripped in the source; the reconstructions below follow phy's array API
# and the expected values visible in the assertions.
def test_range_from_slice():
    """Test '_range_from_slice'."""

    class _SliceTest(object):
        """Utility class to make it more convenient to test slice objects."""
        def __init__(self, **kwargs):
            self._kwargs = kwargs

        def __getitem__(self, item):
            if isinstance(item, slice):
                return _range_from_slice(item, **self._kwargs)

    with raises(ValueError):
        _SliceTest()[:]
    with raises(ValueError):
        _SliceTest()[1:]
    ae(_SliceTest()[:5], [0, 1, 2, 3, 4])
    ae(_SliceTest()[1:5], [1, 2, 3, 4])
    with raises(ValueError):
        _SliceTest()[::2]
    with raises(ValueError):
        _SliceTest()[1::2]
    ae(_SliceTest()[1:5:2], [1, 3])
    with raises(ValueError):
        _SliceTest(start=0)[:]
    with raises(ValueError):
        _SliceTest(start=1)[:]
    with raises(ValueError):
        _SliceTest(step=2)[:]
    ae(_SliceTest(stop=5)[:], [0, 1, 2, 3, 4])
    ae(_SliceTest(start=1, stop=5)[:], [1, 2, 3, 4])
    ae(_SliceTest(stop=5)[1:], [1, 2, 3, 4])
    ae(_SliceTest(start=1)[:5], [1, 2, 3, 4])
    ae(_SliceTest(start=1, step=2)[:5], [1, 3])
    ae(_SliceTest(start=1)[:5:2], [1, 3])
    ae(_SliceTest(length=5)[:], [0, 1, 2, 3, 4])
    with raises(ValueError):
        _SliceTest(length=5)[:3]
    ae(_SliceTest(length=5)[:10], [0, 1, 2, 3, 4])
    ae(_SliceTest(length=5)[:5], [0, 1, 2, 3, 4])
    ae(_SliceTest(start=1, length=5)[:], [1, 2, 3, 4, 5])
    ae(_SliceTest(start=1, length=5)[:6], [1, 2, 3, 4, 5])
    with raises(ValueError):
        _SliceTest(start=1, length=5)[:4]
    ae(_SliceTest(start=1, step=2, stop=5)[:], [1, 3])
    ae(_SliceTest(start=1, stop=5)[::2], [1, 3])
    ae(_SliceTest(stop=5)[1::2], [1, 3])


def test_pad():
    arr = np.random.rand(10, 3)

    ae(_pad(arr, 0, 'right'), arr[:0, :])
    ae(_pad(arr, 3, 'right'), arr[:3, :])
    ae(_pad(arr, 9), arr[:9, :])
    ae(_pad(arr, 10), arr)

    ae(_pad(arr, 12, 'right')[:10, :], arr)
    ae(_pad(arr, 12)[10:, :], np.zeros((2, 3)))

    ae(_pad(arr, 0, 'left'), arr[:0, :])
    ae(_pad(arr, 3, 'left'), arr[7:, :])
    ae(_pad(arr, 9, 'left'), arr[1:, :])
    ae(_pad(arr, 10, 'left'), arr)

    ae(_pad(arr, 12, 'left')[2:, :], arr)
    ae(_pad(arr, 12, 'left')[:2, :], np.zeros((2, 3)))

    with raises(ValueError):
        _pad(arr, -1)


def test_get_padded():
    arr = np.array([1, 2, 3])[:, np.newaxis]

    with raises(RuntimeError):
        ae(_get_padded(arr, -2, 5).ravel(), [1, 2, 3, 0, 0])
    ae(_get_padded(arr, 1, 2).ravel(), [2])
    ae(_get_padded(arr, 0, 5).ravel(), [1, 2, 3, 0, 0])
    ae(_get_padded(arr, -2, 3).ravel(), [0, 0, 1, 2, 3])


def test_get_data_lim():
    arr = np.random.rand(10, 5)
    assert 0 < _get_data_lim(arr) < 1
    assert 0 < _get_data_lim(arr, 2) < 1


def test_unique():
    """Test _unique() function"""
    _unique([])

    n_spikes = 300
    n_clusters = 3
    spike_clusters = artificial_spike_clusters(n_spikes, n_clusters)
    ae(_unique(spike_clusters), np.arange(n_clusters))


def test_normalize():
    """Test _normalize() function."""

    n_channels = 10
    positions = 1 + 2 * np.random.randn(n_channels, 2)

    # Keep ratio is False.
    positions_n = _normalize(positions)

    x_min, y_min = positions_n.min(axis=0)
    x_max, y_max = positions_n.max(axis=0)

    assert np.allclose(x_min, 0.)
    assert np.allclose(x_max, 1.)
    assert np.allclose(y_min, 0.)
    assert np.allclose(y_max, 1.)

    # Keep ratio is True.
    positions_n = _normalize(positions, keep_ratio=True)

    x_min, y_min = positions_n.min(axis=0)
    x_max, y_max = positions_n.max(axis=0)

    assert np.allclose(min(x_min, y_min), 0.)
    assert np.allclose(max(x_max, y_max), 1.)
    assert np.allclose(x_min + x_max, 1)
    assert np.allclose(y_min + y_max, 1)


def test_index_of():
    """Test _index_of."""
    arr = [36, 42, 42, 36, 36, 2, 42]
    lookup = np.unique(arr)
    ae(_index_of(arr, lookup), [1, 2, 2, 1, 1, 0, 2])


def test_as_array():
    ae(_as_array(3), [3])
    ae(_as_array([3]), [3])
    ae(_as_array(3.), [3.])
    ae(_as_array([3.]), [3.])

    with raises(ValueError):
        _as_array(map)


def test_in_polygon():
    polygon = [[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]
    points = np.random.uniform(size=(100, 2), low=-1, high=1)
    idx_expected = np.nonzero((points[:, 0] > 0) &
                              (points[:, 1] > 0) &
                              (points[:, 0] < 1) &
                              (points[:, 1] < 1))[0]
    idx = np.nonzero(_in_polygon(points, polygon))[0]
    ae(idx, idx_expected)


def test_read_write(tempdir):
    arr = np.arange(10).astype(np.float32)

    path = op.join(tempdir, 'test.npy')

    write_array(path, arr)
    ae(read_array(path), arr)
    ae(read_array(path, mmap_mode='r'), arr)


def test_concatenate_virtual_arrays_1():
    arrs = [np.arange(5), np.arange(10, 12), np.array([0])]
    c = _concatenate_virtual_arrays(arrs)
    assert c.shape == (8,)
    assert c._get_recording(3) == 0
    assert c._get_recording(5) == 1

    ae(c[:], [0, 1, 2, 3, 4, 10, 11, 0])
    ae(c[0], [0])
    ae(c[4], [4])
    ae(c[5], [10])
    ae(c[6], [11])
    ae(c[4:6], [4, 10])

    ae(c[:6], [0, 1, 2, 3, 4, 10])
    ae(c[4:], [4, 10, 11, 0])
    ae(c[4:-1], [4, 10, 11])


def test_concatenate_virtual_arrays_2():
    arrs = [np.zeros((2, 2)), np.ones((3, 2))]
    c = _concatenate_virtual_arrays(arrs)
    assert c.shape == (5, 2)
    ae(c[:, :], np.vstack((np.zeros((2, 2)), np.ones((3, 2)))))
    ae(c[0:4, 0], [0, 0, 1, 1])


def test_chunk_bounds():
    chunks = chunk_bounds(200, 100, overlap=20)

    assert next(chunks) == (0, 100, 0, 90)
    assert next(chunks) == (80, 180, 90, 170)
    assert next(chunks) == (160, 200, 170, 200)


def test_chunk():
    data = np.random.randn(200, 4)
    chunks = chunk_bounds(data.shape[0], 100, overlap=20)

    with raises(ValueError):
        data_chunk(data, (0, 0, 0))

    assert data_chunk(data, (0, 0)).shape == (0, 4)

    # Chunk 1.
    ch = next(chunks)
    d = data_chunk(data, ch)
    d_o = data_chunk(data, ch, with_overlap=True)

    ae(d_o, data[0:100])
    ae(d, data[0:90])

    # Chunk 2.
    ch = next(chunks)
    d = data_chunk(data, ch)
    d_o = data_chunk(data, ch, with_overlap=True)

    ae(d_o, data[80:180])
    ae(d, data[90:170])


def test_excerpts_1():
    bounds = [(start, end) for (start, end) in
              excerpts(100, n_excerpts=3, excerpt_size=10)]
    assert bounds == [(0, 10), (45, 55), (90, 100)]


def test_excerpts_2():
    bounds = [(start, end) for (start, end) in
              excerpts(10, n_excerpts=3, excerpt_size=10)]
    assert bounds == [(0, 10)]


def test_get_excerpts():
    data = np.random.rand(100, 2)
    subdata = get_excerpts(data, n_excerpts=10, excerpt_size=5)
    assert subdata.shape == (50, 2)
    ae(subdata[:5, :], data[:5, :])
    ae(subdata[-5:, :], data[-10:-5, :])

    data = np.random.rand(10, 2)
    subdata = get_excerpts(data, n_excerpts=2, excerpt_size=5)
    ae(subdata, data)

    data = np.random.rand(10, 2)
    subdata = get_excerpts(data, n_excerpts=3, excerpt_size=5)
    ae(subdata, data)

    assert len(get_excerpts(data, n_excerpts=0, excerpt_size=10)) == 0


def test_regular_subset():
    spikes = [2, 3, 5, 7, 11, 13, 17]
    ae(regular_subset(spikes), spikes)
    ae(regular_subset(spikes, 100), spikes)
    ae(regular_subset(spikes, 100, offset=2), spikes)
    ae(regular_subset(spikes, 3), [2, 7, 17])
    ae(regular_subset(spikes, 3, offset=1), [3, 11])


def test_spikes_in_clusters():
    """Test _spikes_in_clusters()."""

    n_spikes = 100
    n_clusters = 5
    spike_clusters = artificial_spike_clusters(n_spikes, n_clusters)

    ae(_spikes_in_clusters(spike_clusters, []), [])

    for i in range(n_clusters):
        assert np.all(spike_clusters[
            _spikes_in_clusters(spike_clusters, [i])] == i)

    clusters = [1, 2, 3]
    assert np.all(np.in1d(
        spike_clusters[_spikes_in_clusters(spike_clusters, clusters)],
        clusters))


def test_spikes_per_cluster():
    """Test _spikes_per_cluster()."""

    n_spikes = 100
    n_clusters = 3
    spike_clusters = artificial_spike_clusters(n_spikes, n_clusters)

    assert not _spikes_per_cluster([])

    spikes_per_cluster = _spikes_per_cluster(spike_clusters)
    assert sorted(spikes_per_cluster.keys()) == list(range(n_clusters))

    for i in range(n_clusters):
        ae(spikes_per_cluster[i], np.sort(spikes_per_cluster[i]))
        assert np.all(spike_clusters[spikes_per_cluster[i]] == i)


def test_flatten_per_cluster():
    spc = {2: [2, 7, 11], 3: [3, 5], 5: []}
    arr = _flatten_per_cluster(spc)
    ae(arr, [2, 3, 5, 7, 11])


def test_grouped_mean():
    spike_clusters = np.array([2, 3, 2, 2, 5])
    arr = spike_clusters * 10
    ae(grouped_mean(arr, spike_clusters), [20, 30, 50])


def test_select_spikes():
    with raises(AssertionError):
        select_spikes()
    spikes = [2, 3, 5, 7, 11]
    spc = lambda c: {2: [2, 7, 11], 3: [3, 5], 5: []}.get(c, None)
    ae(select_spikes([], spikes_per_cluster=spc), [])
    ae(select_spikes([2, 3, 5], spikes_per_cluster=spc), spikes)
    ae(select_spikes([2, 5], spikes_per_cluster=spc), spc(2))
    ae(select_spikes([2, 3, 5], 0, spikes_per_cluster=spc), spikes)
    ae(select_spikes([2, 3, 5], None, spikes_per_cluster=spc), spikes)
    ae(select_spikes([2, 3, 5], 1, spikes_per_cluster=spc), [2, 3])
    ae(select_spikes([2, 5], 2, spikes_per_cluster=spc), [2])

    sel = Selector(spc)
    assert sel.select_spikes() is None
    ae(sel.select_spikes([2, 5]), spc(2))
    ae(sel.select_spikes([2, 5], 2), [2]) |
'Tests of waveform loader.'
import numpy as np
from numpy.testing import assert_array_equal as ae
from pytest import raises, yield_fixture
from phy.io.mock import artificial_traces, artificial_spike_samples
from phy.utils import Bunch
from ..waveform import _slice, WaveformLoader, WaveformExtractor, SpikeLoader
from ..filter import bandpass_filter, apply_filter
# NOTE: call targets and keyword arguments in this module were stripped in
# the source; constructor arguments and helper names below are best-effort
# reconstructions of the phy waveform API and should be read as assumptions.
def test_extract_simple():
    weak = 1.
    strong = 2.
    nc = 4
    ns = 20

    data = np.random.uniform(size=(ns, nc), low=0., high=1.)

    data[10, 0] = 0.5
    data[11, 0] = 1.5
    data[12, 0] = 1.0

    data[10, 1] = 1.5
    data[11, 1] = 2.5
    data[12, 1] = 2.0

    component = np.array([[10, 0], [10, 1],
                          [11, 0], [11, 1],
                          [12, 0], [12, 1]])

    we = WaveformExtractor(extract_before=3, extract_after=5)
    we.set_thresholds(weak=weak, strong=strong)

    # _component()
    comp = we._component(component, n_samples=ns)
    ae(comp.comp_s, [10, 10, 11, 11, 12, 12])
    ae(comp.comp_ch, [0, 1, 0, 1, 0, 1])
    assert (comp.s_min, comp.s_max) == (10 - 3, 12 + 4)

    # _normalize()
    assert we._normalize(weak) == 0
    assert we._normalize(strong) == 1
    ae(we._normalize([(weak + strong) / 2.]), [0.5])

    # _comp_wave()
    wave = we._comp_wave(data, comp)
    assert wave.shape == (3 + 5 + 1, nc)
    ae(wave[3:6, :], [[0.5, 1.5, 0., 0.],
                      [1.5, 2.5, 0., 0.],
                      [1.0, 2.0, 0., 0.]])

    # masks()
    masks = we.masks(data, wave, comp)
    ae(masks, [0.5, 1., 0, 0])

    # spike_sample_aligned()
    s = we.spike_sample_aligned(wave, comp)
    assert 11 <= s < 12

    # extract()
    wave_e = we.extract(data, s)
    assert wave_e.shape[1] == wave.shape[1]
    ae(wave[3:6, :2], wave_e[3:6, :2])

    # align()
    wave_a = we.align(wave_e, s)
    assert wave_a.shape == (3 + 5, nc)

    # Run the whole extraction pipeline.
    s_f, masks_f, wave_f = we(component)
    assert s_f == s
    ae(masks_f, masks)
    ae(wave_f, wave_a)

    # A second extractor; further constructor arguments were stripped in
    # the source.
    we = WaveformExtractor(extract_before=3, extract_after=5,
                           thresholds={'weak': weak, 'strong': strong})
    s_f_o, masks_f_o, wave_f_o = we(component)
    assert s_f == s_f_o
    assert np.allclose(wave_f, wave_f_o)
    ae(masks_f_o, [0.5, 1., 0., 0.])


def test_slice():
    assert _slice(0, (20, 20)) == slice(0, 20, None)


@yield_fixture(params=[(None, None), (0.1, 2.)])
def waveform_loader(request):
    scale_factor, dc_offset = request.param

    n_samples_trace, n_channels = 1000, 5
    h = 10
    n_samples_waveforms = 2 * h
    n_spikes = n_samples_trace // (2 * n_samples_waveforms)

    traces = artificial_traces(n_samples_trace, n_channels)
    spike_samples = artificial_spike_samples(n_spikes)

    with raises(ValueError):
        WaveformLoader(traces)

    loader = WaveformLoader(traces=traces,
                            n_samples_waveforms=n_samples_waveforms,
                            scale_factor=scale_factor,
                            dc_offset=dc_offset,
                            )
    b = Bunch(loader=loader,
              n_samples_waveforms=n_samples_waveforms,
              n_spikes=n_spikes,
              spike_samples=spike_samples,
              )
    yield b


def test_loader_edge_case():
    wl = WaveformLoader(n_samples_waveforms=3)
    wl.traces = artificial_traces(0, 2)
    wl[0]


def test_loader_simple(waveform_loader):
    b = waveform_loader
    spike_samples = b.spike_samples
    loader = b.loader
    traces = loader.traces
    dc_offset = loader.dc_offset or 0
    scale_factor = loader.scale_factor or 1

    n_samples_traces, n_channels = traces.shape
    n_samples_waveforms = b.n_samples_waveforms
    h = n_samples_waveforms // 2

    assert loader.offset == 0
    assert loader.dc_offset in (dc_offset, None)
    assert loader.scale_factor in (scale_factor, None)

    def _transform(arr):
        return (arr - dc_offset) * scale_factor

    waveforms = loader[spike_samples[10:20]]
    assert waveforms.shape == (10, n_samples_waveforms, n_channels)
    t = spike_samples[15]
    w1 = waveforms[5, ...]
    w2 = _transform(traces[t - h:t + h, :])
    assert np.allclose(w1, w2)

    sl = SpikeLoader(loader, spike_samples)
    assert np.allclose(sl[15], w2)


def test_edges():
    n_samples_trace, n_channels = 100, 10
    n_samples_waveforms = 20

    traces = artificial_traces(n_samples_trace, n_channels)

    # Filter parameters were stripped in the source; these are assumed.
    b_filter = bandpass_filter(rate=1000., low=50., high=200., order=3)
    filter_margin = 10

    loader = WaveformLoader(traces,
                            n_samples_waveforms=n_samples_waveforms,
                            filter=lambda x, axis=0: apply_filter(
                                x, b_filter, axis=axis),
                            filter_margin=filter_margin)

    with raises(ValueError):
        loader._load_at(200000)

    ns = n_samples_waveforms + filter_margin
    assert loader._load_at(0).shape == (ns, n_channels)
    assert loader._load_at(5).shape == (ns, n_channels)
    assert loader._load_at(n_samples_trace - 5).shape == (ns, n_channels)
    assert loader._load_at(n_samples_trace - 1).shape == (ns, n_channels)


def test_loader_channels():
    n_samples_trace, n_channels = 1000, 10
    n_samples_waveforms = 20

    traces = artificial_traces(n_samples_trace, n_channels)
    loader = WaveformLoader(traces, n_samples_waveforms=n_samples_waveforms)
    loader.traces = traces
    channels = [2, 5, 7]
    loader.channels = channels
    assert loader.channels == channels
    assert loader[500].shape == (1, n_samples_waveforms, 3)
    assert loader[[500, 501, 600, 300]].shape == (4, n_samples_waveforms, 3)

    # Test edge effects.
    assert loader[3].shape == (1, n_samples_waveforms, 3)
    assert loader[995].shape == (1, n_samples_waveforms, 3)

    with raises(NotImplementedError):
        loader[500:510]


def test_loader_filter():
    traces = np.c_[np.arange(20), np.arange(20, 40)].astype(np.int32)
    n_samples_trace, n_channels = traces.shape
    h = 3

    def my_filter(x, axis=0):
        return x * x

    loader = WaveformLoader(traces,
                            n_samples_waveforms=2 * h,
                            filter=my_filter)

    t = 10
    waveform_filtered = loader[t]
    traces_filtered = my_filter(traces)
    assert np.allclose(waveform_filtered,
                       traces_filtered[t - h:t + h, :]) |
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2012 Kevin Minnick
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception definitions.
"""
class UnsupportedVersion(Exception):
"""Indicates that the user is trying to use an unsupported
version of the API"""
pass
class CommandError(Exception):
pass
class AuthorizationFailure(Exception):
pass
class NoUniqueMatch(Exception):
pass
class AuthSystemNotFound(Exception):
"""When the user specify a AuthSystem but not installed."""
def __init__(self, auth_system):
self.auth_system = auth_system
def __str__(self):
return "AuthSystemNotFound: %s" % repr(self.auth_system)
class NoTokenLookupException(Exception):
"""This form of authentication does not support looking up
endpoints from an existing token."""
pass
class EndpointNotFound(Exception):
"""Could not find Service or Region in Service Catalog."""
pass
class AmbiguousEndpoints(Exception):
"""Found more than one matching endpoint in Service Catalog."""
def __init__(self, endpoints=None):
self.endpoints = endpoints
def __str__(self):
return "AmbiguousEndpoints: %s" % repr(self.endpoints)
class ConnectionRefused(Exception):
"""
Connection refused: the server refused the connection.
"""
def __init__(self, response=None):
self.response = response
def __str__(self):
return "ConnectionRefused: %s" % repr(self.response)
class ClientException(Exception):
"""
The base exception class for all exceptions this library raises.
"""
def __init__(self, code, message=None, details=None, request_id=None):
self.code = code
self.message = message or self.__class__.message
self.details = details
self.request_id = request_id
def __str__(self):
formatted_string = "%s (HTTP %s)" % (self.message, self.code)
if self.request_id:
formatted_string += " (Request-ID: %s)" % self.request_id
return formatted_string
class BadRequest(ClientException):
"""
HTTP 400 - Bad request: you sent some malformed data.
"""
http_status = 400
message = "Bad request"
class Unauthorized(ClientException):
"""
HTTP 401 - Unauthorized: bad credentials.
"""
http_status = 401
message = "Unauthorized"
class Forbidden(ClientException):
"""
HTTP 403 - Forbidden: your credentials don't give you access to this
resource.
"""
http_status = 403
message = "Forbidden"
class NotFound(ClientException):
"""
HTTP 404 - Not found
"""
http_status = 404
message = "Not found"
class OverLimit(ClientException):
"""
HTTP 413 - Over limit: you're over the API limits for this time period.
"""
http_status = 413
message = "Over limit"
# NotImplemented is a python keyword.
class HTTPNotImplemented(ClientException):
"""
HTTP 501 - Not Implemented: the server does not support this operation.
"""
http_status = 501
message = "Not Implemented"
# In Python 2.4 Exception is old-style and thus doesn't have a __subclasses__()
# so we can do this:
# _code_map = dict((c.http_status, c)
# for c in ClientException.__subclasses__())
#
# Instead, we have to hardcode it:
_code_map = dict(
(c.http_status, c)
for c in [
BadRequest,
Unauthorized,
Forbidden,
NotFound,
OverLimit,
HTTPNotImplemented,
]
)
def from_response(response, body):
"""
    Return an instance of ClientException or one of its subclasses
based on an httplib2 response.
Usage::
resp, body = http.request(...)
if resp.status != 200:
            raise from_response(resp, body)
"""
cls = _code_map.get(response.status, ClientException)
request_id = response.get("x-compute-request-id")
if body:
message = "n/a"
details = "n/a"
if hasattr(body, "keys"):
error = body[list(body.keys())[0]]
message = error.get("message", None)
details = error.get("details", None)
return cls(
code=response.status,
message=message,
details=details,
request_id=request_id,
)
else:
return cls(code=response.status, request_id=request_id)
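# Illustrative usage (the response/body objects below are hypothetical
# stand-ins; any mapping-like body and a response exposing ``status`` and
# ``get()`` will do):
#
#     class FakeResponse(dict):
#         status = 404
#
#     resp = FakeResponse({"x-compute-request-id": "req-123"})
#     body = {"itemNotFound": {"message": "No such server"}}
#     exc = from_response(resp, body)
#     assert isinstance(exc, NotFound) and exc.code == 404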
|
import unittest, os, sys
from custom_test_case import CustomTestCase
PROJECT_ROOT = os.path.dirname(__file__)
sys.path.append(os.path.join(PROJECT_ROOT, ".."))
from CodeConverter import CodeConverter
class TestBugfix(unittest.TestCase, CustomTestCase):
# For Bugfix
def test_string_including_spaces(self):
source = '[[UIAlertView alloc] initWithTitle:@"Warning" message:@" too many alerts! " "];'
expected = 'UIAlertView.alloc.initWithTitle("Warning",message:" too many alerts! " ");'
self.assertSentence(
CodeConverter(source)
.replace_nsstring()
.convert_square_brackets_expression()
.s,
expected,
)
def test_multiline_with_block_arg_wont_join_lines(self):
source = """[UIView animateWithDuration:0.2
animations:^{view.alpha = 0.0;}]
"""
expected = """[UIView animateWithDuration:0.2 animations:^{view.alpha = 0.0;}]
"""
self.assertSentence(CodeConverter(source).multilines_to_one_line().s, expected)
if __name__ == "__main__":
unittest.main()
|
import os
from django.db import models
from django.conf import settings
class Project(models.Model):
    # NOTE: field and call arguments were stripped in the source; the field
    # types and os/path calls below are assumptions that fit the usage.
    title = models.CharField(max_length=255)
    slug = models.SlugField()
    description = models.TextField()
    git = models.CharField(max_length=255)
    docs = models.CharField(max_length=255)

    class Meta:
        ordering = ('title',)

    def __unicode__(self):
        return self.title

    def save(self, *args, **kwargs):
        super(Project, self).save(*args, **kwargs)
        path = self.get_path()
        if not os.path.exists(path):
            os.system('cd %s; git clone %s %s'
                      % (settings.PROJECT_ROOT, self.git, self.slug))
        self.update()

    def get_absolute_url(self):
        return ('project_detail', None, {'project': self.slug})
    get_absolute_url = models.permalink(get_absolute_url)

    def get_path(self):
        return os.path.join(settings.PROJECT_ROOT, self.slug)

    def get_docs_path(self):
        return os.path.join(self.get_path(), self.docs)

    def get_pickle_path(self):
        return os.path.join(self.get_path(), '_build/pickle')

    def update(self):
        os.system('cd %s; git pull' % self.get_path())
        os.system('cd %s; make pickle' % self.get_path()) |
from django import forms
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext_lazy as _
from lithium.wiki.models import Revision, Text
class EditForm(forms.Form):
text = forms.CharField(label=_("Text"), widget=forms.Textarea)
comment = forms.CharField(
label=_("Comment"),
max_length=255,
required=False,
help_text=_("A short description of the changes you have made."),
)
def __init__(self, request, page, *args, **kwargs):
super(EditForm, self).__init__(*args, **kwargs)
self.request = request
self.page = page
self.instance = Revision()
def save(self, *args, **kwargs):
if self.request.user.is_anonymous():
self.instance.author_ip = self.request.META["REMOTE_ADDR"]
else:
self.instance.author = self.request.user
if not self.page.pk:
self.page.save()
self.instance.page = self.page
text, created = Text.objects.get_or_create(content=self.cleaned_data["text"])
self.instance.text = text
self.instance.comment = self.cleaned_data["comment"]
if not created:
try:
revision = text.revision_set.latest("pub_date")
self.instance.comment = _(
"Reverted to revision %(revision)s by %(author)s"
) % {
"revision": revision.pk,
"author": revision.author or revision.author_ip,
}
except ObjectDoesNotExist:
pass
self.instance.save()
|
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="django-cuddlybuddly-thumbnail",
version="2.6",
license="BSD",
description="A Django app that supplies a template tag to generate "
"thumbnails. Specifically designed to be compatible with "
"properly implemented Django storage systems, which "
"unfortunately most are not.",
long_description=read("README.rst"),
author="Kyle MacFarlane",
author_email="kyle@deletethetrees.com",
package_dir={"": "src"},
packages=find_packages("src"),
namespace_packages=["cuddlybuddly"],
include_package_data=True,
zip_safe=False,
install_requires=["setuptools"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
],
)
|
"""
"""
import os
import json
from sqlalchemy import (
create_engine,
Table,
Column,
String,
Integer,
Float,
Text,
MetaData,
select,
ForeignKey,
bindparam,
delete,
and_,
)
from config import Configuration
engine = create_engine(Configuration.SQLALCHEMY_DATABASE_URI, echo=True)
metadata = MetaData()
tags = Table(
"tag",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String),
)
posts = Table(
"post",
metadata,
Column("id", Integer, primary_key=True),
)
posts_to_tags = Table(
"posts_to_tags",
metadata,
Column("tag_id", Integer, ForeignKey("tag.id")),
Column("post_id", Integer, ForeignKey("post.id")),
)
def eliminate_duplicates(conn):
tag_map = {}
update_batch = []
delete_batch = []
for row in conn.execute(
select([posts, tags])
.select_from(posts.join(posts_to_tags).join(tags))
.order_by(tags.c.id)
):
post_id = row[0]
tag_id = row[1]
tag_name = row[2]
# possible duplicate
if tag_name in tag_map:
preexisting_tag_id = tag_map.get(tag_name)
if preexisting_tag_id != tag_id:
update_batch.append(
{
"the_post_id": post_id,
"old_tag_id": tag_id,
"new_tag_id": preexisting_tag_id,
}
)
delete_batch.append(
{
"the_tag_id": tag_id,
}
)
else:
tag_map[tag_name] = tag_id
print(("update batch", update_batch))
if update_batch:
update_stmt = (
posts_to_tags.update()
.where(
and_(
posts_to_tags.c.post_id == bindparam("the_post_id"),
posts_to_tags.c.tag_id == bindparam("old_tag_id"),
)
)
.values(tag_id=bindparam("new_tag_id"))
)
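        # The bind names ("the_post_id", "old_tag_id", "new_tag_id") are
        # deliberately distinct from the column names: in an executemany
        # UPDATE, the WHERE-clause bindparam() names must not collide with
        # the columns assigned in values().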
# print(update_stmt)
# print(update_batch)
conn.execute(update_stmt, update_batch)
print(("delete batch", delete_batch))
if delete_batch:
delete_stmt = tags.delete().where(tags.c.id == bindparam("the_tag_id"))
# print(delete_stmt)
conn.execute(delete_stmt, delete_batch)
with engine.begin() as conn:
eliminate_duplicates(conn)
|
class Color(object):
def __init__(self, r=255, g=255, b=255, a=255):
self.r = r
self.g = g
self.b = b
self.a = a
@property
def hex(self):
return "%0.2X%0.2X%0.2X%0.2X" % (self.a, self.r, self.g, self.b)
def __hash__(self):
return (self.a << 24) + (self.r << 16) + (self.g << 8) + (self.b)
def __eq__(self, other):
if not other:
return False
return (
self.r == other.r
and self.g == other.g
and self.b == other.b
and self.a == other.a
)
def __str__(self):
return self.hex
Color.WHITE = Color(255, 255, 255, 255)
Color.BLACK = Color(0, 0, 0, 255)
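# Illustrative usage of the value semantics above:
#
#     c = Color(16, 32, 48)          # alpha defaults to 255 (opaque)
#     assert c.hex == 'FF102030'     # ARGB, two hex digits per channel
#     assert c == Color(16, 32, 48)  # __eq__ compares all four channels
#     assert hash(c) == 0xFF102030   # __hash__ packs ARGB into one integer
#     assert c != Color.BLACK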
|
def assertEquals(a, b):
    result = (a == b)
    if not result:
        print(str(a) + ' not equals to\n' + str(b))
        assert False


def assertNotEquals(a, b):
    result = (a != b)
    if not result:
        print(str(a) + ' equals to\n' + str(b))
        assert False |
#! /usr/bin/python
# Opcodes, assigned their integer values, carefully chosen.
# XOR must always be 0, since XOR, O, O, O makes a useful NOP
# XOR, AND, OR, SUB, ADD, UND1, UND2, UND3, MHS, MLS, MHU, JMP, JZE, JNZ, JPO, JNE = range(16)
# Jumps no longer exist as opcodes, see branching_flags.py
(
XOR,
AND,
OR,
SUB,
ADD,
UND1,
UND2,
UND3,
MHS,
MLS,
MHU,
UND4,
UND5,
UND6,
UND7,
UND8,
) = list(range(16))
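# Because XOR is opcode 0, an all-zero instruction word decodes as
# "XOR r0, r0, r0", which leaves the register unchanged -- the NOP mentioned
# above. Illustrative encoding helper (the field layout is assumed, not
# defined in this file):
#
#     def encode(op, rd, ra, rb):
#         return (op << 12) | (rd << 8) | (ra << 4) | rb
#
#     assert encode(XOR, 0, 0, 0) == 0  # the NOP encodes as zero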
|
# Copyright (C) 2011 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
import logging
from sqlalchemy import sql
from acoustid import tables as schema, const
from acoustid.data.fingerprint import (
lookup_fingerprint,
insert_fingerprint,
inc_fingerprint_submission_count,
FingerprintSearcher,
)
from acoustid.data.musicbrainz import resolve_mbid_redirect
from acoustid.data.track import (
insert_track,
insert_mbid,
insert_puid,
merge_tracks,
insert_track_meta,
can_add_fp_to_track,
can_merge_tracks,
insert_track_foreignid,
)
logger = logging.getLogger(__name__)
def insert_submission(conn, data):
"""
Insert a new submission into the database
"""
with conn.begin():
insert_stmt = schema.submission.insert().values(
{
"fingerprint": data["fingerprint"],
"length": data["length"],
"bitrate": data.get("bitrate"),
"mbid": data.get("mbid"),
"puid": data.get("puid"),
"source_id": data.get("source_id"),
"format_id": data.get("format_id"),
"meta_id": data.get("meta_id"),
"foreignid_id": data.get("foreignid_id"),
}
)
id = conn.execute(insert_stmt).inserted_primary_key[0]
logger.debug("Inserted submission %r with data %r", id, data)
return id
def import_submission(conn, submission, index=None):
"""
Import the given submission into the main fingerprint database
"""
with conn.begin():
update_stmt = schema.submission.update().where(
schema.submission.c.id == submission["id"]
)
conn.execute(update_stmt.values(handled=True))
mbids = []
if submission["mbid"]:
mbids.append(resolve_mbid_redirect(conn, submission["mbid"]))
logger.info(
"Importing submission %d with MBIDs %s", submission["id"], ", ".join(mbids)
)
num_unique_items = len(set(submission["fingerprint"]))
if num_unique_items < const.FINGERPRINT_MIN_UNIQUE_ITEMS:
logger.info("Skipping, has only %d unique items", num_unique_items)
return
num_query_items = conn.execute(
"SELECT icount(acoustid_extract_query(%(fp)s))",
dict(fp=submission["fingerprint"]),
)
if not num_query_items:
logger.info("Skipping, no data to index")
return
searcher = FingerprintSearcher(conn, index, fast=False)
searcher.min_score = const.TRACK_MERGE_THRESHOLD
matches = searcher.search(submission["fingerprint"], submission["length"])
fingerprint = {
"id": None,
"track_id": None,
"fingerprint": submission["fingerprint"],
"length": submission["length"],
"bitrate": submission["bitrate"],
"format_id": submission["format_id"],
}
if matches:
match = matches[0]
all_track_ids = set()
possible_track_ids = set()
for m in matches:
if m["track_id"] in all_track_ids:
continue
all_track_ids.add(m["track_id"])
logger.debug(
"Fingerprint %d with track %d is %d%% similar",
m["id"],
m["track_id"],
m["score"] * 100,
)
if can_add_fp_to_track(
conn, m["track_id"], submission["fingerprint"], submission["length"]
):
possible_track_ids.add(m["track_id"])
if not fingerprint["track_id"]:
fingerprint["track_id"] = m["track_id"]
if m["score"] > const.FINGERPRINT_MERGE_THRESHOLD:
fingerprint["id"] = m["id"]
if len(possible_track_ids) > 1:
for group in can_merge_tracks(conn, possible_track_ids):
if fingerprint["track_id"] in group and len(group) > 1:
fingerprint["track_id"] = min(group)
group.remove(fingerprint["track_id"])
merge_tracks(conn, fingerprint["track_id"], list(group))
break
if not fingerprint["track_id"]:
fingerprint["track_id"] = insert_track(conn)
if not fingerprint["id"]:
fingerprint["id"] = insert_fingerprint(
conn, fingerprint, submission["id"], submission["source_id"]
)
else:
inc_fingerprint_submission_count(
conn, fingerprint["id"], submission["id"], submission["source_id"]
)
for mbid in mbids:
insert_mbid(
conn,
fingerprint["track_id"],
mbid,
submission["id"],
submission["source_id"],
)
if (
submission["puid"]
and submission["puid"] != "00000000-0000-0000-0000-000000000000"
):
insert_puid(
conn,
fingerprint["track_id"],
submission["puid"],
submission["id"],
submission["source_id"],
)
if submission["meta_id"]:
insert_track_meta(
conn,
fingerprint["track_id"],
submission["meta_id"],
submission["id"],
submission["source_id"],
)
if submission["foreignid_id"]:
insert_track_foreignid(
conn,
fingerprint["track_id"],
submission["foreignid_id"],
submission["id"],
submission["source_id"],
)
return fingerprint
def import_queued_submissions(conn, index=None, limit=100, ids=None):
"""
Import the given submission into the main fingerprint database
"""
query = schema.submission.select(schema.submission.c.handled == False).order_by(
schema.submission.c.mbid.nullslast(), schema.submission.c.id.desc()
)
if ids is not None:
query = query.where(schema.submission.c.id.in_(ids))
if limit is not None:
query = query.limit(limit)
count = 0
for submission in conn.execute(query):
import_submission(conn, submission, index=index)
count += 1
logger.debug("Imported %d submissions", count)
return count
def lookup_submission_status(db, ids):
if not ids:
return {}
source = schema.fingerprint_source.join(schema.fingerprint).join(schema.track)
query = sql.select(
[schema.fingerprint_source.c.submission_id, schema.track.c.gid], from_obj=source
).where(schema.fingerprint_source.c.submission_id.in_(ids))
results = {}
for id, track_gid in db.execute(query):
results[id] = track_gid
return results
|
"""initial
Revision ID: 57c4d22c87b8
Revises:
Create Date: 2016-02-10 21:12:42.367918
"""
# revision identifiers, used by Alembic.
revision = "57c4d22c87b8"
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_table(
"account_google",
sa.Column("google_user_id", sa.String(), nullable=False),
sa.Column("account_id", sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint("google_user_id"),
)
op.create_index(
"account_google_idx_account_id", "account_google", ["account_id"], unique=False
)
op.create_table(
"account_stats_control",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("last_updated", sa.DateTime(timezone=True), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"acoustid_mb_replication_control",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("current_schema_sequence", sa.Integer(), nullable=False),
sa.Column("current_replication_sequence", sa.Integer(), nullable=True),
sa.Column("last_replication_date", sa.DateTime(timezone=True), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"application",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.Column("version", sa.String(), nullable=False),
sa.Column("apikey", sa.String(), nullable=False),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=True,
),
sa.Column(
"active", sa.Boolean(), server_default=sa.text("true"), nullable=True
),
sa.Column("account_id", sa.Integer(), nullable=False),
sa.Column("email", sa.String(), nullable=True),
sa.Column("website", sa.String(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_index("application_idx_apikey", "application", ["apikey"], unique=True)
op.create_table(
"fingerprint_index_queue",
sa.Column("fingerprint_id", sa.Integer(), nullable=False),
)
op.create_table(
"foreignid_vendor",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
"foreignid_vendor_idx_name", "foreignid_vendor", ["name"], unique=True
)
op.create_table(
"format",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_index("format_idx_name", "format", ["name"], unique=True)
op.create_table(
"meta",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("track", sa.String(), nullable=True),
sa.Column("artist", sa.String(), nullable=True),
sa.Column("album", sa.String(), nullable=True),
sa.Column("album_artist", sa.String(), nullable=True),
sa.Column("track_no", sa.Integer(), nullable=True),
sa.Column("disc_no", sa.Integer(), nullable=True),
sa.Column("year", sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"mirror_queue",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column(
"txid",
sa.BigInteger(),
server_default=sa.text("txid_current()"),
nullable=False,
),
sa.Column("tblname", sa.String(), nullable=False),
sa.Column("op", sa.CHAR(length=1), nullable=False),
sa.Column("data", sa.Text(), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"recording_acoustid",
sa.Column("id", sa.Integer(), autoincrement=False, nullable=False),
sa.Column("acoustid", postgresql.UUID(), nullable=False),
sa.Column("recording", postgresql.UUID(), nullable=False),
sa.Column(
"disabled", sa.Boolean(), server_default=sa.text("false"), nullable=False
),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=True,
),
sa.Column("updated", sa.DateTime(timezone=True), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("recording_acoustid_idx_acoustid"),
"recording_acoustid",
["acoustid"],
unique=False,
)
op.create_index(
"recording_acoustid_idx_uniq",
"recording_acoustid",
["recording", "acoustid"],
unique=True,
)
op.create_table(
"replication_control",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("current_schema_sequence", sa.Integer(), nullable=False),
sa.Column("current_replication_sequence", sa.Integer(), nullable=True),
sa.Column("last_replication_date", sa.DateTime(timezone=True), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"stats",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.Column(
"date", sa.Date(), server_default=sa.text("CURRENT_DATE"), nullable=False
),
sa.Column("value", sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_index("stats_idx_date", "stats", ["date"], unique=False)
op.create_index("stats_idx_name_date", "stats", ["name", "date"], unique=False)
op.create_table(
"stats_lookups",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("date", sa.Date(), nullable=False),
sa.Column("hour", sa.Integer(), nullable=False),
sa.Column("application_id", sa.Integer(), nullable=False),
sa.Column(
"count_nohits", sa.Integer(), server_default=sa.text("0"), nullable=False
),
sa.Column(
"count_hits", sa.Integer(), server_default=sa.text("0"), nullable=False
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index("stats_lookups_idx_date", "stats_lookups", ["date"], unique=False)
op.create_table(
"stats_user_agents",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("date", sa.Date(), nullable=False),
sa.Column("application_id", sa.Integer(), nullable=False),
sa.Column("user_agent", sa.String(), nullable=False),
sa.Column("ip", sa.String(), nullable=False),
sa.Column("count", sa.Integer(), server_default=sa.text("0"), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
"stats_user_agents_idx_date", "stats_user_agents", ["date"], unique=False
)
op.create_table(
"track",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=True,
),
sa.Column("new_id", sa.Integer(), nullable=True),
sa.Column("gid", postgresql.UUID(), nullable=False),
sa.ForeignKeyConstraint(["new_id"], ["track.id"], name=op.f("track_fk_new_id")),
sa.PrimaryKeyConstraint("id"),
)
op.create_index("track_idx_gid", "track", ["gid"], unique=True)
op.create_table(
"account",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.Column("apikey", sa.String(), nullable=False),
sa.Column("mbuser", sa.String(), nullable=True),
sa.Column(
"anonymous", sa.Boolean(), server_default=sa.text("false"), nullable=True
),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=True,
),
sa.Column("lastlogin", sa.DateTime(timezone=True), nullable=True),
sa.Column(
"submission_count",
sa.Integer(),
server_default=sa.text("0"),
nullable=False,
),
sa.Column("application_id", sa.Integer(), nullable=True),
sa.Column("application_version", sa.String(), nullable=True),
sa.Column("created_from", postgresql.INET(), nullable=True),
sa.ForeignKeyConstraint(
["application_id"],
["application.id"],
name=op.f("account_fk_application_id"),
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index("account_idx_apikey", "account", ["apikey"], unique=True)
op.create_index("account_idx_mbuser", "account", ["mbuser"], unique=True)
op.create_table(
"fingerprint",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("fingerprint", postgresql.ARRAY(sa.Integer()), nullable=False),
sa.Column("length", sa.SmallInteger(), nullable=False),
sa.Column("bitrate", sa.SmallInteger(), nullable=True),
sa.Column("format_id", sa.Integer(), nullable=True),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
),
sa.Column("track_id", sa.Integer(), nullable=False),
sa.Column("submission_count", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["format_id"], ["format.id"], name=op.f("fingerprint_fk_format_id")
),
sa.ForeignKeyConstraint(
["track_id"], ["track.id"], name=op.f("fingerprint_fk_track_id")
),
sa.PrimaryKeyConstraint("id"),
sa.CheckConstraint("length > 0", name=op.f("fingerprint_length_check")),
sa.CheckConstraint("bitrate > 0", name=op.f("fingerprint_bitrate_check")),
)
op.create_index("fingerprint_idx_length", "fingerprint", ["length"], unique=False)
op.create_index(
"fingerprint_idx_track_id", "fingerprint", ["track_id"], unique=False
)
op.create_table(
"foreignid",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("vendor_id", sa.Integer(), nullable=False),
sa.Column("name", sa.Text(), nullable=False),
sa.ForeignKeyConstraint(
["vendor_id"], ["foreignid_vendor.id"], name=op.f("foreignid_fk_vendor_id")
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index("foreignid_idx_vendor", "foreignid", ["vendor_id"], unique=False)
op.create_index(
"foreignid_idx_vendor_name", "foreignid", ["vendor_id", "name"], unique=True
)
op.create_table(
"track_mbid",
sa.Column("track_id", sa.Integer(), nullable=False),
sa.Column("mbid", postgresql.UUID(), nullable=False),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=True,
),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("submission_count", sa.Integer(), nullable=False),
sa.Column(
"disabled", sa.Boolean(), server_default=sa.text("false"), nullable=False
),
sa.ForeignKeyConstraint(
["track_id"], ["track.id"], name=op.f("track_mbid_fk_track_id")
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("track_mbid_idx_mbid"), "track_mbid", ["mbid"], unique=False)
op.create_index(
"track_mbid_idx_uniq", "track_mbid", ["track_id", "mbid"], unique=False
)
op.create_table(
"track_meta",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("track_id", sa.Integer(), nullable=False),
sa.Column("meta_id", sa.Integer(), nullable=False),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=True,
),
sa.Column("submission_count", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["meta_id"], ["meta.id"], name=op.f("track_meta_fk_meta_id")
),
sa.ForeignKeyConstraint(
["track_id"], ["track.id"], name=op.f("track_meta_fk_track_id")
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("track_meta_idx_meta_id"), "track_meta", ["meta_id"], unique=False
)
op.create_index(
"track_meta_idx_uniq", "track_meta", ["track_id", "meta_id"], unique=False
)
op.create_table(
"track_puid",
sa.Column("track_id", sa.Integer(), nullable=False),
sa.Column("puid", postgresql.UUID(), nullable=False),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=True,
),
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("submission_count", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["track_id"], ["track.id"], name=op.f("track_puid_fk_track_id")
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("track_puid_idx_puid"), "track_puid", ["puid"], unique=False)
op.create_index(
"track_puid_idx_uniq", "track_puid", ["track_id", "puid"], unique=False
)
op.create_table(
"account_openid",
sa.Column("openid", sa.String(), nullable=False),
sa.Column("account_id", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["account_id"], ["account.id"], name=op.f("account_openid_fk_account_id")
),
sa.PrimaryKeyConstraint("openid"),
)
op.create_index(
"account_openid_idx_account_id", "account_openid", ["account_id"], unique=False
)
op.create_table(
"source",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("application_id", sa.Integer(), nullable=False),
sa.Column("account_id", sa.Integer(), nullable=False),
sa.Column("version", sa.String(), nullable=True),
sa.ForeignKeyConstraint(
["account_id"], ["account.id"], name=op.f("source_fk_account_id")
),
sa.ForeignKeyConstraint(
["application_id"],
["application.id"],
name=op.f("source_fk_application_id"),
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
"source_idx_uniq",
"source",
["application_id", "account_id", "version"],
unique=True,
)
op.create_table(
"stats_top_accounts",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("account_id", sa.Integer(), nullable=False),
sa.Column("count", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["account_id"],
["account.id"],
name=op.f("stats_top_accounts_fk_account_id"),
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"track_foreignid",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("track_id", sa.Integer(), nullable=False),
sa.Column("foreignid_id", sa.Integer(), nullable=False),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=True,
),
sa.Column("submission_count", sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
["foreignid_id"],
["foreignid.id"],
name=op.f("track_foreignid_fk_foreignid_id"),
),
sa.ForeignKeyConstraint(
["track_id"], ["track.id"], name=op.f("track_foreignid_fk_track_id")
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("track_foreignid_idx_foreignid_id"),
"track_foreignid",
["foreignid_id"],
unique=False,
)
op.create_index(
"track_foreignid_idx_uniq",
"track_foreignid",
["track_id", "foreignid_id"],
unique=False,
)
op.create_table(
"track_mbid_change",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("track_mbid_id", sa.Integer(), nullable=False),
sa.Column("account_id", sa.Integer(), nullable=False),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=True,
),
sa.Column("disabled", sa.Boolean(), nullable=False),
sa.Column("note", sa.Text(), nullable=True),
sa.ForeignKeyConstraint(
["account_id"], ["account.id"], name=op.f("track_mbid_change_fk_account_id")
),
sa.ForeignKeyConstraint(
["track_mbid_id"],
["track_mbid.id"],
name=op.f("track_mbid_change_fk_track_mbid_id"),
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("track_mbid_change_idx_track_mbid_id"),
"track_mbid_change",
["track_mbid_id"],
unique=False,
)
op.create_table(
"track_mbid_flag",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("track_mbid_id", sa.Integer(), nullable=False),
sa.Column("account_id", sa.Integer(), nullable=False),
sa.Column(
"handled", sa.Boolean(), server_default=sa.text("false"), nullable=False
),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=True,
),
sa.ForeignKeyConstraint(
["account_id"], ["account.id"], name=op.f("track_mbid_flag_fk_account_id")
),
sa.ForeignKeyConstraint(
["track_mbid_id"],
["track_mbid.id"],
name=op.f("track_mbid_flag_fk_track_mbid_id"),
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"submission",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("fingerprint", postgresql.ARRAY(sa.Integer()), nullable=False),
sa.Column("length", sa.SmallInteger(), nullable=False),
sa.Column("bitrate", sa.SmallInteger(), nullable=True),
sa.Column("format_id", sa.Integer(), nullable=True),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
),
sa.Column("source_id", sa.Integer(), nullable=False),
sa.Column("mbid", postgresql.UUID(), nullable=True),
sa.Column(
"handled", sa.Boolean(), server_default=sa.text("false"), nullable=True
),
sa.Column("puid", postgresql.UUID(), nullable=True),
sa.Column("meta_id", sa.Integer(), nullable=True),
sa.Column("foreignid_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["foreignid_id"], ["foreignid.id"], name=op.f("submission_fk_foreignid_id")
),
sa.ForeignKeyConstraint(
["format_id"], ["format.id"], name=op.f("submission_fk_format_id")
),
sa.ForeignKeyConstraint(
["meta_id"], ["meta.id"], name=op.f("submission_fk_meta_id")
),
sa.ForeignKeyConstraint(
["source_id"], ["source.id"], name=op.f("submission_fk_source_id")
),
sa.PrimaryKeyConstraint("id"),
sa.CheckConstraint("length > 0", name=op.f("submission_length_check")),
sa.CheckConstraint("bitrate > 0", name=op.f("submission_bitrate_check")),
)
op.create_index(
"submission_idx_handled",
"submission",
["id"],
unique=False,
postgresql_where=sa.text("handled = false"),
)
op.create_table(
"fingerprint_source",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("fingerprint_id", sa.Integer(), nullable=False),
sa.Column("submission_id", sa.Integer(), nullable=False),
sa.Column("source_id", sa.Integer(), nullable=False),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=True,
),
sa.ForeignKeyConstraint(
["fingerprint_id"],
["fingerprint.id"],
name=op.f("fingerprint_source_fk_fingerprint_id"),
),
sa.ForeignKeyConstraint(
["source_id"], ["source.id"], name=op.f("fingerprint_source_fk_source_id")
),
sa.ForeignKeyConstraint(
["submission_id"],
["submission.id"],
name=op.f("fingerprint_source_fk_submission_id"),
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
"fingerprint_source_idx_submission_id",
"fingerprint_source",
["submission_id"],
unique=False,
)
op.create_table(
"track_foreignid_source",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("track_foreignid_id", sa.Integer(), nullable=False),
sa.Column("submission_id", sa.Integer(), nullable=False),
sa.Column("source_id", sa.Integer(), nullable=False),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=True,
),
sa.ForeignKeyConstraint(
["source_id"],
["source.id"],
name=op.f("track_foreignid_source_fk_source_id"),
),
sa.ForeignKeyConstraint(
["submission_id"],
["submission.id"],
name=op.f("track_foreignid_source_fk_submission_id"),
),
sa.ForeignKeyConstraint(
["track_foreignid_id"],
["track_foreignid.id"],
name=op.f("track_foreignid_source_fk_track_foreignid_id"),
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"track_mbid_source",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("track_mbid_id", sa.Integer(), nullable=False),
sa.Column("submission_id", sa.Integer(), nullable=True),
sa.Column("source_id", sa.Integer(), nullable=False),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=True,
),
sa.ForeignKeyConstraint(
["source_id"], ["source.id"], name=op.f("track_mbid_source_fk_source_id")
),
sa.ForeignKeyConstraint(
["submission_id"],
["submission.id"],
name=op.f("track_mbid_source_fk_submission_id"),
),
sa.ForeignKeyConstraint(
["track_mbid_id"],
["track_mbid.id"],
name=op.f("track_mbid_source_fk_track_mbid_id"),
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("track_mbid_source_idx_source_id"),
"track_mbid_source",
["source_id"],
unique=False,
)
op.create_index(
op.f("track_mbid_source_idx_track_mbid_id"),
"track_mbid_source",
["track_mbid_id"],
unique=False,
)
op.create_table(
"track_meta_source",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("track_meta_id", sa.Integer(), nullable=False),
sa.Column("submission_id", sa.Integer(), nullable=False),
sa.Column("source_id", sa.Integer(), nullable=False),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=True,
),
sa.ForeignKeyConstraint(
["source_id"], ["source.id"], name=op.f("track_meta_source_fk_source_id")
),
sa.ForeignKeyConstraint(
["submission_id"],
["submission.id"],
name=op.f("track_meta_source_fk_submission_id"),
),
sa.ForeignKeyConstraint(
["track_meta_id"],
["track_meta.id"],
name=op.f("track_meta_source_fk_track_meta_id"),
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"track_puid_source",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("track_puid_id", sa.Integer(), nullable=False),
sa.Column("submission_id", sa.Integer(), nullable=False),
sa.Column("source_id", sa.Integer(), nullable=False),
sa.Column(
"created",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=True,
),
sa.ForeignKeyConstraint(
["source_id"], ["source.id"], name=op.f("track_puid_source_fk_source_id")
),
sa.ForeignKeyConstraint(
["submission_id"],
["submission.id"],
name=op.f("track_puid_source_fk_submission_id"),
),
sa.ForeignKeyConstraint(
["track_puid_id"],
["track_puid.id"],
name=op.f("track_puid_source_fk_track_puid_id"),
),
sa.PrimaryKeyConstraint("id"),
)
def downgrade():
op.drop_table("track_puid_source")
op.drop_table("track_meta_source")
op.drop_index(
op.f("track_mbid_source_idx_track_mbid_id"), table_name="track_mbid_source"
)
op.drop_index(
op.f("track_mbid_source_idx_source_id"), table_name="track_mbid_source"
)
op.drop_table("track_mbid_source")
op.drop_table("track_foreignid_source")
op.drop_index(
"fingerprint_source_idx_submission_id", table_name="fingerprint_source"
)
op.drop_table("fingerprint_source")
op.drop_index("submission_idx_handled", table_name="submission")
op.drop_table("submission")
op.drop_table("track_mbid_flag")
op.drop_index(
op.f("track_mbid_change_idx_track_mbid_id"), table_name="track_mbid_change"
)
op.drop_table("track_mbid_change")
op.drop_index("track_foreignid_idx_uniq", table_name="track_foreignid")
op.drop_index(
op.f("track_foreignid_idx_foreignid_id"), table_name="track_foreignid"
)
op.drop_table("track_foreignid")
op.drop_table("stats_top_accounts")
op.drop_index("source_idx_uniq", table_name="source")
op.drop_table("source")
op.drop_index("account_openid_idx_account_id", table_name="account_openid")
op.drop_table("account_openid")
op.drop_index("track_puid_idx_uniq", table_name="track_puid")
op.drop_index(op.f("track_puid_idx_puid"), table_name="track_puid")
op.drop_table("track_puid")
op.drop_index("track_meta_idx_uniq", table_name="track_meta")
op.drop_index(op.f("track_meta_idx_meta_id"), table_name="track_meta")
op.drop_table("track_meta")
op.drop_index("track_mbid_idx_uniq", table_name="track_mbid")
op.drop_index(op.f("track_mbid_idx_mbid"), table_name="track_mbid")
op.drop_table("track_mbid")
op.drop_index("foreignid_idx_vendor_name", table_name="foreignid")
op.drop_index("foreignid_idx_vendor", table_name="foreignid")
op.drop_table("foreignid")
op.drop_index("fingerprint_idx_track_id", table_name="fingerprint")
op.drop_index("fingerprint_idx_length", table_name="fingerprint")
op.drop_table("fingerprint")
op.drop_index("account_idx_mbuser", table_name="account")
op.drop_index("account_idx_apikey", table_name="account")
op.drop_table("account")
op.drop_index("track_idx_gid", table_name="track")
op.drop_table("track")
op.drop_index("stats_user_agents_idx_date", table_name="stats_user_agents")
op.drop_table("stats_user_agents")
op.drop_index("stats_lookups_idx_date", table_name="stats_lookups")
op.drop_table("stats_lookups")
op.drop_index("stats_idx_name_date", table_name="stats")
op.drop_index("stats_idx_date", table_name="stats")
op.drop_table("stats")
op.drop_table("replication_control")
op.drop_index("recording_acoustid_idx_uniq", table_name="recording_acoustid")
op.drop_index(
op.f("recording_acoustid_idx_acoustid"), table_name="recording_acoustid"
)
op.drop_table("recording_acoustid")
op.drop_table("mirror_queue")
op.drop_table("meta")
op.drop_index("format_idx_name", table_name="format")
op.drop_table("format")
op.drop_index("foreignid_vendor_idx_name", table_name="foreignid_vendor")
op.drop_table("foreignid_vendor")
op.drop_table("fingerprint_index_queue")
op.drop_index("application_idx_apikey", table_name="application")
op.drop_table("application")
op.drop_table("acoustid_mb_replication_control")
op.drop_table("account_stats_control")
op.drop_index("account_google_idx_account_id", table_name="account_google")
op.drop_table("account_google")
|
# Copyright (C) 2011 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
from nose.tools import *
from tests import prepare_database, with_database
from acoustid.data.source import find_or_insert_source
@with_database
def test_find_or_insert_source(conn):
rows = conn.execute(
"SELECT id, account_id, application_id FROM source ORDER BY id"
).fetchall()
expected_rows = [
(1, 1, 1),
(2, 2, 2),
]
assert_equals(expected_rows, rows)
id = find_or_insert_source(conn, 1, 1)
assert_equals(1, id)
id = find_or_insert_source(conn, 2, 2)
assert_equals(2, id)
id = find_or_insert_source(conn, 1, 2)
assert_equals(3, id)
rows = conn.execute(
"SELECT id, account_id, application_id FROM source ORDER BY id"
).fetchall()
expected_rows = [
(1, 1, 1),
(2, 2, 2),
(3, 2, 1),
]
assert_equals(expected_rows, rows)
|
# coding=utf-8
try:
    # collections.abc.Iterable is the supported location on Python 3.3+;
    # plain `collections.Iterable` is removed as of Python 3.10
    from collections.abc import Iterable
    def isiterable(x):
        return isinstance(x, Iterable)
except ImportError:
    def isiterable(x):
        # duck-typing fallback for interpreters without collections.abc
        try:
            iter(x)
            return True
        except TypeError:
            return False
import django
if django.VERSION >= (1, 9):
add_to_builtins = None
else:
try:
from django.template.base import add_to_builtins
except ImportError:
from django.template.loader import add_to_builtins
try:
# django.utils.importlib is removed from Django 1.9
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module
try:
    # Django 1.7 and later use the new application loading system
from django.apps import apps
get_model = apps.get_model
except ImportError:
from django.db.models.loading import get_model
try:
from django.utils.module_loading import import_string
except ImportError:
try:
from django.utils.module_loading import import_by_path as import_string
except ImportError:
def import_string(dotted_path):
try:
module_path, class_name = dotted_path.rsplit(".", 1)
except ValueError:
raise ImportError("%s doesn't look like a module path" % dotted_path)
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
raise ImportError(
'Module "%s" does not define a "%s" attribute/class'
% (module_path, class_name)
)
try:
    # Python 3
    from urllib.parse import urlparse
except ImportError:
    # Python 2
    from urlparse import urlparse
import sys
if sys.version_info >= (3, 0):
    def isstr(x):
        return isinstance(x, str)
else:
    def isstr(x):
        # on Python 2 this must also accept unicode strings
        return isinstance(x, basestring)  # noqa: F821
try:
    from django.utils import six
except ImportError:
# Django 1.2/1.3 does not have six
import six
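# Illustrative use of the helpers above (a minimal sketch; the dotted path
# and model lookup are examples, not part of this module):
def _compat_example():
    # resolve a class from a dotted path on any supported Django version
    TestCase = import_string("django.test.TestCase")
    # fetch a model through whichever app-loading system is available
    User = get_model("auth", "User")
    return TestCase, User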
|
# coding=utf-8
from django.test import TestCase
from permission.handlers import PermissionHandler
from permission.handlers import LogicalPermissionHandler
from permission.tests.utils import create_user
from permission.tests.utils import create_article
from permission.tests.models import Article
from permission.tests.compat import MagicMock
from permission.tests.compat import override_settings
@override_settings(PERMISSION_DEFAULT_PERMISSION_HANDLER=PermissionHandler)
class PermissionPermissionHandlersTestCase(TestCase):
def setUp(self):
self.handler = PermissionHandler
self.user = create_user("john")
self.perm1 = "permission.add_article"
self.perm2 = "permission.change_article"
self.perm3 = "permission.delete_article"
self.article = create_article("test")
def test_constructor_with_model(self):
instance = self.handler(Article)
self.assertEqual(instance.app_label, "permission")
self.assertEqual(instance.model, Article)
# backward reference
self.assertEqual(Article._permission_handler, instance)
def test_constructor_with_app_label(self):
instance = self.handler("permission")
self.assertEqual(instance.app_label, "permission")
self.assertEqual(instance.model, None)
def test__get_app_perms_with_app_label(self):
instance = self.handler("permission")
perms = instance._get_app_perms()
self.assertEqual(
perms,
set(
[
"permission.add_article",
"permission.change_article",
"permission.delete_article",
"permission.add_bridge",
"permission.change_bridge",
"permission.delete_bridge",
]
),
)
def test__get_app_perms_with_model(self):
instance = self.handler(Article)
perms = instance._get_app_perms()
self.assertEqual(
perms,
set(
[
"permission.add_article",
"permission.change_article",
"permission.delete_article",
"permission.add_bridge",
"permission.change_bridge",
"permission.delete_bridge",
]
),
)
def test__get_model_perms(self):
instance = self.handler(Article)
perms = instance._get_model_perms()
self.assertEqual(
perms,
set(
[
"permission.add_article",
"permission.change_article",
"permission.delete_article",
]
),
)
def test_get_supported_permissions(self):
instance = self.handler(Article)
perms = instance.get_supported_permissions()
self.assertEqual(
perms,
set(
[
"permission.add_article",
"permission.change_article",
"permission.delete_article",
]
),
)
def test_get_supported_permissions_with_includes(self):
instance = self.handler(Article)
instance.includes = [
"permission.add_article",
"permission.change_article",
]
perms = instance.get_supported_permissions()
self.assertEqual(
perms,
set(
[
"permission.add_article",
"permission.change_article",
]
),
)
def test_get_supported_permissions_with_includes_change(self):
instance = self.handler(Article)
instance.includes = [
"permission.add_article",
"permission.change_article",
]
instance.get_supported_permissions()
instance.includes = [
"permission.change_article",
]
perms = instance.get_supported_permissions()
self.assertEqual(
perms,
set(
[
"permission.change_article",
]
),
)
def test_get_supported_permissions_with_excludes(self):
instance = self.handler(Article)
instance.excludes = [
"permission.add_article",
]
perms = instance.get_supported_permissions()
self.assertEqual(
perms,
set(
[
"permission.change_article",
"permission.delete_article",
]
),
)
def test_get_supported_permissions_with_excludes_change(self):
instance = self.handler(Article)
instance.excludes = [
"permission.add_article",
]
instance.get_supported_permissions()
instance.excludes = []
perms = instance.get_supported_permissions()
self.assertEqual(
perms,
set(
[
"permission.add_article",
"permission.change_article",
"permission.delete_article",
]
),
)
def test_get_supported_app_labels(self):
instance = self.handler(Article)
app_labels = instance.get_supported_app_labels()
self.assertEqual(
app_labels,
set(
[
"permission",
]
),
)
def test_get_supported_app_labels_with_includes(self):
instance = self.handler(Article)
instance.includes = [
"permission.add_article",
"permission.change_article",
]
app_labels = instance.get_supported_app_labels()
self.assertEqual(
app_labels,
set(
[
"permission",
]
),
)
def test_get_supported_app_labels_with_includes_change(self):
instance = self.handler(Article)
instance.includes = [
"permission.add_article",
"permission.change_article",
]
instance.get_supported_app_labels()
instance.includes = [
"permission.change_article",
]
app_labels = instance.get_supported_app_labels()
self.assertEqual(
app_labels,
set(
[
"permission",
]
),
)
def test_get_supported_app_labels_with_excludes(self):
instance = self.handler(Article)
instance.excludes = [
"permission.add_article",
]
app_labels = instance.get_supported_app_labels()
self.assertEqual(
app_labels,
set(
[
"permission",
]
),
)
def test_get_supported_app_labels_with_excludes_change(self):
instance = self.handler(Article)
instance.excludes = [
"permission.add_article",
]
instance.get_supported_app_labels()
instance.excludes = []
app_labels = instance.get_supported_app_labels()
self.assertEqual(
app_labels,
set(
[
"permission",
]
),
)
    def test_has_perm_add_without_obj(self):
        instance = self.handler(Article)
        self.assertRaises(NotImplementedError, instance.has_perm, self.user, self.perm1)
    def test_has_perm_change_without_obj(self):
        instance = self.handler(Article)
        self.assertRaises(NotImplementedError, instance.has_perm, self.user, self.perm2)
    def test_has_perm_delete_without_obj(self):
        instance = self.handler(Article)
        self.assertRaises(NotImplementedError, instance.has_perm, self.user, self.perm3)
    def test_has_perm_add_with_obj(self):
        instance = self.handler(Article)
        self.assertRaises(
            NotImplementedError, instance.has_perm, self.user, self.perm1, self.article
        )
    def test_has_perm_change_with_obj(self):
        instance = self.handler(Article)
        self.assertRaises(
            NotImplementedError, instance.has_perm, self.user, self.perm2, self.article
        )
    def test_has_perm_delete_with_obj(self):
        instance = self.handler(Article)
        self.assertRaises(
            NotImplementedError, instance.has_perm, self.user, self.perm3, self.article
        )
def test_has_module_perms_success(self):
instance = self.handler(Article)
user = MagicMock()
user.has_perm.return_value = True
self.assertTrue(instance.has_module_perms(user, "permission"))
self.assertTrue(user.has_perm.called)
def test_has_module_perms_fail(self):
instance = self.handler(Article)
user = MagicMock()
user.has_perm.return_value = True
self.assertFalse(instance.has_module_perms(user, "unknown"))
self.assertFalse(user.has_perm.called)
@override_settings(
PERMISSION_DEFAULT_PERMISSION_HANDLER=LogicalPermissionHandler,
)
class PermissionLogicalPermissionHandlerTestCase(TestCase):
def setUp(self):
# make sure all caches are removed
Article._permission_logics = set()
self.handler = LogicalPermissionHandler
self.user = create_user("john")
self.perm1 = "permission.add_article"
self.perm2 = "permission.change_article"
self.perm3 = "permission.delete_article"
self.article = create_article("test")
from permission.logics import PermissionLogic
from permission import add_permission_logic
self.mock_logic1 = MagicMock(spec=PermissionLogic)
self.mock_logic1.has_perm = MagicMock(return_value=False)
self.mock_logic2 = MagicMock(spec=PermissionLogic)
self.mock_logic2.has_perm = MagicMock(return_value=False)
add_permission_logic(Article, self.mock_logic1)
add_permission_logic(Article, self.mock_logic2)
def test_constructor_with_app_label(self):
self.assertRaises(AttributeError, self.handler, "permission")
def test_has_perm_non_related_permission(self):
instance = self.handler(Article)
instance.get_supported_permissions = MagicMock(
return_value=[
"permission.add_article",
"permission.change_article",
"permission.delete_article",
]
)
self.assertFalse(instance.has_perm(self.user, "unknown"))
self.assertFalse(instance.has_perm(self.user, "unknown", self.article))
def test_has_perm_permission_logics_called(self):
instance = self.handler(Article)
instance.get_supported_permissions = MagicMock(
return_value=[
"permission.add_article",
"permission.change_article",
"permission.delete_article",
]
)
self.assertFalse(self.mock_logic1.has_perm.called)
self.assertFalse(self.mock_logic2.has_perm.called)
self.assertFalse(instance.has_perm(self.user, "permission.add_article"))
self.assertTrue(self.mock_logic1.has_perm.called)
self.assertTrue(self.mock_logic2.has_perm.called)
self.assertEqual(self.mock_logic1.has_perm.call_count, 1)
self.assertEqual(self.mock_logic2.has_perm.call_count, 1)
        # the permission check should be cached, so `has_perm` should not
        # be called twice for the same user instance
self.assertFalse(instance.has_perm(self.user, "permission.add_article"))
self.assertEqual(self.mock_logic1.has_perm.call_count, 1)
self.assertEqual(self.mock_logic2.has_perm.call_count, 1)
|
SHELLPY_PARAMS = "SHELLPY_PARAMS"
|
#!/usr/bin/env python
import sys
import os
def make_index(scene_root, scene_dir, verbose=False):
title = scene_root
files = ""
for filename in os.listdir(scene_dir):
if "thumb" in filename:
continue
if filename == "index.html":
continue
filesize = os.path.getsize(os.path.join(scene_dir, filename))
if filesize > 100000:
nice_size = " (%.1fMB)" % (filesize / 1048576.0)
else:
nice_size = " (%.1fKB)" % (filesize / 1024.0)
files += '<li><a href="%s">%s</a>%s</li>\n' % (filename, filename, nice_size)
    src_dir = os.path.dirname(__file__)
    with open(os.path.join(src_dir, "index_template.html")) as f:
        doc = f.read()
    doc = doc.replace("@@@TITLE@@@", title)
    doc = doc.replace("@@@FILES@@@", files)
    doc = doc.replace("@@@SCENE_ROOT@@@", scene_root)
    with open(os.path.join(scene_dir, "index.html"), "w") as f:
        f.write(doc)
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Usage: scene_index_maker.py <scene_root> <scene_dir_path>")
sys.exit(1)
make_index(sys.argv[1], sys.argv[2])
|
import bh_plugin
import re
import sublime
class BracketRemove(bh_plugin.BracketPluginCommand):
def decrease_indent_level(self, edit, row_first, row_last):
"""
Decrease indent level on removal
"""
tab_size = self.view.settings().get("tab_size", 4)
indents = re.compile(
r"^(?:\t| {%d}| *)((?:\t| {%d}| )*)([\s\S]*)" % (tab_size, tab_size)
)
if not self.single_line:
for x in reversed(list(range(row_first, row_last + 1))):
line = self.view.full_line(self.view.text_point(x, 0))
text = self.view.substr(line)
m = indents.match(text)
if m:
self.view.replace(edit, line, m.group(1) + m.group(2))
def run(
self, edit, name, remove_content=False, remove_indent=False, remove_block=False
):
"""
Remove the given bracket and adjust its indentation if desired
"""
if remove_content:
self.view.replace(edit, sublime.Region(self.left.begin, self.right.end), "")
else:
row_first = self.view.rowcol(self.left.end)[0] + 1
row_last = self.view.rowcol(self.right.begin)[0] - 1
self.single_line = not row_first <= row_last
if remove_block and not self.single_line:
self.view.replace(edit, self.view.full_line(self.right.toregion()), "")
else:
self.view.replace(edit, self.right.toregion(), "")
if remove_indent:
self.decrease_indent_level(edit, row_first, row_last)
if remove_block and not self.single_line:
self.view.replace(edit, self.view.full_line(self.left.toregion()), "")
else:
self.view.replace(edit, self.left.toregion(), "")
self.left = None
self.right = None
self.nobracket = True
def plugin():
return BracketRemove
|
import sublime, sublime_plugin
class EchoCommand(sublime_plugin.ApplicationCommand):
def run(self, **kwargs):
        print(kwargs)
|
class ClientException(Exception):
"""If a client could not fetch information"""
def __str__(self):
return self.args[0]
|
import threading
class BackgroundDownloader(threading.Thread):
"""
Downloads information from one or more URLs in the background.
Normal usage is to use one BackgroundDownloader per domain name.
:param settings:
A dict containing at least the following fields:
`cache_length`,
`debug`,
`timeout`,
`user_agent`,
`http_proxy`,
`https_proxy`,
`proxy_username`,
`proxy_password`
:param providers:
An array of providers that can download the URLs
"""
def __init__(self, settings, providers):
self.settings = settings
self.urls = []
self.providers = providers
self.used_providers = {}
threading.Thread.__init__(self)
def add_url(self, url):
"""
Adds a URL to the list to download
:param url:
The URL to download info about
"""
self.urls.append(url)
def get_provider(self, url):
"""
Returns the provider for the URL specified
:param url:
The URL to return the provider for
:return:
The provider object for the URL
"""
return self.used_providers.get(url)
    def run(self):
        for url in self.urls:
            for provider_class in self.providers:
                if provider_class.match_url(url):
                    provider = provider_class(url, self.settings)
                    break
            else:
                # no provider matched this URL; skip it rather than
                # falling through with a stale `provider` binding
                continue
            provider.prefetch()
            self.used_providers[url] = provider
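# Usage sketch: one downloader per domain, per the class docstring. The
# provider type here is hypothetical; a real provider must offer
# match_url(url), prefetch() and the (url, settings) constructor used above.
def _background_downloader_example(settings, repo_provider_cls):
    downloader = BackgroundDownloader(settings, [repo_provider_cls])
    downloader.add_url('https://example.com/repo.json')
    downloader.start()   # downloading happens on the background thread
    downloader.join()    # block until all URLs have been prefetched
    return downloader.get_provider('https://example.com/repo.json')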
|
import os
import time
import sublime
from .open_compat import open_compat, read_compat
class HttpCache(object):
"""
A data store for caching HTTP response data.
"""
def __init__(self, ttl):
self.base_path = os.path.join(sublime.packages_path(), 'User', 'Package Control.cache')
if not os.path.exists(self.base_path):
os.mkdir(self.base_path)
self.clear(int(ttl))
def clear(self, ttl):
"""
Removes all cache entries older than the TTL
:param ttl:
The number of seconds a cache entry should be valid for
"""
ttl = int(ttl)
for filename in os.listdir(self.base_path):
path = os.path.join(self.base_path, filename)
            # There should not be any folders in the cache dir, but we
            # skip them to avoid an exception
if os.path.isdir(path):
continue
mtime = os.stat(path).st_mtime
if mtime < time.time() - ttl:
os.unlink(path)
def get(self, key):
"""
Returns a cached value
:param key:
The key to fetch the cache for
:return:
The (binary) cached value, or False
"""
cache_file = os.path.join(self.base_path, key)
if not os.path.exists(cache_file):
return False
with open_compat(cache_file, 'rb') as f:
return read_compat(f)
def has(self, key):
cache_file = os.path.join(self.base_path, key)
return os.path.exists(cache_file)
def path(self, key):
"""
Returns the filesystem path to the key
:param key:
The key to get the path for
:return:
The absolute filesystem path to the cache file
"""
return os.path.join(self.base_path, key)
def set(self, key, content):
"""
Saves a value in the cache
:param key:
The key to save the cache with
:param content:
The (binary) content to cache
"""
cache_file = os.path.join(self.base_path, key)
with open_compat(cache_file, 'wb') as f:
f.write(content)
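# Usage sketch: cache the body of an HTTP response for an hour. Hashing the
# URL into a key is the caller's convention (an assumption, not enforced here).
def _http_cache_example(url, body):
    import hashlib
    cache = HttpCache(ttl=3600)  # construction also evicts stale entries
    key = hashlib.md5(url.encode('utf-8')).hexdigest()
    if not cache.has(key):
        cache.set(key, body)     # body must be bytes; files are opened 'wb'
    return cache.get(key)        # bytes, or False on a miss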
|
import sublime
class ThreadProgress():
"""
Animates an indicator, [= ], in the status area while a thread runs
:param thread:
The thread to track for activity
:param message:
The message to display next to the activity indicator
:param success_message:
The message to display once the thread is complete
"""
def __init__(self, thread, message, success_message):
self.thread = thread
self.message = message
self.success_message = success_message
self.addend = 1
self.size = 8
sublime.set_timeout(lambda: self.run(0), 100)
def run(self, i):
if not self.thread.is_alive():
if hasattr(self.thread, 'result') and not self.thread.result:
sublime.status_message('')
return
sublime.status_message(self.success_message)
return
before = i % self.size
after = (self.size - 1) - before
sublime.status_message('%s [%s=%s]' % \
(self.message, ' ' * before, ' ' * after))
if not after:
self.addend = -1
if not before:
self.addend = 1
i += self.addend
sublime.set_timeout(lambda: self.run(i), 100)
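# Usage sketch: animate progress for a worker thread. Setting `result` on
# the thread is optional; a falsy result suppresses the success message,
# as handled in run() above.
def _thread_progress_example():
    import threading
    import time
    class _Worker(threading.Thread):
        def run(self):
            time.sleep(2)
            self.result = True
    worker = _Worker()
    worker.start()
    ThreadProgress(worker, 'Working', 'Work complete')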
|
"""
This package contains implementations of various streaming corpus I/O formats.
"""
# bring corpus classes directly into package namespace, to save some typing
from .indexedcorpus import IndexedCorpus # must appear before the other classes
from .mmcorpus import MmCorpus
from .bleicorpus import BleiCorpus
from .svmlightcorpus import SvmLightCorpus
from .lowcorpus import LowCorpus
from .dictionary import Dictionary
from .hashdictionary import HashDictionary
from .wikicorpus import WikiCorpus
from .textcorpus import TextCorpus
from .ucicorpus import UciCorpus
from .malletcorpus import MalletCorpus
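# Illustrative round-trip with one of the re-exported classes (MmCorpus's
# serialize/iteration API is standard gensim; the file path is an example):
#
#   from gensim import corpora
#   corpora.MmCorpus.serialize('/tmp/corpus.mm', [[(0, 1.0)], [(1, 2.0)]])
#   corpus = corpora.MmCorpus('/tmp/corpus.mm')   # streamed, not in-memory
#   for document in corpus:
#       print(document)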
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
USAGE: %(program)s SIZE_OF_JOBS_QUEUE
Dispatcher process which orchestrates distributed LSI computations. Run this \
script only once, on any node in your cluster.
Example: python -m gensim.models.lsi_dispatcher
"""
import os, sys, logging, threading, time
from queue import Queue
from gensim import utils
logger = logging.getLogger("gensim.models.lsi_dispatcher")
# How many jobs (=chunks of N documents) to keep "pre-fetched" in a queue?
# A small number is usually enough, unless iteration over the corpus is very very
# slow (slower than the actual computation of LSI), in which case you can override
# this value from command line. ie. run "python ./lsi_dispatcher.py 100"
MAX_JOBS_QUEUE = 10
# timeout for the Queue object put/get blocking methods.
# it should really be infinity, but then keyboard interrupts don't work.
# so this is really just a hack, see http://bugs.python.org/issue1360
HUGE_TIMEOUT = 365 * 24 * 60 * 60 # one year
class Dispatcher(object):
"""
Dispatcher object that communicates and coordinates individual workers.
There should never be more than one dispatcher running at any one time.
"""
def __init__(self, maxsize=0):
"""
Note that the constructor does not fully initialize the dispatcher;
use the `initialize()` function to populate it with workers etc.
"""
self.maxsize = maxsize
self.workers = {}
self.callback = None # a pyro proxy to this object (unknown at init time, but will be set later)
def initialize(self, **model_params):
"""
`model_params` are parameters used to initialize individual workers (gets
handed all the way down to worker.initialize()).
"""
self.jobs = Queue(maxsize=self.maxsize)
self.lock_update = threading.Lock()
self._jobsdone = 0
self._jobsreceived = 0
# locate all available workers and store their proxies, for subsequent RMI calls
self.workers = {}
with utils.getNS() as ns:
import Pyro4
self.callback = Pyro4.Proxy("PYRONAME:gensim.lsi_dispatcher") # = self
self.callback._pyroOneway.add(
"jobdone"
) # make sure workers transfer control back to dispatcher asynchronously
for name, uri in ns.list(prefix="gensim.lsi_worker").items():
try:
worker = Pyro4.Proxy(uri)
workerid = len(self.workers)
# make time consuming methods work asynchronously
worker._pyroOneway.add("requestjob")
worker._pyroOneway.add("exit")
logger.info("registering worker #%i from %s" % (workerid, uri))
worker.initialize(
workerid, dispatcher=self.callback, **model_params
)
self.workers[workerid] = worker
except Pyro4.errors.PyroError:
logger.exception(
"unresponsive worker at %s, deleting it from the name server"
% uri
)
ns.remove(name)
if not self.workers:
raise RuntimeError(
"no workers found; run some lsi_worker scripts on your machines first!"
)
def getworkers(self):
"""
Return pyro URIs of all registered workers.
"""
return [worker._pyroUri for worker in self.workers.values()]
def getjob(self, worker_id):
logger.info("worker #%i requesting a new job" % worker_id)
job = self.jobs.get(block=True, timeout=1)
logger.info(
"worker #%i got a new job (%i left)" % (worker_id, self.jobs.qsize())
)
return job
def putjob(self, job):
self._jobsreceived += 1
self.jobs.put(job, block=True, timeout=HUGE_TIMEOUT)
logger.info("added a new job (len(queue)=%i items)" % self.jobs.qsize())
def getstate(self):
"""
Merge projections from across all workers and return the final projection.
"""
logger.info("end of input, assigning all remaining jobs")
logger.debug(
"jobs done: %s, jobs received: %s" % (self._jobsdone, self._jobsreceived)
)
while self._jobsdone < self._jobsreceived:
time.sleep(0.5) # check every half a second
# TODO: merge in parallel, so that we're done in `log_2(workers)` merges,
# and not `workers - 1` merges!
# but merging only takes place once, after all input data has been processed,
# so the overall effect would be small... compared to the amount of coding :-)
logger.info("merging states from %i workers" % len(self.workers))
workers = list(self.workers.items())
result = workers[0][1].getstate()
for workerid, worker in workers[1:]:
logger.info("pulling state from worker %s" % workerid)
result.merge(worker.getstate())
logger.info("sending out merged projection")
return result
def reset(self):
"""
Initialize all workers for a new decomposition.
"""
for workerid, worker in self.workers.items():
logger.info("resetting worker %s" % workerid)
worker.reset()
worker.requestjob()
self._jobsdone = 0
self._jobsreceived = 0
@utils.synchronous("lock_update")
def jobdone(self, workerid):
"""
A worker has finished its job. Log this event and then asynchronously
transfer control back to the worker.
        In this way, control flow basically oscillates between
        dispatcher.jobdone() and worker.requestjob().
"""
self._jobsdone += 1
logger.info("worker #%s finished job #%i" % (workerid, self._jobsdone))
worker = self.workers[workerid]
worker.requestjob() # tell the worker to ask for another job, asynchronously (one-way)
def jobsdone(self):
"""Wrap self._jobsdone, needed for remote access through proxies"""
return self._jobsdone
def exit(self):
"""
Terminate all registered workers and then the dispatcher.
"""
for workerid, worker in self.workers.items():
logger.info("terminating worker %s" % workerid)
worker.exit()
logger.info("terminating dispatcher")
os._exit(0) # exit the whole process (not just this thread ala sys.exit())
# endclass Dispatcher
def main():
logging.basicConfig(
format="%(asctime)s : %(levelname)s : %(message)s", level=logging.INFO
)
logger.info("running %s" % " ".join(sys.argv))
program = os.path.basename(sys.argv[0])
    # the queue-size argument is optional; print usage if it is not numeric
    if len(sys.argv) > 1 and not sys.argv[1].isdigit():
        print(globals()["__doc__"] % locals())
        sys.exit(1)
    if len(sys.argv) < 2:
        maxsize = MAX_JOBS_QUEUE
    else:
        maxsize = int(sys.argv[1])
utils.pyro_daemon("gensim.lsi_dispatcher", Dispatcher(maxsize=maxsize))
logger.info("finished running %s" % program)
if __name__ == "__main__":
main()
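# Typical cluster bring-up, as a sketch (node counts are examples; see the
# gensim distributed-computing docs for the authoritative recipe):
#
#   python -m Pyro4.naming &                  # start a Pyro name server
#   python -m gensim.models.lsi_worker &      # repeat on every worker node
#   python -m gensim.models.lsi_dispatcher &  # run once, on any node
#   # then build the model with distributed=True, e.g.:
#   # lsi = LsiModel(corpus, id2word=id2word, distributed=True)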
|
import numpy as np
from utils import env_paths as paths
from base import Train
import time
class TrainModel(Train):
def __init__(
self, model, output_freq=1, pickle_f_custom_freq=None, f_custom_eval=None
):
super(TrainModel, self).__init__(model, pickle_f_custom_freq, f_custom_eval)
self.output_freq = output_freq
def train_model(
self,
f_train,
train_args,
f_test,
test_args,
f_validate,
validation_args,
n_train_batches=600,
n_valid_batches=1,
n_test_batches=1,
n_epochs=100,
anneal=None,
):
self.write_to_logger("### MODEL PARAMS ###")
self.write_to_logger(self.model.model_info())
self.write_to_logger("### TRAINING PARAMS ###")
self.write_to_logger(
"Train -> %s: %s"
% (
";".join(list(train_args["inputs"].keys())),
str(list(train_args["inputs"].values())),
)
)
self.write_to_logger(
"Test -> %s: %s"
% (
";".join(list(test_args["inputs"].keys())),
str(list(test_args["inputs"].values())),
)
)
if anneal is not None:
for t in anneal:
key, freq, rate, min_val = t
self.write_to_logger(
"Anneal %s %0.4f after %i epochs with minimum value %f."
% (key, rate, int(freq), min_val)
)
self.write_to_logger("### TRAINING MODEL ###")
if self.custom_eval_func is not None:
self.custom_eval_func(
self.model, paths.get_custom_eval_path(0, self.model.root_path)
)
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch += 1
start_time = time.time()
train_outputs = []
for i in range(n_train_batches):
train_output = f_train(i, *list(train_args["inputs"].values()))
train_outputs.append(train_output)
self.eval_train[epoch] = np.mean(np.array(train_outputs), axis=0)
self.model.after_epoch()
end_time = time.time() - start_time
if anneal is not None:
for t in anneal:
key, freq, rate, min_val = t
new_val = train_args["inputs"][key] * rate
if new_val < min_val:
train_args["inputs"][key] = min_val
elif epoch % freq == 0:
train_args["inputs"][key] = new_val
if epoch % self.output_freq == 0:
if n_test_batches == 1:
self.eval_test[epoch] = f_test(*list(test_args["inputs"].values()))
else:
test_outputs = []
for i in range(n_test_batches):
test_output = f_test(i, *list(test_args["inputs"].values()))
test_outputs.append(test_output)
self.eval_test[epoch] = np.mean(np.array(test_outputs), axis=0)
if f_validate is not None:
if n_valid_batches == 1:
self.eval_validation[epoch] = f_validate(
*list(validation_args["inputs"].values())
)
else:
valid_outputs = []
for i in range(n_valid_batches):
valid_output = f_validate(
i, *list(validation_args["inputs"].values())
)
valid_outputs.append(valid_output)
self.eval_validation[epoch] = np.mean(
np.array(valid_outputs), axis=0
)
else:
self.eval_validation[epoch] = [0.0] * len(
list(validation_args["outputs"].keys())
)
# Formatting the output string from the generic and the user-defined values.
output_str = "epoch=%0" + str(len(str(n_epochs))) + "i; time=%0.2f;"
output_str %= (epoch, end_time)
def concatenate_output_str(out_str, d):
for k, v in zip(list(d.keys()), list(d.values())):
out_str += " %s=%s;" % (k, v)
return out_str
output_str = concatenate_output_str(output_str, train_args["outputs"])
output_str = concatenate_output_str(output_str, test_args["outputs"])
output_str = concatenate_output_str(
output_str, validation_args["outputs"]
)
outputs = [float(o) for o in self.eval_train[epoch]]
outputs += [float(o) for o in self.eval_test[epoch]]
outputs += [float(o) for o in self.eval_validation[epoch]]
output_str %= tuple(outputs)
self.write_to_logger(output_str)
if (
self.pickle_f_custom_freq is not None
and epoch % self.pickle_f_custom_freq == 0
):
if self.custom_eval_func is not None:
self.custom_eval_func(
self.model,
paths.get_custom_eval_path(epoch, self.model.root_path),
)
self.plot_eval(
self.eval_train, list(train_args["outputs"].keys()), "_train"
)
self.plot_eval(
self.eval_test, list(test_args["outputs"].keys()), "_test"
)
self.plot_eval(
self.eval_validation,
list(validation_args["outputs"].keys()),
"_validation",
)
self.dump_dicts()
self.model.dump_model()
if self.pickle_f_custom_freq is not None:
self.model.dump_model()
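# Usage sketch: the anneal tuples follow the (key, freq, rate, min_val)
# unpacking in train_model() above. The compiled functions and argument
# dicts are assumptions about the caller's setup.
def _train_model_example(model, f_train, train_args, f_test, test_args):
    trainer = TrainModel(model, output_freq=1)
    trainer.train_model(
        f_train, train_args, f_test, test_args,
        f_validate=None, validation_args={"inputs": {}, "outputs": {}},
        n_train_batches=600, n_epochs=100,
        # multiply the 'learningrate' input by 0.75 every 10 epochs,
        # never dropping below 1e-5
        anneal=[("learningrate", 10, 0.75, 1e-5)],
    )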
|
"""Tests for the accumulate module."""
import unittest
import random
from anytop import accumulate
# NB: the call targets in this file were stripped; the FloatRange,
# NumericAccumulator, Accumulator and WindowAccumulator names and their
# methods are reconstructed from the test-class names and the surviving
# expected values, and should be treated as assumptions.
class FloatRangeTestCase(unittest.TestCase):
    def test_range(self):
        r = accumulate.FloatRange(0, 2, 2)
        self.assertEqual(list(r), [(0.0, 1.0), (1.0, 2.0)])
        self.assertEqual(len(r), 2)
    def test_get_bin(self):
        r = accumulate.FloatRange(0, 2, 2)
        self.assertEqual(r.get_bin(-0.1), None)
        self.assertEqual(r.get_bin(0.0), (0.0, 1.0))
        self.assertEqual(r.get_bin(1e-05), (0.0, 1.0))
        self.assertEqual(r.get_bin(0.99999), (0.0, 1.0))
        self.assertEqual(r.get_bin(1.0), (1.0, 2.0))
        self.assertEqual(r.get_bin(1.00001), (1.0, 2.0))
        self.assertEqual(r.get_bin(1.99999), (1.0, 2.0))
        self.assertEqual(r.get_bin(2.0), None)
        self.assertEqual(r.get_bin(2.1), None)
    def test_get_bin_2(self):
        frange = accumulate.FloatRange(1, 7, 6)
        self.assertEqual(list(frange), [(1.0, 2.0), (2.0, 3.0), (3.0, 4.0), (4.0, 5.0), (5.0, 6.0), (6.0, 7.0)])
        self.assertEqual(frange.get_bin(0), None)
        self.assertEqual(frange.get_bin(1), (1.0, 2.0))
        self.assertEqual(frange.get_bin(2), (2.0, 3.0))
        self.assertEqual(frange.get_bin(3), (3.0, 4.0))
        self.assertEqual(frange.get_bin(4), (4.0, 5.0))
        self.assertEqual(frange.get_bin(5), (5.0, 6.0))
        self.assertEqual(frange.get_bin(6), (6.0, 7.0))
        self.assertEqual(frange.get_bin(7), None)
class NumericAccumulatorTestCase(unittest.TestCase):
    def test_dist(self):
        data = [0, 1, 1, 2, 3, 3, 3, 4, 6, 6, 6, 7, 7, 8]
        random.shuffle(data)
        frange = accumulate.FloatRange(1, 7, 6)
        self.assertEqual(frange.end, 7.0)
        self.assertEqual(frange.get_bin(6), (6.0, 7.0))
        acc = accumulate.NumericAccumulator()
        for x in data:
            acc.add(x)
        self.assertEqual(len(acc), len(data))
        dist = acc.get_dist(frange)
        expected_dist = {None: 4, (1.0, 2.0): 2, (2.0, 3.0): 1, (3.0, 4.0): 3, (4.0, 5.0): 1, (5.0, 6.0): 0, (6.0, 7.0): 3}
        for k in expected_dist:
            self.assertEqual(dist[k], expected_dist[k])
        self.assertEqual(sum(dist.values()), sum(expected_dist.values()))
class AccumulatorTestCase(unittest.TestCase):
    def test_basic(self):
        data = ['a', 'a', 'b', 'c', 'a']
        acc = accumulate.Accumulator()
        for x in data:
            acc.add(x)
        expected_dist = {'a': 3, 'b': 1, 'c': 1}
        dist = acc.get_dist()
        for k in expected_dist:
            self.assertEqual(dist[k], expected_dist[k])
class WindowAccumulatorTestCase(unittest.TestCase):
    def test_basic(self):
        acc = accumulate.WindowAccumulator(3)
        for x in ['a', 'a', 'b']:
            acc.add(x)
        dist = acc.get_dist()
        self.assertEqual(dist['a'], 2)
        self.assertEqual(dist['b'], 1)
        acc.add('c')
        acc.add('b')
        dist = acc.get_dist()
        self.assertEqual(dist['b'], 2)
        self.assertEqual(dist['c'], 1)
def suite():
    return unittest.TestSuite((
        unittest.makeSuite(FloatRangeTestCase),
        unittest.makeSuite(NumericAccumulatorTestCase),
    ))
if __name__ == '__main__':
    unittest.main()
|
import numpy as np
from theano import function
import theano.tensor as T
# NB: the call targets in this file were stripped; the Theano and numpy
# expressions below are reconstructed from the surviving docstrings and
# fragments, and should be treated as a best-effort restoration.
def make_tensor(dim):
    """
    Returns a new Theano tensor with no broadcastable dimensions.
    dim: the total number of dimensions of the tensor.
    """
    return T.TensorType(broadcastable=(False,) * dim, dtype='float64')()
def broadcasted_add(a, b):
    """
    a: a 3D theano tensor
    b: a 4D theano tensor
    Returns c, a 4D theano tensor, where

    c[i, j, k, l] = a[l, k, i] + b[i, j, k, l]

    for all i, j, k, l
    """
    return a.dimshuffle(2, 'x', 1, 0) + b
def partial_max(a):
    """
    a: a 4D theano tensor

    Returns b, a theano matrix, where

    b[i, j] = max_{k,l} a[i, k, l, j]

    for all i, j
    """
    return a.max(axis=(1, 2))
if __name__ == '__main__':
    a = make_tensor(3)
    b = make_tensor(4)
    c = broadcasted_add(a, b)
    d = partial_max(c)
    f = function([a, b], d)
    rng = np.random.RandomState([1, 2, 3])
    a_value = rng.randn(2, 2, 2).astype(a.dtype)
    b_value = rng.randn(2, 2, 2, 2).astype(b.dtype)
    c_value = np.transpose(a_value, (2, 1, 0))[:, None, :, :] + b_value
    expected = c_value.max(axis=(1, 2))
    actual = f(a_value, b_value)
    if not np.allclose(actual, expected):
        raise AssertionError((actual, expected))
    print('SUCCESS!')
|
from theano import tensor
def sequence_map(fn, input_state, mask=None):
batch_size = input_state.shape[1]
n_timesteps = input_state.shape[0]
input_dim = input_state.shape[2]
if mask is not None:
idx_mask = mask.nonzero()
input_state = input_state[idx_mask[0], idx_mask[1]]
else:
input_state = input_state.reshape((n_timesteps * batch_size, input_dim))
output_state_flat = fn(input_state)
output_dim = output_state_flat.shape[1]
output_state = tensor.zeros((n_timesteps, batch_size, output_dim))
if mask is not None:
output_state = tensor.inc_subtensor(
output_state[idx_mask[0], idx_mask[1]], output_state_flat
)
output_state = (output_state, mask)
else:
output_state = output_state_flat.reshape((n_timesteps, batch_size, output_dim))
return output_state
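# Usage sketch: run a per-timestep transformation over a padded batch of
# sequences (W is an assumed shared weight matrix). When a mask is given,
# the helper returns a (state, mask) pair, matching the masked branch above.
def _sequence_map_example(W):
    inputs = tensor.tensor3('inputs')  # (time, batch, input_dim)
    mask = tensor.matrix('mask')       # (time, batch); 1 marks real steps
    hidden, hidden_mask = sequence_map(
        lambda x: tensor.tanh(x.dot(W)), inputs, mask=mask
    )
    return hidden, hidden_mask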
|
# coding: utf-8
from email.utils import getaddresses
from .compat import (
string_types,
is_callable,
formataddr as compat_formataddr,
to_unicode,
to_native,
)
from .utils import (
SafeMIMEText,
SafeMIMEMultipart,
sanitize_address,
parse_name_and_email,
load_email_charsets,
encode_header as encode_header_,
renderable,
format_date_header,
parse_name_and_email_list,
cached_property,
)
from .exc import BadHeaderError
from .backend import ObjectFactory, SMTPBackend
from .store import MemoryFileStore, BaseFile
from .signers import DKIMSigner
load_email_charsets() # sic!
class BaseMessage(object):
"""
Base email message with html part, text part and attachments.
"""
attachment_cls = BaseFile
filestore_cls = MemoryFileStore
policy = None
def __init__(
self,
charset=None,
message_id=None,
date=None,
subject=None,
mail_from=None,
mail_to=None,
headers=None,
html=None,
text=None,
attachments=None,
cc=None,
bcc=None,
):
self._attachments = None
        self.charset = charset or "utf-8"  # utf-8 is the de-facto standard
self._message_id = message_id
self.set_subject(subject)
self.set_date(date)
self.set_mail_from(mail_from)
self.set_mail_to(mail_to)
self.set_cc(cc)
self.set_bcc(bcc)
self.set_headers(headers)
self.set_html(html=html)
self.set_text(text=text)
self.render_data = {}
if attachments:
for a in attachments:
self.attachments.add(a)
def set_mail_from(self, mail_from):
# In: ('Alice', '<alice@me.com>' )
self._mail_from = mail_from and parse_name_and_email(mail_from) or None
def get_mail_from(self):
# Out: ('Alice', '<alice@me.com>') or None
return self._mail_from
mail_from = property(get_mail_from, set_mail_from)
def set_mail_to(self, mail_to):
self._mail_to = parse_name_and_email_list(mail_to)
def get_mail_to(self):
return self._mail_to
mail_to = property(get_mail_to, set_mail_to)
def set_cc(self, addr):
self._cc = parse_name_and_email_list(addr)
def get_cc(self):
return self._cc
cc = property(get_cc, set_cc)
def set_bcc(self, addr):
self._bcc = parse_name_and_email_list(addr)
def get_bcc(self):
return self._bcc
bcc = property(get_bcc, set_bcc)
def get_recipients_emails(self):
"""
Returns message recipient's emails for actual sending.
:return: list of emails
"""
return list(
set(
[a[1] for a in self._mail_to]
+ [a[1] for a in self._cc]
+ [a[1] for a in self._bcc]
)
)
def set_headers(self, headers):
self._headers = headers or {}
def set_html(self, html, url=None):
if hasattr(html, "read"):
html = html.read()
self._html = html
self._html_url = url
def get_html(self):
return self._html
html = property(get_html, set_html)
def set_text(self, text, url=None):
if hasattr(text, "read"):
text = text.read()
self._text = text
self._text_url = url
def get_text(self):
return self._text
text = property(get_text, set_text)
@property
@renderable
def html_body(self):
return self._html
@property
@renderable
def text_body(self):
return self._text
def set_subject(self, value):
self._subject = value
@renderable
def get_subject(self):
return self._subject
subject = property(get_subject, set_subject)
def render(self, **kwargs):
self.render_data = kwargs
def set_date(self, value):
self._date = value
def get_date(self):
v = self._date
if v is False:
return None
if is_callable(v):
v = v()
if not isinstance(v, string_types):
v = format_date_header(v)
return v
date = property(get_date, set_date)
message_date = date
@property
def message_id(self):
mid = self._message_id
if mid is False:
return None
return is_callable(mid) and mid() or mid
@message_id.setter
def message_id(self, value):
self._message_id = value
@property
def attachments(self):
if self._attachments is None:
self._attachments = self.filestore_cls(self.attachment_cls)
return self._attachments
def attach(self, **kwargs):
if "content_disposition" not in kwargs:
kwargs["content_disposition"] = "attachment"
self.attachments.add(kwargs)
class MessageBuildMixin(object):
ROOT_PREAMBLE = "This is a multi-part message in MIME format.\n"
# Header names that contain structured address data (RFC #5322)
ADDRESS_HEADERS = set(
[
"from",
"sender",
"reply-to",
"to",
"cc",
"bcc",
"resent-from",
"resent-sender",
"resent-to",
"resent-cc",
"resent-bcc",
]
)
before_build = None
after_build = None
def encode_header(self, value):
if value:
return encode_header_(value, self.charset)
else:
return value
def encode_address_header(self, pair):
if not pair:
return None
name, email = pair
return compat_formataddr((name or "", email))
encode_name_header = encode_address_header # legacy name
def set_header(self, msg, key, value, encode=True):
if value is None:
            # TODO: maybe remove the header here?
return
if not isinstance(value, string_types):
value = to_unicode(value)
# Prevent header injection
if "\n" in value or "\r" in value:
raise BadHeaderError(
"Header values can't contain newlines (got %r for header %r)"
% (value, key)
)
if key.lower() in self.ADDRESS_HEADERS:
value = ", ".join(
sanitize_address(addr, self.charset) for addr in getaddresses((value,))
)
msg[key] = encode and self.encode_header(value) or value
def _build_root_message(self, message_cls=None, **kw):
msg = (message_cls or SafeMIMEMultipart)(**kw)
if self.policy:
msg.policy = self.policy
msg.preamble = self.ROOT_PREAMBLE
self.set_header(msg, "Date", self.date, encode=False)
self.set_header(msg, "Message-ID", self.message_id, encode=False)
if self._headers:
for name, value in list(self._headers.items()):
self.set_header(msg, name, value)
subject = self.subject
if subject is not None:
self.set_header(msg, "Subject", subject)
self.set_header(
msg, "From", self.encode_address_header(self._mail_from), encode=False
)
if self._mail_to:
self.set_header(
msg,
"To",
", ".join([self.encode_address_header(addr) for addr in self._mail_to]),
encode=False,
)
if self._cc:
self.set_header(
msg,
"Cc",
", ".join([self.encode_address_header(addr) for addr in self._cc]),
encode=False,
)
return msg
def _build_html_part(self):
text = self.html_body
if text:
p = SafeMIMEText(text, "html", charset=self.charset)
p.set_charset(self.charset)
return p
def _build_text_part(self):
text = self.text_body
if text:
p = SafeMIMEText(text, "plain", charset=self.charset)
p.set_charset(self.charset)
return p
def build_message(self, message_cls=None):
if self.before_build:
self.before_build(self)
msg = self._build_root_message(message_cls)
rel = SafeMIMEMultipart("related")
msg.attach(rel)
alt = SafeMIMEMultipart("alternative")
rel.attach(alt)
_text = self._build_text_part()
_html = self._build_html_part()
if not (_html or _text):
raise ValueError("Message must contain 'html' or 'text'")
if _text:
alt.attach(_text)
if _html:
alt.attach(_html)
for f in self.attachments:
part = f.mime
if part:
if f.is_inline:
rel.attach(part)
else:
msg.attach(part)
if self.after_build:
self.after_build(self, msg)
return msg
_build_message = build_message
def as_message(self, message_cls=None):
msg = self.build_message(message_cls=message_cls)
if self._signer:
msg = self.sign_message(msg)
return msg
message = as_message
def as_string(self, message_cls=None):
"""
Returns message as string.
        Note: this method saves one message-to-string conversion
        for DKIM, compared to self.as_message().as_string()
Changes:
v0.4.2: now returns bytes, not native string
"""
r = to_native(self.build_message(message_cls=message_cls).as_string())
if self._signer:
r = self.sign_string(r)
return r
class MessageSendMixin(object):
smtp_pool_factory = ObjectFactory
smtp_cls = SMTPBackend
@cached_property
def smtp_pool(self):
return self.smtp_pool_factory(cls=self.smtp_cls)
def send(
self,
to=None,
set_mail_to=True,
mail_from=None,
set_mail_from=False,
render=None,
smtp_mail_options=None,
smtp_rcpt_options=None,
smtp=None,
):
if render is not None:
self.render(**render)
if smtp is None:
smtp = {"host": "localhost", "port": 25, "timeout": 5}
if isinstance(smtp, dict):
smtp = self.smtp_pool[smtp]
if not hasattr(smtp, "sendmail"):
raise ValueError(
"smtp must be a dict or an object with method 'sendmail'. got %s"
% type(smtp)
)
to_addrs = None
if to:
if set_mail_to:
self.set_mail_to(to)
else:
to_addrs = [a[1] for a in parse_name_and_email_list(to)]
to_addrs = to_addrs or self.get_recipients_emails()
if not to_addrs:
raise ValueError("No to-addr")
if mail_from:
if set_mail_from:
self.set_mail_from(mail_from)
from_addr = self._mail_from[1]
else:
mail_from = parse_name_and_email(mail_from)
from_addr = mail_from[1]
else:
from_addr = self._mail_from[1]
if not from_addr:
raise ValueError('No "from" addr')
params = dict(
from_addr=from_addr,
to_addrs=to_addrs,
msg=self,
mail_options=smtp_mail_options,
rcpt_options=smtp_rcpt_options,
)
return smtp.sendmail(**params)
class MessageTransformerMixin(object):
transformer_cls = None
_transformer = None
def create_transformer(self, transformer_cls=None, **kw):
cls = transformer_cls or self.transformer_cls
if cls is None:
from .transformer import MessageTransformer # avoid cyclic import
cls = MessageTransformer
self._transformer = cls(message=self, **kw)
return self._transformer
def destroy_transformer(self):
self._transformer = None
@property
def transformer(self):
if self._transformer is None:
self.create_transformer()
return self._transformer
def set_html(self, **kw):
# When html set, remove old transformer
self.destroy_transformer()
BaseMessage.set_html(self, **kw)
class MessageSignMixin(object):
signer_cls = DKIMSigner
_signer = None
def sign(self, **kwargs):
self._signer = self.signer_cls(**kwargs)
return self
dkim = sign
def sign_message(self, msg):
"""
Add sign header to email.Message
"""
return self._signer.sign_message(msg)
def sign_string(self, message_string):
"""
Add sign header to message-as-a-string
"""
return self._signer.sign_message_string(message_string)
class Message(
MessageSendMixin,
MessageTransformerMixin,
MessageSignMixin,
MessageBuildMixin,
BaseMessage,
):
"""
Email message with:
- DKIM signer
- smtp send
- Message.transformer object
"""
pass
def html(**kwargs):
return Message(**kwargs)
class DjangoMessageProxy(object):
"""
    This class is obsoleted by emails.django_.DjangoMessage.
    It mimics django.core.mail.EmailMessage for the standard Django email backend.
Example usage:
message = emails.Message(html='...', subject='...', mail_from='robot@company.ltd')
connection = django.core.mail.get_connection()
message.set_mail_to('somebody@somewhere.net')
connection.send_messages([DjangoMessageProxy(message), ])
"""
def __init__(self, message, recipients=None, context=None):
self._message = message
self._recipients = recipients
self._context = context and context.copy() or {}
self.from_email = message.mail_from[1]
self.encoding = message.charset
def recipients(self):
return self._recipients or [r[1] for r in self._message.mail_to]
def message(self):
self._message.render(**self._context)
return self._message.message()
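# Usage sketch, grounded in the send() signature above (addresses and SMTP
# parameters are examples):
def _send_example():
    message = Message(
        html='<p>Hello!</p>',
        subject='Greetings',
        mail_from=('Robot', 'robot@company.ltd'),
    )
    return message.send(
        to=('John Brown', 'jbrown@example.net'),
        smtp={'host': 'localhost', 'port': 25, 'timeout': 5},
    )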
|
import pytest
from emails.template import MakoTemplate, StringTemplate, JinjaTemplate
from emails.template.base import BaseTemplate
# NB: the call targets in these tests were stripped; method names and
# constructor arguments below are reconstructed from context and should be
# treated as assumptions.
def test_template_cache():
    t = BaseTemplate('A')
    t._template = 'XXX'
    t.set_template_text('B')  # assumed setter; resetting text should drop the cache
    assert t._template is None
    assert t.template_text == 'B'
def test_templates_basics():
    valid_result = 'Hello, world!'
    for (cls, tmpl) in ((StringTemplate, 'Hello, ${name}!'),
                        (MakoTemplate, 'Hello, ${name}!'),
                        (JinjaTemplate, 'Hello, {{name}}!')):
        assert cls(tmpl).render(name='world') == valid_result
def test_string_template_safe_subst():
    # safe substitution (assumed default) tolerates missing keys;
    # strict mode should raise KeyError
    assert StringTemplate('${a}').render(a=42) == '42'
    with pytest.raises(KeyError):
        assert StringTemplate('${a}', safe_substitution=False).render() == '42'
|
"""
Some network wide and also application specific parameters
"""
import os
MAX_HANDSHAKE_SIZE = 2**16
MAX_REQUEST_SIZE = 2**16
MAX_BLOB_REQUEST_SIZE = 2**16
MAX_RESPONSE_INFO_SIZE = 2**16
MAX_BLOB_INFOS_TO_REQUEST = 20
BLOBFILES_DIR = ".blobfiles"
BLOB_SIZE = 2**21
MIN_BLOB_DATA_PAYMENT_RATE = 0.005 # points/megabyte
MIN_BLOB_INFO_PAYMENT_RATE = 0.02 # points/1000 infos
MIN_VALUABLE_BLOB_INFO_PAYMENT_RATE = 0.05 # points/1000 infos
MIN_VALUABLE_BLOB_HASH_PAYMENT_RATE = 0.05 # points/1000 infos
MAX_CONNECTIONS_PER_STREAM = 5
DEFAULT_MAX_SEARCH_RESULTS = 25
DEFAULT_MAX_KEY_FEE = 100.0
KNOWN_DHT_NODES = [("104.236.42.182", 4000)]
POINTTRADER_SERVER = "http://ec2-54-187-192-68.us-west-2.compute.amazonaws.com:2424"
# POINTTRADER_SERVER = 'http://127.0.0.1:2424'
CRYPTSD_FILE_EXTENSION = ".cryptsd"
API_INTERFACE = "localhost"
API_ADDRESS = "lbryapi"
API_PORT = 5279
ICON_PATH = "app.icns"
APP_NAME = "LBRY"
DEFAULT_WALLET = "lbryum"
API_CONNECTION_STRING = "http://%s:%i/%s" % (API_INTERFACE, API_PORT, API_ADDRESS)
UI_ADDRESS = "http://" + API_INTERFACE + ":" + str(API_PORT)
PROTOCOL_PREFIX = "lbry"
DEFAULT_TIMEOUT = 30
|
import logging
from twisted.internet import defer
from zope.interface import implements
from lbrynet.interfaces import IQueryHandlerFactory, IQueryHandler
log = logging.getLogger(__name__)
class BlobAvailabilityHandlerFactory(object):
implements(IQueryHandlerFactory)
def __init__(self, blob_manager):
self.blob_manager = blob_manager
######### IQueryHandlerFactory #########
def build_query_handler(self):
q_h = BlobAvailabilityHandler(self.blob_manager)
return q_h
def get_primary_query_identifier(self):
return "requested_blobs"
def get_description(self):
return "Blob Availability - blobs that are available to be uploaded"
class BlobAvailabilityHandler(object):
implements(IQueryHandler)
def __init__(self, blob_manager):
self.blob_manager = blob_manager
self.query_identifiers = ["requested_blobs"]
######### IQueryHandler #########
def register_with_request_handler(self, request_handler, peer):
request_handler.register_query_handler(self, self.query_identifiers)
def handle_queries(self, queries):
if self.query_identifiers[0] in queries:
log.debug("Received the client's list of requested blobs")
d = self._get_available_blobs(queries[self.query_identifiers[0]])
def set_field(available_blobs):
return {"available_blobs": available_blobs}
d.addCallback(set_field)
return d
return defer.succeed({})
######### internal #########
def _get_available_blobs(self, requested_blobs):
d = self.blob_manager.completed_blobs(requested_blobs)
return d
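if __name__ == '__main__':
    # Minimal usage sketch (not part of lbrynet): exercise the handler the way
    # a request handler would. FakeBlobManager is a hypothetical stand-in whose
    # completed_blobs() mirrors the real blob manager's contract.
    class FakeBlobManager(object):
        def completed_blobs(self, requested_blobs):
            return defer.succeed([b for b in requested_blobs if b == 'aa'])
    handler = BlobAvailabilityHandlerFactory(FakeBlobManager()).build_query_handler()
    d = handler.handle_queries({'requested_blobs': ['aa', 'bb']})
    def show(result):
        print(result)  # expected: {'available_blobs': ['aa']}
    d.addCallback(show)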
|
"""
Interfaces which are implemented by various classes within LBRYnet.
"""
from zope.interface import Interface
class IPeerFinder(Interface):
"""
Used to find peers by sha384 hashes which they claim to be associated with.
"""
def find_peers_for_blob(self, blob_hash):
"""
Look for peers claiming to be associated with a sha384 hashsum.
@param blob_hash: The sha384 hashsum to use to look up peers.
@type blob_hash: string, hex encoded
@return: a Deferred object which fires with a list of Peer objects
@rtype: Deferred which fires with [Peer]
"""
class IRequestSender(Interface):
"""
Used to connect to a peer, send requests to it, and return the responses to those requests.
"""
def add_request(self, request):
"""
Add a request to the next message that will be sent to the peer
@param request: a request to be sent to the peer in the next message
@type request: ClientRequest
@return: Deferred object which will callback with the response to this request, a dict
@rtype: Deferred which fires with dict
"""
def add_blob_request(self, blob_request):
"""
Add a request for a blob to the next message that will be sent to the peer.
This will cause the protocol to call blob_request.write(data) for all incoming
data, after the response message has been parsed out, until blob_request.finished_deferred fires.
@param blob_request: the request for the blob
@type blob_request: ClientBlobRequest
@return: Deferred object which will callback with the response to this request
@rtype: Deferred which fires with dict
"""
class IRequestCreator(Interface):
"""
Send requests, via an IRequestSender, to peers.
"""
def send_next_request(self, peer, protocol):
"""
Create a Request object for the peer and then give the protocol that request.
@param peer: the Peer object which the request will be sent to.
@type peer: Peer
@param protocol: the protocol to pass the request to.
@type protocol: object which implements IRequestSender
@return: Deferred object which will callback with True or False depending on whether a Request was sent
@rtype: Deferred which fires with boolean
"""
def get_new_peers(self):
"""
Get some new peers which the request creator wants to send requests to.
@return: Deferred object which will callback with [Peer]
@rtype: Deferred which fires with [Peer]
"""
class IMetadataHandler(Interface):
"""
Get metadata for the IDownloadManager.
"""
def get_initial_blobs(self):
"""
Return metadata about blobs that are known to be associated with the stream at the time that the
stream is set up.
@return: Deferred object which will call back with a list of BlobInfo objects
@rtype: Deferred which fires with [BlobInfo]
"""
def final_blob_num(self):
"""
If the last blob in the stream is known, return its blob_num. Otherwise, return None.
@return: integer representing the final blob num in the stream, or None
@rtype: integer or None
"""
class IDownloadManager(Interface):
"""
Manage the downloading of an associated group of blobs, referred to as a stream.
These objects keep track of metadata about the stream, are responsible for starting and stopping
other components, and handle communication between other components.
"""
def start_downloading(self):
"""
Load the initial metadata about the stream and then start the other components.
@return: Deferred which fires when the other components have been started.
@rtype: Deferred which fires with boolean
"""
def resume_downloading(self):
"""
Start the other components after they have been stopped.
@return: Deferred which fires when the other components have been started.
@rtype: Deferred which fires with boolean
"""
def pause_downloading(self):
"""
Stop the other components.
@return: Deferred which fires when the other components have been stopped.
@rtype: Deferred which fires with boolean
"""
def add_blobs_to_download(self, blobs):
"""
Add blobs to the list of blobs that should be downloaded
@param blobs: list of BlobInfos that are associated with the stream being downloaded
@type blobs: [BlobInfo]
@return: DeferredList which fires with the result of adding each previously unknown BlobInfo
to the list of known BlobInfos.
@rtype: DeferredList which fires with [(boolean, Failure/None)]
"""
def stream_position(self):
"""
Returns the blob_num of the next blob needed in the stream.
If the stream already has all of the blobs it needs, then this will return the blob_num
of the last blob in the stream plus 1.
@return: the blob_num of the next blob needed, or the last blob_num + 1.
@rtype: integer
"""
def needed_blobs(self):
"""
Returns a list of BlobInfos representing all of the blobs that the stream still needs to download.
@return: the list of BlobInfos representing blobs that the stream still needs to download.
@rtype: [BlobInfo]
"""
def final_blob_num(self):
"""
If the last blob in the stream is known, return its blob_num. If not, return None.
@return: The blob_num of the last blob in the stream, or None if it is unknown.
@rtype: integer or None
"""
def handle_blob(self, blob_num):
"""
This function is called when the next blob in the stream is ready to be handled, whatever that may mean.
@param blob_num: The blob_num of the blob that is ready to be handled.
@type blob_num: integer
@return: A Deferred which fires when the blob has been 'handled'
@rtype: Deferred which can fire with anything
"""
class IConnectionManager(Interface):
"""
Connects to peers so that IRequestCreators can send their requests.
"""
def get_next_request(self, peer, protocol):
"""
Ask all IRequestCreators belonging to this object to create a Request for peer and give it to protocol
@param peer: the peer which the request will be sent to.
@type peer: Peer
@param protocol: the protocol which the request should be sent to by the IRequestCreator.
@type protocol: IRequestSender
@return: Deferred object which will callback with True or False depending on whether the IRequestSender
should send the request or hang up
@rtype: Deferred which fires with boolean
"""
def protocol_disconnected(self, peer, protocol):
"""
Inform the IConnectionManager that the protocol has been disconnected
@param peer: The peer which the connection was to.
@type peer: Peer
@param protocol: The protocol which was disconnected.
@type protocol: Protocol
@return: None
"""
class IProgressManager(Interface):
"""
Responsible for keeping track of the progress of the download.
    Specifically, it is responsible for deciding which blobs need to be downloaded and for
    keeping track of the progress of the download.
"""
def stream_position(self):
"""
Returns the blob_num of the next blob needed in the stream.
If the stream already has all of the blobs it needs, then this will return the blob_num
of the last blob in the stream plus 1.
@return: the blob_num of the next blob needed, or the last blob_num + 1.
@rtype: integer
"""
def needed_blobs(self):
"""
Returns a list of BlobInfos representing all of the blobs that the stream still needs to download.
@return: the list of BlobInfos representing blobs that the stream still needs to download.
@rtype: [BlobInfo]
"""
def blob_downloaded(self, blob, blob_info):
"""
Mark that a blob has been downloaded and does not need to be downloaded again
@param blob: the blob that has been downloaded.
@type blob: Blob
@param blob_info: the metadata of the blob that has been downloaded.
@type blob_info: BlobInfo
@return: None
"""
class IBlobHandler(Interface):
"""
Responsible for doing whatever should be done with blobs that have been downloaded.
"""
def blob_downloaded(self, blob, blob_info):
"""
Do whatever the downloader is supposed to do when a blob has been downloaded
@param blob: The downloaded blob
@type blob: Blob
@param blob_info: The metadata of the downloaded blob
@type blob_info: BlobInfo
@return: A Deferred which fires when the blob has been handled.
@rtype: Deferred which can fire with anything
"""
class IRateLimited(Interface):
"""
Have the ability to be throttled (temporarily stopped).
"""
def throttle_upload(self):
"""
Stop uploading data until unthrottle_upload is called.
@return: None
"""
def throttle_download(self):
"""
        Stop downloading data until unthrottle_download is called.
@return: None
"""
def unthrottle_upload(self):
"""
Resume uploading data at will until throttle_upload is called.
@return: None
"""
    def unthrottle_download(self):
"""
Resume downloading data at will until throttle_download is called.
@return: None
"""
class IRateLimiter(Interface):
"""
Can keep track of download and upload rates and can throttle objects which implement the
IRateLimited interface.
"""
def report_dl_bytes(self, num_bytes):
"""
Inform the IRateLimiter that num_bytes have been downloaded.
@param num_bytes: the number of bytes that have been downloaded
@type num_bytes: integer
@return: None
"""
def report_ul_bytes(self, num_bytes):
"""
Inform the IRateLimiter that num_bytes have been uploaded.
@param num_bytes: the number of bytes that have been uploaded
@type num_bytes: integer
@return: None
"""
def register_protocol(self, protocol):
"""
Register an IRateLimited object with the IRateLimiter so that the IRateLimiter can throttle it
@param protocol: An object implementing the interface IRateLimited
@type protocol: Object implementing IRateLimited
@return: None
"""
def unregister_protocol(self, protocol):
"""
Unregister an IRateLimited object so that it won't be throttled any more.
@param protocol: An object implementing the interface IRateLimited, which was previously registered with this
IRateLimiter via "register_protocol"
@type protocol: Object implementing IRateLimited
@return: None
"""
class IRequestHandler(Interface):
"""
Pass client queries on to IQueryHandlers
"""
def register_query_handler(self, query_handler, query_identifiers):
"""
Register a query handler, which will be passed any queries that
match any of the identifiers in query_identifiers
@param query_handler: the object which will handle queries matching the given query_identifiers
@type query_handler: Object implementing IQueryHandler
@param query_identifiers: A list of strings representing the query identifiers
for queries that should be passed to this handler
@type query_identifiers: [string]
@return: None
"""
def register_blob_sender(self, blob_sender):
"""
Register a blob sender which will be called after the response has
finished to see if it wants to send a blob
@param blob_sender: the object which will upload the blob to the client.
@type blob_sender: IBlobSender
@return: None
"""
class IBlobSender(Interface):
"""
Upload blobs to clients.
"""
def send_blob_if_requested(self, consumer):
"""
If a blob has been requested, write it to 'write' func of the consumer and then
callback the returned deferred when it has all been written
@param consumer: the object implementing IConsumer which the file will be written to
@type consumer: object which implements IConsumer
@return: Deferred which will fire when the blob sender is done, which will be
immediately if no blob should be sent.
@rtype: Deferred which fires with anything
"""
class IQueryHandler(Interface):
"""
Respond to requests from clients.
"""
def register_with_request_handler(self, request_handler, peer):
"""
Register with the request handler to receive queries
@param request_handler: the object implementing IRequestHandler to register with
@type request_handler: object implementing IRequestHandler
@param peer: the Peer which this query handler will be answering requests from
@type peer: Peer
@return: None
"""
def handle_queries(self, queries):
"""
Return responses to queries from the client.
@param queries: a dict representing the query_identifiers:queries that should be handled
@type queries: {string: dict}
@return: a Deferred object which will callback with a dict of query responses
@rtype: Deferred which fires with {string: dict}
"""
class IQueryHandlerFactory(Interface):
"""
Construct IQueryHandlers to handle queries from each new client that connects.
"""
def build_query_handler(self):
"""
Create an object that implements the IQueryHandler interface
@return: object that implements IQueryHandler
"""
class IStreamDownloaderOptions(Interface):
def get_downloader_options(self, sd_validator, payment_rate_manager):
"""
Return the list of options that can be used to modify IStreamDownloader behavior
@param sd_validator: object containing stream metadata, which the options may depend on
@type sd_validator: object which implements IStreamDescriptorValidator interface
@param payment_rate_manager: The payment rate manager currently in effect for the downloader
@type payment_rate_manager: PaymentRateManager
@return: [DownloadOption]
@rtype: [DownloadOption]
"""
class IStreamDownloaderFactory(Interface):
"""
Construct IStreamDownloaders and provide options that will be passed to those IStreamDownloaders.
"""
def can_download(self, sd_validator, payment_rate_manager):
"""
Decide whether the downloaders created by this factory can download the stream described by sd_validator
@param sd_validator: object containing stream metadata
@type sd_validator: object which implements IStreamDescriptorValidator interface
@param payment_rate_manager: The payment rate manager currently in effect for the downloader
@type payment_rate_manager: PaymentRateManager
@return: True if the downloaders can download the stream, False otherwise
@rtype: bool
"""
def make_downloader(self, sd_validator, options, payment_rate_manager):
"""
Create an object that implements the IStreamDownloader interface
@param sd_validator: object containing stream metadata which will be given to the IStreamDownloader
@type sd_validator: object which implements IStreamDescriptorValidator interface
@param options: a list of values that will be used by the IStreamDownloaderFactory to
construct the IStreamDownloader. the options are in the same order as they were given
by get_downloader_options.
@type options: [Object]
@param payment_rate_manager: the PaymentRateManager which the IStreamDownloader should use.
@type payment_rate_manager: PaymentRateManager
@return: a Deferred which fires with the downloader object
@rtype: Deferred which fires with IStreamDownloader
"""
def get_description(self):
"""
Return a string detailing what this downloader does with streams
@return: short description of what the IStreamDownloader does.
@rtype: string
"""
class IStreamDownloader(Interface):
"""
Use metadata and data from the network for some useful purpose.
"""
def start(self):
"""
start downloading the stream
@return: a Deferred which fires when the stream is finished downloading, or errbacks when the stream is
cancelled.
@rtype: Deferred which fires with anything
"""
def insufficient_funds(self, err):
"""
this function informs the stream downloader that funds are too low to finish downloading.
@return: None
"""
class IStreamDescriptorValidator(Interface):
"""
Pull metadata out of Stream Descriptor Files and perform some
validation on the metadata.
"""
def validate(self):
"""
@return: whether the stream descriptor passes validation checks
@rtype: boolean
"""
def info_to_show(self):
"""
@return: A list of tuples representing metadata that should be presented to the user before starting the
download
@rtype: [(string, string)]
"""
class ILBRYWallet(Interface):
"""
Send and receive payments.
To send a payment, a payment reservation must be obtained first. This guarantees that a payment
isn't promised if it can't be paid. When the service in question is rendered, the payment
reservation must be given to the ILBRYWallet along with the final price. The reservation can also
be canceled.
"""
def stop(self):
"""
Send out any unsent payments, close any connections, and stop checking for incoming payments.
@return: None
"""
def start(self):
"""
Set up any connections and start checking for incoming payments
@return: None
"""
def get_info_exchanger(self):
"""
Get the object that will be used to find the payment addresses of peers.
@return: The object that will be used to find the payment addresses of peers.
@rtype: An object implementing IRequestCreator
"""
def get_wallet_info_query_handler_factory(self):
"""
Get the object that will be used to give our payment address to peers.
This must return an object implementing IQueryHandlerFactory. It will be used to
create IQueryHandler objects that will be registered with an IRequestHandler.
@return: The object that will be used to give our payment address to peers.
@rtype: An object implementing IQueryHandlerFactory
"""
def reserve_points(self, peer, amount):
"""
Ensure a certain amount of points are available to be sent as payment, before the service is rendered
@param peer: The peer to which the payment will ultimately be sent
@type peer: Peer
@param amount: The amount of points to reserve
@type amount: float
@return: A ReservedPoints object which is given to send_points once the service has been rendered
@rtype: ReservedPoints
"""
def cancel_point_reservation(self, reserved_points):
"""
Return all of the points that were reserved previously for some ReservedPoints object
@param reserved_points: ReservedPoints previously returned by reserve_points
@type reserved_points: ReservedPoints
@return: None
"""
def send_points(self, reserved_points, amount):
"""
Schedule a payment to be sent to a peer
@param reserved_points: ReservedPoints object previously returned by reserve_points.
@type reserved_points: ReservedPoints
@param amount: amount of points to actually send, must be less than or equal to the
amount reserved in reserved_points
@type amount: float
@return: Deferred which fires when the payment has been scheduled
@rtype: Deferred which fires with anything
"""
def get_balance(self):
"""
Return the balance of this wallet
@return: Deferred which fires with the balance of the wallet
@rtype: Deferred which fires with float
"""
def add_expected_payment(self, peer, amount):
"""
Increase the number of points expected to be paid by a peer
@param peer: the peer which is expected to pay the points
@type peer: Peer
@param amount: the amount of points expected to be paid
@type amount: float
@return: None
"""
|
"""
A plugin-enabled console application for interacting with the LBRY network called lbrynet-console.
lbrynet-console can be used to download and upload LBRY Files and includes plugins for streaming
LBRY Files to an external application and to download unknown chunks of data for the purpose of
re-uploading them. It gives the user some control over how much will be paid for data and
metadata, and also over which types of queries from clients will be answered.
"""
|
#!/usr/bin/env python
# from distutils.core import setup
from setuptools import setup, find_packages
setup(
name="oauth",
version="1.0.1",
description="Library for OAuth version 1.0a.",
author="Leah Culver",
author_email="leah.culver@gmail.com",
url="http://code.google.com/p/oauth",
packages=find_packages(),
license="MIT License",
zip_safe=True,
)
|
import click
from click.testing import CliRunner
from textkit.filter.filter_punc import filterpunc
from textkit.filter.filter_words import filterwords
from textkit.filter.filter_lengths import filterlengths
from tests.utils import create_single_output, create_multifile_output, compare_results
def test_filterlengths():
    runner = CliRunner()
    with runner.isolated_filesystem():
        filename = 'in.txt'
        sentence = 'Hello\nWorld\n!\nI\n.\nnot\nwin\n'
        # create_single_output writes the given content to the named file
        # (tests.utils helper).
        create_single_output(filename, sentence)
        result = runner.invoke(filterlengths, [filename])
        tokens = result.output.split('\n')
        expected_tokens = ['Hello', 'World', 'not', 'win']
        assert result.exit_code == 0
        compare_results(tokens, expected_tokens)
        result = runner.invoke(filterlengths, ['-m', '4', filename])
        tokens = result.output.split('\n')
        expected_tokens = ['Hello', 'World']
        assert result.exit_code == 0
        compare_results(tokens, expected_tokens)
def test_filterpunc():
    runner = CliRunner()
    with runner.isolated_filesystem():
        filename = 'in.txt'
        sentence = 'Hello\nWorld\n!\nI\n.\nnot'
        expected_tokens = ['Hello', 'World', 'I', 'not']
        create_single_output(filename, sentence)
        result = runner.invoke(filterpunc, [filename])
        tokens = result.output.split('\n')
        assert result.exit_code == 0
        compare_results(tokens, expected_tokens)
def test_filterwords():
    runner = CliRunner()
    with runner.isolated_filesystem():
        filename = 'in.txt'
        sentence = 'Hello\nWorld\n!\nI\nam\nnot\na\ncrook\n.'
        expected_tokens = ['Hello', 'World', '!', 'crook', '.']
        create_single_output(filename, sentence)
        result = runner.invoke(filterwords, ['--language', 'english', filename])
        tokens = result.output.split('\n')
        assert result.exit_code == 0
        compare_results(tokens, expected_tokens)
def test_filterwords_custom():
    runner = CliRunner()
    with runner.isolated_filesystem():
        filename = 'in.txt'
        sentence = 'Hello\nWorld\n!\nI\nam\nnot\na\ncrook\n.'
        expected_tokens = ['World', '!', 'crook', '.']
        custom_stopword_filename = 'custom.txt'
        custom_stopwords = 'hello\n'
        create_single_output(filename, sentence)
        create_single_output(custom_stopword_filename, custom_stopwords)
        result = runner.invoke(filterwords, ['--custom', 'custom.txt', filename])
        tokens = result.output.split('\n')
        assert result.exit_code == 0
        compare_results(tokens, expected_tokens) |
#!/usr/bin/env python
"""
Demonstrate diagonal matrix creation on the GPU.
"""
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
import pycuda.driver as drv
import numpy as np
import skcuda.linalg as culinalg
import skcuda.misc as cumisc
culinalg.init()
# Double precision is only supported by devices with compute
# capability >= 1.3:
demo_types = [np.float32, np.complex64]
if cumisc.get_compute_capability(pycuda.autoinit.device) >= 1.3:
demo_types.extend([np.float64, np.complex128])
for t in demo_types:
print("Testing real diagonal matrix creation for type " + str(np.dtype(t)))
v = np.array([1, 2, 3, 4, 5, 6], t)
v_gpu = gpuarray.to_gpu(v)
d_gpu = culinalg.diag(v_gpu)
print("Success status: ", np.all(d_gpu.get() == np.diag(v)))
|
#!/usr/bin/env python
"""
Python interface to CUSOLVER functions.
Note: this module does not explicitly depend on PyCUDA.
"""
from . import cudart
if int(cudart._cudart_version) < 7000:
raise ImportError("CUSOLVER library only available in CUDA 7.0 and later")
import ctypes
import sys
import numpy as np
from . import cuda
# Load library:
_version_list = [7.5, 7.0]
if "linux" in sys.platform:
_libcusolver_libname_list = ["libcusolver.so"] + [
"libsolver.so.%s" % v for v in _version_list
]
elif sys.platform == "darwin":
_libcusolver_libname_list = ["libcusolver.dylib"]
elif sys.platform == "win32":
if sys.maxsize > 2**32:
_libcusolver_libname_list = ["cusolver.dll"] + [
"cusolver64_%s.dll" % int(10 * v) for v in _version_list
]
else:
_libcusolver_libname_list = ["cusolver.dll"] + [
"cusolver32_%s.dll" % int(10 * v) for v in _version_list
]
else:
raise RuntimeError("unsupported platform")
# Print understandable error message when library cannot be found:
_libcusolver = None
for _libcusolver_libname in _libcusolver_libname_list:
try:
if sys.platform == "win32":
_libcusolver = ctypes.windll.LoadLibrary(_libcusolver_libname)
else:
_libcusolver = ctypes.cdll.LoadLibrary(_libcusolver_libname)
except OSError:
pass
else:
break
if _libcusolver is None:
raise OSError("cusolver library not found")
class CUSOLVER_ERROR(Exception):
"""CUSOLVER error."""
pass
class CUSOLVER_STATUS_NOT_INITIALIZED(CUSOLVER_ERROR):
"""CUSOLVER library not initialized."""
pass
class CUSOLVER_STATUS_ALLOC_FAILED(CUSOLVER_ERROR):
"""CUSOLVER memory allocation failed."""
pass
class CUSOLVER_STATUS_INVALID_VALUE(CUSOLVER_ERROR):
"""Invalid value passed to CUSOLVER function."""
pass
class CUSOLVER_STATUS_ARCH_MISMATCH(CUSOLVER_ERROR):
"""CUSOLVER architecture mismatch."""
pass
class CUSOLVER_STATUS_MAPPING_ERROR(CUSOLVER_ERROR):
"""CUSOLVER mapping error."""
pass
class CUSOLVER_STATUS_EXECUTION_FAILED(CUSOLVER_ERROR):
"""CUSOLVER execution failed."""
pass
class CUSOLVER_STATUS_INTERNAL_ERROR(CUSOLVER_ERROR):
"""CUSOLVER internal error."""
pass
class CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED(CUSOLVER_ERROR):
"""Matrix type not supported by CUSOLVER."""
pass
class CUSOLVER_STATUS_NOT_SUPPORTED(CUSOLVER_ERROR):
"""Operation not supported by CUSOLVER."""
pass
class CUSOLVER_STATUS_ZERO_PIVOT(CUSOLVER_ERROR):
"""Zero pivot encountered by CUSOLVER."""
pass
class CUSOLVER_STATUS_INVALID_LICENSE(CUSOLVER_ERROR):
"""Invalid CUSOLVER license."""
pass
CUSOLVER_EXCEPTIONS = {
1: CUSOLVER_STATUS_NOT_INITIALIZED,
2: CUSOLVER_STATUS_ALLOC_FAILED,
3: CUSOLVER_STATUS_INVALID_VALUE,
4: CUSOLVER_STATUS_ARCH_MISMATCH,
5: CUSOLVER_STATUS_MAPPING_ERROR,
6: CUSOLVER_STATUS_EXECUTION_FAILED,
7: CUSOLVER_STATUS_INTERNAL_ERROR,
8: CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED,
9: CUSOLVER_STATUS_NOT_SUPPORTED,
10: CUSOLVER_STATUS_ZERO_PIVOT,
11: CUSOLVER_STATUS_INVALID_LICENSE,
}
def cusolverCheckStatus(status):
"""
Raise CUSOLVER exception.
Raise an exception corresponding to the specified CUSOLVER error
code.
Parameters
----------
status : int
CUSOLVER error code.
See Also
--------
CUSOLVER_EXCEPTIONS
"""
if status != 0:
try:
raise CUSOLVER_EXCEPTIONS[status]
except KeyError:
raise CUSOLVER_ERROR
# Helper functions:
_libcusolver.cusolverDnCreate.restype = int
_libcusolver.cusolverDnCreate.argtypes = [ctypes.c_void_p]
def cusolverDnCreate():
"""
Create cuSolverDn context.
Returns
-------
handle : int
cuSolverDn context.
References
----------
`cusolverDnCreate <http://docs.nvidia.com/cuda/cusolver/index.html#cuSolverDNcreate>`_
"""
handle = ctypes.c_void_p()
status = _libcusolver.cusolverDnCreate(ctypes.byref(handle))
cusolverCheckStatus(status)
return handle.value
_libcusolver.cusolverDnDestroy.restype = int
_libcusolver.cusolverDnDestroy.argtypes = [ctypes.c_void_p]
def cusolverDnDestroy(handle):
"""
Destroy cuSolverDn context.
Parameters
----------
handle : int
cuSolverDn context.
References
----------
`cusolverDnDestroy <http://docs.nvidia.com/cuda/cusolver/index.html#cuSolverDNdestroy>`_
"""
status = _libcusolver.cusolverDnDestroy(handle)
cusolverCheckStatus(status)
_libcusolver.cusolverDnSetStream.restype = int
_libcusolver.cusolverDnSetStream.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
def cusolverDnSetStream(handle, stream):
"""
Set stream used by cuSolverDN library.
Parameters
----------
handle : int
cuSolverDN context.
stream : int
Stream to be used.
References
----------
`cusolverDnSetStream <http://docs.nvidia.com/cuda/cusolver/index.html#cudssetstream>`_
"""
status = _libcusolver.cusolverDnSetStream(handle, stream)
cusolverCheckStatus(status)
_libcusolver.cusolverDnGetStream.restype = int
_libcusolver.cusolverDnGetStream.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
def cusolverDnGetStream(handle):
"""
Get stream used by cuSolverDN library.
Parameters
----------
handle : int
cuSolverDN context.
Returns
-------
stream : int
Stream used by context.
References
----------
`cusolverDnGetStream <http://docs.nvidia.com/cuda/cusolver/index.html#cudsgetstream>`_
"""
    stream = ctypes.c_void_p()
    status = _libcusolver.cusolverDnGetStream(handle, ctypes.byref(stream))
    cusolverCheckStatus(status)
    return stream.value
# Dense solver functions:
# SGETRF, DGETRF, CGETRF, ZGETRF
_libcusolver.cusolverDnSgetrf_bufferSize.restype = int
_libcusolver.cusolverDnSgetrf_bufferSize.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
]
def cusolverDnSgetrf_bufferSize(handle, m, n, A, lda):
"""
Calculate size of work buffer used by cusolverDnSgetrf.
References
----------
`cusolver<t>nSgetrf <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-getrf>`_
"""
Lwork = ctypes.c_int()
    status = _libcusolver.cusolverDnSgetrf_bufferSize(
        handle, m, n, int(A), lda, ctypes.byref(Lwork)
    )
cusolverCheckStatus(status)
return Lwork.value
_libcusolver.cusolverDnSgetrf.restype = int
_libcusolver.cusolverDnSgetrf.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
]
def cusolverDnSgetrf(handle, m, n, A, lda, Workspace, devIpiv, devInfo):
"""
Compute LU factorization of a real single precision m x n matrix.
References
----------
`cusolverDn<t>getrf <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-getrf>`_
"""
status = _libcusolver.cusolverDnSgetrf(
handle, m, n, int(A), lda, int(Workspace), int(devIpiv), int(devInfo)
)
cusolverCheckStatus(status)
_libcusolver.cusolverDnDgetrf_bufferSize.restype = int
_libcusolver.cusolverDnDgetrf_bufferSize.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
]
def cusolverDnDgetrf_bufferSize(handle, m, n, A, lda):
"""
Calculate size of work buffer used by cusolverDnDgetrf.
References
----------
`cusolverDn<t>getrf <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-getrf>`_
"""
Lwork = ctypes.c_int()
    status = _libcusolver.cusolverDnDgetrf_bufferSize(
        handle, m, n, int(A), lda, ctypes.byref(Lwork)
    )
cusolverCheckStatus(status)
return Lwork.value
_libcusolver.cusolverDnDgetrf.restype = int
_libcusolver.cusolverDnDgetrf.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
]
def cusolverDnDgetrf(handle, m, n, A, lda, Workspace, devIpiv, devInfo):
"""
Compute LU factorization of a real double precision m x n matrix.
References
----------
`cusolverDn<t>getrf <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-getrf>`_
"""
status = _libcusolver.cusolverDnDgetrf(
handle, m, n, int(A), lda, int(Workspace), int(devIpiv), int(devInfo)
)
cusolverCheckStatus(status)
_libcusolver.cusolverDnCgetrf_bufferSize.restype = int
_libcusolver.cusolverDnCgetrf_bufferSize.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
]
def cusolverDnCgetrf_bufferSize(handle, m, n, A, lda):
"""
Calculate size of work buffer used by cusolverDnCgetrf.
References
----------
`cusolverDn<t>getrf <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-getrf>`_
"""
Lwork = ctypes.c_int()
    status = _libcusolver.cusolverDnCgetrf_bufferSize(
        handle, m, n, int(A), lda, ctypes.byref(Lwork)
    )
cusolverCheckStatus(status)
return Lwork.value
_libcusolver.cusolverDnCgetrf.restype = int
_libcusolver.cusolverDnCgetrf.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
]
def cusolverDnCgetrf(handle, m, n, A, lda, Workspace, devIpiv, devInfo):
"""
Compute LU factorization of a complex single precision m x n matrix.
References
----------
`cusolverDn<t>getrf <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-getrf>`_
"""
status = _libcusolver.cusolverDnCgetrf(
handle, m, n, int(A), lda, int(Workspace), int(devIpiv), int(devInfo)
)
cusolverCheckStatus(status)
_libcusolver.cusolverDnZgetrf_bufferSize.restype = int
_libcusolver.cusolverDnZgetrf_bufferSize.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
]
def cusolverDnZgetrf_bufferSize(handle, m, n, A, lda):
"""
Calculate size of work buffer used by cusolverDnZgetrf.
References
----------
`cusolverDn<t>getrf <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-getrf>`_
"""
Lwork = ctypes.c_int()
    status = _libcusolver.cusolverDnZgetrf_bufferSize(
        handle, m, n, int(A), lda, ctypes.byref(Lwork)
    )
cusolverCheckStatus(status)
return Lwork.value
_libcusolver.cusolverDnZgetrf.restype = int
_libcusolver.cusolverDnZgetrf.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_void_p,
]
def cusolverDnZgetrf(handle, m, n, A, lda, Workspace, devIpiv, devInfo):
"""
Compute LU factorization of a complex double precision m x n matrix.
References
----------
`cusolverDn<t>getrf <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-getrf>`_
"""
status = _libcusolver.cusolverDnZgetrf(
handle, m, n, int(A), lda, int(Workspace), int(devIpiv), int(devInfo)
)
cusolverCheckStatus(status)
# SGETRS, DGETRS, CGETRS, ZGETRS
_libcusolver.cusolverDnSgetrs.restype = int
_libcusolver.cusolverDnSgetrs.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
]
def cusolverDnSgetrs(handle, trans, n, nrhs, A, lda, devIpiv, B, ldb, devInfo):
"""
Solve real single precision linear system.
References
----------
`cusolverDn<t>getrs <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-getrs>`_
"""
status = _libcusolver.cusolverDnSgetrs(
handle, trans, n, nrhs, int(A), lda, int(devIpiv), int(B), ldb, int(devInfo)
)
cusolverCheckStatus(status)
_libcusolver.cusolverDnDgetrs.restype = int
_libcusolver.cusolverDnDgetrs.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
]
def cusolverDnDgetrs(handle, trans, n, nrhs, A, lda, devIpiv, B, ldb, devInfo):
"""
Solve real double precision linear system.
References
----------
`cusolverDn<t>getrs <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-getrs>`_
"""
status = _libcusolver.cusolverDnDgetrs(
handle, trans, n, nrhs, int(A), lda, int(devIpiv), int(B), ldb, int(devInfo)
)
cusolverCheckStatus(status)
_libcusolver.cusolverDnCgetrs.restype = int
_libcusolver.cusolverDnCgetrs.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
]
def cusolverDnCgetrs(handle, trans, n, nrhs, A, lda, devIpiv, B, ldb, devInfo):
"""
Solve complex single precision linear system.
References
----------
`cusolverDn<t>getrs <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-getrs>`_
"""
status = _libcusolver.cusolverDnCgetrs(
handle, trans, n, nrhs, int(A), lda, int(devIpiv), int(B), ldb, int(devInfo)
)
cusolverCheckStatus(status)
_libcusolver.cusolverDnZgetrs.restype = int
_libcusolver.cusolverDnZgetrs.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
]
def cusolverDnZgetrs(handle, trans, n, nrhs, A, lda, devIpiv, B, ldb, devInfo):
"""
Solve complex double precision linear system.
References
----------
`cusolverDn<t>getrs <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-getrs>`_
"""
status = _libcusolver.cusolverDnZgetrs(
handle, trans, n, nrhs, int(A), lda, int(devIpiv), int(B), ldb, int(devInfo)
)
cusolverCheckStatus(status)
# SGESVD, DGESVD, CGESVD, ZGESVD
_libcusolver.cusolverDnSgesvd_bufferSize.restype = int
_libcusolver.cusolverDnSgesvd_bufferSize.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
]
def cusolverDnSgesvd_bufferSize(handle, m, n):
"""
Calculate size of work buffer used by cusolverDnSgesvd.
References
----------
`cusolverDn<t>gesvd <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-gesvd>`_
"""
Lwork = ctypes.c_int()
status = _libcusolver.cusolverDnSgesvd_bufferSize(handle, m, n, ctypes.byref(Lwork))
cusolverCheckStatus(status)
return Lwork.value
_libcusolver.cusolverDnSgesvd.restype = int
_libcusolver.cusolverDnSgesvd.argtypes = [
ctypes.c_void_p,
ctypes.c_char,
ctypes.c_char,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
]
def cusolverDnSgesvd(
handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, Work, Lwork, rwork, devInfo
):
"""
Compute real single precision singular value decomposition.
References
----------
`cusolverDn<t>gesvd <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-gesvd>`_
"""
jobu = jobu.encode("ascii")
jobvt = jobvt.encode("ascii")
status = _libcusolver.cusolverDnSgesvd(
handle,
jobu,
jobvt,
m,
n,
int(A),
lda,
int(S),
int(U),
ldu,
int(VT),
ldvt,
int(Work),
Lwork,
int(rwork),
int(devInfo),
)
cusolverCheckStatus(status)
_libcusolver.cusolverDnDgesvd_bufferSize.restype = int
_libcusolver.cusolverDnDgesvd_bufferSize.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
]
def cusolverDnDgesvd_bufferSize(handle, m, n):
"""
Calculate size of work buffer used by cusolverDnDgesvd.
References
----------
`cusolverDn<t>gesvd <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-gesvd>`_
"""
Lwork = ctypes.c_int()
status = _libcusolver.cusolverDnDgesvd_bufferSize(handle, m, n, ctypes.byref(Lwork))
cusolverCheckStatus(status)
return Lwork.value
_libcusolver.cusolverDnDgesvd.restype = int
_libcusolver.cusolverDnDgesvd.argtypes = [
ctypes.c_void_p,
ctypes.c_char,
ctypes.c_char,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
]
def cusolverDnDgesvd(
handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, Work, Lwork, rwork, devInfo
):
"""
Compute real double precision singular value decomposition.
References
----------
`cusolverDn<t>gesvd <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-gesvd>`_
"""
jobu = jobu.encode("ascii")
jobvt = jobvt.encode("ascii")
status = _libcusolver.cusolverDnDgesvd(
handle,
jobu,
jobvt,
m,
n,
int(A),
lda,
int(S),
int(U),
ldu,
int(VT),
ldvt,
int(Work),
Lwork,
int(rwork),
int(devInfo),
)
cusolverCheckStatus(status)
_libcusolver.cusolverDnCgesvd_bufferSize.restype = int
_libcusolver.cusolverDnCgesvd_bufferSize.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
]
def cusolverDnCgesvd_bufferSize(handle, m, n):
"""
Calculate size of work buffer used by cusolverDnCgesvd.
References
----------
`cusolverDn<t>gesvd <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-gesvd>`_
"""
Lwork = ctypes.c_int()
status = _libcusolver.cusolverDnCgesvd_bufferSize(handle, m, n, ctypes.byref(Lwork))
cusolverCheckStatus(status)
return Lwork.value
_libcusolver.cusolverDnCgesvd.restype = int
_libcusolver.cusolverDnCgesvd.argtypes = [
ctypes.c_void_p,
ctypes.c_char,
ctypes.c_char,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
]
def cusolverDnCgesvd(
handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, Work, Lwork, rwork, devInfo
):
"""
Compute complex single precision singular value decomposition.
References
----------
`cusolverDn<t>gesvd <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-gesvd>`_
"""
jobu = jobu.encode("ascii")
jobvt = jobvt.encode("ascii")
status = _libcusolver.cusolverDnCgesvd(
handle,
jobu,
jobvt,
m,
n,
int(A),
lda,
int(S),
int(U),
ldu,
int(VT),
ldvt,
int(Work),
Lwork,
int(rwork),
int(devInfo),
)
cusolverCheckStatus(status)
_libcusolver.cusolverDnZgesvd_bufferSize.restype = int
_libcusolver.cusolverDnZgesvd_bufferSize.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
]
def cusolverDnZgesvd_bufferSize(handle, m, n):
"""
Calculate size of work buffer used by cusolverDnZgesvd.
References
----------
`cusolverDn<t>gesvd <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-gesvd>`_
"""
Lwork = ctypes.c_int()
status = _libcusolver.cusolverDnZgesvd_bufferSize(handle, m, n, ctypes.byref(Lwork))
cusolverCheckStatus(status)
return Lwork.value
_libcusolver.cusolverDnZgesvd.restype = int
_libcusolver.cusolverDnZgesvd.argtypes = [
ctypes.c_void_p,
ctypes.c_char,
ctypes.c_char,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
]
def cusolverDnZgesvd(
handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, Work, Lwork, rwork, devInfo
):
"""
Compute complex double precision singular value decomposition.
References
----------
`cusolverDn<t>gesvd <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-gesvd>`_
"""
jobu = jobu.encode("ascii")
jobvt = jobvt.encode("ascii")
status = _libcusolver.cusolverDnZgesvd(
handle,
jobu,
jobvt,
m,
n,
int(A),
lda,
int(S),
int(U),
ldu,
int(VT),
ldvt,
int(Work),
Lwork,
int(rwork),
int(devInfo),
)
cusolverCheckStatus(status)
# SGEQRF, DGEQRF, CGEQRF, ZGEQRF
_libcusolver.cusolverDnSgeqrf_bufferSize.restype = int
_libcusolver.cusolverDnSgeqrf_bufferSize.argtypes = [
    ctypes.c_void_p,
    ctypes.c_int,
    ctypes.c_int,
    ctypes.c_void_p,
    ctypes.c_int,
    ctypes.c_void_p,
]
def cusolverDnSgeqrf_bufferSize(handle, m, n, A, lda):
"""
Calculate size of work buffer used by cusolverDnSgeqrf.
References
----------
`cusolverDn<t>geqrf <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-geqrf>`_
"""
Lwork = ctypes.c_int()
    status = _libcusolver.cusolverDnSgeqrf_bufferSize(
        handle, m, n, int(A), lda, ctypes.byref(Lwork)
    )
cusolverCheckStatus(status)
return Lwork.value
_libcusolver.cusolverDnSgeqrf.restype = int
_libcusolver.cusolverDnSgeqrf.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
]
def cusolverDnSgeqrf(handle, m, n, A, lda, TAU, Workspace, Lwork, devInfo):
"""
Compute QR factorization of a real single precision m x n matrix.
References
----------
`cusolverDn<t>geqrf <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-geqrf>`_
"""
status = _libcusolver.cusolverDnSgeqrf(
handle, m, n, int(A), lda, int(TAU), int(Workspace), Lwork, int(devInfo)
)
cusolverCheckStatus(status)
_libcusolver.cusolverDnDgeqrf_bufferSize.restype = int
_libcusolver.cusolverDnDgeqrf_bufferSize.argtypes = [
    ctypes.c_void_p,
    ctypes.c_int,
    ctypes.c_int,
    ctypes.c_void_p,
    ctypes.c_int,
    ctypes.c_void_p,
]
def cusolverDnDgeqrf_bufferSize(handle, m, n, A, lda):
"""
Calculate size of work buffer used by cusolverDnDgeqrf.
References
----------
`cusolverDn<t>geqrf <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-geqrf>`_
"""
Lwork = ctypes.c_int()
    status = _libcusolver.cusolverDnDgeqrf_bufferSize(
        handle, m, n, int(A), lda, ctypes.byref(Lwork)
    )
cusolverCheckStatus(status)
return Lwork.value
_libcusolver.cusolverDnDgeqrf.restype = int
_libcusolver.cusolverDnDgeqrf.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
]
def cusolverDnDgeqrf(handle, m, n, A, lda, TAU, Workspace, Lwork, devInfo):
"""
Compute QR factorization of a real double precision m x n matrix.
References
----------
`cusolverDn<t>geqrf <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-geqrf>`_
"""
status = _libcusolver.cusolverDnDgeqrf(
handle, m, n, int(A), lda, int(TAU), int(Workspace), Lwork, int(devInfo)
)
cusolverCheckStatus(status)
_libcusolver.cusolverDnCgeqrf_bufferSize.restype = int
_libcusolver.cusolverDnCgeqrf_bufferSize.argtypes = [
    ctypes.c_void_p,
    ctypes.c_int,
    ctypes.c_int,
    ctypes.c_void_p,
    ctypes.c_int,
    ctypes.c_void_p,
]
def cusolverDnCgeqrf_bufferSize(handle, m, n, A, lda):
"""
Calculate size of work buffer used by cusolverDnCgeqrf.
References
----------
`cusolverDn<t>geqrf <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-geqrf>`_
"""
Lwork = ctypes.c_int()
    status = _libcusolver.cusolverDnCgeqrf_bufferSize(
        handle, m, n, int(A), lda, ctypes.byref(Lwork)
    )
cusolverCheckStatus(status)
return Lwork.value
_libcusolver.cusolverDnCgeqrf.restype = int
_libcusolver.cusolverDnCgeqrf.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
]
def cusolverDnCgeqrf(handle, m, n, A, lda, TAU, Workspace, Lwork, devInfo):
"""
Compute QR factorization of a complex single precision m x n matrix.
References
----------
`cusolverDn<t>geqrf <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-geqrf>`_
"""
status = _libcusolver.cusolverDnCgeqrf(
handle, m, n, int(A), lda, int(TAU), int(Workspace), Lwork, int(devInfo)
)
cusolverCheckStatus(status)
_libcusolver.cusolverDnZgeqrf_bufferSize.restype = int
_libcusolver.cusolverDnZgeqrf_bufferSize.argtypes = [
    ctypes.c_void_p,
    ctypes.c_int,
    ctypes.c_int,
    ctypes.c_void_p,
    ctypes.c_int,
    ctypes.c_void_p,
]
def cusolverDnZgeqrf_bufferSize(handle, m, n, A, lda):
"""
Calculate size of work buffer used by cusolverDnZgeqrf.
References
----------
`cusolverDn<t>geqrf <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-geqrf>`_
"""
Lwork = ctypes.c_int()
    status = _libcusolver.cusolverDnZgeqrf_bufferSize(
        handle, m, n, int(A), lda, ctypes.byref(Lwork)
    )
cusolverCheckStatus(status)
return Lwork.value
_libcusolver.cusolverDnZgeqrf.restype = int
_libcusolver.cusolverDnZgeqrf.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_void_p,
]
def cusolverDnZgeqrf(handle, m, n, A, lda, TAU, Workspace, Lwork, devInfo):
"""
Compute QR factorization of a complex double precision m x n matrix.
References
----------
`cusolverDn<t>geqrf <http://docs.nvidia.com/cuda/cusolver/index.html#cuds-lt-t-gt-geqrf>`_
"""
status = _libcusolver.cusolverDnZgeqrf(
handle, m, n, int(A), lda, int(TAU), int(Workspace), Lwork, int(devInfo)
)
cusolverCheckStatus(status)
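if __name__ == '__main__':
    # Usage sketch (not part of the wrapper module): LU-factorize a small
    # single precision matrix with the cusolverDnSgetrf wrappers above.
    # Assumes PyCUDA is installed; CUSOLVER expects column-major storage,
    # hence the np.asfortranarray conversion.
    import pycuda.autoinit
    import pycuda.gpuarray as gpuarray
    m = n = lda = 4
    a = np.asfortranarray(np.random.rand(m, n).astype(np.float32))
    a_gpu = gpuarray.to_gpu(a)
    handle = cusolverDnCreate()
    try:
        lwork = cusolverDnSgetrf_bufferSize(handle, m, n, a_gpu.gpudata, lda)
        workspace = gpuarray.empty(lwork, np.float32)
        dev_ipiv = gpuarray.zeros(min(m, n), np.int32)
        dev_info = gpuarray.zeros(1, np.int32)
        cusolverDnSgetrf(handle, m, n, a_gpu.gpudata, lda,
                         workspace.gpudata, dev_ipiv.gpudata, dev_info.gpudata)
        print('devInfo =', dev_info.get()[0])  # 0 indicates success
    finally:
        cusolverDnDestroy(handle)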
|
from django.core.management.base import BaseCommand
from django.contrib.contenttypes.models import ContentType
from django_orphaned.app_settings import ORPHANED_APPS_MEDIABASE_DIRS
from itertools import chain
from optparse import make_option
import os
import shutil
from django.conf import settings
class Command(BaseCommand):
help = "Delete all orphaned files"
base_options = (
make_option(
"--info",
action="store_true",
dest="info",
default=False,
help="If provided, the files will not be deleted.",
),
)
option_list = BaseCommand.option_list + base_options
def handle(self, **options):
self.only_info = options.get("info")
for app in list(ORPHANED_APPS_MEDIABASE_DIRS.keys()):
if "root" in ORPHANED_APPS_MEDIABASE_DIRS[app]:
needed_files = []
all_files = []
possible_empty_dirs = []
empty_dirs = []
total_freed_bytes = 0
total_freed = "0"
delete_files = []
skip = ORPHANED_APPS_MEDIABASE_DIRS[app].get("skip", ())
exclude = ORPHANED_APPS_MEDIABASE_DIRS[app].get("exclude", ())
for model in ContentType.objects.filter(app_label=app):
mc = model.model_class()
if mc is None:
continue
fields = []
for field in mc._meta.fields:
if (
field.get_internal_type() == "FileField"
or field.get_internal_type() == "ImageField"
):
fields.append(field.name)
# we have found a model with FileFields
if len(fields) > 0:
files = mc.objects.all().values_list(*fields)
needed_files.extend(
[
os.path.join(settings.MEDIA_ROOT, file)
for file in [
_f for _f in chain.from_iterable(files) if _f
]
]
)
# traverse root folder and store all files and empty directories
def should_skip(dir):
for skip_dir in skip:
if dir.startswith(skip_dir):
return True
return False
# process each root of the app
app_roots = ORPHANED_APPS_MEDIABASE_DIRS[app]["root"]
if isinstance(app_roots, str): # backwards compatibility
app_roots = [app_roots]
for app_root in app_roots:
for root, dirs, files in os.walk(app_root):
if should_skip(root):
continue
if len(files) > 0:
for basename in files:
if basename not in exclude:
all_files.append(os.path.join(root, basename))
elif not os.path.samefile(root, app_root):
possible_empty_dirs.append(root)
                # ignore empty dirs that still contain files or skipped dirs
                for ed in possible_empty_dirs:
                    dont_delete = False
                    for kept_file in all_files:
                        if kept_file.startswith(ed):
                            dont_delete = True
                            break
                    for skip_dir in skip:
                        if skip_dir.startswith(ed):
                            dont_delete = True
                            break
                    if not dont_delete:
                        empty_dirs.append(ed)
# select deleted files (delete_files = all_files - needed_files)
aa = set(all_files)
delete_files = list(aa.difference(needed_files))
delete_files.sort()
empty_dirs.sort()
empty_dirs = set(empty_dirs) # remove possible duplicates
# to be freed
for df in delete_files:
total_freed_bytes += os.path.getsize(df)
total_freed = "%0.1f MB" % (total_freed_bytes / (1024 * 1024.0))
# only show
if self.only_info:
print("\r\n=== %s ===" % app)
if len(empty_dirs) > 0:
print("\r\nFollowing empty dirs will be removed:\r\n")
for file in empty_dirs:
print(" ", file)
if len(delete_files) > 0:
print("\r\nFollowing files will be deleted:\r\n")
for file in delete_files:
print(" ", file)
                        print(
                            "\r\nIn total, %s files will be deleted, "
                            "freeing %s.\r\n" % (len(delete_files), total_freed)
                        )
else:
print("No files to delete!")
# DELETE NOW!
else:
for file in delete_files:
os.remove(file)
for dirs in empty_dirs:
shutil.rmtree(dirs, ignore_errors=True)
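# Usage sketch: django registers this command under the module's filename
# (django-orphaned ships it as ``deleteorphaned``):
#
#     python manage.py deleteorphaned --info   # dry run: only report findings
#     python manage.py deleteorphaned          # actually delete orphaned files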
|
__author__ = 'leif'
import json
import urllib.request, urllib.parse, urllib.error
from keys import BING_API_KEY
def run_query(search_terms):
    # Query the Bing Search API (Azure Datamarket) and return a list of
    # {'title', 'link', 'summary'} dicts.
    root_url = 'https://api.datamarket.azure.com/Bing/Search/'
    source = 'Web'
    results_per_page = 10
    offset = 0
    query = "'{0}'".format(search_terms)
    query = urllib.parse.quote(query)
    search_url = "{0}{1}?$format=json&$top={2}&$skip={3}&Query={4}".format(
        root_url, source, results_per_page, offset, query)
    username = ''
    password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, search_url, username, BING_API_KEY)
    results = []
    try:
        handler = urllib.request.HTTPBasicAuthHandler(password_mgr)
        opener = urllib.request.build_opener(handler)
        urllib.request.install_opener(opener)
        response = urllib.request.urlopen(search_url).read()
        json_response = json.loads(response)
        for result in json_response['d']['results']:
            results.append({'title': result['Title'],
                            'link': result['Url'],
                            'summary': result['Description']})
    except urllib.error.URLError as e:
        print('Error when querying the Bing API: ', e)
    return results
def main():
    query = input('Please enter a query: ')
    results = run_query(query)
    rank = 1
    for result in results:
        print(rank)
        print(result['title'])
        print(result['link'])
        print(result['summary'])
        print()
        rank += 1
if __name__ == '__main__':
    main() |
from jinja2 import Undefined
from contextlib import contextmanager
from werkzeug.local import LocalStack, LocalProxy
from lektor.reporter import reporter
from lektor.utils import make_relative_url
_ctx_stack = LocalStack()
def url_to(*args, **kwargs):
"""Calculates a URL to another record."""
ctx = get_ctx()
if ctx is None:
raise RuntimeError("No context found")
return ctx.url_to(*args, **kwargs)
def get_asset_url(asset):
"""Calculates the asset URL relative to the current record."""
ctx = get_ctx()
if ctx is None:
raise RuntimeError("No context found")
asset = site_proxy.get_asset(asset)
if asset is None:
return Undefined("Asset not found")
info = ctx.build_state.get_file_info(asset.source_filename)
return "%s?h=%s" % (
ctx.source.url_to("!" + asset.url_path),
info.checksum[:8],
)
@LocalProxy
def site_proxy():
"""Returns the current pad."""
ctx = get_ctx()
if ctx is None:
return Undefined(hint="Cannot access the site from here", name="site")
return ctx.pad
@LocalProxy
def config_proxy():
"""Returns the current config."""
return site_proxy.db.config
def get_ctx():
"""Returns the current context."""
return _ctx_stack.top
def get_locale(default="en_US"):
"""Returns the current locale."""
ctx = get_ctx()
if ctx is not None:
rv = ctx.locale
if rv is not None:
return rv
return ctx.pad.db.config.site_locale
return default
class Context(object):
"""The context is a thread local object that provides the system with
general information about in which state it is. The context is created
whenever a source is processed and can be accessed by template engine and
other things.
It's considered read and write and also accumulates changes that happen
during processing of the object.
"""
def __init__(self, artifact):
self.artifact = artifact
self.source = artifact.source_obj
self.exc_info = None
self.build_state = self.artifact.build_state
self.pad = self.build_state.pad
# Processing information
self.referenced_dependencies = set()
self.sub_artifacts = []
self.flow_block_render_stack = []
self._forced_base_url = None
# General cache system where other things can put their temporary
# stuff in.
self.cache = {}
self._dependency_collectors = []
@property
def env(self):
"""The environment of the context."""
return self.pad.db.env
@property
def record(self):
"""If the source is a record it will be available here."""
rv = self.source
if rv is not None and rv.source_classification == "record":
return rv
@property
def locale(self):
"""Returns the current locale if it's available, otherwise `None`.
This does not fall back to the site locale.
"""
source = self.source
if source is not None:
alt_cfg = self.pad.db.config["ALTERNATIVES"].get(source.alt)
if alt_cfg:
return alt_cfg["locale"]
def push(self):
_ctx_stack.push(self)
def pop(self):
_ctx_stack.pop()
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
self.pop()
@property
def base_url(self):
"""The URL path for the current context."""
if self._forced_base_url:
return self._forced_base_url
if self.source is not None:
return self.source.url_path
return "/"
def url_to(self, path, alt=None, absolute=False, external=False):
"""Returns a URL to another path."""
if self.source is None:
raise RuntimeError(
"Can only generate paths to other pages if "
"the context has a source document set."
)
rv = self.source.url_to(path, alt=alt, absolute=True)
if absolute:
return rv
elif external:
return self.pad.make_absolute_url(rv)
return make_relative_url(self.base_url, rv)
def sub_artifact(self, *args, **kwargs):
"""Decorator version of :func:`add_sub_artifact`."""
def decorator(f):
self.add_sub_artifact(build_func=f, *args, **kwargs)
return f
return decorator
def add_sub_artifact(
self,
artifact_name,
build_func=None,
sources=None,
source_obj=None,
config_hash=None,
):
"""Sometimes it can happen that while building an artifact another
artifact needs building. This function is generally used to record
this request.
"""
aft = self.build_state.new_artifact(
artifact_name=artifact_name,
sources=sources,
source_obj=source_obj,
config_hash=config_hash,
)
self.sub_artifacts.append((aft, build_func))
reporter.report_sub_artifact(aft)
def record_dependency(self, filename):
"""Records a dependency from processing."""
self.referenced_dependencies.add(filename)
for coll in self._dependency_collectors:
coll(filename)
@contextmanager
def gather_dependencies(self, func):
"""For the duration of the `with` block the provided function will be
invoked for all dependencies encountered.
"""
self._dependency_collectors.append(func)
try:
yield
finally:
self._dependency_collectors.pop()
@contextmanager
def changed_base_url(self, value):
"""Temporarily overrides the URL path of the context."""
old = self._forced_base_url
self._forced_base_url = value
try:
yield
finally:
self._forced_base_url = old
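# Usage sketch (illustrative): the builder enters a Context around each
# artifact build; ``artifact`` here stands in for a real build artifact.
#
#     with Context(artifact) as ctx:
#         ctx.record_dependency(artifact.source_obj.source_filename)
#         url = ctx.url_to('/blog', absolute=True)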
|
import os
import flask
import pytest
from werkzeug.exceptions import HTTPException
app = flask.Flask(__name__)
def test_index_html(webui):
    info = webui.lektor_info
    def resolve(to_resolve):
        # Resolution needs an active request context; assumes the lektor_info
        # object exposes resolve_artifact(url_path).
        with app.test_request_context(to_resolve):
            return info.resolve_artifact(to_resolve)
    with pytest.raises(HTTPException) as exc:
        resolve('/dir_with_index_html')
    assert exc.value.response.status_code == 301
    assert exc.value.response.headers['Location'] == 'dir_with_index_html/'
    artifact = 'dir_with_index_html/index.html'
    artifact_path = os.path.join(info.output_path, artifact)
    assert resolve('/dir_with_index_html/') == (artifact, artifact_path)
    with pytest.raises(HTTPException) as exc:
        resolve('/dir_with_index_htm')
    assert exc.value.response.status_code == 301
    assert exc.value.response.headers['Location'] == 'dir_with_index_htm/'
    artifact = 'dir_with_index_htm/index.htm'
    artifact_path = os.path.join(info.output_path, artifact)
    assert resolve('/dir_with_index_htm/') == (artifact, artifact_path)
    artifact_path = os.path.join(info.output_path, 'empty')
    assert resolve('/empty') == (None, artifact_path)
    artifact_path = os.path.join(info.output_path, 'doesnt_exist')
    assert resolve('/doesnt_exist') == (None, artifact_path) |
' Search Imgur for a random image '
import configparser
import datetime
import json
import logging
import random
import os
import requests
from hogar.Utils.StringUtils import ignore_case_replace
from hogar.static import values as static_values
logger = logging.getLogger(__name__)
api_base = 'https://api.imgur.com/3/'
search_api = 'https://api.imgur.com/3/gallery/search/top/?q={term}'
credits_api = 'https://api.imgur.com/3/credits'
def enabled():
    '''
    Enabled
    Is this plugin enabled. Returning False here
    will cause this plugin to be ignored by the
    framework entirely.
    --
    @return bool
    '''
    return True
def applicable_types():
    '''
    Applicable Types
    Returns the type of messages this plugin is for.
    See: hogar.static.values
    --
    @return list
    '''
    return ['text']
def commands():
    """
    Commands
    In the case of text plugins, returns the commands
    that this plugin should trigger for. For other
    message types, an empty list should be returned.
    If your plugin applies to any command (in the
    case of text messages), simply supply a
    wildcard in the list, i.e. ['*']
    --
    @return list
    """
    return ['img']
def should_reply():
    '''
    Should Reply
    Specifies whether a reply should be sent to the original
    sender of the message that triggered this plugin.
    --
    @return bool
    '''
    return True
def reply_type():
    '''
    Reply Type
    Specifies the type of reply that should be sent to the
    sender. This is an optional function. See hogar.static.values
    for available types.
    --
    @return str
    '''
    return 'text'
def _get_client_id():
    '''
    Get Client ID
    Get the configured Imgur Client-ID
    --
    @return str
    '''
    config = configparser.ConfigParser()
    config.read(os.path.join(os.path.dirname(__file__), '../../../settings.ini'))
    return config.get('imgur', 'client_id', fallback='')
def _allow_nsfw():
    '''
    Allow NSFW
    Check if the results may have nsfw
    entries
    --
    @return bool
    '''
    config = configparser.ConfigParser()
    config.read(os.path.join(os.path.dirname(__file__), '../../../settings.ini'))
    return config.getboolean('imgur', 'nsfw')
def _client_id_set():
    '''
    Client ID Set
    Check if the Imgur ClientID has been set.
    --
    @return bool
    '''
    if len(_get_client_id()) > 0:
        return True
    return False
def _get_headers():
    '''
    Get Headers
    Get headers prepared for a request to the Imgur API
    @return dict
    '''
    # Client-ID authorization is the documented Imgur scheme; the exact
    # User-Agent string here is an assumption.
    return {
        'User-Agent': 'hogar v{v}'.format(v=static_values.version),
        'Authorization': 'Client-ID {c}'.format(c=_get_client_id())
    }
def _ask_imgur(url):
    '''
    Ask Imgur
    Make an Api request to imgur, searching
    for a term.
    --
    @param url:str
    @return str
    '''
    try:
        response = requests.get(url, headers=_get_headers())
        response = json.loads(response.text)
    except Exception as e:
        return str(e)
    return response
def _get_random_image(term):
    '''
    Get Random Image
    Get a random image from Imgur
    --
    @param term:str
    @return str
    '''
    data = _ask_imgur(search_api.format(term=term))
    if ('data' not in data) or (len(data['data']) <= 0):
        return 'Imgur query had no results'
    is_album = True
    while is_album:
        if _allow_nsfw():
            image_data = random.choice(data['data'])
        else:
            nsfw = True
            while nsfw:
                image_data = random.choice(data['data'])
                if not image_data['nsfw']:
                    nsfw = False
        if not image_data['is_album']:
            is_album = False
    response = '\nTitle: {title}\nLink: {link}'
    return response.format(title=image_data['title'], link=image_data['link'])
def _get_credits():
    '''
    Get Credits
    Ask Imgur how many request credits are left
    --
    @return str
    '''
    # Field names follow the documented /3/credits response.
    data = _ask_imgur(credits_api)
    response = '\nRemaining Requests: {remaining}\nRequest Reset At: {reset_at}'
    return response.format(
        remaining=data['data']['ClientRemaining'],
        reset_at=datetime.datetime.fromtimestamp(data['data']['UserReset']))
def run(message):
    '''
    Run
    Run the custom plugin specific code. A returned
    string is the message that will be sent back
    to the user.
    --
    @param message:dict The message sent by the user
    @return str
    '''
    if not _client_id_set():
        return 'Imgur Client-ID not set. Get one at https://api.imgur.com/oauth2/addclient'
    text = message['text']
    if text.startswith('@'):
        text = text.split(' ', 1)[1].strip()
    if text.startswith('/'):
        text = text.replace('/', '', 1).strip()
    for command in commands():
        text = ignore_case_replace(command, '', text).strip()
    if text == 'credits':
        response = _get_credits()
    else:
        response = _get_random_image(text)
    return response |
# -*- coding: utf-8 -*-
#
# Flask-OAuthlib documentation build configuration file, created by
# sphinx-quickstart on Fri May 17 21:54:48 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.append(os.path.abspath("_themes"))
sys.path.append(os.path.abspath("."))
sys.path.append(os.path.abspath(".."))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Flask-OAuthlib"
import datetime
copyright = (
'2013 - %i, <a href="http://lepture.com/">Hsiaoming Yang</a>'
% datetime.datetime.utcnow().year
)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import flask_oauthlib
version = flask_oauthlib.__version__
# The full version, including alpha/beta/rc tags.
release = flask_oauthlib.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "flask"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "flask-oauthlib.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
"index": ["brand.html", "sidebarintro.html", "searchbox.html"],
"**": ["brand.html", "localtoc.html", "relations.html", "searchbox.html"],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "Flask-OAuthlibdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"Flask-OAuthlib.tex",
"Flask-OAuthlib Documentation",
"Hsiaoming Yang",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
("index", "flask-oauthlib", "Flask-OAuthlib Documentation", ["Hsiaoming Yang"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"Flask-OAuthlib",
"Flask-OAuthlib Documentation",
"Hsiaoming Yang",
"Flask-OAuthlib",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
# coding: utf-8
import base64
from flask import request, Response
from oauthlib.common import to_unicode, bytes_type
def _get_uri_from_request(request):
"""
The uri returned from request.uri is not properly urlencoded
    (sometimes it's partially urldecoded). This is a weird hack to get
    werkzeug to return the proper urlencoded string uri.
"""
uri = request.base_url
if request.query_string:
uri += "?" + request.query_string.decode("utf-8")
return uri
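# (For example: the hack above keeps an encoded '%2Fhome' query value
# intact in the uri handed to oauthlib, where a partially decoded
# '/home' could otherwise appear. Hypothetical value, for illustration.)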
def extract_params():
"""Extract request params."""
uri = _get_uri_from_request(request)
http_method = request.method
headers = dict(request.headers)
if "wsgi.input" in headers:
del headers["wsgi.input"]
if "wsgi.errors" in headers:
del headers["wsgi.errors"]
body = request.form.to_dict()
return uri, http_method, body, headers
def to_bytes(text, encoding="utf-8"):
"""Make sure text is bytes type."""
if not text:
return text
if not isinstance(text, bytes_type):
text = text.encode(encoding)
return text
def decode_base64(text, encoding="utf-8"):
"""Decode base64 string."""
text = to_bytes(text, encoding)
return to_unicode(base64.b64decode(text), encoding)
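# For example, decoding the credential part of an HTTP Basic auth
# header: decode_base64('Y2xpZW50OnNlY3JldA==') -> 'client:secret'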
def create_response(headers, body, status):
"""Create response class for Flask."""
response = Response(body or "")
for k, v in list(headers.items()):
response.headers[str(k)] = v
response.status_code = status
return response
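# A minimal usage sketch (hypothetical names: `app` is a Flask app and
# `server` is an oauthlib endpoint such as WebApplicationServer; shown
# for illustration only, not part of the original module):
#
#     @app.route('/oauth/token', methods=['POST'])
#     def access_token():
#         uri, http_method, body, headers = extract_params()
#         headers, body, status = server.create_token_response(
#             uri, http_method, body, headers)
#         return create_response(headers, body, status)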
|
# coding: utf-8
"""
flask_storage.s3
~~~~~~~~~~~~~~~~~~~
S3 storage, save the file to Amazon S3.
:copyright: (c) 2013 Hsiaoming Yang.
"""
import mimetypes
from werkzeug import cached_property
from boto.s3.connection import S3Connection
from ._base import BaseStorage, urljoin
from ._utils import ConfigItem
class S3Storage(BaseStorage):
_params = ConfigItem("connection_params")
access_key = ConfigItem("access_key", required=True)
secret_key = ConfigItem("secret_key", required=True)
bucket_name = ConfigItem("bucket", required=True)
base_dir = ConfigItem("base_dir")
base_url = ConfigItem("base_url")
@cached_property
def _connection(self):
return S3Connection(self.access_key, self.secret_key, **self._params)
@cached_property
def bucket(self):
if self.bucket_name not in self._connection:
return self._connection.create_bucket(self.bucket_name)
return self._connection.get_bucket(self.bucket_name)
def url(self, filename):
"""Generate the url for a filename.
:param filename: filename for generating the url
"""
if self.base_dir:
filename = "%s/%s" % (self.base_dir, filename)
return urljoin(self.base_url, filename)
def read(self, filename):
if self.base_dir:
filename = "%s/%s" % (self.base_dir, filename)
k = self.bucket.get_key(filename)
if not k:
return None
return k.read()
def _generate_key(self, filename, headers=None):
if self.base_dir:
filename = "%s/%s" % (self.base_dir, filename)
k = self.bucket.new_key(filename)
if not headers or "Content-Type" not in headers:
ct = mimetypes.guess_type(filename)[0]
if ct:
k.set_metadata("Content-Type", ct)
return k
def write(self, filename, body, headers=None):
k = self._generate_key(filename, headers)
# since Flask-Storage is designed for public storage
# we need to set it public-read
return k.set_contents_from_string(body, headers=headers, policy="public-read")
def save(self, storage, filename, headers=None, check=True):
"""Save a storage (`werkzeug.FileStorage`) with the specified
filename.
:param storage: The storage to be saved.
:param filename: The destination of the storage.
"""
if check:
self.check(storage)
k = self._generate_key(filename)
return k.set_contents_from_stream(
storage.stream, headers=headers, policy="public-read"
)
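# A minimal usage sketch (construction and config handling live in
# flask_storage._base.BaseStorage, not shown here; `storage` is assumed
# to be a fully configured S3Storage instance):
#
#     storage.write('hello.txt', 'hello world')  # uploaded public-read
#     data = storage.read('hello.txt')           # fetch the bytes back
#     link = storage.url('hello.txt')            # base_url + base_dir + name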
|
"""
Flask-WTF
=========
Simple integration of Flask and WTForms, including CSRF, file upload
and Recaptcha integration.
Links
-----
* `documentation <https://flask-wtf.readthedocs.org>`_
* `development version
<http://github.com/lepture/flask-wtf>`_
"""
try:
import multiprocessing
except ImportError:
pass
import re
from setuptools import setup
with open("flask_wtf/__init__.py") as f:
m = re.findall(r"__version__\s*=\s*\'(.*)\'", f.read())
version = m[0]
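    # The regex reads the version straight out of flask_wtf/__init__.py
    # (a line like __version__ = '0.9.5') so setup.py can learn the
    # version without importing the package before its dependencies
    # are installed.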
setup(
name="Flask-WTF",
version=version,
url="http://github.com/lepture/flask-wtf",
license="BSD",
author="Dan Jacob",
author_email="danjac354@gmail.com",
maintainer="Hsiaoming Yang",
maintainer_email="me@lepture.com",
description="Simple integration of Flask and WTForms",
long_description=__doc__,
packages=["flask_wtf", "flask_wtf.recaptcha"],
test_suite="nose.collector",
zip_safe=False,
platforms="any",
install_requires=[
"Flask",
"Werkzeug",
"WTForms",
],
tests_require=[
"nose",
"Flask-Babel",
],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
|