text
stringlengths 26
2.53M
|
---|
<|endoftext|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Provides ``mapping`` of url paths to request handlers.
"""
from bootstrap import Bootstrap
from fund import InstantPaymentNotificationHandler
from fund import ThankYouHandler
from view import *
# URL-to-handler routing table (checked in order; first match wins).
mapping = [
    (r"/", Index),
    # Payment/funding endpoints (handlers from the fund module).
    (r"/ipn", InstantPaymentNotificationHandler),
    (r"/thank-you", ThankYouHandler),
    # Static/informational pages (trailing slash optional).
    (r"/about\/?", About),
    (r"/guide\/?", Guide),
    (r"/guide/download\/?", Download),
    (r"/guide/standards\/?", Standards),
    (r"/community\/?", Community),
    (r"/news\/?", News),
    (r"/support\/?", Support),
    (r"/contact\/?", Contact),
    (r"/press\/?", Press),
    (r"/legal/terms", Terms),
    # Library browsing and design CRUD.
    (r"/library\/?", Library),
    (r"/library/sketchup\/?", Library),
    (r"/library/series/(\w+)\/?", Library),
    (r"/library/users\/?", Users),
    (r"/library/users/([0-9]+)\/?", User),
    (r"/library/designs/([0-9]+)\/?", Design),
    (r"/library/designs/([0-9]+)/(edit)\/?", Design),
    (r"/library/designs\/?", Design),
    (r"/library/designs/add\/?", Design),
    (r"/library/designs/add/sketchup\/?", Design),
    # Post-action redirects.
    (r"/redirect/success/([0-9]+)\/?", RedirectSuccess),
    (r"/redirect/error\/?", RedirectError),
    (r"/redirect/after/delete\/?", RedirectAfterDelete),
    # Admin-only endpoints.
    (r"/admin/moderate\/?", Moderate),
    (r"/admin/bootstrap\/?", Bootstrap),
    (r"/activity", ActivityScreen),
    (r"/txns", TxnList),
    (r"/blob64/([^/]+)/([^/]+)\/?", Base64Blob),
    (r"/blob64/([^/]+)\/?", Base64Blob),
    (r"/i18n/message_strings.json", MessageStrings),
    # Catch-all 404 -- must remain last.
    (r"/.*", NotFound),
]
<|endoftext|> |
<|endoftext|># -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2015 François-Xavier Bourlet (bombela+zerorpc@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import msgpack
import gevent.pool
import gevent.queue
import gevent.event
import gevent.local
import gevent.lock
import logging
import sys
import gevent_zmq as zmq
from .exceptions import TimeoutExpired
from .context import Context
from .channel_base import ChannelBase
# Accessor for a pyzmq frame's payload.  On Python < 2.7 a copy of the
# buffer is returned (presumably because the zero-copy buffer cannot be
# used safely there -- TODO confirm); elsewhere the zero-copy buffer is
# returned directly.
if sys.version_info < (2, 7):
    def get_pyzmq_frame_buffer(frame):
        return frame.buffer[:]
else:
    def get_pyzmq_frame_buffer(frame):
        return frame.buffer
logger = logging.getLogger(__name__)
class SequentialSender(object):
    """Sends multipart messages on a zmq socket, one frame at a time.

    If a greenlet kill or timeout interrupts a send mid-message, the
    interrupted frame is re-sent so the multipart stream stays
    well-formed, and the interruption is re-raised afterwards.
    """

    def __init__(self, socket):
        self._socket = socket

    def _send(self, parts):
        # The interrupting exception is stored in `caught` instead of relying
        # on the `except ... as e` binding: Python 3 deletes that binding when
        # the except block exits, so the original `if e: raise e` pattern
        # crashed with UnboundLocalError after an interruption.
        caught = None
        for i in range(len(parts) - 1):
            try:
                self._socket.send(parts[i], copy=False, flags=zmq.SNDMORE)
            except (gevent.GreenletExit, gevent.Timeout) as e:
                if i == 0:
                    # Nothing sent yet: safe to abort outright.
                    raise
                caught = e
                # Mid-message: finish sending this frame so the multipart
                # message is not left truncated on the wire.
                self._socket.send(parts[i], copy=False, flags=zmq.SNDMORE)
        try:
            self._socket.send(parts[-1], copy=False)
        except (gevent.GreenletExit, gevent.Timeout) as e:
            caught = e
            self._socket.send(parts[-1], copy=False)
        if caught:
            raise caught

    def __call__(self, parts, timeout=None):
        """Send *parts*, bounding the whole operation by *timeout* seconds."""
        if timeout:
            with gevent.Timeout(timeout):
                self._send(parts)
        else:
            self._send(parts)
class SequentialReceiver(object):
    """Receives whole multipart messages from a zmq socket."""

    def __init__(self, socket):
        self._socket = socket

    def _recv(self):
        # `caught` stores any interruption; the `except ... as e` binding is
        # deleted at the end of the except block on Python 3, so the original
        # `if e: raise e` crashed with UnboundLocalError after a timeout.
        caught = None
        parts = []
        while True:
            try:
                part = self._socket.recv(copy=False)
            except (gevent.GreenletExit, gevent.Timeout) as e:
                if len(parts) == 0:
                    # Nothing received yet: abort without losing data.
                    raise
                caught = e
                # Mid-message: keep reading so no frames are dropped.
                part = self._socket.recv(copy=False)
            parts.append(part)
            if not part.more:
                break
        if caught:
            raise caught
        return parts

    def __call__(self, timeout=None):
        """Receive one multipart message, bounded by *timeout* seconds."""
        if timeout:
            with gevent.Timeout(timeout):
                return self._recv()
        else:
            return self._recv()
class Sender(SequentialSender):
    # Queue-based sender: __call__ only enqueues; a background greenlet
    # performs the actual (blocking) zmq sends in FIFO order.

    def __init__(self, socket):
        self._socket = socket
        # Channel() is an unbuffered queue: put() blocks until the sender
        # greenlet takes the item, providing natural backpressure.
        self._send_queue = gevent.queue.Channel()
        self._send_task = gevent.spawn(self._sender)

    def close(self):
        # Kill the background greenlet; pending items are discarded.
        if self._send_task:
            self._send_task.kill()

    def _sender(self):
        # Runs forever in its own greenlet, draining the queue.
        for parts in self._send_queue:
            super(Sender, self)._send(parts)

    def __call__(self, parts, timeout=None):
        try:
            self._send_queue.put(parts, timeout=timeout)
        except gevent.queue.Full:
            # put() timed out waiting for the sender greenlet.
            raise TimeoutExpired(timeout)
class Receiver(SequentialReceiver):
    # Queue-based receiver: a background greenlet reads messages off the
    # socket; __call__ only dequeues.

    def __init__(self, socket):
        self._socket = socket
        # Unbuffered channel: the reader greenlet blocks until a consumer
        # picks up the previous message.
        self._recv_queue = gevent.queue.Channel()
        self._recv_task = gevent.spawn(self._recver)

    def close(self):
        if self._recv_task:
            self._recv_task.kill()
        # Drop the queue so no stale messages can be read after close.
        self._recv_queue = None

    def _recver(self):
        # Runs forever in its own greenlet.
        while True:
            parts = super(Receiver, self)._recv()
            self._recv_queue.put(parts)

    def __call__(self, timeout=None):
        try:
            return self._recv_queue.get(timeout=timeout)
        except gevent.queue.Empty:
            # get() timed out waiting for a message.
            raise TimeoutExpired(timeout)
class Event(object):
    """A single zerorpc message: (header, name, args), msgpack-serialized.

    The header carries a unique message id and protocol version ("v": 3);
    `identity` holds the zmq routing frames when a ROUTER socket is used.
    """

    __slots__ = ["_name", "_args", "_header", "_identity"]

    def __init__(self, name, args, context, header=None):
        self._name = name
        self._args = args
        if header is None:
            # Fresh v3 protocol header with a new message id from the context.
            self._header = {"message_id": context.new_msgid(), "v": 3}
        else:
            self._header = header
        self._identity = None

    @property
    def header(self):
        return self._header

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, v):
        self._name = v

    @property
    def args(self):
        return self._args

    @property
    def identity(self):
        return self._identity

    @identity.setter
    def identity(self, v):
        self._identity = v

    def pack(self):
        """Serialize to msgpack bytes (bin type preserved)."""
        return msgpack.Packer(use_bin_type=True).pack(
            (self._header, self._name, self._args)
        )

    @staticmethod
    def unpack(blob):
        """Deserialize *blob* into an Event, tolerating headerless legacy messages."""
        # NOTE(review): Unpacker(encoding=...) was removed in msgpack >= 1.0
        # (replaced by raw=False); this code requires an older msgpack -- confirm
        # the project's pinned version.
        unpacker = msgpack.Unpacker(encoding="utf-8")
        unpacker.feed(blob)
        unpacked_msg = unpacker.unpack()
        try:
            (header, name, args) = unpacked_msg
        except Exception as e:
            raise Exception('invalid msg format "{0}": {1}'.format(unpacked_msg, e))
        # Backward compatibility
        if not isinstance(header, dict):
            header = {}
        return Event(name, args, None, header)

    def __str__(self, ignore_args=False):
        if ignore_args:
            args = "[...]"
        else:
            args = self._args
            try:
                # If args is still a packed blob, show its decoded form.
                args = "<<{0}>>".format(str(self.unpack(self._args)))
            except Exception:
                pass
        if self._identity:
            identity = ", ".join(repr(x.bytes) for x in self._identity)
            return "<{0}> {1} {2} {3}".format(identity, self._name, self._header, args)
        return "{0} {1} {2}".format(self._name, self._header, args)
class Events(ChannelBase):
    """Zmq socket wrapper that picks send/receive strategies by socket type.

    PUSH/PUB/DEALER/ROUTER sockets get the queued, greenlet-backed Sender;
    REQ/REP get the sequential variants; socket types that cannot send or
    receive get None, surfaced via the *_is_supported properties.
    """

    def __init__(self, zmq_socket_type, context=None):
        self._debug = False
        self._zmq_socket_type = zmq_socket_type
        self._context = context or Context.get_instance()
        self._socket = self._context.socket(zmq_socket_type)
        if zmq_socket_type in (zmq.PUSH, zmq.PUB, zmq.DEALER, zmq.ROUTER):
            self._send = Sender(self._socket)
        elif zmq_socket_type in (zmq.REQ, zmq.REP):
            self._send = SequentialSender(self._socket)
        else:
            self._send = None
        if zmq_socket_type in (zmq.PULL, zmq.SUB, zmq.DEALER, zmq.ROUTER):
            self._recv = Receiver(self._socket)
        elif zmq_socket_type in (zmq.REQ, zmq.REP):
            self._recv = SequentialReceiver(self._socket)
        else:
            self._recv = None

    @property
    def recv_is_supported(self):
        return self._recv is not None

    @property
    def emit_is_supported(self):
        return self._send is not None

    def __del__(self):
        try:
            if not self._socket.closed:
                self.close()
        except (AttributeError, TypeError):
            # Interpreter shutdown may already have torn down globals.
            pass

    def close(self):
        """Close sender, receiver and the underlying socket."""
        try:
            self._send.close()
        except AttributeError:
            pass  # no sender for this socket type, or a sequential one
        try:
            self._recv.close()
        except AttributeError:
            pass  # no receiver for this socket type, or a sequential one
        self._socket.close()

    @property
    def debug(self):
        return self._debug

    @debug.setter
    def debug(self, v):
        if v != self._debug:
            self._debug = v
            if self._debug:
                logger.debug("debug enabled")
            else:
                logger.debug("debug disabled")

    def _resolve_endpoint(self, endpoint, resolve=True):
        """Expand *endpoint* (possibly nested lists) into a flat list."""
        if resolve:
            endpoint = self._context.hook_resolve_endpoint(endpoint)
        if isinstance(endpoint, (tuple, list)):
            r = []
            for sub_endpoint in endpoint:
                r.extend(self._resolve_endpoint(sub_endpoint, resolve))
            return r
        return [endpoint]

    def connect(self, endpoint, resolve=True):
        r = []
        for endpoint_ in self._resolve_endpoint(endpoint, resolve):
            r.append(self._socket.connect(endpoint_))
            logger.debug("connected to %s (status=%s)", endpoint_, r[-1])
        return r

    def bind(self, endpoint, resolve=True):
        r = []
        for endpoint_ in self._resolve_endpoint(endpoint, resolve):
            r.append(self._socket.bind(endpoint_))
            logger.debug("bound to %s (status=%s)", endpoint_, r[-1])
        return r

    def disconnect(self, endpoint, resolve=True):
        r = []
        for endpoint_ in self._resolve_endpoint(endpoint, resolve):
            r.append(self._socket.disconnect(endpoint_))
            # Fixed: this used the root logger via logging.debug(), unlike
            # connect()/bind() which use the module-level logger.
            logger.debug("disconnected from %s (status=%s)", endpoint_, r[-1])
        return r

    def new_event(self, name, args, xheader=None):
        """Build an Event, merging any extra header fields from *xheader*."""
        event = Event(name, args, context=self._context)
        if xheader:
            event.header.update(xheader)
        return event

    def emit_event(self, event, timeout=None):
        """Send *event*, prefixing routing frames for ROUTER/DEALER sockets."""
        if self._debug:
            logger.debug("--> %s", event)
        if event.identity:
            parts = list(event.identity or list())
            parts.extend(["", event.pack()])
        elif self._zmq_socket_type in (zmq.DEALER, zmq.ROUTER):
            parts = ("", event.pack())
        else:
            parts = (event.pack(),)
        self._send(parts, timeout)

    def recv(self, timeout=None):
        """Receive one Event, splitting any routing frames from the payload."""
        parts = self._recv(timeout=timeout)
        if len(parts) > 2:
            identity = parts[0:-2]
            blob = parts[-1]
        elif len(parts) == 2:
            identity = parts[0:-1]
            blob = parts[-1]
        else:
            identity = None
            blob = parts[0]
        event = Event.unpack(get_pyzmq_frame_buffer(blob))
        event.identity = identity
        if self._debug:
            logger.debug("<-- %s", event)
        return event

    def setsockopt(self, *args):
        return self._socket.setsockopt(*args)

    @property
    def context(self):
        return self._context
<|endoftext|> |
#!/usr/bin/env python
"""Django's command line utility."""
import os
import sys

if __name__ == "__main__":
    # Use the project settings unless the environment already overrides them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
<|endoftext|> |
"""Installer for hippybot
"""
import os


def _read(path):
    """Return the full text of *path*, closing the file promptly."""
    # The original called open(...).read() in several places and never
    # closed the handles; context managers guarantee closure.
    with open(path) as f:
        return f.read()


def _read_lines(path):
    """Return the lines of *path* as a list, closing the file promptly."""
    with open(path) as f:
        return f.readlines()


cwd = os.path.dirname(__file__)
__version__ = _read(os.path.join(cwd, "hippybot", "version.txt")).strip()

try:
    from setuptools import setup, find_packages
except ImportError:
    # Bootstrap setuptools on systems that do not have it installed.
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages

setup(
    name="hippybot",
    description="Python Hipchat bot",
    long_description=_read("README.rst"),
    version=__version__,
    author="Wes Mason",
    author_email="wes[at]1stvamp[dot]org",
    url="http://github.com/1stvamp/hippybot",
    packages=find_packages(exclude=["ez_setup"]),
    install_requires=_read_lines("requirements.txt"),
    package_data={"hippybot": ["version.txt"]},
    include_package_data=True,
    extras_require={
        "plugins": _read_lines("extras_requirements.txt"),
    },
    entry_points={
        "console_scripts": [
            "hippybot = hippybot.bot:main",
        ],
    },
    license="BSD",
)
<|endoftext|> |
#!/usr/bin/env python
"""Django command-line entry point for the twobuntu project."""
import os
import sys

if __name__ == "__main__":
    # Use the project settings unless the environment already overrides them.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "twobuntu.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
<|endoftext|> |
<|endoftext|># -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the Category model (name + optional image)."""

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="Category",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                (
                    "name",
                    models.CharField(
                        help_text=b"The name of the category.", max_length=40
                    ),
                ),
                (
                    "image",
                    models.ImageField(
                        help_text=b"A representative image.",
                        null=True,
                        upload_to=b"categories",
                        blank=True,
                    ),
                ),
            ],
            options={
                "ordering": ("name",),
                "verbose_name_plural": "Categories",
            },
            bases=(models.Model,),
        ),
    ]
<|endoftext|> |
<|endoftext|>import twitter
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.db import transaction
from django.shortcuts import redirect, render
from twobuntu.news.forms import AddItemForm
@user_passes_test(lambda u: u.is_staff)
def add(request):
    """
    Add news items to the home page.

    Staff-only. On POST, validates the form, stamps the current user as
    reporter and saves inside an atomic transaction; a TwitterError raised
    during save is reported and the form redisplayed.
    """
    if request.method == "POST":
        form = AddItemForm(data=request.POST)
        if form.is_valid():
            item = form.save(commit=False)
            item.reporter = request.user
            try:
                with transaction.atomic():
                    item.save()
            except twitter.TwitterError as e:
                # NOTE(review): `e.message` is Python 2 only; Python 3
                # exceptions have no .message attribute -- confirm runtime.
                messages.error(
                    request,
                    'Twitter error: "%s" Please try again.' % e.message[0]["message"],
                )
            else:
                messages.info(request, "Your news item has been published!")
                return redirect("home")
    else:
        form = AddItemForm()
    return render(
        request,
        "form.html",
        {
            "title": "Add Item",
            "form": form,
            "description": "Enter the details for the news item below.",
            "action": "Add",
        },
    )
<|endoftext|> |
<|endoftext|># -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010-2015, 2degrees Limited.
# All Rights Reserved.
#
# This file is part of django-wsgi <https://github.com/2degrees/django-wsgi/>,
# which is subject to the provisions of the BSD at
# <http://dev.2degreesnetwork.com/p/2degrees-license.html>. A copy of the
# license should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS"
# AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST
# INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
Exceptions raised by :mod:`django_wsgi.`
"""
__all__ = ("DjangoWSGIException", "ApplicationCallError")
class DjangoWSGIException(Exception):
    """Root of the django_wsgi exception hierarchy.

    Catch this to handle any error raised by :mod:`django_wsgi`.
    """
class ApplicationCallError(DjangoWSGIException):
    """Raised when an embedded WSGI application is called incorrectly."""
<|endoftext|> |
<|endoftext|>import boto
import boto.s3.connection
from django.conf import settings
import logging
log = logging.getLogger(__name__)
def get_s3_connection():
    """Build a boto S3 connection from Django settings.

    Returns None when any of S3_ACCESS_KEY / S3_SECRET_KEY / S3_HOST is
    unset or falsy.
    """
    if settings.S3_ACCESS_KEY and settings.S3_SECRET_KEY and settings.S3_HOST:
        # Lazy %-style args: the message is only formatted if DEBUG
        # logging is actually enabled.
        log.debug(
            "Connecting to %s, with secure connection is %s",
            settings.S3_HOST,
            settings.S3_SECURE_CONNECTION,
        )
        return boto.connect_s3(
            aws_access_key_id=settings.S3_ACCESS_KEY,
            aws_secret_access_key=settings.S3_SECRET_KEY,
            host=settings.S3_HOST,
            is_secure=settings.S3_SECURE_CONNECTION,
            calling_format=boto.s3.connection.OrdinaryCallingFormat(),
        )
    return None
def get_or_create_bucket(s3_connection):
    """Return the configured bucket, creating it if it does not exist.

    Uses ``lookup()`` instead of ``get_bucket()``: with default validation
    get_bucket() raises S3ResponseError for a missing bucket rather than
    returning None, so the original None-check could never trigger.
    """
    bucket = s3_connection.lookup(settings.S3_BUCKET_NAME)
    if bucket is None:
        bucket = s3_connection.create_bucket(settings.S3_BUCKET_NAME)
    return bucket
<|endoftext|> |
<|endoftext|>from django.db import models
import datetime
from common.models import Project
class Stage(models.Model):
    """A progress/status stage attached to a Project."""

    name = models.CharField(max_length=128)
    project = models.ForeignKey(Project)
    text = models.TextField(default="", blank=True)
    link = models.URLField(default=None, blank=True, null=True)
    state = models.CharField(max_length=24, default="info", blank=True)
    weight = models.IntegerField(default=0)
    # Pass the callable itself, not its result: `datetime.datetime.now()`
    # was evaluated once at import time, freezing that moment as the
    # default for every row created afterwards.
    updated = models.DateTimeField(default=datetime.datetime.now)

    def save(self, *args, **kwargs):
        """Touch `updated` on every save."""
        self.updated = datetime.datetime.now()
        return super(Stage, self).save(*args, **kwargs)

    def __str__(self):
        return self.name
<|endoftext|> |
<|endoftext|># -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds `filter` (launch filter text) and `main` (short-statistic flag)
    fields to the TestPlan model."""

    dependencies = [
        ("testreport", "0026_testresult_launch_item_id"),
    ]

    operations = [
        migrations.AddField(
            model_name="testplan",
            name="filter",
            field=models.TextField(
                default=b"",
                max_length=128,
                verbose_name="Started by filter",
                blank=True,
            ),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name="testplan",
            name="main",
            field=models.BooleanField(
                default=False, verbose_name="Show in short statistic"
            ),
            preserve_default=True,
        ),
    ]
<|endoftext|> |
<|endoftext|>import gevent
from gevent import monkey
monkey.patch_all()
import time
import smtplib
TEST_MAIL = """
Date: Wed, 30 Jul 2014 03:29:50 +0800 (CST)
From: =?utf-8?B?6IGU5oOz?= <client@gsmtpd.org>
To: test@gsmtpd.org
Message-ID: <766215193.1675381406662190229.JavaMail.root@USS-01>
Subject: =?utf-8?B?6IGU5oOz56e75Yqo5LqS6IGU572R5pyN5Yqh5rOo5YaM56Gu6K6k6YKu5Lu2?=
MIME-Version: 1.0
Content-Type: multipart/mixed;
boundary="----=_Part_335076_1490382245.1406662190222"
------=_Part_335076_1490382245.1406662190222
Content-Type: multipart/related;
boundary="----=_Part_335077_605133107.1406662190222"
------=_Part_335077_605133107.1406662190222
Content-Type: text/html;charset=utf-8
Content-Transfer-Encoding: quoted-printable
<html><head></head><body>=E5=B0=8A=E6=95=AC=E7=9A=84=E7=94=A8=E6=88=B7=EF=
=BC=9A<br/>=E6=82=A8=E5=A5=BD=EF=BC=81<br/>=E8=AF=B7=E7=82=B9=E5=87=BB=E8=
=81=94=E6=83=B3=E5=B8=90=E5=8F=B7=E7=A1=AE=E8=AE=A4=E9=93=BE=E6=8E=A5=EF=BC=
=8C=E4=BB=A5=E6=A0=A1=E9=AA=8C=E6=82=A8=E7=9A=84=E8=81=94=E6=83=B3=E5=B8=90=
=E5=8F=B7=EF=BC=9A<br/><a href=3D"https://passport.lenovo.com/wauthen/verif=
yuser?username=3D&vc=3DuHwf&accountid=3D1358934&lenovoid.=
cb=3D&lenovoid.realm=3Dthinkworld.lenovo.com&lang=3Dzh_CN&display=3D&lenovo=
id.ctx=3D&lenovoid.action=3D&lenovoid.lang=3D&lenovoid.uinfo=3D&lenovoid.vp=
=3D&verifyFlag=3Dnull">https://passport.lenovo.com/wauthen/verifyuser?usern=
ame=3o.org&vc=3DuHwf&accountid=3&lenovoid.cb=3D&lenov=
oid.realm=3Dthinkworld.lenovo.com&lang=3Dzh_CN&display=3D&lenovoid.ctx=3D&l=
enovoid.action=3D&lenovoid.lang=3D&lenovoid.uinfo=3D&lenovoid.vp=3D&verifyF=
lag=3Dnull</a><br/>=EF=BC=88=E5=A6=82=E6=9E=9C=E4=B8=8A=E9=9D=A2=E7=9A=84=
=E9=93=BE=E6=8E=A5=E6=97=A0=E6=B3=95=E7=82=B9=E5=87=BB=EF=BC=8C=E6=82=A8=E4=
=B9=9F=E5=8F=AF=E4=BB=A5=E5=A4=8D=E5=88=B6=E9=93=BE=E6=8E=A5=EF=BC=8C=E7=B2=
=98=E8=B4=B4=E5=88=B0=E6=82=A8=E6=B5=8F=E8=A7=88=E5=99=A8=E7=9A=84=E5=9C=B0=
=E5=9D=80=E6=A0=8F=E5=86=85=EF=BC=8C=E7=84=B6=E5=90=8E=E6=8C=89=E2=80=9C=E5=
=9B=9E=E8=BD=A6=E2=80=9D=E9=94=AE)=E3=80=82<br/>=E6=9D=A5=E8=87=AA=E8=81=94=
=E6=83=B3=E5=B8=90=E5=8F=B7</body></html>
------=_Part_335077_605133107.1406662190222--
------=_Part_335076_1490382245.1406662190222--
"""
def timeit(func):
    """Decorator: benchmark *func* over `num` concurrent SMTP connections.

    Runs three rounds; each round opens `num` fresh connections to
    127.0.0.1:<port>, times the decorated call, then tears the
    connections down.  Returns the best requests-per-second figure.
    """
    def wrap(num, port, *args, **kwargs):
        max_rqs = 0
        for _ in range(3):
            # NOTE(review): SMTP(port=port) without a host does not connect;
            # the explicit connect() below establishes the sessions.
            conns = [smtplib.SMTP(port=port) for x in range(num)]
            list(map(lambda x: x.connect("127.0.0.1", port), conns))
            start_at = time.time()
            func(num, conns, **kwargs)
            interval = time.time() - start_at
            for con in conns:
                try:
                    con.quit()
                    con.close()
                except Exception:
                    # Best-effort teardown; servers may already have dropped us.
                    pass
            # Let the server settle between rounds.
            gevent.sleep(3)
            rqs = num / interval
            max_rqs = max(rqs, max_rqs)
        return max_rqs
    return wrap
@timeit
def helo(num, conns):
    """Issue HELO on every connection concurrently and wait for all."""
    greenlets = [gevent.spawn(conn.helo) for conn in conns]
    gevent.joinall(greenlets)
@timeit
def send(num, conns):
    """Send the sample message on every connection concurrently."""
    greenlets = [
        gevent.spawn(conn.sendmail, "r@r.com", ["test@test.org"], TEST_MAIL)
        for conn in conns
    ]
    gevent.joinall(greenlets)
def main(port, num):
    """Print connection count plus HELO and send throughput for one run."""
    helo_rqs = helo(num, port)
    send_rqs = send(num, port)
    print("%d %s %s" % (num, helo_rqs, send_rqs))
if __name__ == "__main__":
    import sys
    try:
        # Usage: concurrency.py <port> <connection number>
        main(int(sys.argv[1]), int(sys.argv[2]))
    except IndexError:
        print("python concurrency.py <port> <connection number>")
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
import sys
import json
# str/bytes compatibility shims.
if sys.version_info < (3,):
    # Python 2: str is already bytes, so both are identity functions.
    def b(x):
        return x

    def s(x):
        return x
else:
    # Python 3: convert between str and utf-8 bytes explicitly.
    def b(x):
        return bytes(x, "utf-8")

    def s(x):
        return x.decode("utf-8")
def parse_payload(payload):
    """Normalize *payload* into a JSON string.

    A non-string payload (a sequence of words) is joined with spaces first.
    If the result is not already valid JSON, "key value" input becomes
    '{"key": "value"}' and a single bare token is returned as-is.
    """
    if not isinstance(payload, str):
        payload = " ".join(payload)
    try:
        json.loads(payload)
    except ValueError:
        kv = payload.split(" ", 1)
        if len(kv) > 1:
            # json.dumps escapes quotes/backslashes in key and value; the
            # original '"%s": "%s"' interpolation produced invalid JSON
            # whenever the value contained a double quote.
            payload = json.dumps({kv[0]: kv[1]})
        else:
            payload = "%s" % kv[0]
    return payload
def requires_elements(xs, dictionary):
    """Raise KeyError naming every element of *xs* missing from *dictionary*."""
    missing_values = [x for x in xs if x not in dictionary]
    if missing_values:
        err_msg = ", ".join(missing_values)
        raise KeyError("Missing values %s" % (err_msg))
<|endoftext|> |
<|endoftext|>from flask_resty import Api, GenericModelView
from marshmallow import fields, Schema
import pytest
from sqlalchemy import Column, Integer, String
import helpers
# -----------------------------------------------------------------------------
@pytest.yield_fixture
def models(db):
    """Create a Widget model with a composite (id_1, id_2) primary key."""
    class Widget(db.Model):
        __tablename__ = "widgets"

        id_1 = Column(Integer, primary_key=True)
        id_2 = Column(Integer, primary_key=True)
        name = Column(String, nullable=False)

    db.create_all()
    yield {
        "widget": Widget,
    }
    db.drop_all()
@pytest.fixture
def schemas():
    """Marshmallow schema serializing both id components as strings."""
    class WidgetSchema(Schema):
        id_1 = fields.Integer(as_string=True)
        id_2 = fields.Integer(as_string=True)
        name = fields.String(required=True)

    return {
        "widget": WidgetSchema(),
    }
@pytest.fixture(autouse=True)
def routes(app, models, schemas):
    """Register list/detail views addressed by the composite (id_1, id_2) id."""
    class WidgetViewBase(GenericModelView):
        model = models["widget"]
        schema = schemas["widget"]
        id_fields = ("id_1", "id_2")

    class WidgetListView(WidgetViewBase):
        def get(self):
            return self.list()

        def post(self):
            # allow_client_id: composite ids must be supplied by the client.
            return self.create(allow_client_id=True)

    class WidgetView(WidgetViewBase):
        def get(self, id_1, id_2):
            return self.retrieve((id_1, id_2))

        def patch(self, id_1, id_2):
            return self.update((id_1, id_2), partial=True)

        def delete(self, id_1, id_2):
            return self.destroy((id_1, id_2))

    api = Api(app)
    api.add_resource(
        "/widgets",
        WidgetListView,
        WidgetView,
        id_rule="<int:id_1>/<int:id_2>",
    )
@pytest.fixture(autouse=True)
def data(db, models):
    """Seed three widgets; (1,2) and (1,3) share the first id component."""
    db.session.add_all(
        (
            models["widget"](id_1=1, id_2=2, name="Foo"),
            models["widget"](id_1=1, id_2=3, name="Bar"),
            models["widget"](id_1=4, id_2=5, name="Baz"),
        )
    )
    db.session.commit()
# -----------------------------------------------------------------------------
def test_list(client):
    """GET /widgets returns all three seeded rows with string ids."""
    response = client.get("/widgets")
    assert response.status_code == 200

    assert helpers.get_data(response) == [
        {
            "id_1": "1",
            "id_2": "2",
            "name": "Foo",
        },
        {
            "id_1": "1",
            "id_2": "3",
            "name": "Bar",
        },
        {
            "id_1": "4",
            "id_2": "5",
            "name": "Baz",
        },
    ]
def test_retrieve(client):
    """GET by composite id path /widgets/<id_1>/<id_2>."""
    response = client.get("/widgets/1/2")
    assert response.status_code == 200

    assert helpers.get_data(response) == {
        "id_1": "1",
        "id_2": "2",
        "name": "Foo",
    }
def test_create(client):
    """POST a new widget; Location header reflects the composite id."""
    response = helpers.request(
        client,
        "POST",
        "/widgets",
        {
            "id_1": "4",
            "id_2": "6",
            "name": "Qux",
        },
    )
    assert response.status_code == 201
    assert response.headers["Location"] == "http://localhost/widgets/4/6"

    assert helpers.get_data(response) == {
        "id_1": "4",
        "id_2": "6",
        "name": "Qux",
    }
def test_update(client):
    """PATCH an existing widget, then verify via a follow-up GET."""
    update_response = helpers.request(
        client,
        "PATCH",
        "/widgets/1/2",
        {
            "id_1": "1",
            "id_2": "2",
            "name": "Qux",
        },
    )
    assert update_response.status_code == 204

    retrieve_response = client.get("/widgets/1/2")
    assert retrieve_response.status_code == 200

    assert helpers.get_data(retrieve_response) == {
        "id_1": "1",
        "id_2": "2",
        "name": "Qux",
    }
def test_destroy(client):
    """DELETE a widget, then confirm it is gone (404)."""
    destroy_response = client.delete("/widgets/1/2")
    assert destroy_response.status_code == 204

    retrieve_response = client.get("/widgets/1/2")
    assert retrieve_response.status_code == 404
<|endoftext|> |
<|endoftext|>from .dogpile import Dogpile
<|endoftext|> |
<|endoftext|>"""
RPi-Tron-Radio
Raspberry Pi Web-Radio with 2.8" TFT Touchscreen and Tron-styled graphical interface
GitHub: http://github.com/5volt-junkie/RPi-Tron-Radio
Blog: http://5volt-junkie.net
MIT License: see license.txt
"""
import pygame
from pygame.locals import *
import time
import datetime
import sys
import os
import glob
import subprocess
os.environ["SDL_FBDEV"] = "/dev/fb1"
os.environ["SDL_MOUSEDEV"] = "/dev/input/touchscreen"
os.environ["SDL_MOUSEDRV"] = "TSLIB"
#colors R G B
white = (255, 255, 255)
red = (255, 0, 0)
green = ( 0, 255, 0)
blue = ( 0, 0, 255)
black = ( 0, 0, 0)
cyan = ( 50, 255, 255)
magenta = (255, 0, 255)
yellow = (255, 255, 0)
orange = (255, 127, 0)
#screen size
width = 320
height = 240
size = (width, height)
screen = pygame.display.set_mode(size)
pygame.init()
#disable mouse cursor
pygame.mouse.set_visible(False)
#define font
font = pygame.font.Font(None, 25)
#screensaver
screensaver_timer = 5 #time until screensaver will be enabled, in minutes
screensaver = False
#load default skin
menu = 1
skin_number = 1
max_skins = 8
font_color = cyan
skin1 = pygame.image.load("skins/skin_tron_m1.png")
skin2 = pygame.image.load("skins/skin_tron_m2.png")
skin = skin1
screen.blit(skin, (0, 0))
#initial volume settings
subprocess.call('mpc volume 100' , shell=True)
reboot_label = font.render("rebooting...", 1, (font_color))
poweroff_label = font.render("shutting down", 1, (font_color))
song_title = " "
playlist = " "
def _halt(label, command):
    """Show *label* on a blank screen, stop playback, then run *command*.

    Shared body of reboot()/poweroff(), which were copy-pasted duplicates.
    """
    screen.fill(black)
    screen.blit(label, (10, 100))
    pygame.display.flip()
    # Give the user a moment to read the message.
    time.sleep(5)
    subprocess.call('mpc stop', shell=True)
    subprocess.call(command, shell=True)


def reboot():
    """Announce and reboot the system."""
    _halt(reboot_label, 'reboot')


def poweroff():
    """Announce and power the system off."""
    _halt(poweroff_label, 'poweroff')
#copy playing title to favorite.txt
def favorite():
    """Append the currently playing title to the favorites file."""
    print(song_title)
    # Context manager guarantees the file is closed even if the write fails.
    with open('/var/www/favorite.txt', 'a') as f:
        f.write('-' + song_title + '\n')
#function runs if touchscreen was touched (and screensaver is disabled)
def on_touch():
    """Map the touched position (global `pos`) to one of the 8 buttons.

    The buttons form a 4x2 grid: columns start at x = 13, 90, 167, 244
    (62 px wide); rows start at y = 121 and 181 (52 px tall).  Replaces
    eight copy-pasted range checks with a table lookup.
    """
    x, y = pos
    origins = [
        (13, 121), (90, 121), (167, 121), (244, 121),   # buttons 1-4
        (13, 181), (90, 181), (167, 181), (244, 181),   # buttons 5-8
    ]
    for index, (left, top) in enumerate(origins):
        if left <= x <= left + 62 and top <= y <= top + 52:
            button(index + 1)
#which button (and which menu) was presed on touch
def button(number):
    """Dispatch button *number* according to the active menu.

    Menu 1 is playback control (play/pause/volume/prev/next);
    menu 2 is system control (favorite, skin, quit, power, reboot).
    """
    global menu
    if menu == 1:
        if number == 1:
            subprocess.call('mpc play', shell=True)
            #print "play"
        if number == 2:
            subprocess.call('mpc pause', shell=True)
            #print "pause"
        if number == 3:
            subprocess.call('mpc volume +5', shell=True)
            #print "vol +x"
        if number == 4:
            subprocess.call('mpc volume 0', shell=True)
            #print "vol 0"
        if number == 5:
            subprocess.call('mpc prev', shell=True)
            #print "prev"
        if number == 6:
            subprocess.call('mpc next', shell=True)
            #print "next"
        if number == 7:
            subprocess.call('mpc volume -5', shell=True)
            #print "vol -x"
        if number == 8:
            #print "go to menu 2"
            menu = 2
            update_screen()
        return
    if menu == 2:
        if number == 1:
            favorite()
        if number == 2:
            #print "switch skin"
            global skin_number
            skin_number = skin_number+1
            #print skin_number
            update_screen()
        if number == 3:
            # "Run in background": quit the UI but leave mpc playing.
            pygame.quit()
            sys.exit()
        if number == 4:
            #print "quit radio"
            subprocess.call('mpc stop', shell=True)
            pygame.quit()
            sys.exit()
        if number == 5:
            print("power off")
            poweroff()
        if number == 6:
            print("reboot")
            reboot()
        if number == 7:
            #print "update screen"
            update_screen()
        if number == 8:
            #print "go to menu 1"
            menu = 1
            update_screen()
        return
#function to update screen
def update_screen():
    """Redraw the display for the current skin, menu and screensaver state."""
    global skin_number
    if skin_number == 9:
        # Wrap around past the last skin.
        skin_number = 1
    # number -> (skin file fragment, font color); replaces the original
    # eight duplicated if-blocks.
    skins = {
        1: ("tron", cyan),
        2: ("blue", blue),
        3: ("green", green),
        4: ("magenta", magenta),
        5: ("orange", orange),
        6: ("red", red),
        7: ("white", white),
        8: ("yellow", yellow),
    }
    skin_name, font_color = skins[skin_number]
    skin1 = pygame.image.load("skins/skin_%s_m1.png" % skin_name)
    skin2 = pygame.image.load("skins/skin_%s_m2.png" % skin_name)
    global menu
    if screensaver == False:
        current_time = datetime.datetime.now().strftime('%H:%M %d.%m.%Y')
        time_label = font.render(current_time, 1, (font_color))
        if menu == 1:
            skin = skin1
            screen.blit(skin, (0, 0))
            # NOTE(review): check_output returns bytes on Python 3; the str
            # splits/concats below imply this script targets Python 2.
            lines = subprocess.check_output('mpc current', shell=True).split(":")
            if len(lines) == 1:
                line1 = lines[0]
                line1 = line1[:-1]
                station_label = font.render("Station: no data", 1, (font_color))
            else:
                line1 = lines[0]
                line2 = lines[1]
                line1 = line1[:30]
                station_label = font.render('Station: ' + line1 + '.', 1, (font_color))
            lines = subprocess.check_output('mpc -f [%title%]', shell=True).split("\n")
            line1 = lines[0]
            if line1.startswith("volume"):
                # mpc prints the volume line when nothing is playing.
                title_label = font.render("Title: no data! Try with PLAY!", 1, (font_color))
            else:
                line1 = lines[0]
                line2 = lines[1]
                global song_title
                song_title = line1
                line1 = line1[:30]
                title_label = font.render(line1 + '.', 1, (font_color))
            title = font.render("Now playing:", 1, (font_color))
            screen.blit(skin, (0, 0))
            screen.blit(station_label, (23, 15))
            screen.blit(title, (23, 40))
            screen.blit(title_label, (23, 60))
            screen.blit(time_label, (160, 90))
            lines = subprocess.check_output('mpc volume', shell=True).split("\n")
            line1 = lines[0]
            volume_label = font.render(line1, 1, (font_color))
            screen.blit(volume_label, (23, 90))
            pygame.display.flip()
        if menu == 2:
            skin = skin2
            screen.blit(skin, (0, 0))
            #get and display ip
            ip = subprocess.check_output('hostname -I', shell=True).strip()
            ip_label = font.render('IP: ' + ip, 1, (font_color))
            screen.blit(ip_label, (23, 15))
            #get and display cpu temp
            cpu_temp = subprocess.check_output('/opt/vc/bin/vcgencmd measure_temp', shell=True).strip()
            temp = font.render('cpu ' + cpu_temp, 1, (font_color))
            screen.blit(temp, (23, 35))
            #get current time
            screen.blit(time_label, (90, 90))
            pygame.display.flip()
    if screensaver == True:
        # Blank (white) screen while the screensaver is active.
        screen.fill(white)
        pygame.display.flip()
minutes = 0
# USEREVENT+1 fires every 60000 ms (one minute); `minutes` counts these
# ticks to drive the screensaver timeout.  (The original comment said
# 1000 ms, which contradicted the timer value.)
pygame.time.set_timer(USEREVENT +1, 60000)
subprocess.call('mpc play', shell=True)
update_screen()
running = True
while running:
    for event in pygame.event.get():
        if event.type == USEREVENT +1:
            minutes += 1
        if event.type == pygame.QUIT:
            print("Quit radio")
            pygame.quit()
            sys.exit()
        if event.type == pygame.KEYDOWN:
            if event.key == K_ESCAPE:
                print("Quit radio")
                pygame.quit()
                sys.exit()
        #if screensaver is enabled and the screen was touched,
        #just disable screensaver, reset timer and update screen
        #no button state will be checked
        if event.type == pygame.MOUSEBUTTONDOWN and screensaver == True:
            minutes = 0
            # Turn the backlight back on.
            subprocess.call('echo 0 | sudo tee /sys/class/backlight/*/bl_power', shell=True)
            screensaver = False
            update_screen()
            break
        #if screen was touched and screensaver is disabled,
        #get position of touched button, call on_touch(), reset timer and update screen
        if event.type == pygame.MOUSEBUTTONDOWN and screensaver == False:
            pos = (pygame.mouse.get_pos() [0], pygame.mouse.get_pos() [1])
            minutes = 0
            on_touch()
            update_screen()
    #enable screensaver on timer overflow
    if minutes > screensaver_timer:
        screensaver = True
        # Backlight off while the screensaver is active.
        subprocess.call('echo 1 | sudo tee /sys/class/backlight/*/bl_power', shell=True)
        update_screen()
    update_screen()
    time.sleep(0.1)
<|endoftext|> |
<|endoftext|># coding:utf8
"""
Created on 2013-7-10
memcached client
@author: lan (www.9miao.com)
"""
import memcache
class MemConnError(Exception):
    """Raised when the memcached test write fails during connect()."""

    def __str__(self):
        return "memcache connect error"
class MemClient:
    """Memcached client that prefixes every key with '<hostname>:' and
    retries failed writes once after reconnecting.
    """

    def __init__(self, timeout=0):
        # `timeout` is accepted for interface compatibility but unused.
        self._hostname = ""
        self._urls = []
        self.connection = None

    def connect(self, urls, hostname):
        """Connect to the memcached servers and verify with a test write."""
        self._hostname = hostname
        self._urls = urls
        self.connection = memcache.Client(self._urls, debug=0)
        if not self.connection.set("__testkey__", 1):
            raise MemConnError()

    def produceKey(self, keyname):
        """Return the namespaced key '<hostname>:<keyname>'."""
        if isinstance(keyname, str):
            return "".join([self._hostname, ":", keyname])
        else:
            # The original raised a bare string, which is itself illegal
            # (TypeError: exceptions must derive from BaseException);
            # raise a real, catchable exception instead.
            raise TypeError("keyname must be str, got %r" % type(keyname))

    def get(self, key):
        key = self.produceKey(key)
        return self.connection.get(key)

    def get_multi(self, keys):
        """Fetch many keys, returning a dict keyed by the un-prefixed names."""
        keynamelist = [self.produceKey(keyname) for keyname in keys]
        olddict = self.connection.get_multi(keynamelist)
        newdict = dict(
            list(
                zip(
                    [keyname.split(":")[-1] for keyname in list(olddict.keys())],
                    list(olddict.values()),
                )
            )
        )
        return newdict

    def set(self, keyname, value):
        """Store one value; on failure reconnect and retry once."""
        key = self.produceKey(keyname)
        result = self.connection.set(key, value)
        if not result:  # write failed
            self.connect(self._urls, self._hostname)  # reconnect
            return self.connection.set(key, value)
        return result

    def set_multi(self, mapping):
        """Store many values.

        Note the asymmetry with set(): set_multi returns the list of keys
        that FAILED, so a truthy result triggers the reconnect-and-retry.
        """
        newmapping = dict(
            list(
                zip(
                    [self.produceKey(keyname) for keyname in list(mapping.keys())],
                    list(mapping.values()),
                )
            )
        )
        result = self.connection.set_multi(newmapping)
        if result:  # some keys failed to store
            self.connect(self._urls, self._hostname)  # reconnect
            return self.connection.set_multi(newmapping)
        return result

    def incr(self, key, delta):
        key = self.produceKey(key)
        return self.connection.incr(key, delta)

    def delete(self, key):
        key = self.produceKey(key)
        return self.connection.delete(key)

    def delete_multi(self, keys):
        keys = [self.produceKey(key) for key in keys]
        return self.connection.delete_multi(keys)

    def flush_all(self):
        self.connection.flush_all()
mclient = MemClient()
<|endoftext|> |
<|endoftext|># coding:utf8
"""
Created on 2013-7-31
@author: lan (www.9miao.com)
"""
from firefly.dbentrust.dbpool import dbpool
from firefly.dbentrust.madminanager import MAdminManager
from firefly.dbentrust import mmode
from firefly.dbentrust.memclient import mclient
import time
if __name__ == "__main__":
# CREATE TABLE `tb_register` (
# `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'id',
# `username` varchar(255) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'ç¨æ·å',
# `password` varchar(255) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'ç¨æ·å¯ç ',
# PRIMARY KEY (`id`,`username`)
# ) ENGINE=MyISAM AUTO_INCREMENT=1 DEFAULT CHARSET=utf8
#
hostname = "localhost"
username = "root"
password = "111"
dbname = "test"
charset = "utf8"
tablename = "test1" #
aa = {
"host": "localhost",
"user": "root",
"passwd": "111",
"db": "test",
"port": 3306,
"charset": "utf8",
}
dbpool.initPool(**aa)
mclient.connect(["127.0.0.1:11211"], "test")
mmanager = MAdminManager()
m1 = mmode.MAdmin("test1", "id", incrkey="id")
m1.insert()
print(m1.get("_incrvalue"))
m2 = mmode.MAdmin("test1", "id", incrkey="id")
print(m2.get("_incrvalue"))
<|endoftext|> |
<|endoftext|>""" Really basic gatttool (BlueZ) wrapper
Based on https://github.com/stratosinc/pygatt
Part of https://github.com/ALPSquid/thebutton-monitor
"""
import pexpect
class connect:
"""Use to initiate a connection to a GATT device
Example: bt_device = gatt.connect('AB:CD:EF:01:23:45')
"""
def __init__(self, address):
self.address = "" # Connected bluetooth device address. Assigned from connect()
self.conn = None # pexpect.spawn() object for the gatttool command
self.connect(address)
def connect(self, address, adapter="hci0"):
"""Open an interactive connection to a bluetooth device
:param address: Bluetooth device address
:param adapter: Bluetooth adapter to use. Default: hci0
"""
if self.conn is None:
self.address = address
cmd = " ".join(["gatttool", "-b", address, "-i", adapter, "-I"])
self.conn = pexpect.spawn(cmd)
self.conn.expect(r"\[LE\]>", timeout=1)
self.conn.sendline("connect")
try:
self.conn.expect(r"Connection successful", timeout=10)
print(("Connected to " + address))
except pexpect.TIMEOUT:
raise Exception("Unable to connect to device")
else:
raise Exception(
"Device already connected! Call disconnect before attempting a new connection"
)
def reconnect(self):
"""Check and attempt to reconnect to device if necessary
:return: True if a reconnect was performed
"""
try:
self.conn.expect(r"Disconnected", timeout=0.1)
self.conn.sendline("connect")
try:
self.conn.expect(r"Connection successful", timeout=10)
print(("Reconnected to device: " + self.address))
except pexpect.TIMEOUT:
# Continue and try to reconnect next time
print(("Lost connection to device: " + self.address))
return True
except pexpect.TIMEOUT:
# No need to reconnect
return False
def disconnect(self):
"""Disconnect from current bluetooth device"""
if self.conn is not None:
self.conn.sendline("exit")
self.conn = None
print(("Disconnected from " + self.address))
def write(self, handle, value):
"""Write a value to the specified handle
:param handle: address to write to. e.g. 0016
:param value: value to write
"""
self.send(" ".join(["char-write-cmd", "0x" + handle, value]))
def read(self, handle):
"""Read from the specified handle
:param handle: address to read from. e.g. 0016
"""
self.send("char-read-hnd 0x" + handle, r"descriptor: .* \r", timeout=5)
val = " ".join(self.conn.after.decode("utf-8").split()[1:])
return val
def send(self, cmd, expect=None, timeout=5):
"""Send command to device. Attempt a reconnect if disconnected
:param cmd: Command to send
"""
self.conn.sendline(cmd)
if expect is not None:
try:
self.conn.expect(expect, timeout)
except pexpect.TIMEOUT:
if self.reconnect():
self.conn.sendline(cmd)
else:
if self.reconnect():
self.conn.sendline(cmd)
<|endoftext|> |
<|endoftext|># -*- coding: utf-8 -*-
from django.db import models, migrations
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
("puput", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="blogpage",
name="description",
field=models.CharField(
max_length=255,
help_text="The blog description that will appear under the title.",
verbose_name="Description",
blank=True,
),
),
migrations.AlterField(
model_name="category",
name="description",
field=models.CharField(
max_length=500, verbose_name="Description", blank=True
),
),
migrations.AlterField(
model_name="category",
name="name",
field=models.CharField(
max_length=80, unique=True, verbose_name="Category name"
),
),
migrations.AlterField(
model_name="category",
name="parent",
field=models.ForeignKey(
to="puput.Category",
related_name="children",
null=True,
verbose_name="Parent category",
blank=True,
),
),
migrations.AlterField(
model_name="entrypage",
name="excerpt",
field=wagtail.wagtailcore.fields.RichTextField(
help_text="Entry excerpt to be displayed on entries list. If this field is not filled, a truncate version of body text will be used.",
verbose_name="excerpt",
blank=True,
),
),
]
<|endoftext|> |
<|endoftext|>"""
==================================
Map two radars to a Cartesian grid
==================================
Map the reflectivity field of two nearby ARM XSARP radars from antenna
coordinates to a Cartesian grid.
"""
print(__doc__)
# Author: Jonathan J. Helmus (jhelmus@anl.gov)
# License: BSD 3 clause
import matplotlib.pyplot as plt
import pyart
# read in the data from both XSAPR radars
XSAPR_SW_FILE = "swx_20120520_0641.nc"
XSAPR_SE_FILE = "sex_20120520_0641.nc"
radar_sw = pyart.io.read_cfradial(XSAPR_SW_FILE)
radar_se = pyart.io.read_cfradial(XSAPR_SE_FILE)
# filter out gates with reflectivity > 100 from both radars
gatefilter_se = pyart.filters.GateFilter(radar_se)
gatefilter_se.exclude_above("corrected_reflectivity_horizontal", 100)
gatefilter_sw = pyart.filters.GateFilter(radar_sw)
gatefilter_sw.exclude_above("corrected_reflectivity_horizontal", 100)
# perform Cartesian mapping, limit to the reflectivity field.
grid = pyart.map.grid_from_radars(
(radar_se, radar_sw),
gatefilters=(gatefilter_se, gatefilter_sw),
grid_shape=(1, 201, 201),
grid_limits=((1000, 1000), (-50000, 40000), (-60000, 40000)),
grid_origin=(36.57861, -97.363611),
fields=["corrected_reflectivity_horizontal"],
)
# create the plot
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(
grid.fields["corrected_reflectivity_horizontal"]["data"][0],
origin="lower",
extent=(-60, 40, -50, 40),
vmin=0,
vmax=48,
)
plt.show()
<|endoftext|> |
<|endoftext|>"""
pyart.aux_io.radx
=================
Reading files using Radx to first convert the file to Cf.Radial format
.. autosummary::
:toctree: generated/
read_radx
"""
import os
import tempfile
import subprocess
from ..io.cfradial import read_cfradial
from ..io.common import _test_arguments
def read_radx(filename, **kwargs):
"""
Read a file by first converting it to Cf/Radial using RadxConvert.
Parameters
----------
filename : str
Name of file to read using RadxConvert.
Returns
-------
radar : Radar
Radar object.
"""
# test for non empty kwargs
_test_arguments(kwargs)
tmpfile = tempfile.mkstemp(suffix=".nc", dir=".")[1]
head, tail = os.path.split(tmpfile)
try:
subprocess.check_call(
[
"RadxConvert",
"-const_ngates",
"-outdir",
head,
"-outname",
tail,
"-f",
filename,
]
)
if not os.path.isfile(tmpfile):
raise IOError(
"RadxConvert failed to create a file, upgrading to the "
" latest version of Radx may be necessary."
)
radar = read_cfradial(tmpfile)
finally:
os.remove(tmpfile)
return radar
<|endoftext|> |
<|endoftext|>"""
pyart.exceptions
================
Custom Py-ART exceptions.
.. autosummary::
:toctree: generated/
MissingOptionalDependency
DeprecatedAttribute
DeprecatedFunctionName
_deprecated_alias
"""
import warnings
class MissingOptionalDependency(Exception):
"""Exception raised when a optional dependency is needed by not found."""
pass
class DeprecatedAttribute(DeprecationWarning):
"""Warning category for an attribute which has been renamed/moved."""
pass
class DeprecatedFunctionName(DeprecationWarning):
"""Warning category for a function which has been renamed/moved."""
pass
def _deprecated_alias(func, old_name, new_name):
"""
A function for creating an alias to a renamed or moved function.
Parameters
----------
func : func
The function which has been renamed or moved.
old_name, new_name : str
Name of the function before and after it was moved or renamed
(with namespace if changed).
Returns
-------
wrapper : func
A wrapper version of func, which issues a DeprecatedFunctionName
warning when the called.
"""
def wrapper(*args, **kwargs):
warnings.warn(
(
"{0} has been deprecated and will be removed in future "
+ "versions of Py-ART, pleases use {1}. "
).format(old_name, new_name),
category=DeprecatedFunctionName,
)
return func(*args, **kwargs)
return wrapper
<|endoftext|> |
<|endoftext|>"""
pyart.io.nexrad_archive
=======================
Functions for reading NEXRAD Level II Archive files.
.. autosummary::
:toctree: generated/
:template: dev_template.rst
_NEXRADLevel2StagedField
.. autosummary::
:toctree: generated/
read_nexrad_archive
_find_range_params
_find_scans_to_interp
_interpolate_scan
"""
import warnings
import numpy as np
from ..config import FileMetadata, get_fillvalue
from ..core.radar import Radar
from .common import make_time_unit_str, _test_arguments, prepare_for_read
from .nexrad_level2 import NEXRADLevel2File
from ..lazydict import LazyLoadDict
from .nexrad_common import get_nexrad_location
def read_nexrad_archive(
filename,
field_names=None,
additional_metadata=None,
file_field_names=False,
exclude_fields=None,
delay_field_loading=False,
station=None,
scans=None,
linear_interp=True,
**kwargs
):
"""
Read a NEXRAD Level 2 Archive file.
Parameters
----------
filename : str
Filename of NEXRAD Level 2 Archive file. The files hosted by
at the NOAA National Climate Data Center [1]_ as well as on the
UCAR THREDDS Data Server [2]_ have been tested. Other NEXRAD
Level 2 Archive files may or may not work. Message type 1 file
and message type 31 files are supported.
field_names : dict, optional
Dictionary mapping NEXRAD moments to radar field names. If a
data type found in the file does not appear in this dictionary or has
a value of None it will not be placed in the radar.fields dictionary.
A value of None, the default, will use the mapping defined in the
metadata configuration file.
additional_metadata : dict of dicts, optional
Dictionary of dictionaries to retrieve metadata from during this read.
This metadata is not used during any successive file reads unless
explicitly included. A value of None, the default, will not
introduct any addition metadata and the file specific or default
metadata as specified by the metadata configuration file will be used.
file_field_names : bool, optional
True to use the NEXRAD field names for the field names. If this
case the field_names parameter is ignored. The field dictionary will
likely only have a 'data' key, unless the fields are defined in
`additional_metadata`.
exclude_fields : list or None, optional
List of fields to exclude from the radar object. This is applied
after the `file_field_names` and `field_names` parameters.
delay_field_loading : bool, optional
True to delay loading of field data from the file until the 'data'
key in a particular field dictionary is accessed. In this case
the field attribute of the returned Radar object will contain
LazyLoadDict objects not dict objects.
station : str or None, optional
Four letter ICAO name of the NEXRAD station used to determine the
location in the returned radar object. This parameter is only
used when the location is not contained in the file, which occur
in older NEXRAD message 1 files.
scans : list or None, optional
Read only specified scans from the file. None (the default) will read
all scans.
linear_interp : bool, optional
True (the default) to perform linear interpolation between valid pairs
of gates in low resolution rays in files mixed resolution rays.
False will perform a nearest neighbor interpolation. This parameter is
not used if the resolution of all rays in the file or requested sweeps
is constant.
Returns
-------
radar : Radar
Radar object containing all moments and sweeps/cuts in the volume.
Gates not collected are masked in the field data.
References
----------
.. [1] http://www.ncdc.noaa.gov/
.. [2] http://thredds.ucar.edu/thredds/catalog.html
"""
# test for non empty kwargs
_test_arguments(kwargs)
# create metadata retrieval object
filemetadata = FileMetadata(
"nexrad_archive",
field_names,
additional_metadata,
file_field_names,
exclude_fields,
)
# open the file and retrieve scan information
nfile = NEXRADLevel2File(prepare_for_read(filename))
scan_info = nfile.scan_info(scans)
# time
time = filemetadata("time")
time_start, _time = nfile.get_times(scans)
time["data"] = _time
time["units"] = make_time_unit_str(time_start)
# range
_range = filemetadata("range")
first_gate, gate_spacing, last_gate = _find_range_params(scan_info, filemetadata)
_range["data"] = np.arange(first_gate, last_gate, gate_spacing, "float32")
_range["meters_to_center_of_first_gate"] = float(first_gate)
_range["meters_between_gates"] = float(gate_spacing)
# metadata
metadata = filemetadata("metadata")
metadata["original_container"] = "NEXRAD Level II"
# scan_type
scan_type = "ppi"
# latitude, longitude, altitude
latitude = filemetadata("latitude")
longitude = filemetadata("longitude")
altitude = filemetadata("altitude")
if nfile._msg_type == "1" and station is not None:
lat, lon, alt = get_nexrad_location(station)
else:
lat, lon, alt = nfile.location()
latitude["data"] = np.array([lat], dtype="float64")
longitude["data"] = np.array([lon], dtype="float64")
altitude["data"] = np.array([alt], dtype="float64")
# sweep_number, sweep_mode, fixed_angle, sweep_start_ray_index
# sweep_end_ray_index
sweep_number = filemetadata("sweep_number")
sweep_mode = filemetadata("sweep_mode")
sweep_start_ray_index = filemetadata("sweep_start_ray_index")
sweep_end_ray_index = filemetadata("sweep_end_ray_index")
if scans is None:
nsweeps = int(nfile.nscans)
else:
nsweeps = len(scans)
sweep_number["data"] = np.arange(nsweeps, dtype="int32")
sweep_mode["data"] = np.array(nsweeps * ["azimuth_surveillance"], dtype="S")
rays_per_scan = [s["nrays"] for s in scan_info]
sweep_end_ray_index["data"] = np.cumsum(rays_per_scan, dtype="int32") - 1
rays_per_scan.insert(0, 0)
sweep_start_ray_index["data"] = np.cumsum(rays_per_scan[:-1], dtype="int32")
# azimuth, elevation, fixed_angle
azimuth = filemetadata("azimuth")
elevation = filemetadata("elevation")
fixed_angle = filemetadata("fixed_angle")
azimuth["data"] = nfile.get_azimuth_angles(scans)
elevation["data"] = nfile.get_elevation_angles(scans).astype("float32")
fixed_angle["data"] = nfile.get_target_angles(scans)
# fields
max_ngates = len(_range["data"])
available_moments = set([m for scan in scan_info for m in scan["moments"]])
interpolate = _find_scans_to_interp(
scan_info, first_gate, gate_spacing, filemetadata
)
fields = {}
for moment in available_moments:
field_name = filemetadata.get_field_name(moment)
if field_name is None:
continue
dic = filemetadata(field_name)
dic["_FillValue"] = get_fillvalue()
if delay_field_loading and moment not in interpolate:
dic = LazyLoadDict(dic)
data_call = _NEXRADLevel2StagedField(nfile, moment, max_ngates, scans)
dic.set_lazy("data", data_call)
else:
mdata = nfile.get_data(moment, max_ngates, scans=scans)
if moment in interpolate:
interp_scans = interpolate[moment]
warnings.warn(
"Gate spacing is not constant, interpolating data in "
+ "scans %s for moment %s." % (interp_scans, moment),
UserWarning,
)
for scan in interp_scans:
idx = scan_info[scan]["moments"].index(moment)
moment_ngates = scan_info[scan]["ngates"][idx]
start = sweep_start_ray_index["data"][scan]
end = sweep_end_ray_index["data"][scan]
_interpolate_scan(mdata, start, end, moment_ngates, linear_interp)
dic["data"] = mdata
fields[field_name] = dic
# instrument_parameters
nyquist_velocity = filemetadata("nyquist_velocity")
unambiguous_range = filemetadata("unambiguous_range")
nyquist_velocity["data"] = nfile.get_nyquist_vel(scans).astype("float32")
unambiguous_range["data"] = nfile.get_unambigous_range(scans).astype("float32")
instrument_parameters = {
"unambiguous_range": unambiguous_range,
"nyquist_velocity": nyquist_velocity,
}
nfile.close()
return Radar(
time,
_range,
fields,
metadata,
scan_type,
latitude,
longitude,
altitude,
sweep_number,
sweep_mode,
fixed_angle,
sweep_start_ray_index,
sweep_end_ray_index,
azimuth,
elevation,
instrument_parameters=instrument_parameters,
)
def _find_range_params(scan_info, filemetadata):
"""Return range parameters, first_gate, gate_spacing, last_gate."""
min_first_gate = 999999
min_gate_spacing = 999999
max_last_gate = 0
for scan_params in scan_info:
ngates = scan_params["ngates"][0]
for i, moment in enumerate(scan_params["moments"]):
if filemetadata.get_field_name(moment) is None:
# moment is not read, skip
continue
first_gate = scan_params["first_gate"][i]
gate_spacing = scan_params["gate_spacing"][i]
last_gate = first_gate + gate_spacing * (ngates - 0.5)
min_first_gate = min(min_first_gate, first_gate)
min_gate_spacing = min(min_gate_spacing, gate_spacing)
max_last_gate = max(max_last_gate, last_gate)
return min_first_gate, min_gate_spacing, max_last_gate
def _find_scans_to_interp(scan_info, first_gate, gate_spacing, filemetadata):
"""Return a dict indicating what moments/scans need interpolation."""
moments = set([m for scan in scan_info for m in scan["moments"]])
interpolate = dict([(moment, []) for moment in moments])
for scan_num, scan in enumerate(scan_info):
for moment in moments:
if moment not in scan["moments"]:
continue
if filemetadata.get_field_name(moment) is None:
# moment is not read, skip
continue
index = scan["moments"].index(moment)
first = scan["first_gate"][index]
spacing = scan["gate_spacing"][index]
if first != first_gate or spacing != gate_spacing:
interpolate[moment].append(scan_num)
# for proper interpolation the gate spacing of the scan to be
# interpolated should be 1/4th the spacing of the radar
assert spacing == gate_spacing * 4
# and the first gate for the scan should be one and half times
# the radar spacing past the radar first gate
assert first_gate + 1.5 * gate_spacing == first
# remove moments with no scans needing interpolation
interpolate = dict([(k, v) for k, v in list(interpolate.items()) if len(v) != 0])
return interpolate
def _interpolate_scan(mdata, start, end, moment_ngates, linear_interp=True):
"""Interpolate a single NEXRAD moment scan from 1000 m to 250 m."""
# This interpolation scheme is only valid for NEXRAD data where a 4:1
# (1000 m : 250 m) interpolation is needed.
#
# The scheme here performs a linear interpolation between pairs of gates
# in a ray when the both of the gates are not masked (below threshold).
# When one of the gates is masked the interpolation changes to a nearest
# neighbor interpolation. Nearest neighbor is also performed at the end
# points until the new range bin would be centered beyond half of the range
# spacing of the original range.
#
# Nearest neighbor interpolation is performed when linear_interp is False,
# this is equivalent to repeating each gate four times in each ray.
#
# No transformation of the raw data is performed prior to interpolation, so
# reflectivity will be interpolated in dB units, velocity in m/s, etc,
# this may not be the best method for interpolation.
#
# This method was adapted from Radx
for ray_num in range(start, end + 1):
ray = mdata[ray_num].copy()
# repeat each gate value 4 times
interp_ngates = 4 * moment_ngates
ray[:interp_ngates] = np.repeat(ray[:moment_ngates], 4)
if linear_interp:
# linear interpolate
for i in range(2, interp_ngates - 4, 4):
gate_val = ray[i]
next_val = ray[i + 4]
if np.ma.is_masked(gate_val) or np.ma.is_masked(next_val):
continue
delta = (next_val - gate_val) / 4.0
ray[i + 0] = gate_val + delta * 0.5
ray[i + 1] = gate_val + delta * 1.5
ray[i + 2] = gate_val + delta * 2.5
ray[i + 3] = gate_val + delta * 3.5
mdata[ray_num] = ray[:]
class _NEXRADLevel2StagedField(object):
"""
A class to facilitate on demand loading of field data from a Level 2 file.
"""
def __init__(self, nfile, moment, max_ngates, scans):
"""initialize."""
self.nfile = nfile
self.moment = moment
self.max_ngates = max_ngates
self.scans = scans
def __call__(self):
"""Return the array containing the field data."""
return self.nfile.get_data(self.moment, self.max_ngates, scans=self.scans)
<|endoftext|> |
<|endoftext|>"""
pyart.io.uf
===========
Reading of Universal format (UF) files
.. autosummary::
:toctree: generated/
read_uf
_get_instrument_parameters
"""
import warnings
import numpy as np
from netCDF4 import date2num
from ..config import FileMetadata, get_fillvalue
from ..core.radar import Radar
from .common import make_time_unit_str, _test_arguments, prepare_for_read
from .uffile import UFFile
_LIGHT_SPEED = 2.99792458e8 # speed of light in meters per second
_UF_SWEEP_MODES = {
0: "calibration",
1: "ppi",
2: "coplane",
3: "rhi",
4: "vpt",
5: "target",
6: "manual",
7: "idle",
}
_SWEEP_MODE_STR = {
"calibration": "calibration",
"ppi": "azimuth_surveillance",
"coplane": "coplane",
"rhi": "rhi",
"vpt": "vertical_pointing",
"target": "pointing",
"manual": "manual",
"idle": "idle",
}
def read_uf(
filename,
field_names=None,
additional_metadata=None,
file_field_names=False,
exclude_fields=None,
delay_field_loading=False,
**kwargs
):
"""
Read a UF File.
Parameters
----------
filename : str or file-like
Name of Universal format file to read data from.
field_names : dict, optional
Dictionary mapping UF data type names to radar field names. If a
data type found in the file does not appear in this dictionary or has
a value of None it will not be placed in the radar.fields dictionary.
A value of None, the default, will use the mapping defined in the
Py-ART configuration file.
additional_metadata : dict of dicts, optional
Dictionary of dictionaries to retrieve metadata from during this read.
This metadata is not used during any successive file reads unless
explicitly included. A value of None, the default, will not
introduce any addition metadata and the file specific or default
metadata as specified by the Py-ART configuration file will be used.
file_field_names : bool, optional
True to force the use of the field names from the file in which
case the `field_names` parameter is ignored. False will use to
`field_names` parameter to rename fields.
exclude_fields : list or None, optional
List of fields to exclude from the radar object. This is applied
after the `file_field_names` and `field_names` parameters.
delay_field_loading : bool
This option is not implemented in the function but included for
compatibility.
Returns
-------
radar : Radar
Radar object.
"""
# test for non empty kwargs
_test_arguments(kwargs)
# create metadata retrieval object
filemetadata = FileMetadata(
"uf", field_names, additional_metadata, file_field_names, exclude_fields
)
# Open UF file and get handle
ufile = UFFile(prepare_for_read(filename))
first_ray = ufile.rays[0]
# time
dts = ufile.get_datetimes()
units = make_time_unit_str(min(dts))
time = filemetadata("time")
time["units"] = units
time["data"] = date2num(dts, units).astype("float32")
# range
_range = filemetadata("range")
# assume that the number of gates and spacing from the first ray is
# representative of the entire volume
field_header = first_ray.field_headers[0]
ngates = field_header["nbins"]
step = field_header["range_spacing_m"]
# this gives distances to the center of each gate, remove step/2 for start
start = (
field_header["range_start_km"] * 1000.0
+ field_header["range_start_m"]
+ step / 2.0
)
_range["data"] = np.arange(ngates, dtype="float32") * step + start
_range["meters_to_center_of_first_gate"] = start
_range["meters_between_gates"] = step
# latitude, longitude and altitude
latitude = filemetadata("latitude")
longitude = filemetadata("longitude")
altitude = filemetadata("altitude")
lat, lon, height = first_ray.get_location()
latitude["data"] = np.array([lat], dtype="float64")
longitude["data"] = np.array([lon], dtype="float64")
altitude["data"] = np.array([height], dtype="float64")
# metadata
metadata = filemetadata("metadata")
metadata["original_container"] = "UF"
metadata["site_name"] = first_ray.mandatory_header["site_name"]
metadata["radar_name"] = first_ray.mandatory_header["radar_name"]
# sweep_start_ray_index, sweep_end_ray_index
sweep_start_ray_index = filemetadata("sweep_start_ray_index")
sweep_end_ray_index = filemetadata("sweep_end_ray_index")
sweep_start_ray_index["data"] = ufile.first_ray_in_sweep
sweep_end_ray_index["data"] = ufile.last_ray_in_sweep
# sweep number
sweep_number = filemetadata("sweep_number")
sweep_number["data"] = np.arange(ufile.nsweeps, dtype="int32")
# sweep_type
scan_type = _UF_SWEEP_MODES[first_ray.mandatory_header["sweep_mode"]]
# sweep_mode
sweep_mode = filemetadata("sweep_mode")
sweep_mode["data"] = np.array(
ufile.nsweeps * [_SWEEP_MODE_STR[scan_type]], dtype="S"
)
# elevation
elevation = filemetadata("elevation")
elevation["data"] = ufile.get_elevations()
# azimuth
azimuth = filemetadata("azimuth")
azimuth["data"] = ufile.get_azimuths()
# fixed_angle
fixed_angle = filemetadata("fixed_angle")
fixed_angle["data"] = ufile.get_sweep_fixed_angles()
# fields
fields = {}
for uf_field_number, uf_field_dic in enumerate(first_ray.field_positions):
uf_field_name = uf_field_dic["data_type"].decode("ascii")
field_name = filemetadata.get_field_name(uf_field_name)
if field_name is None:
continue
field_dic = filemetadata(field_name)
field_dic["data"] = ufile.get_field_data(uf_field_number)
field_dic["_FillValue"] = get_fillvalue()
fields[field_name] = field_dic
# instrument_parameters
instrument_parameters = _get_instrument_parameters(ufile, filemetadata)
# scan rate
scan_rate = filemetadata("scan_rate")
scan_rate["data"] = ufile.get_sweep_rates()
ufile.close()
return Radar(
time,
_range,
fields,
metadata,
scan_type,
latitude,
longitude,
altitude,
sweep_number,
sweep_mode,
fixed_angle,
sweep_start_ray_index,
sweep_end_ray_index,
azimuth,
elevation,
scan_rate=scan_rate,
instrument_parameters=instrument_parameters,
)
def _get_instrument_parameters(ufile, filemetadata):
"""Return a dictionary containing instrument parameters."""
# pulse width
pulse_width = filemetadata("pulse_width")
pulse_width["data"] = ufile.get_pulse_widths() / _LIGHT_SPEED # m->sec
# assume that the parameters in the first ray represent the beam widths,
# bandwidth and frequency in the entire volume
first_ray = ufile.rays[0]
field_header = first_ray.field_headers[0]
beam_width_h = field_header["beam_width_h"] / 64.0
beam_width_v = field_header["beam_width_v"] / 64.0
bandwidth = field_header["bandwidth"] / 16.0 * 1.0e6
wavelength_cm = field_header["wavelength_cm"] / 64.0
if wavelength_cm == 0:
warnings.warn("Invalid wavelength, frequency set to default value.")
wavelength_hz = 9999.0
else:
wavelength_hz = _LIGHT_SPEED / (wavelength_cm / 100.0)
# radar_beam_width_h
radar_beam_width_h = filemetadata("radar_beam_width_h")
radar_beam_width_h["data"] = np.array([beam_width_h], dtype="float32")
# radar_beam_width_v
radar_beam_width_v = filemetadata("radar_beam_width_w")
radar_beam_width_v["data"] = np.array([beam_width_v], dtype="float32")
# radar_receiver_bandwidth
radar_receiver_bandwidth = filemetadata("radar_receiver_bandwidth")
radar_receiver_bandwidth["data"] = np.array([bandwidth], dtype="float32")
# polarization_mode
polarization_mode = filemetadata("polarization_mode")
polarization_mode["data"] = ufile.get_sweep_polarizations()
# frequency
frequency = filemetadata("frequency")
frequency["data"] = np.array([wavelength_hz], dtype="float32")
# prt
prt = filemetadata("prt")
prt["data"] = ufile.get_prts() / 1e6 # us->sec
instrument_parameters = {
"pulse_width": pulse_width,
"radar_beam_width_h": radar_beam_width_h,
"radar_beam_width_v": radar_beam_width_v,
"radar_receiver_bandwidth": radar_receiver_bandwidth,
"polarization_mode": polarization_mode,
"frequency": frequency,
"prt": prt,
}
# nyquist velocity if defined
nyquist_velocity = filemetadata("nyquist_velocity")
nyquist_velocity["data"] = ufile.get_nyquists()
if nyquist_velocity["data"] is not None:
instrument_parameters["nyquist_velocity"] = nyquist_velocity
return instrument_parameters
<|endoftext|> |
<|endoftext|>#! /usr/bin/env python
"""
Make a small netCDF CF/Radial file containing a single RHI scan.
Single field and scan is converted from sigmet file XSW110520113537.RAW7HHL
"""
import pyart
radar = pyart.io.read_rsl("XSW110520113537.RAW7HHL")
time_slice = slice(None, 713, 18)
range_slice = slice(None, None, 12)
sweep_slice = slice(None, 1)
# remove all but the reflectivity_horizontal fields
rf_field = radar.fields["reflectivity"]
rf_data = rf_field["data"]
rf_field["data"] = rf_data[time_slice, range_slice]
radar.fields = {"reflectivity_horizontal": rf_field}
radar.nsweeps = 1
radar.nray = 40
radar.ngates = 45
# truncate the range based variables
radar.range["data"] = radar.range["data"][range_slice]
# truncate the time based variables
radar.time["data"] = radar.time["data"][time_slice]
radar.azimuth["data"] = radar.azimuth["data"][time_slice]
radar.elevation["data"] = radar.elevation["data"][time_slice]
radar.instrument_parameters["prt"]["data"] = radar.instrument_parameters["prt"]["data"][
time_slice
]
radar.instrument_parameters["unambiguous_range"]["data"] = radar.instrument_parameters[
"unambiguous_range"
]["data"][time_slice]
radar.instrument_parameters["nyquist_velocity"]["data"] = radar.instrument_parameters[
"nyquist_velocity"
]["data"][time_slice]
# truncate the sweep based variables
radar.sweep_number["data"] = radar.sweep_number["data"][sweep_slice]
radar.fixed_angle["data"] = radar.fixed_angle["data"][sweep_slice]
radar.sweep_start_ray_index["data"] = radar.sweep_start_ray_index["data"][sweep_slice]
radar.sweep_end_ray_index["data"] = radar.sweep_end_ray_index["data"][sweep_slice]
radar.sweep_end_ray_index["data"][0] = 39
radar.sweep_mode["data"] = radar.sweep_mode["data"][sweep_slice]
radar.sweep_number["data"] = radar.sweep_number["data"][sweep_slice]
radar.instrument_parameters["prt_mode"]["data"] = radar.instrument_parameters[
"prt_mode"
]["data"][sweep_slice]
# adjust metadata
radar.metadata = {
"Conventions": "CF/Radial instrument_parameters",
"version": "1.2",
"title": "Py-ART Example RHI CF/Radial file",
"institution": (
"United States Department of Energy - Atmospheric "
"Radiation Measurement (ARM) program"
),
"references": "none",
"source": "ARM SGP XSAPR Radar",
"history": "created by jhelmus on evs348532 at 2013-05-22T12:34:56",
"comment": "none",
"instrument_name": "xsapr-sgp",
}
pyart.io.write_cfradial("example_cfradial_rhi.nc", radar)
<|endoftext|> |
<|endoftext|>"""
pyart.util.radar_utils
======================
Functions for working radar instances.
.. autosummary::
:toctree: generated/
is_vpt
to_vpt
join_radar
"""
import copy
import numpy as np
from netCDF4 import num2date, date2num
from . import datetime_utils
def is_vpt(radar, offset=0.5):
    """
    Determine if a Radar appears to be a vertical pointing scan.

    This function only verifies that the object is a vertical pointing scan,
    use the :py:func:`to_vpt` function to convert the radar to a vpt scan
    if this function returns True.

    Parameters
    ----------
    radar : Radar
        Radar object to determine if
    offset : float
        Maximum offset of the elevation from 90 degrees to still consider
        to be vertically pointing.

    Returns
    -------
    flag : bool
        True if the radar appear to be verticle pointing, False if not.
    """
    # Every ray's elevation must fall strictly inside (90 - offset, 90 + offset).
    elevations = radar.elevation["data"]
    below_upper_bound = elevations < 90.0 + offset
    above_lower_bound = elevations > 90.0 - offset
    return np.all(below_upper_bound & above_lower_bound)
def to_vpt(radar, single_scan=True):
    """
    Convert an existing Radar object to represent a vertical pointing scan.

    This function does not verify that the Radar object contains a vertical
    pointing scan. To perform such a check use :py:func:`is_vpt`.

    Parameters
    ----------
    radar : Radar
        Mislabeled vertical pointing scan Radar object to convert to be
        properly labeled. This object is converted in place, no copy of
        the existing data is made.
    single_scan : bool, optional
        True to convert the volume to a single scan, any azimuth angle data
        is lost. False will convert the scan to contain the same number of
        scans as rays, azimuth angles are retained.
    """
    if single_scan:
        # Collapse everything into one sweep; azimuth information is discarded.
        nsweeps = 1
        radar.azimuth["data"][:] = 0.0
        radar.sweep_end_ray_index["data"] = np.array(
            [radar.nrays - 1], dtype="int32"
        )
    else:
        # One sweep per ray; azimuth angles are left untouched.
        nsweeps = radar.nrays
        radar.sweep_end_ray_index["data"] = np.arange(nsweeps, dtype="int32")
    radar.scan_type = "vpt"
    radar.nsweeps = nsweeps
    radar.target_scan_rate = None  # no scanning
    radar.elevation["data"][:] = 90.0
    sweep_indices = np.arange(nsweeps, dtype="int32")
    radar.sweep_number["data"] = sweep_indices
    radar.sweep_start_ray_index["data"] = sweep_indices.copy()
    radar.sweep_mode["data"] = np.array(["vertical_pointing"] * nsweeps)
    radar.fixed_angle["data"] = np.ones(nsweeps, dtype="float32") * 90.0
    if radar.instrument_parameters is not None:
        # Sweep-indexed instrument parameters must match the new sweep count;
        # replicate the first entry once per sweep.
        for key in ("prt_mode", "follow_mode", "polarization_mode"):
            if key in radar.instrument_parameters:
                param = radar.instrument_parameters[key]
                param["data"] = np.array([param["data"][0]] * nsweeps)
    # All other attributes (altitude, latitude, longitude, range, ngates,
    # nrays, metadata, radar_calibration, time, fields, antenna_transition,
    # scan_rate) need no changes.
    return
def join_radar(radar1, radar2):
    """
    Combine two radar instances into one.

    The rays of radar2 are appended after those of radar1.  The result is a
    deep copy of radar1 with time, angle, field and location arrays extended;
    neither input is modified.

    Parameters
    ----------
    radar1 : Radar
        Radar object.
    radar2 : Radar
        Radar object.

    Returns
    -------
    new_radar : Radar
        Combined radar instance.
    """
    # must have same gate spacing
    new_radar = copy.deepcopy(radar1)
    new_radar.azimuth["data"] = np.append(
        radar1.azimuth["data"], radar2.azimuth["data"]
    )
    new_radar.elevation["data"] = np.append(
        radar1.elevation["data"], radar2.elevation["data"]
    )
    # keep the longer of the two range axes
    if len(radar1.range["data"]) >= len(radar2.range["data"]):
        new_radar.range["data"] = radar1.range["data"]
    else:
        new_radar.range["data"] = radar2.range["data"]
    # to combine times we need to reference them to a standard,
    # for this we'll use epoch time
    r1num = datetime_utils.datetimes_from_radar(radar1, epoch=True)
    r2num = datetime_utils.datetimes_from_radar(radar2, epoch=True)
    new_radar.time["data"] = np.append(r1num, r2num)
    new_radar.time["units"] = datetime_utils.EPOCH_UNITS
    # fields: gates beyond a radar's own gate count are filled with -9999.0
    for var in list(new_radar.fields.keys()):
        sh1 = radar1.fields[var]["data"].shape
        sh2 = radar2.fields[var]["data"].shape
        new_field = np.ma.zeros([sh1[0] + sh2[0], max([sh1[1], sh2[1]])]) - 9999.0
        new_field[0 : sh1[0], 0 : sh1[1]] = radar1.fields[var]["data"]
        new_field[sh1[0] :, 0 : sh2[1]] = radar2.fields[var]["data"]
        new_radar.fields[var]["data"] = new_field
    # radar locations
    # TODO moving platforms - any more?
    # BUG FIX: the original test chained ``== 1 & len(...)``; ``&`` binds
    # tighter than ``==`` so the expression did not actually verify that
    # every location array has exactly one element.
    location_arrays = (
        radar1.latitude["data"],
        radar2.latitude["data"],
        radar1.longitude["data"],
        radar2.longitude["data"],
        radar1.altitude["data"],
        radar2.altitude["data"],
    )
    if all(len(arr) == 1 for arr in location_arrays):
        lat1 = float(radar1.latitude["data"])
        lon1 = float(radar1.longitude["data"])
        alt1 = float(radar1.altitude["data"])
        lat2 = float(radar2.latitude["data"])
        lon2 = float(radar2.longitude["data"])
        alt2 = float(radar2.altitude["data"])
        if (lat1 != lat2) or (lon1 != lon2) or (alt1 != alt2):
            # different fixed locations: expand to one value per ray
            ones1 = np.ones(len(radar1.time["data"]), dtype="float32")
            ones2 = np.ones(len(radar2.time["data"]), dtype="float32")
            new_radar.latitude["data"] = np.append(ones1 * lat1, ones2 * lat2)
            new_radar.longitude["data"] = np.append(ones1 * lon1, ones2 * lon2)
            # BUG FIX: the original assigned the combined altitudes to
            # ``latitude["data"]`` a second time, clobbering the latitudes
            # and leaving ``altitude`` stale.
            new_radar.altitude["data"] = np.append(ones1 * alt1, ones2 * alt2)
        else:
            new_radar.latitude["data"] = radar1.latitude["data"]
            new_radar.longitude["data"] = radar1.longitude["data"]
            new_radar.altitude["data"] = radar1.altitude["data"]
    else:
        # at least one moving platform: concatenate per-ray location arrays
        new_radar.latitude["data"] = np.append(
            radar1.latitude["data"], radar2.latitude["data"]
        )
        new_radar.longitude["data"] = np.append(
            radar1.longitude["data"], radar2.longitude["data"]
        )
        new_radar.altitude["data"] = np.append(
            radar1.altitude["data"], radar2.altitude["data"]
        )
    return new_radar
<|endoftext|> |
<|endoftext|>"""
Default config for Workload Automation. DO NOT MODIFY this file. This file
gets copied to ~/.workload_automation/config.py on initial run of run_workloads.
Add your configuration to that file instead.
"""
# *** WARNING: ***
# Configuration listed in this file is NOT COMPLETE. This file sets the default
# configuration for WA and gives EXAMPLES of other configuration available. It
# is not supposed to be an exhaustive list.
# PLEASE REFER TO WA DOCUMENTATION FOR THE COMPLETE LIST OF AVAILABLE
# EXTENSIONS AND THEIR CONFIGURATION.
# This defines when the device will be rebooted during Workload Automation execution. #
# #
# Valid policies are: #
# never: The device will never be rebooted. #
#   as_needed:      The device will only be rebooted if the need arises (e.g. if it        #
#                   becomes unresponsive).                                                 #
# initial: The device will be rebooted when the execution first starts, just before executing #
# the first workload spec. #
# each_spec: The device will be rebooted before running a new workload spec. #
# each_iteration: The device will be rebooted before each new iteration. #
# #
reboot_policy = "as_needed"
# Defines the order in which the agenda spec will be executed. At the moment, #
# the following execution orders are supported: #
# #
#   by_iteration:  The first iteration of each workload spec is executed one after the other, #
# so all workloads are executed before proceeding on to the second iteration. #
# This is the default if no order is explicitly specified. #
# If multiple sections were specified, this will also split them up, so that specs #
# in the same section are further apart in the execution order. #
#   by_section:    Same as "by_iteration", but run specs from the same section one after the other #
# by_spec: All iterations of the first spec are executed before moving on to the next #
# spec. This may also be specified as ``"classic"``, as this was the way #
# workloads were executed in earlier versions of WA. #
#   random:        Randomises the order in which specs run.                                   #
execution_order = "by_iteration"
# This indicates when a job will be re-run.
# Possible values:
# OK: This iteration has completed and no errors have been detected
# PARTIAL: One or more instruments have failed (the iteration may still be running).
# FAILED: The workload itself has failed.
# ABORTED: The user interrupted the workload
#
# If set to an empty list, a job will not be re-run ever.
retry_on_status = ["FAILED", "PARTIAL"]
# How many times a job will be re-run before giving up
max_retries = 3
####################################################################################################
######################################### Device Settings ##########################################
####################################################################################################
# Specify the device you want to run workload automation on. This must be a #
# string with the ID of the device. At the moment, only 'TC2' is supported. #
# #
device = "generic_android"
# Configuration options that will be passed onto the device. These are obviously device-specific, #
# so check the documentation for the particular device to find out which options and values are #
# valid. The settings listed below are common to all devices #
# #
device_config = dict(
# The name used by adb to identify the device. Use "adb devices" in bash to list
# the devices currently seen by adb.
# adb_name='10.109.173.2:5555',
# The directory on the device that WA will use to push files to
# working_directory='/sdcard/wa-working',
# This specifies the device's CPU cores. The order must match how they
# appear in cpufreq. The example below is for TC2.
# core_names = ['a7', 'a7', 'a7', 'a15', 'a15']
# Specifies cluster mapping for the device's cores.
# core_clusters = [0, 0, 0, 1, 1]
)
####################################################################################################
################################### Instrumention Configuration ####################################
####################################################################################################
# This defines the additional instrumentation that will be enabled during workload execution, #
# which in turn determines what additional data (such as /proc/interrupts content or Streamline #
# traces) will be available in the results directory. #
# #
instrumentation = [
# Records the time it took to run the workload
"execution_time",
# Collects /proc/interrupts before and after execution and does a diff.
"interrupts",
# Collects the contents of /sys/devices/system/cpu before and after execution and does a diff.
"cpufreq",
# Gets energy usage from the workload from HWMON devices
# NOTE: the hardware needs to have the right sensors in order for this to work
#'hwmon',
# Run perf in the background during workload execution and then collect the results. perf is a
# standard Linux performance analysis tool.
#'perf',
# Collect Streamline traces during workload execution. Streamline is part of DS-5
#'streamline',
# Collects traces by interacting with Ftrace Linux kernel internal tracer
#'trace-cmd',
# Obtains the power consumption of the target device's core measured by National Instruments
# Data Acquisition(DAQ) device.
#'daq',
# Collects CCI counter data.
#'cci_pmu_logger',
# Collects FPS (Frames Per Second) and related metrics (such as jank) from
# the View of the workload (Note: only a single View per workload is
# supported at the moment, so this is mainly useful for games).
#'fps',
]
####################################################################################################
################################# Result Processors Configuration ##################################
####################################################################################################
# Specifies how results will be processed and presented. #
# #
result_processors = [
# Creates a status.txt that provides a summary status for the run
"status",
# Creates a results.txt file for each iteration that lists all collected metrics
# in "name = value (units)" format
"standard",
# Creates a results.csv that contains metrics for all iterations of all workloads
# in the .csv format.
"csv",
# Creates a summary.csv that contains summary metrics for all iterations of
# all workloads in the .csv format. Summary metrics are defined on a per-workload basis,
# are typically things like overall scores. The contents of summary.csv are
# always a subset of the contents of results.csv (if it is generated).
#'summary_csv',
# Creates a results.csv that contains metrics for all iterations of all workloads
# in the JSON format
#'json',
# Write results to an sqlite3 database. By default, a new database will be
# generated for each run, however it is possible to specify a path to an
# existing DB file (see result processor configuration below), in which
# case results from multiple runs may be stored in the one file.
#'sqlite',
]
####################################################################################################
################################### Logging output Configuration ###################################
####################################################################################################
# Specify the format of logging messages. The format uses the old formatting syntax: #
# #
# http://docs.python.org/2/library/stdtypes.html#string-formatting-operations #
# #
# The attributes that can be used in formats are listed here:                                     #
# #
# http://docs.python.org/2/library/logging.html#logrecord-attributes #
# #
logging = {
# Log file format
"file format": "%(asctime)s %(levelname)-8s %(name)s: %(message)s",
# Verbose console output format
"verbose format": "%(asctime)s %(levelname)-8s %(name)s: %(message)s",
# Regular console output format
"regular format": "%(levelname)-8s %(message)s",
# Colouring the console output
"colour_enabled": True,
}
####################################################################################################
#################################### Instruments Configuration #####################################
####################################################################################################
# Instrumentation Configuration is related to a specific instrument's settings. Some of the       #
# instrumentations require specific settings in order for them to work. These settings are #
# specified here. #
# Note that these settings only take effect if the corresponding instrument is
# enabled above.
####################################################################################################
######################################## perf configuration ########################################
# The hardware events such as instructions executed, cache-misses suffered, or branches
# mispredicted to be reported by perf. Events can be obtained from the device by typing
# 'perf list'.
# perf_events = ['migrations', 'cs']
# The perf options which can be obtained from man page for perf-record
# perf_options = '-a -i'
####################################################################################################
####################################### hwmon configuration ########################################
# The kinds of sensors hwmon instrument will look for
# hwmon_sensors = ['energy', 'temp']
####################################################################################################
###################################### trace-cmd configuration #####################################
# trace-cmd events to be traced. The events can be found by rooting on the device then type
# 'trace-cmd list -e'
# trace_events = ['power*']
####################################################################################################
######################################### DAQ configuration ########################################
# The host address of the machine that runs the daq Server which the instrument communicates with
# daq_server_host = '10.1.17.56'
# The port number for the daq Server with which the daq instrument communicates
# daq_server_port = 56788
# The values of resistors 1 and 2 (in Ohms) across which the voltages are measured
# daq_resistor_values = [0.002, 0.002]
####################################################################################################
################################### cci_pmu_logger configuration ###################################
# The events to be counted by PMU
# NOTE: The number of events must not exceed the number of counters available (which is 4 for CCI-400)
# cci_pmu_events = ['0x63', '0x83']
# The name of the events which will be used when reporting PMU counts
# cci_pmu_event_labels = ['event_0x63', 'event_0x83']
# The period (in jiffies) between counter reads
# cci_pmu_period = 15
####################################################################################################
################################### fps configuration ##############################################
# Data points below this FPS will dropped as not constituting "real" gameplay. The assumption
# being that while actually running, the FPS in the game will not drop below X frames per second,
# except on loading screens, menus, etc, which should not contribute to FPS calculation.
# fps_drop_threshold=5
# If set to True, this will keep the raw dumpsys output in the results directory (this is mainly
# used for debugging). Note: frames.csv with collected frames data will always be generated
# regardless of this setting.
# fps_keep_raw=False
####################################################################################################
################################# Result Processor Configuration ###################################
####################################################################################################
# Specifies an alternative database to store results in. If the file does not
# exist, it will be created (the directory of the file must exist however). If
# the file does exist, the results will be added to the existing data set (each
# run has a UUID, so results won't clash even if identical agendas were used).
# Note that in order for this to work, the version of the schema used to generate
# the DB file must match that of the schema used for the current run. Please
# see "What's new" secition in WA docs to check if the schema has changed in
# recent releases of WA.
# sqlite_database = '/work/results/myresults.sqlite'
# If the file specified by sqlite_database exists, setting this to True will
# cause that file to be overwritten rather than updated -- existing results in
# the file will be lost.
# sqlite_overwrite = False
# distribution: internal
####################################################################################################
#################################### Resource Getter configuration #################################
####################################################################################################
# The location on your system where /arm/scratch is mounted. Used by
# Scratch resource getter.
# scratch_mount_point = '/arm/scratch'
# end distribution
<|endoftext|> |
<|endoftext|># Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Original implementation by Rene de Jong. Updated by Sascha Bischoff.
import logging
from wlauto import LinuxDevice, Parameter
from wlauto.common.gem5.device import BaseGem5Device
from wlauto.utils import types
class Gem5LinuxDevice(BaseGem5Device, LinuxDevice):
    """
    Implements gem5 Linux device.

    This class allows a user to connect WA to a simulation using gem5. The
    connection to the device is made using the telnet connection of the
    simulator, and is used for all commands. The simulator does not have ADB
    support, and therefore we need to fall back to using standard shell
    commands.

    Files are copied into the simulation using a VirtIO 9P device in gem5. Files
    are copied out of the simulated environment using the m5 writefile command
    within the simulated system.

    When starting the workload run, the simulator is automatically started by
    Workload Automation, and a connection to the simulator is established. WA
    will then wait for Android to boot on the simulated system (which can take
    hours), prior to executing any other commands on the device. It is also
    possible to resume from a checkpoint when starting the simulation. To do
    this, please append the relevant checkpoint commands from the gem5
    simulation script to the gem5_discription argument in the agenda.

    Host system requirements:
        * VirtIO support. We rely on diod on the host system. This can be
          installed on ubuntu using the following command:
              sudo apt-get install diod

    Guest requirements:
        * VirtIO support. We rely on VirtIO to move files into the simulation.
          Please make sure that the following are set in the kernel
          configuration:
              CONFIG_NET_9P=y
              CONFIG_NET_9P_VIRTIO=y
              CONFIG_9P_FS=y
              CONFIG_9P_FS_POSIX_ACL=y
              CONFIG_9P_FS_SECURITY=y
              CONFIG_VIRTIO_BLK=y
        * m5 binary. Please make sure that the m5 binary is on the device and
          can by found in the path.
    """

    # Plugin identity used by WA to select this device in the agenda/config.
    name = "gem5_linux"
    platform = "linux"

    parameters = [
        Parameter("core_names", default=[], override=True),
        Parameter("core_clusters", default=[], override=True),
        Parameter(
            "host",
            default="localhost",
            override=True,
            description="Host name or IP address for the device.",
        ),
        # Patterns recognised as a console login prompt (regex alternatives).
        Parameter(
            "login_prompt",
            kind=types.list_of_strs,
            default=["login:", "AEL login:", "username:"],
            mandatory=False,
        ),
        # Patterns recognised as the password prompt following a login.
        Parameter(
            "login_password_prompt",
            kind=types.list_of_strs,
            default=["password:"],
            mandatory=False,
        ),
    ]

    # Overwritten from Device. For documentation, see corresponding method in
    # Device.

    def __init__(self, **kwargs):
        # Dedicated logger so gem5-device messages are identifiable in WA logs.
        self.logger = logging.getLogger("Gem5LinuxDevice")
        # Initialise both bases explicitly: LinuxDevice consumes the WA
        # parameters (**kwargs); BaseGem5Device sets up the simulator side.
        LinuxDevice.__init__(self, **kwargs)
        BaseGem5Device.__init__(self)

    def login_to_device(self):
        """Log in on the simulator's telnet console, if a login is required."""
        # Wait for the login prompt
        prompt = self.login_prompt + [self.sckt.UNIQUE_PROMPT]
        i = self.sckt.expect(prompt, timeout=10)
        # Check if we are already at a prompt, or if we need to log in.
        # (the last entry in ``prompt`` is the shell prompt itself)
        if i < len(prompt) - 1:
            self.sckt.sendline("{}".format(self.username))
            password_prompt = self.login_password_prompt + [
                r"# ",
                self.sckt.UNIQUE_PROMPT,
            ]
            j = self.sckt.expect(password_prompt, timeout=self.delay)
            # Only send the password if an actual password prompt matched;
            # the last two patterns mean we already reached a shell.
            if j < len(password_prompt) - 2:
                self.sckt.sendline("{}".format(self.password))
            self.sckt.expect([r"# ", self.sckt.UNIQUE_PROMPT], timeout=self.delay)

    def capture_screen(self, filepath):
        """Capture a screenshot via gem5 if possible, else via LinuxDevice."""
        if BaseGem5Device.capture_screen(self, filepath):
            return
        # If we didn't manage to do the above, call the parent class.
        self.logger.warning(
            "capture_screen: falling back to parent class implementation"
        )
        LinuxDevice.capture_screen(self, filepath)

    def initialize(self, context):
        # Prepare the simulated console and push the m5 utility binary
        # (skipped if already present, hence force=False).
        self.resize_shell()
        self.deploy_m5(context, force=False)
<|endoftext|> |
<|endoftext|>"""Louie version information."""
NAME = "Louie"
DESCRIPTION = "Signal dispatching mechanism"
VERSION = "1.1"
<|endoftext|> |
<|endoftext|># Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=attribute-defined-outside-init
import os
import sqlite3
import json
import uuid
from datetime import datetime, timedelta
from contextlib import contextmanager
from wlauto import ResultProcessor, settings, Parameter
from wlauto.exceptions import ResultProcessorError
from wlauto.utils.types import boolean
# IMPORTANT: when updating this schema, make sure to bump the version!
SCHEMA_VERSION = "0.0.2"
SCHEMA = [
"""CREATE TABLE runs (
uuid text,
start_time datetime,
end_time datetime,
duration integer
)""",
"""CREATE TABLE workload_specs (
id text,
run_oid text,
number_of_iterations integer,
label text,
workload_name text,
boot_parameters text,
runtime_parameters text,
workload_parameters text
)""",
"""CREATE TABLE metrics (
spec_oid int,
iteration integer,
metric text,
value text,
units text,
lower_is_better integer
)""",
"""CREATE VIEW results AS
SELECT uuid as run_uuid, spec_id, label as workload, iteration, metric, value, units, lower_is_better
FROM metrics AS m INNER JOIN (
SELECT ws.OID as spec_oid, ws.id as spec_id, uuid, label
FROM workload_specs AS ws INNER JOIN runs AS r ON ws.run_oid = r.OID
) AS wsr ON wsr.spec_oid = m.spec_oid
""",
"""CREATE TABLE __meta (
schema_version text
)""",
"""INSERT INTO __meta VALUES ("{}")""".format(SCHEMA_VERSION),
]
sqlite3.register_adapter(datetime, lambda x: x.isoformat())
sqlite3.register_adapter(timedelta, lambda x: x.total_seconds())
sqlite3.register_adapter(uuid.UUID, str)
class SqliteResultProcessor(ResultProcessor):
    # FIX: the original declared ``name = "sqlite"`` twice; the duplicate
    # has been removed.
    name = "sqlite"
    description = """
    Stores results in an sqlite database.
    This may be used to accumulate results of multiple runs in a single file.
    """
    parameters = [
        Parameter(
            "database",
            default=None,
            global_alias="sqlite_database",
            description=""" Full path to the sqlite database to be used. If this is not specified then
            a new database file will be created in the output directory. This setting can be
            used to accumulate results from multiple runs in a single database. If the
            specified file does not exist, it will be created, however the directory of the
            file must exist.
            .. note:: The value must resolve to an absolute path,
                      relative paths are not allowed; however the
                      value may contain environment variables and/or
                      the home reference ~.
            """,
        ),
        Parameter(
            "overwrite",
            kind=boolean,
            default=False,
            global_alias="sqlite_overwrite",
            description="""If ``True``, this will overwrite the database file
                           if it already exists. If ``False`` (the default) data
                           will be added to the existing file (provided schema
                           versions match -- otherwise an error will be raised).
            """,
        ),
    ]

    def initialize(self, context):
        """Open (or create) the database and register this run in it."""
        self._last_spec = None
        self._run_oid = None
        self._spec_oid = None
        if not os.path.exists(self.database):
            self._initdb()
        elif self.overwrite:  # pylint: disable=no-member
            # Existing data is deliberately discarded when overwrite is set.
            os.remove(self.database)
            self._initdb()
        else:
            # Reusing an existing database: its schema version must match.
            self._validate_schema_version()
        self._update_run(context.run_info.uuid)

    def process_iteration_result(self, result, context):
        """Store all metrics collected for a single iteration."""
        if self._last_spec != context.spec:
            self._update_spec(context.spec)
        metrics = [
            (
                self._spec_oid,
                context.current_iteration,
                m.name,
                str(m.value),
                m.units,
                int(m.lower_is_better),
            )
            for m in result.metrics
        ]
        with self._open_connection() as conn:
            conn.executemany("INSERT INTO metrics VALUES (?,?,?,?,?,?)", metrics)

    def process_run_result(self, result, context):
        """Fill in the timing columns for this run once it has completed."""
        info = context.run_info
        with self._open_connection() as conn:
            conn.execute(
                """UPDATE runs SET start_time=?, end_time=?, duration=?
                WHERE OID=?""",
                (info.start_time, info.end_time, info.duration, self._run_oid),
            )

    def validate(self):
        """Resolve the database path, defaulting to the output directory."""
        if not self.database:  # pylint: disable=access-member-before-definition
            self.database = os.path.join(settings.output_directory, "results.sqlite")
        self.database = os.path.expandvars(os.path.expanduser(self.database))

    def _initdb(self):
        # Create all tables, the results view and the schema metadata.
        with self._open_connection() as conn:
            for command in SCHEMA:
                conn.execute(command)

    def _validate_schema_version(self):
        # Raise ResultProcessorError if the file is not a WA database or was
        # created with a different schema version.
        with self._open_connection() as conn:
            try:
                c = conn.execute("SELECT schema_version FROM __meta")
                found_version = c.fetchone()[0]
            except sqlite3.OperationalError:
                message = (
                    "{} does not appear to be a valid WA results database.".format(
                        self.database
                    )
                )
                raise ResultProcessorError(message)
            if found_version != SCHEMA_VERSION:
                message = (
                    "Schema version in {} ({}) does not match current version ({})."
                )
                raise ResultProcessorError(
                    message.format(self.database, found_version, SCHEMA_VERSION)
                )

    def _update_run(self, run_uuid):
        # Insert the run row and remember its OID for later updates.
        with self._open_connection() as conn:
            conn.execute("INSERT INTO runs (uuid) VALUES (?)", (run_uuid,))
            conn.commit()
            c = conn.execute("SELECT OID FROM runs WHERE uuid=?", (run_uuid,))
            self._run_oid = c.fetchone()[0]

    def _update_spec(self, spec):
        # Insert the workload spec row and cache its OID; invoked whenever
        # the current spec changes between iterations.
        self._last_spec = spec
        spec_tuple = (
            spec.id,
            self._run_oid,
            spec.number_of_iterations,
            spec.label,
            spec.workload_name,
            json.dumps(spec.boot_parameters),
            json.dumps(spec.runtime_parameters),
            json.dumps(spec.workload_parameters),
        )
        with self._open_connection() as conn:
            conn.execute(
                "INSERT INTO workload_specs VALUES (?,?,?,?,?,?,?,?)", spec_tuple
            )
            conn.commit()
            c = conn.execute(
                "SELECT OID FROM workload_specs WHERE run_oid=? AND id=?",
                (self._run_oid, spec.id),
            )
            self._spec_oid = c.fetchone()[0]

    @contextmanager
    def _open_connection(self):
        # FIX: renamed from the misspelled ``_open_connecton`` (private,
        # only referenced within this class).  Commits on exit even when an
        # exception propagates, matching the original behaviour.
        conn = sqlite3.connect(self.database)
        try:
            yield conn
        finally:
            conn.commit()
<|endoftext|> |
<|endoftext|># Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains utilities for implemening device hard reset
using Netio 230 series power switches. This utilizes the KSHELL connection.
"""
import telnetlib
import socket
import re
import time
import logging
logger = logging.getLogger("NetIO")
class NetioError(Exception):
    """Raised when the Netio power switch returns an error or unexpected output."""
    pass
class KshellConnection(object):
    """Telnet (KSHELL) connection to a Netio 230 series power switch."""

    # Matches a "<numeric code> <message>" response line from the switch.
    response_regex = re.compile(r"^(\d+) (.*?)\r\n")
    # Seconds to wait after each write before reading the switch's response.
    delay = 0.5

    def __init__(self, host="ippowerbar", port=1234, timeout=None):
        """Parameters are passed into ``telnetlib.Telnet`` -- see Python docs."""
        self.host = host
        self.port = port
        self.conn = telnetlib.Telnet(host, port, timeout)
        time.sleep(self.delay)  # give time to respond
        output = self.conn.read_very_eager()
        # The switch greets new connections with a HELLO banner.
        if "HELLO" not in output:
            raise NetioError(
                "Could not connect: did not see a HELLO. Got: {}".format(output)
            )

    def login(self, user, password):
        """Authenticate with the switch; raises NetioError on failure."""
        # 250 is the switch's "OK" response code.
        code, out = self.send_command("login {} {}\r\n".format(user, password))
        if code != 250:
            raise NetioError("Login failed. Got: {} {}".format(code, out))

    def enable_port(self, port):
        """Enable the power supply at the specified port."""
        self.set_port(port, 1)

    def disable_port(self, port):
        """Disable the power supply at the specified port."""
        self.set_port(port, 0)

    def set_port(self, port, value):
        """Set the given port to ``value`` (1 = on, 0 = off)."""
        code, out = self.send_command("port {} {}".format(port, value))
        if code != 250:
            raise NetioError(
                "Could not set {} on port {}. Got: {} {}".format(value, port, code, out)
            )

    def send_command(self, command):
        """Send ``command`` to the switch and return ``(code, message)``."""
        try:
            if command.startswith("login"):
                # Mask the password when echoing the login command to the log.
                parts = command.split()
                parts[2] = "*" * len(parts[2])
                logger.debug(" ".join(parts))
            else:
                logger.debug(command)
            # NOTE(review): telnetlib on Python 3 expects bytes; this code
            # appears to target Python 2 -- confirm before porting.
            self.conn.write("{}\n".format(command))
            time.sleep(self.delay)  # give time to respond
            out = self.conn.read_very_eager()
            match = self.response_regex.search(out)
            if not match:
                raise NetioError("Invalid response: {}".format(out.strip()))
            logger.debug("response: {} {}".format(match.group(1), match.group(2)))
            return int(match.group(1)), match.group(2)
        except socket.error as err:
            # On a socket error, check whether the switch reported a
            # connection timeout before re-raising the original error.
            try:
                time.sleep(self.delay)  # give time to respond
                out = self.conn.read_very_eager()
                if out.startswith("130 CONNECTION TIMEOUT"):
                    raise NetioError("130 Timed out.")
            except EOFError:
                pass
            raise err

    def close(self):
        """Close the underlying telnet connection."""
        self.conn.close()
<|endoftext|> |
<|endoftext|># Copyright 2012-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=no-member
# pylint: disable=attribute-defined-outside-init
import os
import time
from wlauto import settings, Workload, Executable, Parameter
from wlauto.exceptions import ConfigError, WorkloadError
from wlauto.utils.types import boolean
# Name of the cyclictest output file created on (and pulled back from) the device.
TXT_RESULT_NAME = "cyclictest_result.txt"
# Maps the single-letter field tags of cyclictest output lines to the
# human-readable words used when building metric names.
RESULT_INTERPRETATION = {
    "T": "Thread",
    "P": "Priority",
    "C": "Clock",
}
class Cyclictest(Workload):
    """Runs the rt-tests ``cyclictest`` binary on a rooted device and reports
    the measured timer-wakeup latencies (Min/Act/Avg/Max) as metrics."""

    name = "cyclictest"
    description = """
    Measures the amount of time that passes between when a timer expires and
    when the thread which set the timer actually runs.
    Cyclic test works by taking a time snapshot just prior to waiting for a specific
    time interval (t1), then taking another time snapshot after the timer
    finishes (t2), then comparing the theoretical wakeup time with the actual
    wakeup time (t2 -(t1 + sleep_time)). This value is the latency for that
    timers wakeup.
    """

    parameters = [
        Parameter(
            "clock",
            allowed_values=["monotonic", "realtime"],
            default="realtime",
            description=("specify the clock to be used during the test."),
        ),
        Parameter(
            "duration",
            kind=int,
            default=30,
            description=("Specify the length for the test to run in seconds."),
        ),
        Parameter(
            "quiet",
            kind=boolean,
            default=True,
            description=("Run the tests quiet and print only a summary on exit."),
        ),
        Parameter(
            "thread",
            kind=int,
            default=8,
            description=("Set the number of test threads"),
        ),
        Parameter(
            "latency",
            kind=int,
            default=1000000,
            description=("Write the value to /dev/cpu_dma_latency"),
        ),
        Parameter(
            "extra_parameters",
            kind=str,
            default="",
            description=(
                "Any additional command line parameters to append to the "
                "existing parameters above. A list can be found at "
                "https://rt.wiki.kernel.org/index.php/Cyclictest or "
                "in the help page ``cyclictest -h``"
            ),
        ),
        Parameter(
            "clear_file_cache",
            kind=boolean,
            default=True,
            description=("Clear file caches before starting test"),
        ),
        Parameter(
            "screen_off",
            kind=boolean,
            default=True,
            description=(
                "If true it will turn the screen off so that onscreen "
                "graphics do not effect the score. This is predominantly "
                "for devices without a GPU"
            ),
        ),
    ]

    def setup(self, context):
        """Install the cyclictest binary on the device and build its command line.

        Raises WorkloadError if the device is not rooted.
        """
        self.cyclictest_on_device = "cyclictest"
        self.cyclictest_result = os.path.join(
            self.device.working_directory, TXT_RESULT_NAME
        )
        self.cyclictest_command = (
            "{} --clock={} --duration={}s --thread={} --latency={} {} {} > {}"
        )
        self.device_binary = None
        if not self.device.is_rooted:
            # FIX: error message previously misspelled "permissions".
            raise WorkloadError(
                "This workload requires a device with root permissions to run"
            )
        host_binary = context.resolver.get(
            Executable(self, self.device.abi, "cyclictest")
        )
        self.device_binary = self.device.install(host_binary)
        # cyclictest's --clock option takes 0 for CLOCK_MONOTONIC and
        # 1 for CLOCK_REALTIME.
        self.cyclictest_command = self.cyclictest_command.format(
            self.device_binary,
            0 if self.clock == "monotonic" else 1,
            self.duration,
            self.thread,
            self.latency,
            "--quiet" if self.quiet else "",
            self.extra_parameters,
            self.cyclictest_result,
        )
        if self.clear_file_cache:
            self.device.execute("sync")
            self.device.set_sysfile_value("/proc/sys/vm/drop_caches", 3)
        if self.device.platform == "android":
            if self.screen_off and self.device.is_screen_on:
                # Key event 26 is the power button (turns the screen off).
                self.device.execute("input keyevent 26")

    def run(self, context):
        """Run cyclictest as root, allowing twice the nominal duration as timeout."""
        self.device.execute(self.cyclictest_command, self.duration * 2, as_root=True)

    def update_result(self, context):
        """Pull the result file from the device and parse metrics out of it."""
        self.device.pull_file(self.cyclictest_result, context.output_directory)

        # Parsing the output.
        # Standard Cyclictest Output:
        # T: 0 (31974) P:95 I:1000 C:4990 Min:9 Act:37 Avg:31 Max:59
        with open(os.path.join(context.output_directory, TXT_RESULT_NAME)) as f:
            for line in f:
                # FIX: was `line.find("C:") is not -1`, an identity comparison
                # with an int literal (CPython-cache dependent, SyntaxWarning
                # on Python 3.8+); use an equality test instead.
                if line.find("C:") != -1:
                    # key = "T: 0 (31974) P:95 I:1000"
                    # remaining = " 4990 Min:9 Act:37 Avg:31 Max:59"
                    # separator = "C:"
                    (key, separator, remaining) = line.partition("C:")
                    index = key.find("T")
                    key = key.replace(key[index], RESULT_INTERPRETATION["T"])
                    index = key.find("P")
                    key = key.replace(key[index], RESULT_INTERPRETATION["P"])
                    index = separator.find("C")
                    separator = separator.replace(
                        separator[index], RESULT_INTERPRETATION["C"]
                    )
                    metrics = (separator + remaining).split()
                    # metrics now alternates name/value, e.g.
                    # ['Clock:', '4990', 'Min:', '9', 'Act:', '37', 'Avg:', '31', 'Max:', '59']
                    for i in range(0, len(metrics), 2):
                        full_key = key + " " + metrics[i][:-1]
                        value = int(metrics[i + 1])
                        context.result.add_metric(full_key, value, "microseconds")

    def teardown(self, context):
        """Restore the screen state and remove the result file from the device."""
        if self.device.platform == "android":
            if self.screen_off:
                self.device.ensure_screen_is_on()
        self.device.execute("rm -f {}".format(self.cyclictest_result))
<|endoftext|> |
<|endoftext|># Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101,W0201
import os
import re
from collections import defaultdict
from wlauto import Workload, Parameter, File
from wlauto.utils.types import caseless_string
from wlauto.exceptions import WorkloadError
class Recentfling(Workload):
    """Drives AOSP's recentfling.sh on the device and reports UI-jank metrics
    parsed from its ``dumpsys gfxinfo`` based output."""

    name = "recentfling"
    description = """
    Tests UI jank on android devices.
    For this workload to work, ``recentfling.sh`` and ``defs.sh`` must be placed
    in ``~/.workload_automation/dependencies/recentfling/``. These can be found
    in the [AOSP Git repository](https://android.googlesource.com/platform/system/extras/+/master/tests/).
    To change the apps that are opened at the start of the workload you will need
    to modify the ``defs.sh`` file. You will need to add your app to ``dfltAppList``
    and then add a variable called ``{app_name}Activity`` with the name of the
    activity to launch (where ``{app_name}`` is the name you put into ``dfltAppList``).
    You can get a list of activities available on your device by running
    ``adb shell pm list packages -f``
    """
    supported_platforms = ["android"]

    parameters = [
        Parameter(
            "loops", kind=int, default=3, description="The number of test iterations."
        ),
    ]

    # Matches one latency summary line of recentfling's output.
    # FIX: compiled once here (raw string) instead of re.compile() on every
    # line of output inside update_result's loop, and the non-raw pattern
    # produced invalid-escape-sequence warnings.
    _LINE_REGEX = re.compile(
        r"Frames: \d+ latency: (?P<pct90>\d+)/(?P<pct95>\d+)/(?P<pct99>\d+) Janks: (?P<jank>\d+)\((?P<jank_pct>\d+)%\)"
    )

    def initialise(self, context):  # pylint: disable=no-self-use
        """Check device requirements: ``dumpsys gfxinfo`` needs Android M (SDK 23)."""
        if context.device.get_sdk_version() < 23:
            # FIX: message previously contained an accidental run of embedded
            # whitespace from a backslash line continuation.
            raise WorkloadError(
                "This workload relies on ``dumpsys gfxinfo`` "
                "only present in Android M and onwards"
            )

    def setup(self, context):
        """Push the recentfling scripts to the device and prepare the screen."""
        self.defs_host = context.resolver.get(File(self, "defs.sh"))
        self.recentfling_host = context.resolver.get(File(self, "recentfling.sh"))
        self.device.push_file(self.recentfling_host, self.device.working_directory)
        self.device.push_file(self.defs_host, self.device.working_directory)
        self._kill_recentfling()
        self.device.ensure_screen_is_on()

    def run(self, context):
        """Run recentfling.sh on the device, recording its pid for cleanup."""
        # NOTE(review): because of `exec`, the trailing `rm {dir}/pidfile`
        # never runs in the same shell — confirm whether that is intentional.
        cmd = "echo $$>{dir}/pidfile; exec {dir}/recentfling.sh -i {}; rm {dir}/pidfile"
        cmd = cmd.format(self.loops, dir=self.device.working_directory)
        try:
            self.output = self.device.execute(cmd, timeout=120)
        except KeyboardInterrupt:
            self._kill_recentfling()
            raise

    def update_result(self, context):
        """Parse per-loop and average jank metrics from the captured output."""
        group_names = [
            "90th Percentile",
            "95th Percentile",
            "99th Percentile",
            "Jank",
            "Jank%",
        ]
        count = 0
        for line in self.output.strip().splitlines():
            match = self._LINE_REGEX.search(line)
            if match:
                count += 1
                if line.startswith("AVE: "):
                    # The summary line reports averages across all loops.
                    group_names = ["Average " + g for g in group_names]
                    count = 0
                for metric in zip(group_names, match.groups()):
                    context.result.add_metric(
                        metric[0],
                        metric[1],
                        None,
                        classifiers={"loop": count or "Average"},
                    )

    def teardown(self, context):
        """Remove the pushed scripts from the device."""
        self.device.delete_file(
            self.device.path.join(self.device.working_directory, "recentfling.sh")
        )
        self.device.delete_file(
            self.device.path.join(self.device.working_directory, "defs.sh")
        )

    def _kill_recentfling(self):
        """Kill a previously started recentfling run, identified by its pidfile."""
        pid = self.device.execute(
            "cat {}/pidfile".format(self.device.working_directory)
        )
        if pid:
            self.device.kill(pid.strip(), signal="SIGKILL")
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys

from math import modf
from time import time
from time import gmtime, strftime
class HtrunLogger(object):
    """! Yet another logger flavour

    Thread-safe stdout logger: every print acquires the shared lock passed
    to the constructor, so concurrent host-test threads do not interleave.
    """

    def __init__(self, prn_lock, name):
        self.__prn_lock = prn_lock
        self.__name = name

    def __prn_func(self, text, nl=True):
        """! Prints and flushes data to stdout"""
        with self.__prn_lock:
            if nl and not text.endswith("\n"):
                text += "\n"
            sys.stdout.write(text)
            sys.stdout.flush()

    def __prn_log_human(self, level, text, timestamp=None):
        """Human-readable variant with a wall-clock timestamp.

        FIX: strftime/gmtime/modf were used without being imported (NameError
        at runtime), and '%d' applied to the float fraction always printed 0;
        the fraction is now rendered as zero-padded milliseconds.
        """
        if not timestamp:
            timestamp = time()
        timestamp_str = strftime("%y-%m-%d %H:%M:%S", gmtime(timestamp))
        frac, _whole = modf(timestamp)
        s = "[%s.%03d][%s][%s] %s" % (
            timestamp_str,
            int(frac * 1000),
            self.__name,
            level,
            text,
        )
        self.__prn_func(s, nl=True)

    def __prn_log(self, level, text, timestamp=None):
        """Core formatter: '[<epoch>.<2dp>][<name>][<level>] <text>'."""
        if not timestamp:
            timestamp = time()
        s = "[%.2f][%s][%s] %s" % (timestamp, self.__name, level, text)
        self.__prn_func(s, nl=True)

    def prn_dbg(self, text, timestamp=None):
        """Log a debug message."""
        self.__prn_log("DBG", text, timestamp)

    def prn_wrn(self, text, timestamp=None):
        """Log a warning message."""
        self.__prn_log("WRN", text, timestamp)

    def prn_err(self, text, timestamp=None):
        """Log an error message."""
        self.__prn_log("ERR", text, timestamp)

    def prn_inf(self, text, timestamp=None):
        """Log an informational message."""
        self.__prn_log("INF", text, timestamp)

    def prn_txt(self, text, timestamp=None):
        """Log raw text from the device."""
        self.__prn_log("TXT", text, timestamp)

    def prn_txd(self, text, timestamp=None):
        """Log data transmitted to the device."""
        self.__prn_log("TXD", text, timestamp)

    def prn_rxd(self, text, timestamp=None):
        """Log data received from the device."""
        self.__prn_log("RXD", text, timestamp)
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from mbed_host_tests import is_host_test
from mbed_host_tests import get_host_test
from mbed_host_tests import get_plugin_caps
from mbed_host_tests import get_host_test_list
class BasicHostTestsTestCase(unittest.TestCase):
    """Smoke tests for the public mbed_host_tests registry helpers."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_basic_get_host_test(self):
        # Both built-in default host tests must resolve to an object.
        for name in ("default", "default_auto"):
            self.assertNotEqual(None, get_host_test(name))

    def test_basic_is_host_test(self):
        # Falsy / unknown names are rejected, known names accepted.
        for bad in ("", None):
            self.assertFalse(is_host_test(bad))
        for good in ("default", "default_auto"):
            self.assertTrue(is_host_test(good))

    def test_get_host_test_list(self):
        registry = get_host_test_list()
        self.assertIs(type(registry), dict)
        for name in ("default", "default_auto"):
            self.assertIn(name, registry)

    def test_get_plugin_caps(self):
        caps = get_plugin_caps()
        self.assertIs(type(caps), dict)
# Allow running this test module directly with `python <module>.py`.
if __name__ == "__main__":
    unittest.main()
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
# small RNA oriented bowtie wrapper
# version 1.5 17-7-2014: arg parser implementation
# Usage sRbowtie.py <1 input_fasta_file> <2 alignment method> <3 -v mismatches> <4 out_type> <5 buildIndexIfHistory> <6 fasta/bowtie index> <7 bowtie output> <8 ali_fasta> <9 unali_fasta> <10 --num-threads \${GALAXY_SLOTS:-4}>
# current rev: for bowtie __norc, move from --supress 2,6,7,8 to --supress 6,7,8. Future Parser must be updated to take into account this standardisation
# Christophe Antoniewski <drosofff@gmail.com>
import sys
import os
import subprocess
import tempfile
import shutil
import argparse
def Parser():
    """Build and parse the command line of the small-RNA bowtie wrapper.

    Returns the argparse namespace with all options (every value is a str,
    or None when the option was not supplied).
    """
    parser = argparse.ArgumentParser(
        description="bowtie wrapper for small fasta reads"
    )
    # (flag, explicit dest or None, help text) for every supported option.
    # When dest is None argparse derives it from the flag name.
    option_specs = [
        ("--input", None, "input file"),
        ("--input-format", "input_format", "fasta or fastq"),
        ("--method", None, "RNA, unique, multiple, k_option, n_option, a_option"),
        (
            "--v-mismatches",
            "v_mismatches",
            "number of mismatches allowed for the alignments",
        ),
        ("--output-format", "output_format", "tabular, sam, bam"),
        ("--output", None, "output file path"),
        ("--index-from", "index_from", "indexed or history"),
        ("--index-source", "index_source", "file path to the index source"),
        ("--aligned", None, "aligned read file path, maybe None"),
        ("--unaligned", None, "unaligned read file path, maybe None"),
        ("--num-threads", "num_threads", "number of bowtie threads"),
    ]
    for flag, dest, help_text in option_specs:
        kwargs = {"action": "store", "type": str, "help": help_text}
        if dest is not None:
            kwargs["dest"] = dest
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
def stop_err(msg):
    """Print *msg* to stderr and abort with a non-zero exit status."""
    sys.stderr.write("%s\n" % msg)
    # FIX: the original bare sys.exit() exited with status 0, so callers
    # (e.g. Galaxy) saw the error path as a successful run.
    sys.exit(1)
def bowtieCommandLiner(
    alignment_method="RNA",
    v_mis="1",
    out_type="tabular",
    aligned="None",
    unaligned="None",
    input_format="fasta",
    input="path",
    index="path",
    output="path",
    pslots="4",
):
    """Build the bowtie shell command line for the requested alignment mode.

    alignment_method selects the bowtie reporting options; aligned/unaligned
    are the string "None" or file paths for --al/--un; out_type selects
    tabular, sam or bam (the latter pipes through samtools).

    Raises Exception on an unrecognised input format, alignment method or
    output type (the original silently hit a NameError / returned None).
    """
    if input_format == "fasta":
        input_format = "-f"
    elif (input_format == "fastq") or (input_format == "fastqsanger"):
        input_format = "-q"
    else:
        raise Exception("input format must be one of fasta or fastq")
    if alignment_method == "RNA":
        x = "-v %s -M 1 --best --strata -p %s --norc --suppress 6,7,8" % (v_mis, pslots)
    elif alignment_method == "unique":
        x = "-v %s -m 1 -p %s --suppress 6,7,8" % (v_mis, pslots)
    elif alignment_method == "multiple":
        x = "-v %s -M 1 --best --strata -p %s --suppress 6,7,8" % (v_mis, pslots)
    elif alignment_method == "k_option":
        x = "-v %s -k 1 --best -p %s --suppress 6,7,8" % (v_mis, pslots)
    elif alignment_method == "n_option":
        x = "-n %s -M 1 --best -p %s --suppress 6,7,8" % (v_mis, pslots)
    elif alignment_method == "a_option":
        x = "-v %s -a --best -p %s --suppress 6,7,8" % (v_mis, pslots)
    else:
        # FIX: an unknown method previously fell through and raised
        # NameError on `x` below; fail with a clear message instead.
        raise Exception("unknown alignment method: %s" % alignment_method)
    if aligned == "None" and unaligned == "None":
        fasta_command = ""
    elif aligned != "None" and unaligned == "None":
        fasta_command = " --al %s" % aligned
    elif aligned == "None" and unaligned != "None":
        fasta_command = " --un %s" % unaligned
    else:
        fasta_command = " --al %s --un %s" % (aligned, unaligned)
    x = x + fasta_command
    if out_type == "tabular":
        return "bowtie %s %s %s %s > %s" % (x, index, input_format, input, output)
    elif out_type == "sam":
        return "bowtie %s -S %s %s %s > %s" % (x, index, input_format, input, output)
    elif out_type == "bam":
        return "bowtie %s -S %s %s %s |samtools view -bS - > %s" % (
            x,
            index,
            input_format,
            input,
            output,
        )
    # FIX: an unknown output type previously returned None silently.
    raise Exception("unknown output format: %s" % out_type)
def bowtie_squash(fasta):
    """Build a temporary bowtie index from the *fasta* file.

    Returns (tmp_index_dir, index_full_path). On success the temporary
    directory is NOT removed here; the caller must clean it up (see
    bowtie_alignment's flyPreIndexed handling).
    """
    # make temp directory for bowtie indexes
    tmp_index_dir = tempfile.mkdtemp()
    ref_file = tempfile.NamedTemporaryFile(dir=tmp_index_dir)
    ref_file_name = ref_file.name
    # by default, delete the temporary file, but ref_file.name is now stored
    # in ref_file_name
    ref_file.close()
    # symlink between the fasta source file and the deleted ref_file name
    os.symlink(fasta, ref_file_name)
    # bowtie command line, which will work after changing dir
    # (cwd=tmp_index_dir)
    cmd1 = "bowtie-build -f %s %s" % (ref_file_name, ref_file_name)
    try:
        FNULL = open(os.devnull, "w")
        # a path string for a temp file in tmp_index_dir. Just a string
        tmp = tempfile.NamedTemporaryFile(dir=tmp_index_dir).name
        # creates and open a file handler pointing to the temp file
        tmp_stderr = open(tmp, "wb")
        # NOTE(review): tmp_stderr is opened but never passed to Popen;
        # both stderr and stdout of bowtie-build go to /dev/null instead.
        proc = subprocess.Popen(
            args=cmd1, shell=True, cwd=tmp_index_dir, stderr=FNULL, stdout=FNULL
        )
        returncode = proc.wait()
        tmp_stderr.close()
        FNULL.close()
        sys.stdout.write(cmd1 + "\n")
    except Exception as e:
        # clean up temp dir
        if os.path.exists(tmp_index_dir):
            shutil.rmtree(tmp_index_dir)
        stop_err("Error indexing reference sequence\n" + str(e))
    # no Cleaning if no Exception, tmp_index_dir has to be cleaned after
    # bowtie_alignment()
    # bowtie fashion path without extention
    index_full_path = os.path.join(tmp_index_dir, ref_file_name)
    return tmp_index_dir, index_full_path
def bowtie_alignment(command_line, flyPreIndexed=""):
    """Run the bowtie *command_line*, handling BAM output specially.

    When the command pipes into samtools, bowtie first writes an unsorted
    BAM into a temp directory, which is then sorted into the final target.
    *flyPreIndexed* is the path of an on-the-fly index directory created by
    bowtie_squash(); it is removed here after the alignment.
    """
    # make temp directory just for stderr
    tmp_index_dir = tempfile.mkdtemp()
    tmp = tempfile.NamedTemporaryFile(dir=tmp_index_dir).name
    tmp_stderr = open(tmp, "wb")
    # conditional statement for sorted bam generation viewable in Trackster
    if "samtools" in command_line:
        # recover the final output file name
        target_file = command_line.split()[-1]
        path_to_unsortedBam = os.path.join(tmp_index_dir, "unsorted.bam")
        path_to_sortedBam = os.path.join(tmp_index_dir, "unsorted.bam.sorted")
        # Rewrite "... | samtools view -bS - > target" so samtools writes the
        # unsorted BAM to a temp file instead of the final target.
        first_command_line = (
            " ".join(command_line.split()[:-3]) + " -o " + path_to_unsortedBam + " - "
        )
        # example: bowtie -v 0 -M 1 --best --strata -p 12 --suppress 6,7,8 -S
        # /home/galaxy/galaxy-dist/bowtie/Dmel/dmel-all-chromosome-r5.49 -f
        # /home/galaxy/galaxy-dist/database/files/003/dataset_3460.dat
        # |samtools view -bS -o /tmp/tmp_PgMT0/unsorted.bam -
        # generates an "unsorted.bam.sorted.bam file", NOT an
        # "unsorted.bam.sorted" file
        second_command_line = "samtools sort %s %s" % (
            path_to_unsortedBam,
            path_to_sortedBam,
        )
        # fileno() method return the file descriptor number of tmp_stderr
        p = subprocess.Popen(
            args=first_command_line,
            cwd=tmp_index_dir,
            shell=True,
            stderr=tmp_stderr.fileno(),
        )
        returncode = p.wait()
        sys.stdout.write("%s\n" % first_command_line + str(returncode))
        p = subprocess.Popen(
            args=second_command_line,
            cwd=tmp_index_dir,
            shell=True,
            stderr=tmp_stderr.fileno(),
        )
        returncode = p.wait()
        sys.stdout.write("\n%s\n" % second_command_line + str(returncode))
        if os.path.isfile(path_to_sortedBam + ".bam"):
            shutil.copy2(path_to_sortedBam + ".bam", target_file)
    else:
        # Plain tabular/sam output: run the command as given.
        p = subprocess.Popen(args=command_line, shell=True, stderr=tmp_stderr.fileno())
        returncode = p.wait()
        sys.stdout.write(command_line + "\n")
    tmp_stderr.close()
    # cleaning if the index was created in the fly
    if os.path.exists(flyPreIndexed):
        shutil.rmtree(flyPreIndexed)
    # cleaning tmp files and directories
    if os.path.exists(tmp_index_dir):
        shutil.rmtree(tmp_index_dir)
    return
def __main__():
    """Script entry point: parse arguments, build the bowtie command, run it."""
    args = Parser()
    # NOTE(review): the output file is opened here (and closed at the end)
    # even though bowtie writes to it via shell redirection; this guarantees
    # the file exists/is truncated before the alignment runs — confirm intent.
    F = open(args.output, "w")
    if args.index_from == "history":
        # Build a temporary bowtie index from the history fasta file.
        tmp_dir, index_path = bowtie_squash(args.index_source)
    else:
        # Pre-built index: the placeholder tmp_dir never exists, so no cleanup.
        tmp_dir, index_path = "dummy/dymmy", args.index_source
    command_line = bowtieCommandLiner(
        args.method,
        args.v_mismatches,
        args.output_format,
        args.aligned,
        args.unaligned,
        args.input_format,
        args.input,
        index_path,
        args.output,
        args.num_threads,
    )
    bowtie_alignment(command_line, flyPreIndexed=tmp_dir)
    F.close()
# Run the wrapper when invoked as a script.
if __name__ == "__main__":
    __main__()
<|endoftext|> |
<|endoftext|>#!/usr/bin/python
#
import sys


def fasta_to_fastq(input_path, output_path):
    """Convert a FASTA file to FASTQ with dummy 'H' quality scores.

    Each header line ">name" becomes "@HTW-name"; every sequence line is
    followed by a "+" separator and a quality string of 'H' of equal length.

    Fixes over the original script: uses context managers instead of bare
    open/close, avoids shadowing the builtin ``input``, and strips only the
    trailing newline (the original ``line[:-1]`` ate the last base of a
    final line that had no newline).
    """
    with open(input_path, "r") as infile, open(output_path, "w") as outfile:
        for line in infile:
            line = line.rstrip("\n")
            if line.startswith(">"):
                print("@HTW-" + line[1:], file=outfile)
            else:
                print(line, file=outfile)
                print("+", file=outfile)
                print("H" * len(line), file=outfile)


# Guarded so the converter can be imported without side effects; the
# command-line interface (argv[1] = input, argv[2] = output) is unchanged.
if __name__ == "__main__":
    fasta_to_fastq(sys.argv[1], sys.argv[2])
<|endoftext|> |
<|endoftext|>"""
Verbose demonstration of how to set up a server and run a remote game.
For all practical needs, using the simplesetup module should be sufficient.
"""
import sys
import subprocess
from pelita.simplesetup import SimpleServer, SimplePublisher, SimpleController
import logging
from pelita.ui.tk_viewer import TkViewer
try:
import colorama
MAGENTA = colorama.Fore.MAGENTA
RESET = colorama.Fore.RESET
except ImportError:
MAGENTA = ""
RESET = ""
def get_python_process():
    """Return the path of the currently running Python interpreter.

    Raises RuntimeError when it cannot be determined (e.g. embedded
    interpreters where sys.executable is empty).
    """
    interpreter = sys.executable
    if interpreter:
        return interpreter
    raise RuntimeError("Cannot retrieve current Python executable.")
# Log format: timestamp + logger/level/function, with the message wrapped in
# magenta when colorama is available (MAGENTA/RESET are empty otherwise).
FORMAT = (
    "[%(asctime)s,%(msecs)03d][%(name)s][%(levelname)s][%(funcName)s]"
    + MAGENTA
    + " %(message)s"
    + RESET
)
logging.basicConfig(format=FORMAT, datefmt="%H:%M:%S", level=logging.INFO)
# Small demo maze; digits mark the bots' starting positions.
layout = """ ##################
        #0#. . 2# . 3 #
        # ##### ##### #
        # . # . .#1#
        ################## """
# The server accepts the two team clients on ports 50007/50008.
server = SimpleServer(
    layout_string=layout, rounds=200, bind_addrs=("tcp://*:50007", "tcp://*:50008")
)
# Publish the game state on port 50012 for viewers to subscribe to.
publisher = SimplePublisher("tcp://*:50012")
server.game_master.register_viewer(publisher)
# NOTE(review): this alias is never used below — possibly leftover; confirm.
subscribe_sock = server
# Launch the Tk viewer in a separate Python process: it subscribes to the
# publisher (50012) and sends control commands to the controller (50013).
tk_open = "TkViewer(%r, %r).run()" % ("tcp://localhost:50012", "tcp://localhost:50013")
tkprocess = subprocess.Popen(
    [get_python_process(), "-c", "from pelita.ui.tk_viewer import TkViewer\n" + tk_open]
)
try:
    print((server.bind_addresses))
    server.register_teams()
    # The controller drives the game loop until the game finishes.
    controller = SimpleController(server.game_master, "tcp://*:50013")
    controller.run()
    server.exit_teams()
except KeyboardInterrupt:
    # Make sure the viewer subprocess does not outlive an aborted run.
    tkprocess.kill()
<|endoftext|> |
<|endoftext|># Main entry point for the plugin.
# Author: Yuri van Geffen
import sublime, sublime_plugin
import os
import threading
import queue
import asyncore
import socket
from itertools import chain
import re
# Persisted plugin settings (written back in plugin_unloaded).
settings = sublime.load_settings("subdebug")

# Address the MobDebug client is expected to connect to.
TCP_IP = "127.0.0.1"
TCP_PORT = 8172
# Maximum number of bytes read per incoming message.
BUFFER_SIZE = 1024
# Base directory the debugged lua scripts are resolved against.
BASEDIR = settings.get("basedir", "")
# Whether a freshly connected client should STEP (pause) or RUN immediately.
STEP_ON_CONNECT = settings.get("step_on_connect", False)
# Handles incoming and outgoing messages for the MobDebug client
class SubDebugHandler(asyncore.dispatcher):
    """Handles incoming and outgoing messages for one MobDebug client."""

    def __init__(self, socket, handler_id):
        asyncore.dispatcher.__init__(self, socket)
        self.handler_id = handler_id
        # Start the client stepping or running, then replay every known
        # breakpoint so the new session matches the editor state.
        msg_queue.put(b"STEP\n" if STEP_ON_CONNECT else b"RUN\n")
        for view_name, row in state_handler.breakpoints():
            msg_queue.put("SETB {0} {1}\n".format(view_name, row).encode("latin-1"))

    # Reads the message-code of incoming messages and passes
    # them to the right function
    def handle_read(self):
        data = self.recv(BUFFER_SIZE)
        if data:
            print((self.handler_id, "Received: ", data))
            # The first whitespace-separated token is the message code.
            split = data.split()
            if split[0] in message_parsers:
                message_parsers[split[0]](split)

    def handle_write(self):
        # Drain one queued outgoing message per writable event.
        if not msg_queue.empty():
            msg = msg_queue.get()
            print(("Sending: ", msg))
            self.send(msg)

    def handle_error(self):
        # Re-raise instead of asyncore's default logging, to surface bugs.
        raise
# Starts listening on TCP_PORT and accepts incoming connections
# before passing them to an instance of SubDebugHandler
class SubDebugServer(asyncore.dispatcher):
    """Listens on (host, port) and hands each accepted connection to a
    SubDebugHandler with a unique, increasing handler id."""

    def __init__(self, host, port):
        asyncore.dispatcher.__init__(self)
        self.handler_id = 0
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((host, port))
        self.listen(1)
        print(("Started listening on: ", host, ":", port))

    def handle_accept(self):
        """Accept an incoming connection and wrap it in a SubDebugHandler."""
        pair = self.accept()
        if pair is not None:
            (conn_sock, client_address) = pair
            print(("Incoming connection: ", client_address))
            # FIX: the original passed `++self.handler_id`, which in Python is
            # just double unary plus and never incremented — every handler
            # received id 0. Increment explicitly instead.
            self.handler_id += 1
            SubDebugHandler(conn_sock, self.handler_id)

    def handle_close(self):
        print("Closing server.")
        self.close()

    def handle_error(self):
        # Any unexpected error shuts the server down.
        self.close()
# Lets the user run the script (until breakpoint)
class RunCommand(sublime_plugin.WindowCommand):
    """Sublime command: resume the debuggee until the next breakpoint."""

    def run(self):
        print("Running until breakpoint...")
        msg_queue.put(b"RUN\n")
        # The current-line marker is stale once execution resumes.
        state_handler.remove_line_marker()
# Lets the user step to the next line
class StepCommand(sublime_plugin.WindowCommand):
    """Sublime command: step the debuggee to the next line."""

    def run(self):
        print("Stepping to next line...")
        msg_queue.put(b"STEP\n")
# Lets the user step to the next line
class ToggleBreakpointCommand(sublime_plugin.TextCommand):
    """Sublime command: toggle a breakpoint on the line under the cursor."""

    def run(self, edit):
        view_name = simplify_path(self.view.file_name())
        # rowcol() is 0-based; MobDebug line numbers are 1-based, hence +1.
        row, _ = self.view.rowcol(self.view.sel()[0].begin())
        print(("Toggling breakpoint:", view_name, row))
        state_handler.toggle_breakpoint(view_name, row + 1)
# Lets the user pick a base directory from where the lua is executed
class SetBasedirCommand(sublime_plugin.WindowCommand):
    """Sublime command: pick the base directory the lua is executed from."""

    def run(self):
        # Ran if the user want to choose their own base directory
        def choose_other(path):
            global BASEDIR
            # Normalise to forward slashes and ensure a trailing slash.
            BASEDIR = path.replace("\\", "/")
            if BASEDIR[-1] != "/":
                BASEDIR += "/"
            print(("BASEDIR:", BASEDIR))

        # Ran if the user has chosen a base directory option
        def selected_folder(index):
            global BASEDIR
            if index != -1:  # The last option lets the user choose a base dir themself
                if index == len(folders) - 1:
                    sublime.active_window().show_input_panel(
                        "Give the base directory path.",
                        BASEDIR,
                        choose_other,
                        None,
                        None,
                    )
                else:
                    BASEDIR = folders[index] + "/"
                    state_handler.clear_state()
                    print(("BASEDIR:", BASEDIR))

        # Offer every folder open in any Sublime window, plus a free-form option.
        folders = list(chain.from_iterable([w.folders() for w in sublime.windows()]))
        folders = [f.replace("\\", "/") for f in folders]
        folders.insert(len(folders), "Choose other directory...")
        sublime.active_window().show_quick_panel(folders, selected_folder)
# Lets the user step to the next line
class ToggleStepOnConnectCommand(sublime_plugin.WindowCommand):
    """Sublime command: toggle whether a new client immediately steps or runs."""

    def run(self):
        global STEP_ON_CONNECT
        STEP_ON_CONNECT = not STEP_ON_CONNECT
        print(("Step on connect:", STEP_ON_CONNECT))

    def is_checked(self):
        # Rendered as a checkbox in the menu; coerce a possibly-None setting
        # value to a proper bool.
        return STEP_ON_CONNECT or False
# =========Incomming message parsers=========#
# Called when the "202 Paused" message is received
def paused_command(args):
    """Handle a '202 Paused' message: mark the paused line in the editor.

    args[2] is the file name (bytes) and args[3] the 1-based line number.
    """
    state_handler.set_line_marker(args[2].decode("utf-8"), int(args[3]))
# Mapping from incoming MobDebug message codes to their parser functions.
message_parsers = {
    b"202": paused_command,
}
# ===========================================#
class StateHandler:
    """Tracks breakpoints and the current-line marker per view, and mirrors
    that state into Sublime regions (gutter icons)."""

    # Initiates object by checking which views are available and
    # clearing the state
    def __init__(self):
        self.clear_state()
        self.update_regions()

    def clear_state(self):
        """Forget all breakpoints and markers, then redraw every view."""
        self.state = {}
        self.update_regions()

    # Gets all available views in sublime and adds the missing ones to the state
    def add_missing_views(self):
        views = [v for v in sum([w.views() for w in sublime.windows()], [])]
        self.views = {
            simplify_path(v.file_name()): v for v in views if v.file_name() != None
        }
        print((self.views))
        for view_name, view in list(self.views.items()):
            if view_name not in self.state:
                self.state[view_name] = []

    # Updates all views with the available state-objects using the
    # assigned functions
    def update_regions(self):
        self.add_missing_views()
        # Iterate over all files in the state
        for view_name, regions in list(self.state.items()):
            # Remove all old regions
            for reg_type_name in self.region_types:
                self.views[view_name].erase_regions(reg_type_name)
            region_sets = {}
            # Iterate over all regions in that file
            for reg_type, line in regions:
                # A breakpoint icon is suppressed when the line marker sits
                # on the same line (the marker takes visual precedence).
                if reg_type == "line_marker" or ("line_marker", line) not in regions:
                    if reg_type not in region_sets:
                        region_sets[reg_type] = []
                    region_sets[reg_type].append(
                        sublime.Region(self.views[view_name].text_point(line - 1, 0))
                    )
            # Register all new regions except the line-marker with sublime
            for reg_name, v in list(region_sets.items()):
                print(("Adding region:", view_name, reg_name, v))
                self.views[view_name].add_regions(
                    reg_name, v, *self.region_types[reg_name]
                )

    def set_line_marker(self, view_name, line_number):
        """Place the 'currently paused here' marker in *view_name*."""
        view_name = simplify_path(view_name)
        print(("Setting line marker:", view_name, line_number))
        self.add_missing_views()
        if view_name in self.views:
            self.state.setdefault(view_name, [])
            # Only one line marker can exist at a time: drop any old one.
            self.state[view_name] = [
                (k, v) for k, v in self.state[view_name] if k != "line_marker"
            ]
            self.state[view_name].append(("line_marker", line_number))
            self.update_regions()

    def remove_line_marker(self):
        """Remove the line marker from every view (used when resuming)."""
        for name, view in list(self.state.items()):
            self.state[name] = [(t, n) for t, n in view if t != "line_marker"]
        self.update_regions()

    def toggle_breakpoint(self, view_name, line_number):
        """Add or remove a breakpoint at (view_name, line_number)."""
        self.add_missing_views()
        if (
            view_name in self.views
            and ("breakpoint", line_number) in self.state[view_name]
        ):
            self.remove_breakpoint(view_name, line_number)
        else:
            self.set_breakpoint(view_name, line_number)
        self.update_regions()

    def set_breakpoint(self, view_name, line_number):
        """Record a breakpoint locally and announce it to the debuggee."""
        self.state.setdefault(view_name, [])
        self.state[view_name].append(("breakpoint", line_number))
        msg_queue.put("SETB {0} {1}\n".format(view_name, line_number).encode("latin-1"))

    def remove_breakpoint(self, view_name, line_number):
        """Drop a breakpoint locally and tell the debuggee to delete it."""
        self.state[view_name].remove(("breakpoint", line_number))
        msg_queue.put("DELB {0} {1}\n".format(view_name, line_number).encode("latin-1"))

    def breakpoints(self):
        """Return all breakpoints as a list of (view_name, line) tuples."""
        ret = []
        for k, v in list(self.state.items()):
            for t in v:
                if t[0] == "breakpoint":
                    ret.append((k, t[1]))
        return ret

    # Class-level defaults; instances shadow these with their own attributes
    # in __init__ / add_missing_views.
    views = {}
    state = {}
    # region name -> (scope, gutter icon) passed to view.add_regions().
    region_types = {
        "breakpoint": ("keyword", "circle"),
        "line_marker": ("keyword", "bookmark"),
    }
def plugin_unloaded():
    """Sublime unload hook: persist the settings and stop the debug server."""
    settings.set("basedir", BASEDIR)
    settings.set("step_on_connect", STEP_ON_CONNECT)
    print("Closing down the server...")
    server.close()
def simplify_path(path):
    """Normalise *path*: forward slashes, relative to BASEDIR, no .lua suffix.

    This is the canonical key used for views/breakpoints throughout the plugin.
    """
    path = path.replace("\\", "/").replace(BASEDIR, "")
    # FIX: use a raw string for the regex — "\.lua$" relies on an invalid
    # escape sequence ("\." is not recognised) and warns on modern Python.
    path = re.sub(r"\.lua$", "", path)  # Strip ".lua" from the path
    return path
# Open a threadsafe message queue for outgoing MobDebug commands.
msg_queue = queue.Queue()

state_handler = StateHandler()

# Start listening and open the asyncore loop
server = SubDebugServer(TCP_IP, TCP_PORT)

# poll() is only available on POSIX; fall back to select() elsewhere.
if os.name == "posix":
    thread = threading.Thread(target=asyncore.loop, kwargs={"use_poll": True})
else:
    thread = threading.Thread(target=asyncore.loop)
thread.start()
<|endoftext|> |
<|endoftext|>from django.contrib import sitemaps
from django.core.urlresolvers import reverse
class StaticViewSitemap(sitemaps.Sitemap):
    """Sitemap entries for the site's static, always-present pages."""

    priority = 0.5
    changefreq = "monthly"

    def items(self):
        # Names of the url patterns that make up the static pages.
        return "landpage robots humans google_plus_verify terms privacy".split()

    def location(self, item):
        # Resolve each named url pattern to its path.
        return reverse(item)
# https://docs.djangoproject.com/en/1.8/ref/contrib/sitemaps/
<|endoftext|> |
<|endoftext|>from django.conf.urls import patterns, include, url
from publisher.views import catalog
from publisher.views import my_publication
from publisher.views import publication
# URL routes for the publisher app: public catalog/publication pages plus
# the owner-only "my publications" management endpoints.
urlpatterns = patterns(
    "",
    # Publications(s) — (\d+) captures the publication id.
    url(r"^publish$", catalog.catalog_page),
    url(r"^publication/(\d+)$", publication.publication_page),
    url(r"^publication/(\d+)/peer_review_modal$", publication.peer_review_modal),
    url(r"^publication/(\d+)/save_peer_review$", publication.save_peer_review),
    url(r"^publication/(\d+)/delete_peer_review$", publication.delete_peer_review),
    # My Publications
    url(r"^my_publications$", my_publication.my_publications_page),
    url(r"^refresh_publications_table$", my_publication.refresh_publications_table),
    url(r"^my_publication_modal$", my_publication.my_publication_modal),
    url(r"^save_publication$", my_publication.save_publication),
    url(r"^delete_publication$", my_publication.delete_publication),
)
<|endoftext|> |
<|endoftext|>from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.http import QueryDict
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.conf.urls.static import static, settings
import json
from registrar.models import Course
from registrar.models import Teacher
from registrar.models import Student
from registrar.models import Assignment
from registrar.models import AssignmentSubmission
from registrar.models import Quiz
from registrar.models import QuizSubmission
from registrar.models import Exam
from registrar.models import ExamSubmission
from registrar.models import EssayQuestion
from registrar.models import EssaySubmission
from registrar.models import MultipleChoiceQuestion
from registrar.models import MultipleChoiceSubmission
from registrar.models import ResponseQuestion
from registrar.models import ResponseSubmission
from registrar.models import TrueFalseQuestion
from registrar.models import TrueFalseSubmission
from registrar.models import PeerReview
from student.views import assignment
from student.views import quiz
from student.views import exam
from student.views import credit
# Credentials of the throwaway user created in setUp and deleted in tearDown.
TEST_USER_EMAIL = "ledo@gah.com"
TEST_USER_USERNAME = "Ledo"
TEST_USER_PASSWORD = "password"
class CreditTestCase(TestCase):
    """Integration tests for the student credit page and application endpoint.

    setUp builds one course with a single assignment (one question of each
    supported type), a quiz and a final exam; the tests then submit
    coursework through the student-facing URLs and apply for course credit.
    """

    def tearDown(self):
        # Remove the rows created in setUp so every test starts clean.
        courses = Course.objects.all()
        for course in courses:
            course.delete()
        User.objects.get(email=TEST_USER_EMAIL).delete()

    def setUp(self):
        # Create our Student (the same user also acts as the course teacher).
        User.objects.create_user(
            email=TEST_USER_EMAIL,
            username=TEST_USER_USERNAME,
            password=TEST_USER_PASSWORD,
        )
        user = User.objects.get(email=TEST_USER_EMAIL)
        teacher = Teacher.objects.create(user=user)
        Student.objects.create(user=user)
        # Create a test course.
        course = Course.objects.create(
            id=1,
            title="Comics Book Course",
            sub_title="The definitive course on comics!",
            category="",
            teacher=teacher,
        )
        # Create our assignment.  Named course_assignment so it does not
        # shadow the student.views.assignment module imported above.
        course_assignment = Assignment.objects.create(
            assignment_id=1,
            assignment_num=1,
            title="Hideauze",
            description="Anime related assignment.",
            worth=25,
            course=course,
        )
        # One question of each supported type.
        EssayQuestion.objects.create(
            question_id=1,
            assignment=course_assignment,
            title="Evolvers",
            description="Write an essay about the Evolvers.",
        )
        MultipleChoiceQuestion.objects.create(
            question_id=2,
            assignment=course_assignment,
            title="Hideauze",
            description="Who where the Hideauze?",
            a="Former Humans",
            a_is_correct=True,
            b="Aliens",
            b_is_correct=False,
            c="Magical or Supernatural Creatures",
            c_is_correct=False,
            d="Dark Elves",
            d_is_correct=False,
            e="Heavenly Creatures",
            e_is_correct=False,
        )
        TrueFalseQuestion.objects.create(
            question_id=3,
            assignment=course_assignment,
            title="Hideauze",
            description="Where the Hideauze human?",
            true_choice="Yes, former humans",
            false_choice="No, aliens",
            answer=True,
        )
        ResponseQuestion.objects.create(
            question_id=4,
            assignment=course_assignment,
            title="Hideauze",
            description="Why did humanity migrate off-world?",
            answer="Because of solar hibernation causing Global Cooling on Earth.",
        )
        # Create our quiz with a single true/false question.
        course_quiz = Quiz.objects.create(
            quiz_id=1,
            quiz_num=1,
            title="Hideauze",
            description="Anime related assignment.",
            worth=25,
            course=course,
        )
        TrueFalseQuestion.objects.create(
            question_id=5,
            quiz=course_quiz,
            title="Hideauze",
            description="Where the Hideauze human?",
            true_choice="Yes, former humans",
            false_choice="No, aliens",
            answer=True,
        )
        # Create our final exam with a single multiple-choice question.
        course_exam = Exam.objects.create(
            exam_id=1,
            exam_num=1,
            title="Hideauze",
            description="Anime related assignment.",
            worth=50,
            course=course,
            is_final=True,
        )
        MultipleChoiceQuestion.objects.create(
            question_id=6,
            exam=course_exam,
            title="Hideauze",
            description="Who where the Hideauze?",
            a="Former Humans",
            a_is_correct=True,
            b="Aliens",
            b_is_correct=False,
            c="Magical or Supernatural Creatures",
            c_is_correct=False,
            d="Orcs",
            d_is_correct=False,
            e="Heavenly Creatures",
            e_is_correct=False,
        )

    def get_logged_in_client(self):
        # Fresh test client authenticated as the test user.
        client = Client()
        client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD)
        return client

    def submit_passing_coursework(self, client, kwargs):
        """Submit passing answers for the assignment, quiz and final exam.

        Shared by the credit-application tests below; ``kwargs`` carries the
        AJAX header the submission views expect.
        """
        file_path = settings.MEDIA_ROOT + "/sample.pdf"
        with open(file_path, "rb") as fp:
            self.assertTrue(fp is not None)
            # Assignment answers: essay, multiple choice, true/false, response.
            client.post(
                "/course/1/assignment/1/submit_e_assignment_answer",
                {"question_id": 1, "file": fp},
                **kwargs
            )
            client.post(
                "/course/1/assignment/1/submit_mc_assignment_answer",
                {
                    "question_id": 2,
                    "answer": "A",
                },
                **kwargs
            )
            client.post(
                "/course/1/assignment/1/submit_tf_assignment_answer",
                {
                    "question_id": 3,
                    "answer": "true",
                },
                **kwargs
            )
            client.post(
                "/course/1/assignment/1/submit_r_assignment_answer",
                {
                    "question_id": 4,
                    "answer": "Because of Global Cooling caused by abnormal solar hibernation.",
                },
                **kwargs
            )
            client.post("/course/1/assignment/1/submit_assignment", {}, **kwargs)
            # Quiz
            client.post(
                "/course/1/quiz/1/submit_tf_quiz_answer",
                {
                    "question_id": 5,
                    "answer": "true",
                },
                **kwargs
            )
            client.post("/course/1/quiz/1/submit_quiz", {}, **kwargs)
            # Exam
            client.post(
                "/course/1/exam/1/submit_mc_exam_answer",
                {
                    "question_id": 6,
                    "answer": "A",
                },
                **kwargs
            )
            client.post("/course/1/exam/1/submit_exam", {}, **kwargs)

    def delete_essay_submissions(self):
        """Best-effort cleanup of the essay submissions a test may create."""
        for submission_id in (1, 2):
            try:
                EssaySubmission.objects.get(submission_id=submission_id).delete()
            except EssaySubmission.DoesNotExist:
                pass

    def test_url_resolves_to_credit_page_view(self):
        found = resolve("/course/1/credit")
        self.assertEqual(found.func, credit.credit_page)

    def test_credit_page_with_no_submissions(self):
        client = self.get_logged_in_client()
        response = client.post("/course/1/credit")
        self.assertEqual(response.status_code, 200)
        self.assertIn(b"Comics Book Course", response.content)
        self.assertIn(b"ajax_submit_credit_application();", response.content)

    def test_url_resolves_to_submit_json(self):
        found = resolve("/course/1/submit_credit_application")
        self.assertEqual(found.func, credit.submit_credit_application)

    def test_submit_credit_application_on_no_failing_criteria(self):
        # Applying for credit without any coursework must be rejected.
        kwargs = {"HTTP_X_REQUESTED_WITH": "XMLHttpRequest"}
        client = self.get_logged_in_client()
        response = client.post(
            "/course/1/submit_credit_application",
            {
                "assignment_id": 1,
            },
            **kwargs
        )
        json_string = response.content.decode(encoding="UTF-8")
        array = json.loads(json_string)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(array["status"], "failure")
        self.assertEqual(array["message"], "you need to pass with at minimum 50%")

    def test_submit_credit_application_on_passing_criteria_without_peer_reviews(self):
        kwargs = {"HTTP_X_REQUESTED_WITH": "XMLHttpRequest"}
        client = self.get_logged_in_client()
        self.submit_passing_coursework(client, kwargs)
        # Test
        response = client.post(
            "/course/1/submit_credit_application",
            {
                "assignment_id": 1,
            },
            **kwargs
        )
        json_string = response.content.decode(encoding="UTF-8")
        array = json.loads(json_string)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(array["status"], "success")
        self.assertEqual(array["message"], "credit granted")
        # Cleanup
        self.delete_essay_submissions()

    def test_submit_credit_application_on_passing_criteria_with_peer_reviews(self):
        kwargs = {"HTTP_X_REQUESTED_WITH": "XMLHttpRequest"}
        client = self.get_logged_in_client()
        self.submit_passing_coursework(client, kwargs)
        # Peer Reviews
        client.post(
            "/course/1/peer_review/1/save_peer_review",
            {
                "question_id": 1,
                "question_type": settings.ESSAY_QUESTION_TYPE,
                "submission_id": 1,
                "marks": 5,
            },
            **kwargs
        )
        client.post(
            "/course/1/peer_review/1/save_peer_review",
            {
                "question_id": 4,
                "question_type": settings.RESPONSE_QUESTION_TYPE,
                "submission_id": 1,
                "marks": 5,
            },
            **kwargs
        )
        # Test
        response = client.post(
            "/course/1/submit_credit_application",
            {
                "assignment_id": 1,
            },
            **kwargs
        )
        json_string = response.content.decode(encoding="UTF-8")
        array = json.loads(json_string)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(array["status"], "success")
        self.assertEqual(array["message"], "credit granted")
        # Cleanup
        self.delete_essay_submissions()
<|endoftext|> |
<|endoftext|># Django & Python
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.http import QueryDict
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.conf.urls.static import static, settings
import json
# Modal
from registrar.models import Teacher
from registrar.models import Course
from registrar.models import Announcement
from registrar.models import Syllabus
from registrar.models import Policy
from registrar.models import Lecture
from registrar.models import Assignment
from registrar.models import Quiz
from registrar.models import Exam
from registrar.models import CourseSubmission
# View
from teacher.views import overview
# Contants
TEST_USER_EMAIL = "ledo@gah.com"
TEST_USER_USERNAME = "Ledo"
TEST_USER_PASSWORD = "ContinentalUnion"
TEST_USER_EMAIL2 = "whalesquid@hideauze.com"
TEST_USER_USERNAME2 = "whalesquid"
TEST_USER_PASSWORD2 = "Evolvers"
class OverviewTestCase(TestCase):
    """Integration tests for the teacher course-overview page.

    setUp creates an unrelated second teacher ("Trudy") plus the main test
    teacher, who owns course 1; the tests render the overview page and
    submit the course for review.
    """

    def tearDown(self):
        # Delete file-backed rows individually so their delete() hooks run,
        # then remove the remaining courses and users.
        syllabuses = Syllabus.objects.all()
        for syllabus in syllabuses:
            syllabus.delete()
        policies = Policy.objects.all()
        for policy in policies:
            policy.delete()
        courses = Course.objects.all()
        for course in courses:
            course.delete()
        User.objects.all().delete()

    def setUp(self):
        # Create our Trudy user (a second, unrelated teacher account).
        trudy = User.objects.create_user(
            email=TEST_USER_EMAIL2,
            username=TEST_USER_USERNAME2,
            password=TEST_USER_PASSWORD2,
        )
        Teacher.objects.create(user=trudy)
        # Create our Teacher and the course under test.
        user = User.objects.create_user(
            email=TEST_USER_EMAIL,
            username=TEST_USER_USERNAME,
            password=TEST_USER_PASSWORD,
        )
        teacher = Teacher.objects.create(user=user)
        Course.objects.create(
            id=1,
            title="Comics Book Course",
            sub_title="The definitive course on comics!",
            category="",
            teacher=teacher,
        )

    def populate_course_content(self, client, kwargs):
        """Fill course 1 with every content type submit_course_for_review needs.

        ``client`` and ``kwargs`` are unused but kept for signature
        compatibility with existing callers.
        """
        course = Course.objects.get(id=1)
        Announcement.objects.create(
            announcement_id=1,
            course=course,
            title="Hello world!",
            body="This is the body of the message.",
        )
        # sample.pdf only has to exist on disk; the file fields themselves
        # are intentionally left empty.
        file_path = settings.MEDIA_ROOT + "/sample.pdf"
        with open(file_path, "rb") as fp:
            self.assertTrue(fp is not None)
            Syllabus.objects.create(
                syllabus_id=1,
                file="",
                course=course,
            )
        with open(file_path, "rb") as fp:
            self.assertTrue(fp is not None)
            Policy.objects.create(
                policy_id=1,
                file="",
                course=course,
            )
        Lecture.objects.create(
            lecture_id=1,
            lecture_num=1,
            week_num=1,
            title="Blade vs Evil",
            description="Fighting for the destiny of the Earth.",
            course=course,
        )
        Lecture.objects.create(
            lecture_id=2,
            lecture_num=2,
            week_num=1,
            title="Blade vs Evil",
            description="Fighting for the destiny of the Earth.",
            course=course,
        )
        Assignment.objects.create(
            assignment_id=1,
            assignment_num=1,
            title="Hideauze",
            description="Anime related assignment.",
            worth=25,
            course=course,
        )
        Quiz.objects.create(
            quiz_id=1,
            quiz_num=1,
            title="Hideauze",
            description="Anime related assignment.",
            worth=25,
            course=course,
        )
        Exam.objects.create(
            exam_id=1,
            exam_num=1,
            title="Hideauze",
            description="Anime related assignment.",
            worth=50,
            course=course,
            is_final=True,
        )

    def delete_course_content(self):
        """Remove any course content a test created, ignoring missing rows."""
        for content_id in range(1, 10):
            # Syllabus
            try:
                Syllabus.objects.get(syllabus_id=content_id).delete()
            except Syllabus.DoesNotExist:
                pass
            # Policy
            try:
                Policy.objects.get(policy_id=content_id).delete()
            except Policy.DoesNotExist:
                pass
            # Announcement.  The original always looked up announcement_id=1
            # here; use the loop id so every announcement is covered.
            try:
                Announcement.objects.get(announcement_id=content_id).delete()
            except Announcement.DoesNotExist:
                pass

    def get_logged_in_client(self):
        # Fresh test client authenticated as the main teacher.
        client = Client()
        client.login(username=TEST_USER_USERNAME, password=TEST_USER_PASSWORD)
        return client

    def test_url_resolves_to_overview_page_view(self):
        found = resolve("/teacher/course/1/overview")
        self.assertEqual(found.func, overview.overview_page)

    def test_overview_page(self):
        client = self.get_logged_in_client()
        response = client.post("/teacher/course/1/overview")
        self.assertEqual(response.status_code, 200)
        self.assertIn(b"Comics Book Course", response.content)
        self.assertIn(b"ajax_submit_course()", response.content)

    def test_submit_course_for_review(self):
        client = self.get_logged_in_client()
        kwargs = {"HTTP_X_REQUESTED_WITH": "XMLHttpRequest"}
        # Create course content.
        self.populate_course_content(client, kwargs)
        response = client.post(
            "/teacher/course/1/submit_course_for_review", {}, **kwargs
        )
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding="UTF-8")
        array = json.loads(json_string)
        self.assertEqual(array["message"], "submitted course review")
        self.assertEqual(array["status"], "success")
        # Delete course content.
        self.delete_course_content()
<|endoftext|> |
<|endoftext|>"""added goal properties
Revision ID: 5018059c5c8f
Revises: 16b4a243d41d
Create Date: 2015-09-23 11:56:01.897992
"""
# revision identifiers, used by Alembic.
revision = "5018059c5c8f"
down_revision = "16b4a243d41d"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the goal-property tables and add a ``name`` column to goals."""
    ### commands auto generated by Alembic - please adjust! ###
    # Catalogue of available goal properties.
    op.create_table(
        "goalproperties",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=255), nullable=False),
        sa.Column("is_variable", sa.Boolean(), nullable=False),
        sa.PrimaryKeyConstraint("id"),
    )
    # Association table: per-level property values for a goal; a value is
    # either a literal string or a reference into translationvariables.
    op.create_table(
        "goals_goalproperties",
        sa.Column("goal_id", sa.Integer(), nullable=False),
        sa.Column("property_id", sa.Integer(), nullable=False),
        sa.Column("value", sa.String(length=255), nullable=True),
        sa.Column("value_translation_id", sa.Integer(), nullable=True),
        sa.Column("from_level", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(["goal_id"], ["goals.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(
            ["property_id"], ["goalproperties.id"], ondelete="CASCADE"
        ),
        sa.ForeignKeyConstraint(
            ["value_translation_id"], ["translationvariables.id"], ondelete="RESTRICT"
        ),
        sa.PrimaryKeyConstraint("goal_id", "property_id", "from_level"),
    )
    # server_default="" lets the NOT NULL column be added to existing rows.
    op.add_column(
        "goals",
        sa.Column("name", sa.String(length=255), nullable=False, server_default=""),
    )
    ### end Alembic commands ###
def downgrade():
    """Revert upgrade(): drop the column first, then dependent tables."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("goals", "name")
    # goals_goalproperties references goalproperties, so it must go first.
    op.drop_table("goals_goalproperties")
    op.drop_table("goalproperties")
    ### end Alembic commands ###
<|endoftext|> |
<|endoftext|># Demonstration of `applib` features
import logging
from applib.base import Cmdln, Application
from applib.misc import require_option
from applib import textui, sh, _cmdln as cmdln
LOG = logging.getLogger(__name__)
application = Application("demo-app", "CompanyNameHere", "1.2")
@cmdln.option("", "--foo", action="store_true", help="*must pass --foo")
class Commands(Cmdln):
name = "demo-app"
def initialize(self):
require_option(self.options, "foo")
@cmdln.alias("cd")
@cmdln.option(
"-t", "--show-time", action="store_true", help="Also show the current time"
)
def do_currentdate(self, subcmd, opts):
"""${cmd_name}: Show the current date
${cmd_usage}
${cmd_option_list}
"""
with self.bootstrapped():
from datetime import datetime
now = datetime.now()
LOG.debug("datetime.now = %s", now)
if opts.show_time:
print(now)
else:
print((now.date()))
def do_ls(self, subcmd, opts):
"""${cmd_name}: Show directory listing (runs 'ls')
${cmd_usage}
${cmd_option_list}
"""
with self.bootstrapped():
print((sh.run("ls")[0].decode("utf-8")))
def do_makeerror(self, subcmd, opts, what):
"""${cmd_name}: Make an error. Use -v to see full traceback
${cmd_usage}
${cmd_option_list}
"""
with self.bootstrapped():
LOG.debug("About to make an error! %s", what)
textui.askyesno("Press enter to proceed:", default=True)
1 / 0
@cmdln.option("", "--no-break", action="store_true", help="Don't break from loop")
def do_think(self, subcmd, opts, length=200):
"""${cmd_name}: Progress bar example
${cmd_usage}
${cmd_option_list}
"""
with self.bootstrapped():
import time
length = int(length)
for x in textui.ProgressBar.iterate(
list(range(length)), post="Thought {total} thoughts in time {elapsed}"
):
if x == length - 1 and not opts.no_break:
break # test that break doesn't mess up output
time.sleep(0.1)
def do_multable(self, subcmd, opts, number=10, times=25):
"""${cmd_name}: Print multiplication table
To demonstrate `colprint` feature
${cmd_usage}
${cmd_option_list}
"""
with self.bootstrapped():
textui.colprint(
[
[str(x * y) for y in range(1, 1 + int(times))]
for x in range(1, 1 + int(number))
]
)
# Script entry point: hand control to applib's command dispatcher.
if __name__ == "__main__":
    application.run(Commands)
<|endoftext|> |
<|endoftext|># Copyright (c) 2015-2016, Activision Publishing, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from assertpy import assert_that, fail
class TestType(object):
    """Tests for assertpy's is_type_of / is_instance_of assertions.
    is_type_of demands the exact type (a subclass is a failure), while
    is_instance_of mirrors isinstance() and also accepts subclasses.
    """
    def test_is_type_of(self):
        # Exact-type matches for the common builtin types and two fixtures.
        assert_that("foo").is_type_of(str)
        assert_that(123).is_type_of(int)
        assert_that(0.456).is_type_of(float)
        # assert_that(234L).is_type_of(long)
        assert_that(["a", "b"]).is_type_of(list)
        assert_that(("a", "b")).is_type_of(tuple)
        assert_that({"a": 1, "b": 2}).is_type_of(dict)
        assert_that(set(["a", "b"])).is_type_of(set)
        assert_that(None).is_type_of(type(None))
        assert_that(Foo()).is_type_of(Foo)
        assert_that(Bar()).is_type_of(Bar)
    def test_is_type_of_failure(self):
        # A wrong type raises with a descriptive message.
        try:
            assert_that("foo").is_type_of(int)
            fail("should have raised error")
        except AssertionError as ex:
            assert_that(str(ex)).is_equal_to(
                "Expected <foo:str> to be of type <int>, but was not."
            )
    def test_is_type_of_bad_arg_failure(self):
        # A non-type argument is a TypeError, not an assertion failure.
        try:
            assert_that("foo").is_type_of("bad")
            fail("should have raised error")
        except TypeError as ex:
            assert_that(str(ex)).is_equal_to("given arg must be a type")
    def test_is_type_of_subclass_failure(self):
        # is_type_of is exact: a Bar instance is not "of type" Foo.
        try:
            assert_that(Bar()).is_type_of(Foo)
            fail("should have raised error")
        except AssertionError as ex:
            assert_that(str(ex)).starts_with("Expected <")
            assert_that(str(ex)).ends_with(":Bar> to be of type <Foo>, but was not.")
    def test_is_instance_of(self):
        # Unlike is_type_of, a Bar instance IS an instance of Foo (last line).
        assert_that("foo").is_instance_of(str)
        assert_that(123).is_instance_of(int)
        assert_that(0.456).is_instance_of(float)
        # assert_that(234L).is_instance_of(long)
        assert_that(["a", "b"]).is_instance_of(list)
        assert_that(("a", "b")).is_instance_of(tuple)
        assert_that({"a": 1, "b": 2}).is_instance_of(dict)
        assert_that(set(["a", "b"])).is_instance_of(set)
        assert_that(None).is_instance_of(type(None))
        assert_that(Foo()).is_instance_of(Foo)
        assert_that(Bar()).is_instance_of(Bar)
        assert_that(Bar()).is_instance_of(Foo)
    def test_is_instance_of_failure(self):
        try:
            assert_that("foo").is_instance_of(int)
            fail("should have raised error")
        except AssertionError as ex:
            assert_that(str(ex)).is_equal_to(
                "Expected <foo:str> to be instance of class <int>, but was not."
            )
    def test_is_instance_of_bad_arg_failure(self):
        # A non-class argument is a TypeError, not an assertion failure.
        try:
            assert_that("foo").is_instance_of("bad")
            fail("should have raised error")
        except TypeError as ex:
            assert_that(str(ex)).is_equal_to("given arg must be a class")
class Foo(object):
    # Minimal base-class fixture for the type assertions in TestType.
    pass
class Bar(Foo):
    # Subclass fixture: passes is_instance_of(Foo) but fails is_type_of(Foo).
    pass
<|endoftext|> |
<|endoftext|>import sys
import math
import scipy
import pylab
import scipy.io.wavfile as wav
import wave
from scipy import signal
from itertools import product
import numpy
def readWav():
    """
    Reads a sound wave from a standard input and finds its parameters.
    """
    # Read the sound wave from the input.
    # NOTE(review): the path actually comes from the command line
    # (sys.argv[1]), not from stdin as the docstring suggests.
    sound_wave = wave.open(sys.argv[1], "r")
    # Get parameters of the sound wave.
    nframes = sound_wave.getnframes()
    framerate = sound_wave.getframerate()
    params = sound_wave.getparams()
    duration = nframes / float(framerate)
    print("frame rate: %d " % (framerate,))
    print("nframes: %d" % (nframes,))
    print("duration: %f seconds" % (duration,))
    # NOTE(review): scipy.array() on a Wave_read object does not decode the
    # audio samples; it wraps the reader object itself. Confirm intentional.
    print(scipy.array(sound_wave))
    return (sound_wave, nframes, framerate, duration, params)
def getDuration(sound_file):
    """
    Return the duration of ``sound_file`` in seconds (frames / frame rate).
    """
    reader = wave.open(sound_file, "r")
    params = reader.getparams()
    return params.nframes / float(params.framerate)
def getFrameRate(sound_file):
    """
    Return the sampling rate (frames per second) of ``sound_file``.
    """
    return wave.open(sound_file, "r").getframerate()
def get_channels_no(sound_file):
    """
    Return the number of audio channels in ``sound_file``.
    """
    return wave.open(sound_file, "r").getnchannels()
def plotSoundWave(rate, sample):
    """
    Plots a given sound wave.
    Only the first T samples (T = 0.0001 * rate) are drawn.
    """
    # Time axis for two seconds of audio at the given rate.
    # NOTE(review): scipy.linspace was removed from modern SciPy releases;
    # numpy.linspace is the drop-in replacement.
    t = scipy.linspace(0, 2, 2 * rate, endpoint=False)
    pylab.figure("Sound wave")
    T = int(0.0001 * rate)
    pylab.plot(
        t[:T],
        sample[:T],
    )
    pylab.show()
def plotPartials(binFrequencies, maxFreq, magnitudes):
    """
    Calculates and plots the power spectrum of a given sound wave.
    Draws magnitudes against bin frequencies, truncated to int(maxFreq)
    entries.
    """
    T = int(maxFreq)
    pylab.figure("Power spectrum")
    pylab.plot(
        binFrequencies[:T],
        magnitudes[:T],
    )
    pylab.xlabel("Frequency (Hz)")
    pylab.ylabel("Power spectrum (|X[k]|^2)")
    pylab.show()
def plotPowerSpectrum(FFT, binFrequencies, maxFreq):
    """
    Calculates and plots the power spectrum of a given sound wave.
    The power of each bin is |X[k]|^2, plotted for the first int(maxFreq)
    bins.
    """
    T = int(maxFreq)
    pylab.figure("Power spectrum")
    # NOTE(review): scipy.absolute was removed from modern SciPy releases;
    # numpy.absolute is the drop-in replacement.
    pylab.plot(
        binFrequencies[:T],
        scipy.absolute(FFT[:T]) * scipy.absolute(FFT[:T]),
    )
    pylab.xlabel("Frequency (Hz)")
    pylab.ylabel("Power spectrum (|X[k]|^2)")
    pylab.show()
def get_frequencies_axis(framerate, fft_length):
    """Return the center frequency (Hz) of each of the fft_length bins."""
    resolution = float(framerate) / float(fft_length)
    return [k * resolution for k in range(fft_length)]
def get_next_power_2(n):
    """
    Return the largest power of two strictly smaller than the smallest
    power of two >= n (i.e. one halving step down), or 1 for n <= 2.

    The result is used as an FFT length, which must be an int: the
    original ``power / 2`` produced a float under Python 3, which
    numpy.fft.fft rejects for its ``n`` argument, so floor division is
    used here.
    """
    power = 1
    while power < n:
        power *= 2
    # power is now the smallest power of two >= n; step one level down.
    if power > 1:
        return power // 2
    else:
        return 1
class MIDI_Detector(object):
    """
    Class for MIDI notes detection given a .wav file.
    """
    def __init__(self, wav_file):
        self.wav_file = wav_file
        # Only frequencies inside this band (Hz) are considered.
        self.minFreqConsidered = 20
        self.maxFreqConsidered = 5000
        # Fundamentals of the lowest piano notes (roughly A0..E2), used when
        # searching for a low-frequency candidate.
        # NOTE(review): 37.708 does not match the tempered scale (D1 is
        # 36.708 Hz) -- possible typo in the original data; left unchanged.
        self.low_f0s = [
            27.5,
            29.135,
            30.868,
            32.703,
            34.648,
            37.708,
            38.891,
            41.203,
            43.654,
            46.249,
            48.999,
            51.913,
            55.0,
            58.27,
            61.735,
            65.406,
            69.296,
            73.416,
            77.782,
            82.407,
        ]
    def detect_MIDI_notes(self):
        """
        The algorithm for calculating midi notes from a given wav file.
        """
        (framerate, sample) = wav.read(self.wav_file)
        # Mix multi-channel audio down to mono.
        if get_channels_no(self.wav_file) > 1:
            sample = sample.mean(axis=1)
        duration = getDuration(self.wav_file)
        midi_notes = []
        # Consider only files with a duration longer than 0.18 seconds.
        if duration > 0.18:
            (
                FFT,
                filteredFreqs,
                maxFreq,
                magnitudes,
                significant_freq,
            ) = self.calculateFFT(duration, framerate, sample)
            # plotPowerSpectrum(FFT, filteredFreqs, 1000)
            clusters = self.clusterFrequencies(filteredFreqs)
            averagedClusters = self.getClustersMeans(clusters)
            f0_candidates = self.getF0Candidates(averagedClusters)
            midi_notes = self.matchWithMIDINotes(f0_candidates)
            # OCTAVE CORRECTION METHOD (disabled):
            # Include a note with a significant magnitude: if its magnitude
            # is higher than the sum of magnitudes of all other spectral
            # peaks, include it in the list of detected notes and remove the
            # note that's an octave lower than this one if it was detected.
            #
            # if significant_freq > 0:
            #     significant_midi_notes = self.matchWithMIDINotes([
            #         significant_freq])
            #     significant_midi_note = significant_midi_notes[0]
            #     if significant_midi_note not in midi_notes:
            #         midi_notes.append(significant_midi_note)
            #         midi_notes = self.remove_lower_octave(
            #             significant_midi_note, midi_notes)
        return midi_notes
    def remove_lower_octave(self, upper_octave, midi_notes):
        # A MIDI octave is 12 semitones below.
        lower_octave = upper_octave - 12
        if lower_octave in midi_notes:
            midi_notes.remove(lower_octave)
        return midi_notes
    def get_candidates_with_partials(self, frequencies, magnitudes):
        # Debug trace retained from the original implementation.
        print(frequencies)
        partial_margin = 11.0  # Hz
        # A list of frequencies of each candidate.
        candidates_freq = []
        # A list of magnitudes of frequencies of each candidate.
        candidates_magnitude = []
        for i in range(len(frequencies)):
            partials, partial_magnitudes = self.find_partials(
                frequencies[i:], frequencies[i], magnitudes[i:]
            )
            candidates_freq.append(partials)
            candidates_magnitude.append(partial_magnitudes)
        return (candidates_freq, candidates_magnitude)
    def calculateFFT(self, duration, framerate, sample):
        """
        Calculates FFT for a given sound wave.
        Considers only frequencies with the magnitudes higher than
        a given threshold.
        """
        fft_length = int(duration * framerate)
        # For the FFT to work much faster take the length that is a power of 2.
        fft_length = get_next_power_2(fft_length)
        FFT = numpy.fft.fft(sample, n=fft_length)
        # ADJUSTING THRESHOLD - HIGHEST SPECTRAL PEAK METHOD
        threshold = 0
        power_spectra = []
        frequency_bin_with_max_spectrum = 0
        # Only the first half of the FFT is unique for real input; floor
        # division keeps the range argument an int under Python 3 (the
        # original "/" raised TypeError there).
        for i in range(len(FFT) // 2):
            power_spectrum = numpy.absolute(FFT[i]) * numpy.absolute(FFT[i])
            if power_spectrum > threshold:
                threshold = power_spectrum
                frequency_bin_with_max_spectrum = i
            power_spectra.append(power_spectrum)
        max_power_spectrum = threshold
        threshold *= 0.1
        binFrequencies = []
        magnitudes = []
        binResolution = float(framerate) / float(fft_length)
        sum_of_significant_spectra = 0
        # For each bin calculate the corresponding frequency.
        for k in range(len(FFT)):
            binFreq = k * binResolution
            # Truncate the FFT so only hearable frequencies are considered;
            # also stop at the Nyquist bin, past which power_spectra has no
            # entries (the original indexed out of bounds there).
            if binFreq > self.maxFreqConsidered or k >= len(power_spectra):
                FFT = FFT[:k]
                break
            elif binFreq > self.minFreqConsidered:
                # Consider only the frequencies
                # with magnitudes higher than the threshold.
                power_spectrum = power_spectra[k]
                if power_spectrum > threshold:
                    magnitudes.append(power_spectrum)
                    binFrequencies.append(binFreq)
                    # Sum all significant power spectra
                    # except the max power spectrum.
                    if power_spectrum != max_power_spectrum:
                        sum_of_significant_spectra += power_spectrum
        significant_freq = 0.0
        # A peak is "significant" if it dominates all other peaks combined.
        if max_power_spectrum > sum_of_significant_spectra:
            significant_freq = frequency_bin_with_max_spectrum * binResolution
        # Max. frequency considered after truncating.
        # maxFreq = rate without truncating.
        maxFreq = len(FFT) / duration
        return (FFT, binFrequencies, maxFreq, magnitudes, significant_freq)
    # Code for STFT taken from:
    # http://stackoverflow.com/questions/2459295/stft-and-istft-in-python
    def STFT(self, x, samplingFreq, framesz, hop):
        """
        Computes STFT for a given sound wave using Hanning window.
        """
        framesamp = int(framesz * samplingFreq)
        print("FRAMESAMP: " + str(framesamp))
        hopsamp = int(hop * samplingFreq)
        print("HOP SAMP: " + str(hopsamp))
        # Modification: using Hanning window instead of Hamming - by Pertusa.
        # signal.windows.hann replaces the removed scipy.signal.hann alias.
        w = signal.windows.hann(framesamp)
        X = numpy.array(
            [
                numpy.fft.fft(w * x[i : i + framesamp])
                for i in range(0, len(x) - framesamp, hopsamp)
            ]
        )
        return X
    def plotMagnitudeSpectrogram(self, rate, sample, framesz, hop):
        """
        Calculates and plots the magnitude spectrum of a given sound wave.
        """
        X = self.STFT(sample, rate, framesz, hop)
        # Plot the magnitude spectrogram.
        pylab.figure("Magnitude spectrogram")
        pylab.imshow(
            numpy.absolute(X.T), origin="lower", aspect="auto", interpolation="nearest"
        )
        pylab.xlabel("Time")
        pylab.ylabel("Frequency")
        pylab.show()
    def getFilteredFFT(self, FFT, duration, threshold):
        """
        Returns a list of frequencies with the magnitudes higher
        than a given threshold.
        """
        significantFreqs = []
        for i in range(len(FFT)):
            power_spectrum = numpy.absolute(FFT[i]) * numpy.absolute(FFT[i])
            if power_spectrum > threshold:
                significantFreqs.append(i / duration)
        return significantFreqs
    def clusterFrequencies(self, freqs):
        """
        Clusters neighbouring frequencies (log-distance < 2.0) together.
        Returns a dict mapping cluster index -> list of frequencies.
        """
        if len(freqs) == 0:
            return {}
        clusteredFreqs = {}
        cluster_id = 0  # renamed from 'bin' to avoid shadowing the builtin
        clusteredFreqs[0] = [freqs[0]]
        for i in range(len(freqs) - 1):
            dist = self.calcDistance(freqs[i], freqs[i + 1])
            if dist < 2.0:
                clusteredFreqs[cluster_id].append(freqs[i + 1])
            else:
                cluster_id += 1
                clusteredFreqs[cluster_id] = [freqs[i + 1]]
        return clusteredFreqs
    def getClustersMeans(self, clusters):
        """
        Given clustered frequencies finds a mean of each cluster.
        """
        means = []
        for freqs in clusters.values():
            means.append(sum(freqs) / len(freqs))
        return means
    def getDistances(self, freqs):
        """
        Returns a dict of pairwise distances below 2.0 between frequencies.
        """
        distances = {
            (freqs[i], freqs[j]): self.calcDistance(freqs[i], freqs[j])
            for (i, j) in product(list(range(len(freqs))), repeat=2)
        }
        distances = {
            freq_pair: dist for freq_pair, dist in distances.items() if dist < 2.0
        }
        return distances
    def calcDistance(self, freq1, freq2):
        """
        Calculates distance between frequencies taking into account that
        the frequencies of pitches increase logarithmically.
        """
        difference = abs(freq1 - freq2)
        log = math.log((freq1 + freq2) / 2)
        return difference / log
    def getF0Candidates(self, frequencies):
        """
        Given frequencies finds possible F0 candidates
        by discarding potential harmonic frequencies.
        NOTE: mutates the input list.
        """
        f0_candidates = []
        # MODIFICATION (disabled): CONSIDER ONLY MIDDLE RANGE FREQUENCIES
        # if len(frequencies) > 0 and frequencies[0] < 83.0:
        #     low_freq_candidate = self.find_low_freq_candidate(frequencies)
        #     if low_freq_candidate > 0.0:
        #         f0_candidates.append(low_freq_candidate)
        #         frequencies = self.filterOutHarmonics(
        #             frequencies, low_freq_candidate)
        while len(frequencies) > 0:
            f0_candidate = frequencies[0]
            f0_candidates.append(f0_candidate)
            frequencies.remove(f0_candidate)
            frequencies = self.filterOutHarmonics(frequencies, f0_candidate)
        return f0_candidates
    def filterOutHarmonics(self, frequencies, f0_candidate):
        """
        Given frequencies and an f0 candidate remove
        all possible harmonics of this f0 candidate.
        """
        # If an integer frequency is a multiple of another frequency
        # then it is its harmonic. This constant was found empirically.
        REMAINDER_THRESHOLD = 0.2
        def is_multiple(f, f0):
            return abs(round(f / f0) - f / f0) < REMAINDER_THRESHOLD
        return [f for f in frequencies if not is_multiple(f, f0_candidate)]
    def find_low_freq_candidate(self, frequencies):
        """Return the low piano fundamental explaining most partials."""
        REMAINDER_THRESHOLD = 0.05
        f0_candidates = []
        def is_multiple(f, f0):
            return abs(round(f / f0) - f / f0) < REMAINDER_THRESHOLD
        best_candidate = -1
        max_no_partials = 0
        for low_f0 in self.low_f0s:
            num_of_partials = 0
            for f in frequencies:
                if is_multiple(f, low_f0):
                    num_of_partials += 1
            if num_of_partials > max_no_partials:
                max_no_partials = num_of_partials
                best_candidate = low_f0
        return best_candidate
    def find_partials(self, frequencies, f0_candidate, magnitudes):
        """
        Given frequencies, frequency magnitudes and an f0 candidate
        return the partials and magnitudes of this f0 candidate.
        """
        REMAINDER_THRESHOLD = 0.05
        def is_multiple(f, f0):
            return abs(round(f / f0) - f / f0) < REMAINDER_THRESHOLD
        partials = []
        partial_magnitudes = []
        for i in range(len(frequencies)):
            if is_multiple(frequencies[i], f0_candidate):
                partials.append(frequencies[i])
                partial_magnitudes.append(magnitudes[i])
        return (partials, partial_magnitudes)
    def matchWithMIDINotes(self, f0_candidates):
        midi_notes = []
        for freq in f0_candidates:
            # Formula for calculating MIDI note number (A4 = 440 Hz = 69).
            midi_notes.append(int(round(69 + 12 * math.log(freq / 440) / math.log(2))))
        return midi_notes
# Script entry point: detect and print the MIDI notes of the wav file
# given as the first command-line argument.
if __name__ == "__main__":
    MIDI_detector = MIDI_Detector(sys.argv[1])
    midi_notes = MIDI_detector.detect_MIDI_notes()
    print(midi_notes)
<|endoftext|> |
<|endoftext|>__author__ = "Ahmed Hani Ibrahim"
class Action(object):
    """A named action; the name is held in a private attribute."""

    def __init__(self, name):
        self.__name = name

    def GetActionName(self):
        """Return the action's current name."""
        return self.__name

    def SetActionName(self, name):
        """Replace the action's name."""
        self.__name = name
<|endoftext|> |
<|endoftext|># auto-generated file
import _cffi_backend
# Machine-written CFFI module descriptor for "_simple_example"; do not edit
# the byte strings by hand.  _types encodes cffi's pre-parsed type table and
# _globals the exported symbols (here: printf) in cffi's internal binary
# format; _version pins the layout those bytes were generated for.
ffi = _cffi_backend.FFI(
    "_simple_example",
    _version=0x2601,
    _types=b"\x00\x00\x04\x0D\x00\x00\x03\x03\x00\x00\x01\x0F\x00\x00\x02\x01\x00\x00\x07\x01",
    _globals=(
        b"\x00\x00\x00\x23printf",
        0,
    ),
)
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
import binascii

from dnslib import *
# A captured DNS answer (hex-encoded): www.google.com resolving through a
# CNAME to several A records.  NOTE(review): binascii is only in scope here
# via the wildcard dnslib import above — an explicit import would be safer.
packet = binascii.unhexlify(
    b"d5ad818000010005000000000377777706676f6f676c6503636f6d0000010001c00c0005000100000005000803777777016cc010c02c0001000100000005000442f95b68c02c0001000100000005000442f95b63c02c0001000100000005000442f95b67c02c0001000100000005000442f95b93"
)
d = DNSRecord.parse(packet)
# The default text representation of the DNSRecord is in zone file format
print(d)
<|endoftext|> |
<|endoftext|>from app import app
if __name__ == "__main__":
    # Start Flask's built-in development server (not for production use).
    app.run()
<|endoftext|> |
<|endoftext|># coding: utf-8
from flask import render_template, Blueprint, redirect, request, url_for
from ..forms import SigninForm, SignupForm
from ..utils.account import signin_user, signout_user
from ..utils.permissions import VisitorPermission, UserPermission
from ..models import db, User
# All account routes below are registered under this blueprint.
bp = Blueprint("account", __name__)
@bp.route("/signin", methods=["GET", "POST"])
@VisitorPermission()
def signin():
    """Show the signin form; on a valid submit start the user's session."""
    form = SigninForm()
    if not form.validate_on_submit():
        return render_template("account/signin/signin.html", form=form)
    signin_user(form.user)
    return redirect(url_for("site.index"))
@bp.route("/signup", methods=["GET", "POST"])
@VisitorPermission()
def signup():
    """Show the signup form; on a valid submit create and sign in the user."""
    form = SignupForm()
    if not form.validate_on_submit():
        return render_template("account/signup/signup.html", form=form)
    # Drop the password-confirmation field; the User model has no such column.
    params = {k: v for k, v in form.data.items() if k != "repassword"}
    user = User(**params)
    db.session.add(user)
    db.session.commit()
    signin_user(user)
    return redirect(url_for("site.index"))
@bp.route("/signout")
def signout():
    """End the session and return the user where they came from."""
    signout_user()
    fallback = url_for("site.index")
    return redirect(request.referrer or fallback)
<|endoftext|> |
<|endoftext|>from app import app, db
import unittest
import os
import tempfile
from flask import json
# Filename of the throw-away SQLite database used by FlaskrTestCase.
TEST_DB = "test.db"
class BasicTestCase(unittest.TestCase):
    """Smoke tests: the app answers requests and its DB file exists."""

    def test_index(self):
        """The root URL must be reachable once Flask is wired up."""
        client = app.test_client(self)
        reply = client.get("/", content_type="html/text")
        self.assertEqual(reply.status_code, 200)

    def test_database(self):
        """The on-disk flaskr database must already exist."""
        self.assertTrue(os.path.exists("flaskr.db"))
class FlaskrTestCase(unittest.TestCase):
    """Functional tests for the flaskr app backed by a throw-away SQLite DB."""
    def setUp(self):
        """Set up a blank temp database before each test"""
        basedir = os.path.abspath(os.path.dirname(__file__))
        app.config["TESTING"] = True
        app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///" + os.path.join(
            basedir, TEST_DB
        )
        self.app = app.test_client()
        db.create_all()
    def tearDown(self):
        """Destroy blank temp database after each test"""
        db.drop_all()
    def login(self, username, password):
        """Login helper function"""
        return self.app.post(
            "/login",
            data=dict(username=username, password=password),
            follow_redirects=True,
        )
    def logout(self):
        """Logout helper function"""
        return self.app.get("/logout", follow_redirects=True)
    # assert functions
    def test_empty_db(self):
        """Ensure database is blank"""
        rv = self.app.get("/")
        self.assertIn(b"No entries yet. Add some!", rv.data)
    def test_login_logout(self):
        """Test login and logout using helper functions"""
        rv = self.login(app.config["USERNAME"], app.config["PASSWORD"])
        self.assertIn(b"You were logged in", rv.data)
        rv = self.logout()
        self.assertIn(b"You were logged out", rv.data)
        # Both a wrong username and a wrong password must be rejected.
        rv = self.login(app.config["USERNAME"] + "x", app.config["PASSWORD"])
        self.assertIn(b"Invalid username", rv.data)
        rv = self.login(app.config["USERNAME"], app.config["PASSWORD"] + "x")
        self.assertIn(b"Invalid password", rv.data)
    def test_messages(self):
        """Ensure that user can post messages"""
        self.login(app.config["USERNAME"], app.config["PASSWORD"])
        rv = self.app.post(
            "/add",
            data=dict(title="<Hello>", text="<strong>HTML</strong> allowed here"),
            follow_redirects=True,
        )
        self.assertNotIn(b"No entries here so far", rv.data)
        self.assertIn(b"<Hello>", rv.data)
        self.assertIn(b"<strong>HTML</strong> allowed here", rv.data)
    def test_delete_message(self):
        """Ensure the messages are being deleted"""
        # The delete endpoint answers with a JSON status flag.
        rv = self.app.get("/delete/1")
        data = json.loads(rv.data)
        self.assertEqual(data["status"], 1)
if __name__ == "__main__":
    unittest.main()
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
# encoding: utf-8
import json
# Demonstrate that sort_keys only affects key order, and that repeated
# dumps with the same options are byte-identical.
data = [{"a": "A", "b": (2, 4), "c": 3.0}]
print("DATA:", repr(data))

unsorted = json.dumps(data)
print("JSON:", unsorted)
print("SORT:", json.dumps(data, sort_keys=True))

first = json.dumps(data, sort_keys=True)
second = json.dumps(data, sort_keys=True)
print("UNSORTED MATCH:", unsorted == first)
print("SORTED MATCH :", first == second)
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
# Exercise 30: Else and If
# Three chained if/elif/else blocks comparing fixed counts; with these
# constants the script always prints the same three lines.
people = 30
cars = 40
trucks = 15
if cars > people:
    print("We should take the cars.")
elif cars < people:
    print("We should not take the cars.")
else:
    print("We can't decide.")
if trucks > cars:
    print("That's too many trucks.")
elif trucks < cars:
    print("Maybe we could take the trucks.")
else:
    print("We still can't decide.")
if people > trucks:
    print("Alright, let's just take the trucks.")
else:
    print("Fine, let's stay home then.")
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import _thread
import time
mylock = _thread.allocate_lock()  # Allocate a lock
num = 0  # Shared resource, incremented by both worker threads under mylock
def add_num(name):
    """Worker: repeatedly bump the shared counter `num` under `mylock`.

    Once num reaches 5 the thread releases the lock and exits via
    _thread.exit(); the lock is always released before exiting or looping.
    """
    global num
    while True:
        mylock.acquire()  # Get the lock
        # Do something to the shared resource
        print(("Thread %s locked! num=%s" % (name, str(num))))
        if num >= 5:
            print(("Thread %s released! num=%s" % (name, str(num))))
            mylock.release()
            _thread.exit()
        num += 1
        print(("Thread %s released! num=%s" % (name, str(num))))
        mylock.release()  # Release the lock.
def test():
    """Race two workers over the shared counter, then wait 30s for them."""
    _thread.start_new_thread(add_num, ("A",))
    _thread.start_new_thread(add_num, ("B",))
    # _thread offers no join(); sleeping keeps the main thread alive.
    time.sleep(30)
if __name__ == "__main__":
    # Run the locking demo when executed directly.
    test()
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
# encoding: utf-8
"""Expand shell variables in filenames.
"""
import os.path
import os
# Demonstrate shell-style variable expansion inside a path string.
os.environ["MYVAR"] = "VALUE"
expanded = os.path.expandvars("/path/to/$MYVAR")
print(expanded)
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
import pyglet
from pyglet.window import key
from pyglet.window import mouse
window = pyglet.window.Window()
@window.event
def on_key_press(symbol, modifiers):
    """Log every key press; call out a few specific keys by name."""
    print("key %s was pressed" % symbol)
    if symbol == key.A:
        print('The "A" key was pressed.')
    elif symbol == key.LEFT:
        print("The left arrow key was pressed.")
    elif symbol == key.ENTER:
        print("The enter key was pressed.")
@window.event
def on_mouse_press(x, y, button, modifiers):
    """Log mouse presses with window-relative coordinates."""
    print("location: (%s, %s), button: %s" % (x, y, button))
    if button == mouse.LEFT:
        print("The left mouse button was pressed.")
@window.event
def on_draw():
    # Clear the frame each draw tick; nothing else is rendered.
    window.clear()
# Enter pyglet's event loop (blocks until the window closes).
pyglet.app.run()
<|endoftext|> |
<|endoftext|>number = 53
go = True
while go:
guess = int(raw_input('input a number please'))
if guess == number:
print 'correct'
go = False
elif guess < number:
print 'try a bigger one'
else:
print 'try a smaller one'
else:
print 'it\'s over'
<|endoftext|> |
<|endoftext|>import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
# The long description shown on PyPI is README + changelog.
with open(os.path.join(here, "README.txt")) as f:
    README = f.read()
with open(os.path.join(here, "CHANGES.txt")) as f:
    CHANGES = f.read()
# Runtime dependencies of the Pyramid application.
requires = [
    "pyramid",
    "pyramid_chameleon",
    "pyramid_debugtoolbar",
    "pyramid_tm",
    "SQLAlchemy",
    "transaction",
    "zope.sqlalchemy",
    "waitress",
]
setup(
    name="pyramid_pycharm",
    version="0.0",
    description="pyramid_pycharm",
    long_description=README + "\n\n" + CHANGES,
    classifiers=[
        "Programming Language :: Python",
        "Framework :: Pyramid",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
    ],
    author="",
    author_email="",
    url="",
    keywords="web wsgi bfg pylons pyramid",
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    test_suite="pyramid_pycharm",
    install_requires=requires,
    entry_points="""\
    [paste.app_factory]
    main = pyramid_pycharm:main
    [console_scripts]
    initialize_pyramid_pycharm_db = pyramid_pycharm.scripts.initializedb:main
    """,
)
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
from mako.template import Template
from mako.runtime import Context
from io import StringIO
# Render a tiny inline template through an explicit Context + buffer,
# instead of the simpler mytemplate.render(name=...) shortcut.
mytemplate = Template("hello, ${name}!")
buf = StringIO()
ctx = Context(buf, name="Akagi201")
mytemplate.render_context(ctx)
print((buf.getvalue()))
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
"""Test for inequality
"""
import unittest
class InequalityTest(unittest.TestCase):
    """NOTE(review): both assertions below fail as written (3 - 2 == 1).
    This mirrors the classic unittest demo of failure reporting, so the
    inversion is presumably intentional — confirm before "fixing".
    """
    def testEqual(self):
        # 3 - 2 == 1, so this assertNotEqual fails when run.
        self.assertNotEqual(1, 3 - 2)
    def testNotEqual(self):
        # 3 - 2 != 2, so this assertEqual fails when run.
        self.assertEqual(2, 3 - 2)
if __name__ == "__main__":
    unittest.main()
<|endoftext|> |
<|endoftext|>from flask import Flask
from flask.ext.fragment import Fragment
from flask.ext.login import LoginManager
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
db = SQLAlchemy(app)
fragment = Fragment(app)  # server-side fragment caching (flask-fragment)
login = LoginManager(app)
from models import User, Post, Comment, LoginForm, RegisterForm, PostForm, CommentForm
from flask.ext.login import current_user, login_required, login_user, logout_user
from flask import render_template, redirect, url_for, request, flash
#### VIEWS
# NOTE(review): the three imports below duplicate the three above verbatim.
from models import User, Post, Comment, LoginForm, RegisterForm, PostForm, CommentForm
from flask.ext.login import current_user, login_required, login_user, logout_user
from flask import render_template, redirect, url_for, request, flash
POSTS_ON_PAGE = 20  # page size for post listings
COMMENTS_ON_PAGE = 20  # page size for comment listings
## Handlers
@login.user_loader
def load_user(userid):
    """Flask-Login callback: restore a user object from the session id."""
    return User.get(userid)
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page."""
    return render_template("page404.html"), 404
@login.unauthorized_handler
def unauthorized():
    """Flash a warning and bounce anonymous users back to the index."""
    flash(
        "Only authorized users can do requested action or see requested page.",
        "warning",
    )
    return redirect(url_for("index"))
### Login/Logout/Register pages
@fragment(app)
def login_form():
    """Cached fragment: the login form markup."""
    return render_template("login.html", form=LoginForm())
@app.route("/login", methods=["POST"])
def login():
    """Validate the posted login form and start the user's session."""
    form = LoginForm()
    if not form.validate_on_submit():
        return redirect(url_for("index"))
    login_user(form.user)
    flash("You are logged successfully.", "info")
    # Honor an optional ?next=... target, falling back to the index.
    return redirect(request.args.get("next") or url_for("index"))
@app.route("/logout")
@login_required
def logout():
    """End the current session and return to the index."""
    logout_user()
    return redirect(url_for("index"))
@app.route("/register", methods=["GET", "POST"])
def register():
    """Show the registration form; on valid submit create + sign in the user."""
    form = RegisterForm()
    if not form.validate_on_submit():
        return render_template("register.html", form=form)
    db.session.add(form.user)
    db.session.commit()
    login_user(form.user)
    flash("You are registered successfully.", "info")
    return redirect(url_for("index"))
### Index page
@fragment(app, cache=300)
def user_info(userid):
    """Cached fragment (300s): info box for one user."""
    return render_template("fragments/userinfo.html")
@fragment(app, cache=300)
def posts_list(page):
    """Cached fragment (300s): one page of the post list."""
    page = int(page)
    page_size = POSTS_ON_PAGE
    pagination = Post.query.filter_by().paginate(page, page_size)
    posts = Post.query.filter_by().offset((page - 1) * page_size).limit(page_size).all()
    return render_template(
        "fragments/posts_list.html", pagination=pagination, posts=posts
    )
@fragment.resethandler(posts_list)
def reset_posts_list():
    """Invalidate every cached page of the post list."""
    page_size = POSTS_ON_PAGE
    pagination = Post.query.filter_by().paginate(1, page_size)
    for N in range(pagination.pages):
        fragment.reset_url(url_for("posts_list", page=N + 1))
@app.route("/posts/<int:page>")
@app.route("/", endpoint="index", defaults={"page": 1})
def posts(page):
    """Post-list page; the actual list is filled in by posts_list()."""
    return render_template("index.html", page=page)
### Post page
@fragment(app, cache=300)
def post_show(post_id):
    """Cached fragment (300s): body of a single post."""
    post = Post.query.filter_by(id=post_id).first()
    return render_template("fragments/post_show.html", post=post)
@fragment(app, cache=300)
def comments_list(post_id, page):
    """Cached fragment (300s): one page of a post's comments."""
    page = int(page)
    page_size = COMMENTS_ON_PAGE
    pagination = Comment.query.filter_by(post_id=post_id).paginate(page, page_size)
    comments = (
        Comment.query.filter_by(post_id=post_id)
        .offset((page - 1) * page_size)
        .limit(page_size)
        .all()
    )
    return render_template(
        "fragments/comments_list.html",
        post_id=post_id,
        page=page,
        pagination=pagination,
        comments=comments,
    )
@fragment.resethandler(comments_list)
def reset_comments_list(post_id):
    """Invalidate every cached comment page of one post."""
    page_size = COMMENTS_ON_PAGE
    pagination = Comment.query.filter_by(post_id=post_id).paginate(1, page_size)
    for N in range(pagination.pages):
        fragment.reset_url(url_for("comments_list", post_id=post_id, page=N + 1))
@app.route("/post/<int:post_id>/<int:page>", methods=["GET", "POST"])
def post(post_id, page):
    """Single-post page; authenticated POSTs add a comment and invalidate
    the cached fragments that display it."""
    form = CommentForm()
    if current_user.is_authenticated() and form.validate_on_submit():
        form.comment.author_id = current_user.id
        form.comment.post_id = post_id
        db.session.add(form.comment)
        db.session.commit()
        # A new comment changes the list fragments and the author info box.
        fragment.reset(posts_list)
        fragment.reset(comments_list, post_id)
        fragment.reset(user_info, current_user.id)
        flash("Your comment has saved successfully.", "info")
    return render_template("post.html", form=form, post_id=post_id, page=page)
### New Post page
@app.route("/new/post", methods=["GET", "POST"])
@login_required
def new_post():
    """Compose a post; saving it invalidates the affected cached fragments."""
    form = PostForm()
    if not form.validate_on_submit():
        return render_template("newpost.html", form=form)
    form.post.author_id = current_user.id
    db.session.add(form.post)
    db.session.commit()
    # A new post changes the post list and the author's info box.
    fragment.reset(posts_list)
    fragment.reset(user_info, current_user.id)
    flash("Your post has saved successfully.", "info")
    return redirect(url_for("index"))
### Config ###
class DefaultConfig(object):
    """Development defaults; SECRET_KEY must be overridden in production."""
    FRAGMENT_CACHING = True
    SQLALCHEMY_DATABASE_URI = "sqlite:///ssiblog.db"
    SECRET_KEY = "Development_Secret_Key_Must_Be_Overwritten"
### Console command ###
import sys
import os.path
PY2 = sys.version_info[0] == 2
from flask.ext.script import Manager
manager = Manager(app, with_default_commands=False)
@manager.command
def debug():
    """Runs application within debug environment."""
    app.config["DEBUG"] = True
    if PY2:
        # The debug toolbar is only enabled on the Python 2 code path here.
        from flask_debugtoolbar import DebugToolbarExtension
        DebugToolbarExtension(app)
    app.run(debug=True)
@manager.command
def nginx_conf():
    """Creates application config for nginx."""
    # Written next to the package directory as "nginx.conf".
    file_name = os.path.join(os.path.dirname(os.path.dirname(__file__)), "nginx.conf")
    fragment._create_nginx_config(file_name)
@manager.command
def create_db():
    """Creates application DB.

    For an absolute-path SQLite URL ("sqlite:////...") make sure the parent
    directory of the database file exists first; SQLite does not create
    intermediate directories itself.
    """
    from models import DB
    url = app.config.get("SQLALCHEMY_DATABASE_URI", "sqlite://")
    if url.startswith("sqlite:////"):
        # "sqlite:////abs/path/file.db" -> "/abs/path/file.db"
        db_file = url[len("sqlite:///"):]
        # BUG FIX: os.makedirs() was previously called on the DB *file* path,
        # creating a directory where the database file should live.
        db_dir = os.path.dirname(db_file)
        if db_dir and not os.path.exists(db_dir):
            os.makedirs(db_dir)
    DB.create_all()
    DB.session.commit()
if __name__ == "__main__":
    # Load defaults, then hand control to Flask-Script's CLI dispatcher.
    app.config.from_object(DefaultConfig)
    manager.run()
<|endoftext|> |
<|endoftext|># -*- coding: utf-8 -*-
from tests.common import (
parent_id,
parent_name,
child_id,
child_parent_id,
relation,
child,
parent,
)
from eralchemy.main import _intermediary_to_markdown
import re
import pytest
# Raw string: "\*" and "\{" are invalid escape sequences in a normal string
# literal (DeprecationWarning, SyntaxWarning/error on newer Pythons); the
# raw form matches exactly the same text.
column_re = re.compile(r'(?P<key>\*?)(?P<name>[^*].+) \{label:"(?P<type>.+)"\}')
def test_all_to_er():
    """The markdown output must contain every table and relation rendering."""
    tables = [child, parent]
    relations = [relation]
    output = _intermediary_to_markdown(tables, relations)
    for element in relations + tables:
        assert element.to_markdown() in output
def assert_column_well_rendered_to_er(col):
    # Parse the rendered column back with column_re and compare each part.
    col_er = col.to_markdown().strip()
    col_parsed = column_re.match(col_er)
    assert col_parsed.group("key") == ("*" if col.is_key else "")
    assert col_parsed.group("name") == col.name
    assert col_parsed.group("type") == col.type
def test_column_to_er():
    """Every fixture column must round-trip through the er syntax."""
    assert_column_well_rendered_to_er(parent_id)
    assert_column_well_rendered_to_er(parent_name)
    assert_column_well_rendered_to_er(child_id)
    assert_column_well_rendered_to_er(child_parent_id)
def test_relation():
    # Either direction of the cardinality markers is acceptable output.
    assert relation.to_markdown() in ["parent *--? child", "child ?--* parent"]
def assert_table_well_rendered_to_er(table):
    # Header plus each column rendering must appear in the table's markdown.
    assert table.header_markdown == "[" + table.name + "]"
    table_er = table.to_markdown()
    for col in table.columns:
        assert col.to_markdown() in table_er
def test_table():
    """Both fixture tables must render completely."""
    assert_table_well_rendered_to_er(child)
    assert_table_well_rendered_to_er(parent)
<|endoftext|> |
<|endoftext|>from django.http import Http404
from django.shortcuts import render_to_response
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
###########
# CHOICES #
###########
def choice_list(request, app_label, module_name, field_name, models):
    """Render the list of declared choices for one model field.

    NOTE(review): lookup_field is not defined or imported in this module's
    visible code — presumably supplied elsewhere; verify before use.
    """
    m, f = lookup_field(app_label, module_name, field_name, models)
    return render_to_response("databrowse/choice_list.html", {"model": m, "field": f})
def choice_detail(request, app_label, module_name, field_name, field_val, models):
    """Paginated list of objects whose *field_name* equals *field_val*.

    Raises Http404 when field_val is not one of the field's declared choices.
    NOTE(review): lookup_field is not defined in the visible module — verify.
    """
    m, f = lookup_field(app_label, module_name, field_name, models)
    try:
        label = dict(f.field.choices)[field_val]
    except KeyError:
        raise Http404("Invalid choice value given")
    obj_list = m.objects(**{f.field.name: field_val})
    # Page size can be overridden via ?items=N (positive integers only).
    numitems = request.GET.get("items")
    items_per_page = [25, 50, 100]
    if numitems and numitems.isdigit() and int(numitems) > 0:
        paginator = Paginator(obj_list, numitems)
    else:
        # fall back to default
        paginator = Paginator(obj_list, items_per_page[0])
    page = request.GET.get("page")
    try:
        obj_list_page = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        obj_list_page = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page.
        obj_list_page = paginator.page(paginator.num_pages)
    return render_to_response(
        "databrowse/choice_detail.html",
        {
            "model": m,
            "field": f,
            "value": label,
            "object_list": obj_list_page,
            "items_per_page": items_per_page,
        },
    )
<|endoftext|> |
<|endoftext|>"""
This is testing project for KeyKeeper application.
"""
<|endoftext|> |
<|endoftext|>"""Dynamic REST (or DREST) is an extension of Django REST Framework.
DREST offers the following features on top of the standard DRF kit:
- Linked/embedded/sideloaded relationships
- Field inclusions/exlusions
- Field-based filtering/sorting
- Directory panel for the browsable API
- Optimizations
"""
<|endoftext|> |
<|endoftext|># -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a generic favorite-pet reference to the user model: a free-form
    id field plus a ContentType foreign key, both optional."""
    dependencies = [
        ("contenttypes", "0001_initial"),
        ("tests", "0002_auto_20160310_1052"),
    ]
    operations = [
        migrations.AddField(
            model_name="user",
            name="favorite_pet_id",
            field=models.TextField(null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name="user",
            name="favorite_pet_type",
            field=models.ForeignKey(
                blank=True, to="contenttypes.ContentType", null=True
            ), # noqa
            preserve_default=True,
        ),
    ]
<|endoftext|> |
<|endoftext|>"""FamilySearch User submodule"""
# Python imports
# Magic
class User(object):
    """https://familysearch.org/developers/docs/api/resources#user

    Mixin building FamilySearch user-related URLs.  Expects the host class
    to provide ``root_collection``, ``collections``, ``user_base`` and
    ``update_collection()``.
    """

    def __init__(self):
        """https://familysearch.org/developers/docs/api/examples#user"""
        pass

    def _fsft_link(self, rel):
        """Return the FSFT collection link named *rel*, refreshing the cached
        collection once if any part of the path is missing.

        Extracted from the previously duplicated try/except blocks in
        current_user_person() and current_user_history().
        """
        try:
            return self.collections["FSFT"]["response"]["collections"][0][
                "links"
            ][rel]["href"]
        except KeyError:
            self.update_collection("FSFT")
            return self.collections["FSFT"]["response"]["collections"][0][
                "links"
            ][rel]["href"]

    def current_user(self):
        """https://familysearch.org/developers/docs/api/users/Current_User_resource"""
        return self.root_collection["response"]["collections"][0]["links"][
            "current-user"
        ]["href"]

    def current_user_person(self):
        """https://familysearch.org/developers/docs/api/tree/Current_Tree_Person_resource"""
        return self._fsft_link("current-user-person")

    def agent(self, uid):
        """https://familysearch.org/developers/docs/api/users/Agent_resource"""
        return self.user_base + "agents/" + uid

    def current_user_history(self):
        """https://familysearch.org/developers/docs/api/users/Current_User_History_resource"""
        return self._fsft_link("current-user-history")
<|endoftext|> |
<|endoftext|>"""
[Advanced] [In-development]
Export a program list to a single yaml file.
The export may contain machine specific paths.
and may need to be edited for portability
"""
from argparse import FileType
import logging
import sys
import yaml
from chalmers.utils.cli import add_selection_group, select_programs
log = logging.getLogger("chalmers.export")
def main(args):
    """Dump the selected programs' raw definitions to YAML on args.output."""
    # Paused programs are included deliberately (filter_paused=False).
    programs = select_programs(args, filter_paused=False)
    export_data = [{"program": dict(prog.raw_data)} for prog in programs]
    yaml.safe_dump(export_data, args.output, default_flow_style=False)
def add_parser(subparsers):
    """Register the "export" subcommand on the chalmers CLI parser."""
    parser = subparsers.add_parser(
        "export",
        help='[IN DEVELOPMENT] Export current configuration to be installed with the "import" command',
        description=__doc__,
    )
    add_selection_group(parser)
    # Default to stdout so the export can be piped or redirected.
    parser.add_argument("-o", "--output", type=FileType("w"), default=sys.stdout)
    parser.set_defaults(main=main)
<|endoftext|> |
<|endoftext|>"""
Linux services, this module checks the existence of linux command line
programs on import
* systemd_service
* upstart_service
* sysv_service
* cron_service
In that order
"""
import logging
import platform
import sys
from . import cron_service, sysv_service, upstart_service, systemd_service
from chalmers import errors
# Fix for AWS Linux
# NOTE(review): platform._supported_dists is a private CPython attribute that
# was removed (along with linux_distribution) in Python 3.8 — this module-
# level patch will raise AttributeError on modern interpreters; confirm the
# supported Python range.
if sys.version_info.major == 3:
    system_dist = ("system",)
else:
    system_dist = (b"system",)
platform._supported_dists += system_dist
log = logging.getLogger("chalmers.service")
class NoPosixSystemService(object):
    """Placeholder backend used when no supported init system is found;
    constructing it always raises ChalmersError."""
    def __init__(self, target_user=None):
        supported_dists = platform._supported_dists + system_dist
        linux = platform.linux_distribution(supported_dists=supported_dists)
        raise errors.ChalmersError(
            "Could not detect system service for platform %s (tried systemd, sysv init and upstart)"
            % linux[0]
        )
# Pick the first available init-system backend, in order of preference:
# systemd, then SysV init, then upstart.
if systemd_service.check():
    PosixSystemService = systemd_service.SystemdService
elif sysv_service.check():
    PosixSystemService = sysv_service.SysVService
elif upstart_service.check():
    PosixSystemService = upstart_service.UpstartService
else:
    PosixSystemService = NoPosixSystemService
PosixLocalService = cron_service.CronService
<|endoftext|> |
<|endoftext|>import abc
import logging
import traceback
import servicemanager
import win32event, win32service, win32api
from win32serviceutil import ServiceFramework
log = logging.getLogger(__name__)
class WindowsService(ServiceFramework, metaclass=abc.ABCMeta):
    """
    Base windows service class that provides all the nice things that a python
    service needs.

    BUG FIX: the original base list was ``(object, ServiceFramework, ...)``;
    with new-style classes, putting ``object`` first makes the MRO
    unresolvable and class creation raises TypeError, so ``object`` was
    dropped.  Subclasses are expected to implement ``start()`` and ``stop()``.
    """

    def __init__(self, args):
        try:
            # args[0] is the registered service name.
            self._svc_name_ = args[0]
            self._svc_display_name_ = args[0]
            ServiceFramework.__init__(self, args)
            self.stop_event = win32event.CreateEvent(None, 0, 0, None)
        except Exception:
            self.log("Error in WindowsService.__init__")
            self.log(traceback.format_exc())
            raise

    def log(self, msg):
        "Log to the NTEventlog"
        servicemanager.LogInfoMsg(str(msg))

    def sleep(self, sec):
        """Block for *sec* seconds (alertable Windows sleep)."""
        win32api.Sleep(sec * 1000, True)

    def SvcDoRun(self):
        """SCM entry point: report status transitions around start()."""
        self.log("start")
        self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
        try:
            self.ReportServiceStatus(win32service.SERVICE_RUNNING)
            self.log("start")
            self.start()
            self.ReportServiceStatus(win32service.SERVICE_STOPPED)
            self.log("done")
        except Exception:
            self.log("Error in WindowsService.SvcDoRun")
            self.log(traceback.format_exc())
            self.SvcStop()

    def SvcStop(self):
        """SCM stop request: stop the worker and signal the stop event."""
        # (A stray leading ``pass`` statement was removed here.)
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        self.log("stopping")
        self.stop()
        self.log("stopped")
        win32event.SetEvent(self.stop_event)
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)
<|endoftext|> |
<|endoftext|># Copyright (c) 2014 Johan Burke
# Distributed under the MIT software license. See http://www.opensource.org/licenses/mit-license.php.
from ..pyelliptic.ecc import *
from ..threads.threadutils import *
from ..constants import *
from .key import *
import hashlib
from struct import *
import sys
def encodeInt(val, alphabet=ALPHABET):
    """Encode a non-negative integer using *alphabet* (base = len(alphabet)).

    Returns "" for val == 0; callers pass positive values.
    Digits are collected and joined once instead of the original quadratic
    string prepending.
    """
    base = len(alphabet)
    digits = []
    while val > 0:
        val, rem = divmod(val, base)
        digits.append(str(alphabet[rem]))
    return "".join(reversed(digits))
class Address:
    """A versioned, checksummed, base-encoded address built from a hash."""
    def __init__(self, hashValue, version=VERSION):
        self.version = version
        self.hashValue = hashValue
        self.encodedValue = ""  # filled in by encode()
    def encodeVersion(self):
        # return the version as a big-endian unsigned byte.
        return pack(">B", self.version)
    def encode(self):
        """Compute encodedValue = base-encode(version + hash + checksum)."""
        a = self.encodeVersion() + self.hashValue
        sha = hashlib.new("sha512")
        sha.update(a)
        # NOTE: the digest is fed back into the *same* hash object, so the
        # checksum is sha512(a + sha512(a)) — the statement order matters.
        sha.update(sha.digest())
        checksum = sha.digest()[0:2]
        intValue = int.from_bytes(a + checksum, "big")
        # this value is in base 64
        self.encodedValue = encodeInt(intValue)
def genKey():
    """Generate an ECC keypair plus its derived address, returned as a Key.

    The public key is fed through an alternating sha512/ripemd160 chain;
    both hash objects are reused, so each update() appends to the previous
    state — the exact call order below is significant.
    """
    curve = ECC()
    pubKey = curve.get_pubkey()
    sha = hashlib.new("sha512")
    sha.update(pubKey)
    ripemd = hashlib.new("ripemd160")
    ripemd.update(sha.digest())
    sha.update(ripemd.digest())
    ripemd.update(sha.digest())
    # safePrint(ripemd.digest())
    a = Address(ripemd.digest())
    a.encode()
    key = Key(pubKey, curve.get_privkey(), a.encodedValue)
    return key
<|endoftext|> |
<|endoftext|>from anymesh import AnyMesh, AnyMeshDelegateProtocol
class LeftDelegate(AnyMeshDelegateProtocol):
    """Callbacks for the "left" node; replies to every message it receives."""
    def connected_to(self, device_info):
        print(("left connected to " + device_info.name))
    def disconnected_from(self, name):
        pass
    def received_msg(self, message):
        print(("left received message from " + message.sender))
        print(("message: " + message.data["msg"]))
        leftMesh.request("right", {"msg": "back at ya righty!"})
class RightDelegate(AnyMeshDelegateProtocol):
    """Callbacks for the "right" node; greets "left" once connected."""
    def connected_to(self, device_info):
        print(("right connected to " + device_info.name))
        rightMesh.request("left", {"msg": "hey lefty!"})
    def disconnected_from(self, name):
        pass
    def received_msg(self, message):
        print(("right received message from " + message.sender))
        print(("message: " + message.data["msg"]))
# Two in-process nodes on the same "global" network exchanging greetings.
leftMesh = AnyMesh("left", "global", LeftDelegate())
rightMesh = AnyMesh("right", "global", RightDelegate())
AnyMesh.run()
<|endoftext|> |
<|endoftext|>import unittest
import doctest
import urwid
def load_tests(loader, tests, ignore):
    """unittest ``load_tests`` hook: append doctest suites from a fixed set
    of urwid modules to the discovered tests."""
    module_doctests = [
        urwid.widget,
        urwid.wimp,
        urwid.decoration,
        urwid.display_common,
        urwid.main_loop,
        urwid.monitored_list,
        urwid.raw_display,
        "urwid.split_repr", # override function with same name
        urwid.util,
        urwid.signals,
    ]
    for m in module_doctests:
        tests.addTests(
            doctest.DocTestSuite(
                m, optionflags=doctest.ELLIPSIS | doctest.IGNORE_EXCEPTION_DETAIL
            )
        )
    return tests
<|endoftext|> |
<|endoftext|>import logging
log = logging.getLogger(__name__)
# Attributes of logging.LogRecord that carry framework plumbing rather than
# user payload; consumers skip these when serializing records.
EXCLUDED_LOG_VARS = [
    "threadName",
    "name",
    "thread",
    "created",
    "process",
    "processName",
    "args",
    "module",
    "filename",
    "levelno",
    "exc_text",
    "pathname",
    "lineno",
    "msg",
    "exc_info",
    "message",
    "funcName",
    "relativeCreated",
    "levelname",
    "msecs",
    "asctime",
]
def register_logging(logger, client_config, cls):
    """Return a handler of type *cls* attached to *logger*.

    Reuses the last already-attached instance of *cls* if present;
    otherwise constructs one with client_config and attaches it.
    """
    existing = [h for h in logger.handlers if isinstance(h, cls)]
    if existing:
        # Match the original semantics: the *last* matching handler wins.
        return existing[-1]
    handler = cls(client_config=client_config)
    logger.addHandler(handler)
    return handler
def unregister_logger(logger, handler):
    """Detach a handler previously attached by register_logging()."""
    logger.removeHandler(handler)
<|endoftext|> |
<|endoftext|>import uuid
import datetime
from appenlight_client.timing import get_local_storage
from appenlight_client.timing import default_timer
from appenlight_client.client import PY3
import logging
log = logging.getLogger(__name__)
class AppenlightWSGIWrapper(object):
    """WSGI middleware that times requests, captures exceptions and log
    records, and ships reports/stats through the Appenlight client.  The
    reporting happens in the ``finally`` block so it runs on both the
    success and the error path — the statement order there is significant.
    """
    __version__ = "0.3"
    def __init__(self, app, appenlight_client):
        self.app = app
        self.appenlight_client = appenlight_client
    def __call__(self, environ, start_response):
        """Run the application and conserve the traceback frames.
        also determine if we got 404
        """
        environ["appenlight.request_id"] = str(uuid.uuid4())
        appenlight_storage = get_local_storage()
        # clear out thread stats on request start
        appenlight_storage.clear()
        app_iter = None
        detected_data = []
        create_report = False
        traceback = None
        http_status = 200
        start_time = default_timer()
        # Wrap start_response so the status code and headers the wrapped app
        # sends can be observed without being altered.
        def detect_headers(status, headers, *k, **kw):
            detected_data[:] = status[:3], headers
            return start_response(status, headers, *k, **kw)
        # inject client instance reference to environ
        if "appenlight.client" not in environ:
            environ["appenlight.client"] = self.appenlight_client
        # some bw. compat stubs
        def local_report(message, include_traceback=True, http_status=200):
            environ["appenlight.force_send"] = True
        def local_log(level, message):
            environ["appenlight.force_send"] = True
        environ["appenlight.report"] = local_report
        environ["appenlight.log"] = local_log
        if "appenlight.tags" not in environ:
            environ["appenlight.tags"] = {}
        if "appenlight.extra" not in environ:
            environ["appenlight.extra"] = {}
        try:
            app_iter = self.app(environ, detect_headers)
            return app_iter
        except Exception:
            if hasattr(app_iter, "close"):
                app_iter.close()
            # we need that here
            traceback = self.appenlight_client.get_current_traceback()
            # by default reraise exceptions for app/FW to handle
            if self.appenlight_client.config["reraise_exceptions"]:
                raise
            try:
                start_response(
                    "500 INTERNAL SERVER ERROR",
                    [("Content-Type", "text/html; charset=utf-8")],
                )
            except Exception:
                environ["wsgi.errors"].write(
                    "AppenlightWSGIWrapper middleware catched exception "
                    "in streamed response at a point where response headers "
                    "were already sent.\n"
                )
            else:
                return "Server Error"
        finally:
            # report 500's and 404's
            # report slowness
            end_time = default_timer()
            appenlight_storage.thread_stats["main"] = end_time - start_time
            delta = datetime.timedelta(seconds=(end_time - start_time))
            stats, slow_calls = appenlight_storage.get_thread_stats()
            if "appenlight.view_name" not in environ:
                environ["appenlight.view_name"] = getattr(
                    appenlight_storage, "view_name", ""
                )
            if detected_data and detected_data[0]:
                http_status = int(detected_data[0])
            if self.appenlight_client.config["slow_requests"] and not environ.get(
                "appenlight.ignore_slow"
            ):
                # do we have slow calls/request ?
                if (
                    delta >= self.appenlight_client.config["slow_request_time"]
                    or slow_calls
                ):
                    create_report = True
            if "appenlight.__traceback" in environ and not environ.get(
                "appenlight.ignore_error"
            ):
                # get traceback gathered by pyramid tween
                traceback = environ["appenlight.__traceback"]
                del environ["appenlight.__traceback"]
                http_status = 500
                create_report = True
            if (
                traceback
                and self.appenlight_client.config["report_errors"]
                and not environ.get("appenlight.ignore_error")
            ):
                http_status = 500
                create_report = True
            elif self.appenlight_client.config["report_404"] and http_status == 404:
                create_report = True
            if create_report:
                self.appenlight_client.py_report(
                    environ,
                    traceback,
                    message=None,
                    http_status=http_status,
                    start_time=datetime.datetime.utcfromtimestamp(start_time),
                    end_time=datetime.datetime.utcfromtimestamp(end_time),
                    request_stats=stats,
                    slow_calls=slow_calls,
                )
            # dereference
            del traceback
            self.appenlight_client.save_request_stats(
                stats, view_name=environ.get("appenlight.view_name", "")
            )
            if self.appenlight_client.config["logging"]:
                records = self.appenlight_client.log_handlers_get_records()
                self.appenlight_client.log_handlers_clear_records()
                self.appenlight_client.py_log(
                    environ,
                    records=records,
                    r_uuid=environ["appenlight.request_id"],
                    created_report=create_report,
                )
            # send all data we gathered immediately at the end of request
            self.appenlight_client.check_if_deliver(
                self.appenlight_client.config["force_send"]
                or environ.get("appenlight.force_send")
            )
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
# Programmer: Chris Bunch (chris@appscale.com)
# General-purpose Python library imports
import json
import os
import re
import shutil
import subprocess
import sys
import unittest
import yaml
# Third party testing libraries
import boto.ec2
from flexmock import flexmock
# AppScale import, the library that we're testing here
# Make ../lib importable before pulling in the AppScale modules below.
lib = os.path.dirname(__file__) + os.sep + ".." + os.sep + "lib"
sys.path.append(lib)
from agents.ec2_agent import EC2Agent
from appscale import AppScale
from appscale_tools import AppScaleTools
from custom_exceptions import AppScaleException
from custom_exceptions import AppScalefileException
from custom_exceptions import BadConfigurationException
from local_state import LocalState
from remote_helper import RemoteHelper
class TestAppScale(unittest.TestCase):
    """Unit tests for the AppScale class driving the 'appscale' CLI.

    Every test stubs out interaction with the outside world — file reads via
    open(), os.getcwd(), SSH subprocess calls, boto EC2 connections and the
    AppScaleTools entry points — using flexmock, so no test touches a real
    AppScale deployment.

    NOTE(review): the helpers patch sys.modules["__builtin__"], so this suite
    targets Python 2 only.
    """
    def setUp(self):
        # Blank out any EC2 credentials inherited from the caller's
        # environment so each test starts from a known state.
        os.environ["EC2_ACCESS_KEY"] = ""
        os.environ["EC2_SECRET_KEY"] = ""
    def tearDown(self):
        # Scrub credentials a test may have set so they don't leak into
        # later tests run in the same process.
        os.environ["EC2_ACCESS_KEY"] = ""
        os.environ["EC2_SECRET_KEY"] = ""
    def addMockForNoAppScalefile(self, appscale):
        """Patches open() so reading /boo/<AppScalefile> raises IOError,
        simulating a working directory without an AppScalefile.

        Args:
          appscale: The AppScale instance whose APPSCALEFILE name is mocked.
        """
        flexmock(os)
        os.should_receive("getcwd").and_return("/boo")
        mock = flexmock(sys.modules["__builtin__"])
        mock.should_call("open")  # set the fall-through for every other path
        (
            mock.should_receive("open")
            .with_args("/boo/" + appscale.APPSCALEFILE)
            .and_raise(IOError)
        )
    def addMockForAppScalefile(self, appscale, contents):
        """Patches open() so reading /boo/<AppScalefile> yields `contents`.

        Args:
          appscale: The AppScale instance whose APPSCALEFILE name is mocked.
          contents: A str, the (usually YAML) text the fake file returns.
        Returns:
          The flexmock object, so callers can stack further open()
          expectations (e.g. for locations.json).
        """
        flexmock(os)
        os.should_receive("getcwd").and_return("/boo")
        mock = flexmock(sys.modules["__builtin__"])
        mock.should_call("open")  # set the fall-through for every other path
        (
            mock.should_receive("open")
            .with_args("/boo/" + appscale.APPSCALEFILE)
            .and_return(flexmock(read=lambda: contents))
        )
        return mock
    def test_get_nodes(self):
        appscale = flexmock(AppScale())
        builtin = flexmock(sys.modules["__builtin__"])
        builtin.should_call("open")
        nodes = [{"public_ip": "blarg"}]
        appscale_yaml = {"keyname": "boo"}
        appscale.should_receive("get_locations_json_file").and_return("locations.json")
        # If the locations JSON file exists, it should return the locations as a
        # dictionary.
        builtin.should_receive("open").with_args("locations.json").and_return(
            flexmock(read=lambda: json.dumps(nodes))
        )
        self.assertEqual(nodes, appscale.get_nodes(appscale_yaml["keyname"]))
        # If the locations JSON file does not exist, it should throw an
        # AppScaleException.
        builtin.should_receive("open").with_args("locations.json").and_raise(IOError)
        with self.assertRaises(AppScaleException):
            appscale.get_nodes(appscale_yaml["keyname"])
    def test_get_head_node(self):
        shadow_node_1 = {"public_ip": "public2", "jobs": ["shadow"]}
        appengine_node = {"public_ip": "public1", "jobs": ["appengine"]}
        shadow_node_2 = {"public_ip": "public3", "jobs": ["shadow"]}
        appscale = AppScale()
        # If the list of nodes does not have a node with the shadow role, the
        # tools should raise an AppScaleException.
        with self.assertRaises(AppScaleException):
            appscale.get_head_node([appengine_node])
        # If the list of nodes contains any nodes with the shadow role, the tools
        # should return the public IP address of the first node which has that
        # role.
        self.assertEqual(
            shadow_node_1["public_ip"],
            appscale.get_head_node([shadow_node_1, appengine_node, shadow_node_2]),
        )
    def testInitWithNoAppScalefile(self):
        # calling 'appscale init cloud' if there's no AppScalefile in the local
        # directory should write a new cloud config file there
        appscale = AppScale()
        flexmock(os)
        os.should_receive("getcwd").and_return("/boo")
        flexmock(os.path)
        os.path.should_receive("exists").with_args(
            "/boo/" + appscale.APPSCALEFILE
        ).and_return(False)
        # mock out the actual writing of the template file
        flexmock(shutil)
        shutil.should_receive("copy").with_args(
            appscale.TEMPLATE_CLOUD_APPSCALEFILE, "/boo/" + appscale.APPSCALEFILE
        ).and_return()
        appscale.init("cloud")
    def testInitWithAppScalefile(self):
        # calling 'appscale init cloud' if there is an AppScalefile in the local
        # directory should throw up and die
        appscale = AppScale()
        flexmock(os)
        os.should_receive("getcwd").and_return("/boo")
        flexmock(os.path)
        os.path.should_receive("exists").with_args(
            "/boo/" + appscale.APPSCALEFILE
        ).and_return(True)
        self.assertRaises(AppScalefileException, appscale.init, "cloud")
    def testUpWithNoAppScalefile(self):
        # calling 'appscale up' if there is no AppScalefile present
        # should throw up and die
        appscale = AppScale()
        self.addMockForNoAppScalefile(appscale)
        self.assertRaises(AppScalefileException, appscale.up)
    def testUpWithClusterAppScalefile(self):
        # calling 'appscale up' if there is an AppScalefile present
        # should call appscale-run-instances with the given config
        # params. here, we assume that the file is intended for use
        # on a virtualized cluster
        appscale = AppScale()
        # Mock out the actual file reading itself, and slip in a YAML-dumped
        # file
        contents = {
            "ips_layout": {
                "master": "ip1",
                "appengine": "ip1",
                "database": "ip2",
                "zookeeper": "ip2",
            },
            "keyname": "boobazblarg",
            "group": "boobazblarg",
        }
        yaml_dumped_contents = yaml.dump(contents)
        self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        flexmock(os.path)
        os.path.should_call("exists")
        os.path.should_receive("exists").with_args(
            "/boo/" + appscale.APPSCALEFILE
        ).and_return(True)
        # for this test, let's say that we don't have an SSH key already
        # set up for ip1 and ip2
        # TODO(cgb): Add in tests where we have a key for ip1 but not ip2,
        # and the case where we have a key but it doesn't work
        key_path = os.path.expanduser("~/.appscale/boobazblarg.key")
        os.path.should_receive("exists").with_args(key_path).and_return(False)
        # finally, mock out the actual appscale tools calls. since we're running
        # via a cluster, this means we call add-keypair to set up SSH keys, then
        # run-instances to start appscale
        flexmock(AppScaleTools)
        AppScaleTools.should_receive("add_keypair")
        AppScaleTools.should_receive("run_instances")
        appscale.up()
    def testUpWithMalformedClusterAppScalefile(self):
        # if we try to use an IPs layout that isn't a dictionary, we should throw up
        # and die
        appscale = AppScale()
        # Mock out the actual file reading itself, and slip in a YAML-dumped
        # file, with an IPs layout that is a str
        contents = {
            "ips_layout": "'master' 'ip1' 'appengine' 'ip1'",
            "keyname": "boobazblarg",
            "group": "boobazblarg",
        }
        yaml_dumped_contents = yaml.dump(contents)
        self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        flexmock(os.path)
        os.path.should_call("exists")
        os.path.should_receive("exists").with_args(
            "/boo/" + appscale.APPSCALEFILE
        ).and_return(True)
        # finally, mock out the actual appscale tools calls. since we're running
        # via a cluster, this means we call add-keypair to set up SSH keys, then
        # run-instances to start appscale
        flexmock(AppScaleTools)
        AppScaleTools.should_receive("add_keypair")
        self.assertRaises(BadConfigurationException, appscale.up)
    def testUpWithCloudAppScalefile(self):
        # calling 'appscale up' if there is an AppScalefile present
        # should call appscale-run-instances with the given config
        # params. here, we assume that the file is intended for use
        # on EC2
        appscale = AppScale()
        # Mock out the actual file reading itself, and slip in a YAML-dumped
        # file
        contents = {
            "infrastructure": "ec2",
            "machine": "ami-ABCDEFG",
            "keyname": "bookey",
            "group": "boogroup",
            "min": 1,
            "max": 1,
            "zone": "my-zone-1b",
        }
        yaml_dumped_contents = yaml.dump(contents)
        self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        flexmock(os.path)
        os.path.should_call("exists")
        os.path.should_receive("exists").with_args(
            "/boo/" + appscale.APPSCALEFILE
        ).and_return(True)
        # throw in some mocks for the argument parsing
        for credential in EC2Agent.REQUIRED_CREDENTIALS:
            os.environ[credential] = "baz"
        # finally, pretend that our ec2 zone and image exists
        fake_ec2 = flexmock(name="fake_ec2")
        fake_ec2.should_receive("get_all_instances")
        fake_ec2.should_receive("get_all_zones").with_args("my-zone-1b").and_return(
            "anything"
        )
        fake_ec2.should_receive("get_image").with_args("ami-ABCDEFG").and_return()
        flexmock(boto.ec2)
        boto.ec2.should_receive("connect_to_region").with_args(
            "my-zone-1", aws_access_key_id="baz", aws_secret_access_key="baz"
        ).and_return(fake_ec2)
        # finally, mock out the actual appscale-run-instances call
        flexmock(AppScaleTools)
        AppScaleTools.should_receive("run_instances")
        appscale.up()
    def testUpWithEC2EnvironmentVariables(self):
        # if the user wants us to use their EC2 credentials when running AppScale,
        # we should make sure they get set
        appscale = AppScale()
        # Mock out the actual file reading itself, and slip in a YAML-dumped
        # file
        contents = {
            "infrastructure": "ec2",
            "machine": "ami-ABCDEFG",
            "keyname": "bookey",
            "group": "boogroup",
            "min": 1,
            "max": 1,
            "EC2_ACCESS_KEY": "access key",
            "EC2_SECRET_KEY": "secret key",
            "zone": "my-zone-1b",
        }
        yaml_dumped_contents = yaml.dump(contents)
        self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        flexmock(os.path)
        os.path.should_call("exists")
        os.path.should_receive("exists").with_args(
            "/boo/" + appscale.APPSCALEFILE
        ).and_return(True)
        # finally, pretend that our ec2 zone/image to use exist
        fake_ec2 = flexmock(name="fake_ec2")
        fake_ec2.should_receive("get_all_instances")
        fake_ec2.should_receive("get_all_zones").with_args("my-zone-1b").and_return(
            "anything"
        )
        fake_ec2.should_receive("get_image").with_args("ami-ABCDEFG").and_return()
        flexmock(boto.ec2)
        boto.ec2.should_receive("connect_to_region").with_args(
            "my-zone-1",
            aws_access_key_id="access key",
            aws_secret_access_key="secret key",
        ).and_return(fake_ec2)
        # finally, mock out the actual appscale-run-instances call
        flexmock(AppScaleTools)
        AppScaleTools.should_receive("run_instances")
        appscale.up()
        self.assertEqual("access key", os.environ["EC2_ACCESS_KEY"])
        self.assertEqual("secret key", os.environ["EC2_SECRET_KEY"])
    def testSshWithNoAppScalefile(self):
        # calling 'appscale ssh' with no AppScalefile in the local
        # directory should throw up and die
        appscale = AppScale()
        self.addMockForNoAppScalefile(appscale)
        self.assertRaises(AppScalefileException, appscale.ssh, 1)
    def testSshWithNotIntArg(self):
        # calling 'appscale ssh not-int' should throw up and die
        appscale = AppScale()
        self.addMockForAppScalefile(appscale, "")
        self.assertRaises(TypeError, appscale.ssh, "boo")
    def testSshWithNoNodesJson(self):
        # calling 'appscale ssh' when there isn't a locations.json
        # file should throw up and die
        appscale = AppScale()
        contents = {"keyname": "boo"}
        yaml_dumped_contents = yaml.dump(contents)
        mock = self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        (
            mock.should_receive("open")
            .with_args(appscale.get_locations_json_file("boo"))
            .and_raise(IOError)
        )
        self.assertRaises(AppScaleException, appscale.ssh, 0)
    def testSshWithIndexOutOfBounds(self):
        # calling 'appscale ssh 1' should ssh to the second node
        # (nodes[1]). If there's only one node in this deployment,
        # we should throw up and die
        appscale = AppScale()
        contents = {"keyname": "boo"}
        yaml_dumped_contents = yaml.dump(contents)
        one = {"public_ip": "blarg"}
        nodes = [one]
        nodes_contents = json.dumps(nodes)
        mock = self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        (
            mock.should_receive("open")
            .with_args(appscale.get_locations_json_file("boo"))
            .and_return(flexmock(read=lambda: nodes_contents))
        )
        self.assertRaises(AppScaleException, appscale.ssh, 1)
    def testSshWithIndexInBounds(self):
        # calling 'appscale ssh 1' should ssh to the second node
        # (nodes[1]). If there are two nodes in this deployment,
        # we should ssh into it successfully
        appscale = AppScale()
        contents = {"keyname": "boo"}
        yaml_dumped_contents = yaml.dump(contents)
        one = {"public_ip": "blarg"}
        two = {"public_ip": "blarg2"}
        nodes = [one, two]
        nodes_contents = json.dumps(nodes)
        mock = self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        (
            mock.should_receive("open")
            .with_args(appscale.get_locations_json_file("boo"))
            .and_return(flexmock(read=lambda: nodes_contents))
        )
        flexmock(subprocess)
        subprocess.should_receive("call").with_args(
            [
                "ssh",
                "-o",
                "StrictHostkeyChecking=no",
                "-i",
                appscale.get_key_location("boo"),
                "root@blarg2",
            ]
        ).and_return().once()
        appscale.ssh(1)
    def testStatusWithNoAppScalefile(self):
        # calling 'appscale status' with no AppScalefile in the local
        # directory should throw up and die
        appscale = AppScale()
        self.addMockForNoAppScalefile(appscale)
        self.assertRaises(AppScalefileException, appscale.status)
    def testStatusWithCloudAppScalefile(self):
        # calling 'appscale status' with an AppScalefile in the local
        # directory should collect any parameters needed for the
        # 'appscale-describe-instances' command and then exec it
        appscale = AppScale()
        # Mock out the actual file reading itself, and slip in a YAML-dumped
        # file
        contents = {
            "infrastructure": "ec2",
            "machine": "ami-ABCDEFG",
            "keyname": "bookey",
            "group": "boogroup",
            "verbose": True,
            "min": 1,
            "max": 1,
        }
        yaml_dumped_contents = yaml.dump(contents)
        self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        # finally, mock out the actual appscale-describe-instances call
        flexmock(AppScaleTools)
        AppScaleTools.should_receive("describe_instances")
        appscale.status()
    def testDeployWithNoAppScalefile(self):
        # calling 'appscale deploy' with no AppScalefile in the local
        # directory should throw up and die
        appscale = AppScale()
        self.addMockForNoAppScalefile(appscale)
        app = "/bar/app"
        self.assertRaises(AppScalefileException, appscale.deploy, app)
    def testDeployWithCloudAppScalefile(self):
        # calling 'appscale deploy app' with an AppScalefile in the local
        # directory should collect any parameters needed for the
        # 'appscale-upload-app' command and then exec it
        appscale = AppScale()
        # Mock out the actual file reading itself, and slip in a YAML-dumped
        # file
        contents = {
            "infrastructure": "ec2",
            "machine": "ami-ABCDEFG",
            "keyname": "bookey",
            "group": "boogroup",
            "verbose": True,
            "min": 1,
            "max": 1,
        }
        yaml_dumped_contents = yaml.dump(contents)
        self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        # finally, mock out the actual appscale-run-instances call
        fake_port = 8080
        fake_host = "fake_host"
        flexmock(AppScaleTools)
        AppScaleTools.should_receive("upload_app").and_return((fake_host, fake_port))
        app = "/bar/app"
        (host, port) = appscale.deploy(app)
        self.assertEqual(fake_host, host)
        self.assertEqual(fake_port, port)
    def testUndeployWithNoAppScalefile(self):
        # calling 'appscale undeploy' with no AppScalefile in the local
        # directory should throw up and die
        appscale = AppScale()
        self.addMockForNoAppScalefile(appscale)
        appid = "barapp"
        self.assertRaises(AppScalefileException, appscale.undeploy, appid)
    def testUndeployWithCloudAppScalefile(self):
        # calling 'appscale undeploy app' with an AppScalefile in the local
        # directory should collect any parameters needed for the
        # 'appscale-remove-app' command and then exec it
        appscale = AppScale()
        # Mock out the actual file reading itself, and slip in a YAML-dumped
        # file
        contents = {
            "infrastructure": "ec2",
            "machine": "ami-ABCDEFG",
            "keyname": "bookey",
            "group": "boogroup",
            "verbose": True,
            "min": 1,
            "max": 1,
        }
        yaml_dumped_contents = yaml.dump(contents)
        self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        # finally, mock out the actual appscale-run-instances call
        flexmock(AppScaleTools)
        AppScaleTools.should_receive("remove_app")
        app = "barapp"
        appscale.undeploy(app)
    def testDeployWithCloudAppScalefileAndTestFlag(self):
        # same as before, but with the 'test' flag in our AppScalefile
        appscale = AppScale()
        # Mock out the actual file reading itself, and slip in a YAML-dumped
        # file
        contents = {
            "infrastructure": "ec2",
            "machine": "ami-ABCDEFG",
            "keyname": "bookey",
            "group": "boogroup",
            "verbose": True,
            "min": 1,
            "max": 1,
            "test": True,
        }
        yaml_dumped_contents = yaml.dump(contents)
        self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        # finally, mock out the actual appscale-run-instances call
        fake_port = 8080
        fake_host = "fake_host"
        flexmock(AppScaleTools)
        AppScaleTools.should_receive("upload_app").and_return((fake_host, fake_port))
        app = "/bar/app"
        (host, port) = appscale.deploy(app)
        self.assertEqual(fake_host, host)
        self.assertEqual(fake_port, port)
    def testTailWithNoAppScalefile(self):
        # calling 'appscale tail' with no AppScalefile in the local
        # directory should throw up and die
        appscale = AppScale()
        self.addMockForNoAppScalefile(appscale)
        self.assertRaises(AppScalefileException, appscale.tail, 0, "")
    def testTailWithNotIntArg(self):
        # calling 'appscale tail not-int *' should throw up and die
        appscale = AppScale()
        self.addMockForAppScalefile(appscale, "")
        self.assertRaises(TypeError, appscale.tail, "boo", "")
    def testTailWithNoNodesJson(self):
        # calling 'appscale tail' when there isn't a locations.json
        # file should throw up and die
        appscale = AppScale()
        contents = {"keyname": "boo"}
        yaml_dumped_contents = yaml.dump(contents)
        mock = self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        (
            mock.should_receive("open")
            .with_args(appscale.get_locations_json_file("boo"))
            .and_raise(IOError)
        )
        self.assertRaises(AppScaleException, appscale.tail, 0, "")
    def testTailWithIndexOutOfBounds(self):
        # calling 'appscale tail 1 *' should tail from the second node
        # (nodes[1]). If there's only one node in this deployment,
        # we should throw up and die
        appscale = AppScale()
        contents = {"keyname": "boo"}
        yaml_dumped_contents = yaml.dump(contents)
        one = {"public_ip": "blarg"}
        nodes = [one]
        nodes_contents = json.dumps(nodes)
        mock = self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        (
            mock.should_receive("open")
            .with_args(appscale.get_locations_json_file("boo"))
            .and_return(flexmock(read=lambda: nodes_contents))
        )
        self.assertRaises(AppScaleException, appscale.tail, 1, "")
    def testTailWithIndexInBounds(self):
        # calling 'appscale tail 1 *' should tail from the second node
        # (nodes[1]). If there are two nodes in this deployment,
        # we should tail from it successfully
        appscale = AppScale()
        contents = {"keyname": "boo"}
        yaml_dumped_contents = yaml.dump(contents)
        one = {"public_ip": "blarg"}
        two = {"public_ip": "blarg2"}
        nodes = [one, two]
        nodes_contents = json.dumps(nodes)
        mock = self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        (
            mock.should_receive("open")
            .with_args(appscale.get_locations_json_file("boo"))
            .and_return(flexmock(read=lambda: nodes_contents))
        )
        flexmock(subprocess)
        subprocess.should_receive("call").with_args(
            [
                "ssh",
                "-o",
                "StrictHostkeyChecking=no",
                "-i",
                appscale.get_key_location("boo"),
                "root@blarg2",
                "tail -F /var/log/appscale/c*",
            ]
        ).and_return().once()
        appscale.tail(1, "c*")
    def testGetLogsWithNoAppScalefile(self):
        # calling 'appscale logs' with no AppScalefile in the local
        # directory should throw up and die
        appscale = AppScale()
        self.addMockForNoAppScalefile(appscale)
        self.assertRaises(AppScalefileException, appscale.logs, "")
    def testGetLogsWithKeyname(self):
        # calling 'appscale logs dir' with a keyname should produce
        # a command to exec with the --keyname flag
        appscale = AppScale()
        contents = {"keyname": "boo"}
        yaml_dumped_contents = yaml.dump(contents)
        self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        # mock out the actual call to appscale-gather-logs
        # NOTE(review): this mocks run_instances (not gather_logs) and then
        # expects BadConfigurationException — presumably the keyname-only
        # config is rejected before any tools call; confirm against
        # AppScale.logs.
        flexmock(AppScaleTools)
        AppScaleTools.should_receive("run_instances")
        self.assertRaises(BadConfigurationException, appscale.logs, "/baz")
    def testRelocateWithNoAppScalefile(self):
        # calling 'appscale relocate' with no AppScalefile in the local directory
        # should throw up and die
        appscale = AppScale()
        self.addMockForNoAppScalefile(appscale)
        self.assertRaises(AppScalefileException, appscale.relocate, "myapp", 80, 443)
    def testRelocateWithAppScalefile(self):
        # calling 'appscale relocate' with an AppScalefile in the local
        # directory should collect any parameters needed for the
        # 'appscale-relocate-app' command and then exec it
        appscale = AppScale()
        # Mock out the actual file reading itself, and slip in a YAML-dumped
        # file
        contents = {
            "infrastructure": "ec2",
            "machine": "ami-ABCDEFG",
            "keyname": "bookey",
            "group": "boogroup",
            "verbose": True,
            "min": 1,
            "max": 1,
        }
        yaml_dumped_contents = yaml.dump(contents)
        self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        # finally, mock out the actual appscale-relocate-app call
        flexmock(AppScaleTools)
        AppScaleTools.should_receive("relocate_app")
        appscale.relocate("myapp", 80, 443)
    def testGetPropertyWithNoAppScalefile(self):
        # calling 'appscale get' with no AppScalefile in the local directory
        # should throw up and die
        appscale = AppScale()
        self.addMockForNoAppScalefile(appscale)
        self.assertRaises(AppScalefileException, appscale.get, ".*")
    def testGetPropertyWithAppScalefile(self):
        # calling 'appscale get' with an AppScalefile in the local
        # directory should collect any parameters needed for the
        # 'appscale-get-property' command and then exec it
        appscale = AppScale()
        # Mock out the actual file reading itself, and slip in a YAML-dumped
        # file
        contents = {
            "infrastructure": "ec2",
            "machine": "ami-ABCDEFG",
            "keyname": "bookey",
            "group": "boogroup",
            "verbose": True,
            "min": 1,
            "max": 1,
        }
        yaml_dumped_contents = yaml.dump(contents)
        self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        # finally, mock out the actual appscale-get-property call
        flexmock(AppScaleTools)
        AppScaleTools.should_receive("get_property")
        appscale.get(".*")
    def testSetPropertyWithNoAppScalefile(self):
        # calling 'appscale set' with no AppScalefile in the local directory
        # should throw up and die
        appscale = AppScale()
        self.addMockForNoAppScalefile(appscale)
        self.assertRaises(AppScalefileException, appscale.set, "key", "value")
    def testSetPropertyWithAppScalefile(self):
        # calling 'appscale set' with an AppScalefile in the local
        # directory should collect any parameters needed for the
        # 'appscale-get-property' command and then exec it
        appscale = AppScale()
        # Mock out the actual file reading itself, and slip in a YAML-dumped
        # file
        contents = {
            "infrastructure": "ec2",
            "machine": "ami-ABCDEFG",
            "keyname": "bookey",
            "group": "boogroup",
            "verbose": True,
            "min": 1,
            "max": 1,
        }
        yaml_dumped_contents = yaml.dump(contents)
        self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        # finally, mock out the actual appscale-set-property call
        flexmock(AppScaleTools)
        AppScaleTools.should_receive("set_property")
        appscale.set("key", "value")
    def testDestroyWithNoAppScalefile(self):
        # calling 'appscale destroy' with no AppScalefile in the local
        # directory should throw up and die
        appscale = AppScale()
        self.addMockForNoAppScalefile(appscale)
        self.assertRaises(AppScalefileException, appscale.destroy)
    def testDestroyWithCloudAppScalefile(self):
        # calling 'appscale destroy' with an AppScalefile in the local
        # directory should collect any parameters needed for the
        # 'appscale-terminate-instances' command and then exec it
        appscale = AppScale()
        # Mock out the actual file reading itself, and slip in a YAML-dumped
        # file
        contents = {
            "infrastructure": "ec2",
            "machine": "ami-ABCDEFG",
            "keyname": "bookey",
            "group": "boogroup",
            "verbose": True,
            "min": 1,
            "max": 1,
        }
        yaml_dumped_contents = yaml.dump(contents)
        self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        # finally, mock out the actual appscale-terminate-instances call
        flexmock(AppScaleTools)
        AppScaleTools.should_receive("terminate_instances")
        appscale.destroy()
    def testDestroyWithEC2EnvironmentVariables(self):
        # if the user wants us to use their EC2 credentials when running AppScale,
        # we should make sure they get set
        appscale = AppScale()
        # Mock out the actual file reading itself, and slip in a YAML-dumped
        # file
        contents = {
            "infrastructure": "ec2",
            "machine": "ami-ABCDEFG",
            "keyname": "bookey",
            "group": "boogroup",
            "min": 1,
            "max": 1,
            "EC2_ACCESS_KEY": "access key",
            "EC2_SECRET_KEY": "secret key",
        }
        yaml_dumped_contents = yaml.dump(contents)
        self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        # finally, mock out the actual appscale-terminate-instances call
        flexmock(AppScaleTools)
        AppScaleTools.should_receive("terminate_instances")
        appscale.destroy()
        self.assertEqual("access key", os.environ["EC2_ACCESS_KEY"])
        self.assertEqual("secret key", os.environ["EC2_SECRET_KEY"])
    def testCleanWithNoAppScalefile(self):
        # calling 'appscale clean' with no AppScalefile in the local
        # directory should throw up and die
        appscale = AppScale()
        self.addMockForNoAppScalefile(appscale)
        self.assertRaises(AppScalefileException, appscale.clean)
    def testCleanInCloudDeployment(self):
        # calling 'appscale clean' in a cloud deployment should throw up and die
        appscale = AppScale()
        # Mock out the actual file reading itself, and slip in a YAML-dumped
        # file
        contents = {
            "infrastructure": "ec2",
            "machine": "ami-ABCDEFG",
            "keyname": "bookey",
            "group": "boogroup",
            "verbose": True,
            "min": 1,
            "max": 1,
        }
        yaml_dumped_contents = yaml.dump(contents)
        self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        self.assertRaises(BadConfigurationException, appscale.clean)
    def testCleanInClusterDeployment(self):
        # calling 'appscale clean' in a cluster deployment should ssh into each of
        # the boxes specified in the ips_layout and run the terminate script
        # Mock out the actual file reading itself, and slip in a YAML-dumped
        # file
        contents = {
            "ips_layout": {"controller": "public1", "servers": ["public2", "public3"]},
            "test": True,
        }
        yaml_dumped_contents = yaml.dump(contents)
        flexmock(RemoteHelper)
        RemoteHelper.should_receive("ssh").with_args(
            re.compile("public[123]"), "appscale", str, False
        )
        flexmock(LocalState)
        LocalState.should_receive("cleanup_appscale_files").with_args("appscale")
        appscale = AppScale()
        self.addMockForAppScalefile(appscale, yaml_dumped_contents)
        expected = ["public1", "public2", "public3"]
        self.assertEqual(expected, appscale.clean())
<|endoftext|> |
<|endoftext|>""" Backup & Recovery helper functions. """
import logging
import os
import re
import shutil
import SOAPpy
import statvfs
import sys
import tarfile
import time
from os.path import getsize
import backup_exceptions
import backup_recovery_constants
import gcs_helper
from backup_recovery_constants import APP_BACKUP_DIR_LOCATION
from backup_recovery_constants import APP_DIR_LOCATION
from backup_recovery_constants import BACKUP_DIR_LOCATION
from backup_recovery_constants import BACKUP_ROLLBACK_SUFFIX
from backup_recovery_constants import StorageTypes
sys.path.append(os.path.join(os.path.dirname(__file__), "../../lib"))
import appscale_info
from constants import APPSCALE_DATA_DIR
from google.appengine.api.appcontroller_client import AppControllerClient
# The port that the SOAP server listens to.
UA_SERVER_PORT = 4343
def delete_local_backup_file(local_file):
    """Removes the local backup file.

    Args:
      local_file: A str, the path to the backup file to delete.
    """
    deleted = remove(local_file)
    if not deleted:
        # Best-effort cleanup: a missing file is only worth a warning.
        logging.warning(
            "No local backup file '{0}' to delete. Skipping...".format(local_file)
        )
def delete_secondary_backup(base_path):
    """Deletes the secondary backup if it exists, upon successful backup.

    Args:
      base_path: A str, the full path of the backup file without the secondary
        suffix.
    """
    secondary_path = "{0}{1}".format(base_path, BACKUP_ROLLBACK_SUFFIX)
    if not remove(secondary_path):
        # Nothing to roll back from; not an error.
        logging.warning("No secondary backup to remove. Skipping...")
def does_file_exist(path):
"""Checks if the given file is in the local filesystem.
Args:
path: A str, the path to the file.
Returns:
True on success, False otherwise.
"""
return os.path.isfile(path)
def enough_disk_space(service):
    """Checks if there's enough available disk space for a new backup.

    Args:
      service: A str, the service whose backup size is being estimated.
    Returns:
      True if the estimated backup fits within the padded free space,
      False otherwise.
    """
    available_space = get_available_disk_space()
    logging.debug("Available space: {0}".format(available_space))
    backup_size = get_backup_size(service)
    logging.debug("Backup size: {0}".format(backup_size))
    # Leave headroom: the backup may only consume a padded fraction of the
    # currently free space.
    fits = backup_size <= available_space * backup_recovery_constants.PADDING_PERCENTAGE
    if not fits:
        logging.warning("Not enough space for a backup.")
    return fits
def get_available_disk_space():
    """Returns the amount of available disk space under /opt/appscale.

    Returns:
      An int, the available disk space in bytes.
    """
    backups_parent = os.path.dirname(BACKUP_DIR_LOCATION)
    fs_stats = os.statvfs(backups_parent)
    # blocks available to unprivileged users * fundamental block size
    return fs_stats[statvfs.F_BAVAIL] * fs_stats[statvfs.F_BSIZE]
def get_backup_size(service):
    """Sums up the size of the snapshot files that make up the backup for the
    given service.

    Args:
      service: A str, the service for which we'll calculate the backup size.
    Returns:
      An int, the total size of the files consisting the backup in bytes.
    """
    return sum(getsize(backup_file) for backup_file in get_snapshot_paths(service))
def get_snapshot_paths(service):
    """Returns a list of paths holding critical data for the given service.

    Args:
      service: A str, the service for which we're getting the data paths.
        Only Cassandra is currently supported; any other value yields [].
    Returns:
      A list of full directory paths whose path contains 'snapshots'.
    """
    file_list = []
    # Only Cassandra keeps on-disk snapshots that we know how to collect.
    if service != "cassandra":
        return file_list
    look_for = "snapshots"
    data_dir = "{0}/{1}".format(APPSCALE_DATA_DIR, service)
    # os.walk yields (dirpath, dirnames, filenames); only dirpath is needed.
    # The previous version bound the filenames list to a variable named
    # `file`, shadowing the builtin and wrongly suggesting that files (not
    # directory paths) were being collected.
    for dirpath, _, _ in os.walk(data_dir):
        if look_for in dirpath:
            file_list.append(dirpath)
    logging.debug("List of data paths for '{0}': {1}".format(service, file_list))
    return file_list
def move_secondary_backup(base_path):
    """Moves the secondary backup back in place, if it exists, upon an
    unsuccessful backup attempt.

    Args:
      base_path: A str, the final full path of the backup file after this move.
    """
    rollback_copy = "{0}{1}".format(base_path, BACKUP_ROLLBACK_SUFFIX)
    if not rename(rollback_copy, base_path):
        # No rollback copy exists; nothing to restore.
        logging.warning("No secondary backup to restore. Skipping...")
def mkdir(path):
"""Creates a dir with the given path.
Args:
path: A str, the name of the dir to create.
Returns:
True on success, False otherwise.
"""
try:
os.mkdir(path)
except OSError:
logging.error("OSError while creating dir '{0}'".format(path))
return False
return True
def makedirs(path):
"""Creates a dir with the given path and all directories in between.
Args:
path: A str, the name of the dir to create.
Returns:
True on success, False otherwise.
"""
try:
os.makedirs(path)
except OSError:
logging.error("OSError while creating dir '{0}'".format(path))
return False
return True
def rename(source, destination):
"""Renames source file into destination.
Args:
source: A str, the path of the file to rename.
destination: A str, the destination path.
Returns:
True on success, False otherwise.
"""
try:
os.rename(source, destination)
except OSError:
logging.error(
"OSError while renaming '{0}' to '{1}'".format(source, destination)
)
return False
return True
def remove(path):
"""Deletes the given file from the filesystem.
Args:
path: A str, the path of the file to delete.
Returns:
True on success, False otherwise.
"""
try:
os.remove(path)
except OSError:
logging.error("OSError while deleting '{0}'".format(path))
return False
return True
def tar_backup_files(file_paths, target):
    """Tars all snapshot files for a given snapshot name.

    Args:
      file_paths: A list of files to tar up.
      target: A str, the full path to the tar file to be created.
    Returns:
      The path to the tar file created.
    """
    backup_file_location = target
    # Preserve the previous backup (if any) under a rollback suffix so a
    # failed run can be rolled back by move_secondary_backup().
    rollback_copy = "{0}{1}".format(backup_file_location, BACKUP_ROLLBACK_SUFFIX)
    if not rename(backup_file_location, rollback_copy):
        logging.warning(
            "'{0}' not found. Skipping file rename...".format(backup_file_location)
        )
    # Write every snapshot path into a single (uncompressed) tarball.
    tar = tarfile.open(backup_file_location, "w")
    for name in file_paths:
        tar.add(name)
    tar.close()
    return backup_file_location
def untar_backup_files(source):
"""Restores a previous backup into the Cassandra directory structure
from a tar ball.
Args:
source: A str, the path to the backup tar.
Raises:
BRException: On untar issues.
"""
logging.info("Untarring backup file '{0}'...".format(source))
try:
tar = tarfile.open(source, "r:gz")
tar.extractall(path="/")
tar.close()
except tarfile.TarError as tar_error:
logging.exception(tar_error)
raise backup_exceptions.BRException(
"Exception while untarring backup file '{0}'.".format(source)
)
logging.info("Done untarring '{0}'.".format(source))
def app_backup(storage, full_bucket_name=None):
    """Saves the app source code at the backups location on the filesystem.

    Walks every app tar under APP_DIR_LOCATION, copies each into
    APP_BACKUP_DIR_LOCATION and, for GCS storage, also uploads it to the
    given bucket. On any failure the partially-copied tars are cleaned up.

    Args:
      storage: A str, one of the StorageTypes class members.
      full_bucket_name: A str, the name of the backup file to upload to remote
        storage.
    Returns:
      True on success, False otherwise.
    """
    # Create app backups dir if it doesn't exist.
    if not makedirs(APP_BACKUP_DIR_LOCATION):
        logging.warning(
            "Dir '{0}' already exists. Skipping dir creation...".format(
                APP_BACKUP_DIR_LOCATION
            )
        )
    for dir_path, _, filenames in os.walk(APP_DIR_LOCATION):
        for filename in filenames:
            # Copy source code tars to backups location.
            source = "{0}/{1}".format(dir_path, filename)
            destination = "{0}/{1}".format(APP_BACKUP_DIR_LOCATION, filename)
            try:
                shutil.copy(source, destination)
            # NOTE(review): bare except swallows all errors (incl.
            # KeyboardInterrupt); presumably intentional best-effort
            # behavior — confirm before narrowing.
            except:
                logging.error("Error while backing up '{0}'. ".format(source))
                delete_app_tars(APP_BACKUP_DIR_LOCATION)
                return False
            # Upload to GCS.
            if storage == StorageTypes.GCS:
                # NOTE(review): the upload source is rebuilt from
                # APP_DIR_LOCATION, not dir_path — for files found in
                # subdirectories of the walk these differ; verify apps only
                # ever live directly under APP_DIR_LOCATION.
                source = "{0}/{1}".format(APP_DIR_LOCATION, filename)
                destination = "{0}/apps/{1}".format(full_bucket_name, filename)
                logging.debug("Destination: {0}".format(destination))
                if not gcs_helper.upload_to_bucket(destination, source):
                    logging.error("Error while uploading '{0}' to GCS. ".format(source))
                    delete_app_tars(APP_BACKUP_DIR_LOCATION)
                    return False
    return True
def app_restore(storage, bucket_name=None):
    """Restores the app source code from the backups location on the filesystem.

    Args:
        storage: A str, one of the StorageTypes class members.
        bucket_name: A str, the name of the bucket to restore apps from.
    Returns:
        True on success, False otherwise.
    """
    # Make sure the local staging directory for app tars exists.
    if not makedirs(APP_BACKUP_DIR_LOCATION):
        logging.warning(
            "Dir '{0}' already exists. Skipping dir creation...".format(
                APP_BACKUP_DIR_LOCATION
            )
        )
    # Pull every app archive out of GCS into the staging directory.
    if storage == StorageTypes.GCS:
        for remote_path in gcs_helper.list_bucket(bucket_name):
            if not remote_path.startswith(gcs_helper.APPS_GCS_PREFIX):
                continue
            # Strip the GCS prefix to recover the bare archive name,
            # e.g. apps/myapp.tar.gz -> myapp.tar.gz.
            archive_name = remote_path[len(gcs_helper.APPS_GCS_PREFIX):]
            gcs_source = "gs://{0}/{1}".format(bucket_name, remote_path)
            local_target = "{0}/{1}".format(APP_BACKUP_DIR_LOCATION, archive_name)
            if not gcs_helper.download_from_bucket(gcs_source, local_target):
                logging.error(
                    "Error while downloading '{0}' from GCS.".format(gcs_source)
                )
                delete_app_tars(APP_BACKUP_DIR_LOCATION)
                return False
    # Everything staged locally gets deployed in one pass.
    staged_apps = [
        os.path.join(APP_BACKUP_DIR_LOCATION, name)
        for name in os.listdir(APP_BACKUP_DIR_LOCATION)
    ]
    if not deploy_apps(staged_apps):
        logging.error(
            "Failed to successfully deploy one or more of the "
            "following apps: {0}".format(staged_apps)
        )
        return False
    return True
def delete_app_tars(location):
    """Deletes applications tars from the designated location.

    Args:
        location: A str, the path to the application tar(s) to be deleted.
    Returns:
        True on success, False otherwise.
    """
    # Walk the whole tree, bailing out on the first failed removal.
    for current_dir, _, files in os.walk(location):
        full_paths = ("{0}/{1}".format(current_dir, name) for name in files)
        for path in full_paths:
            if not remove(path):
                return False
    return True
def deploy_apps(app_paths):
    """Deploys all apps that reside in /opt/appscale/apps.

    Args:
        app_paths: A list of the full paths of the apps to be deployed.
    Returns:
        True on success, False otherwise.
    """
    uaserver = SOAPpy.SOAPProxy(
        "https://{0}:{1}".format(appscale_info.get_db_master_ip(), UA_SERVER_PORT)
    )
    acc = AppControllerClient(appscale_info.get_login_ip(), appscale_info.get_secret())
    # Wait for Cassandra to come up after a restore.
    time.sleep(15)
    for app_path in app_paths:
        # Extract the app ID: everything before the first dot of the file
        # name. Using the basename avoids mis-parsing when a directory
        # component contains a dot (the old code searched the whole path
        # for the first '.').
        app_id = os.path.basename(app_path).split(".")[0]
        if not app_id:
            logging.error(
                "Malformed source code archive. Cannot complete "
                "application recovery for '{}'. Aborting...".format(app_path)
            )
            return False
        # Retrieve app admin via uaserver.
        app_data = uaserver.get_app_data(app_id, appscale_info.get_secret())
        app_admin_re = re.search(r"\napp_owner:(.+)\n", app_data)
        if app_admin_re:
            app_admin = app_admin_re.group(1)
        else:
            logging.error(
                "Missing application data. Cannot complete application "
                "recovery for '{}'. Aborting...".format(app_id)
            )
            return False
        # Guard against archives with no extension; the old code raised
        # AttributeError on a missing regex match.
        suffix_match = re.search(r"\.(.*)\Z", app_path)
        if not suffix_match:
            logging.error(
                "Malformed source code archive. Cannot complete "
                "application recovery for '{}'. Aborting...".format(app_path)
            )
            return False
        file_suffix = suffix_match.group(1)
        logging.warning(
            "Restoring app '{}', from '{}', with owner '{}'.".format(
                app_id, app_path, app_admin
            )
        )
        acc.upload_app(app_path, file_suffix, app_admin)
    return True
<|endoftext|> |
<|endoftext|>""" This process grooms the datastore cleaning up old state and
calculates datastore statistics. Removes tombstoned items for garbage
collection.
"""
import datetime
import logging
import os
import random
import re
import sys
import threading
import time
import appscale_datastore_batch
import dbconstants
import datastore_server
import entity_utils
from zkappscale import zktransaction as zk
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore_distributed
from google.appengine.api.memcache import memcache_distributed
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import entity_pb
from google.appengine.datastore.datastore_query import Cursor
from google.appengine.ext import db
from google.appengine.ext.db import stats
from google.appengine.ext.db import metadata
from google.appengine.api import datastore_errors
sys.path.append(os.path.join(os.path.dirname(__file__), "../lib/"))
import appscale_info
import constants
sys.path.append(os.path.join(os.path.dirname(__file__), "../AppDashboard/lib/"))
from app_dashboard_data import InstanceInfo
from app_dashboard_data import ServerStatus
from app_dashboard_data import RequestInfo
from dashboard_logs import RequestLogLine
sys.path.append(os.path.join(os.path.dirname(__file__), "../AppTaskQueue/"))
from distributed_tq import TaskName
class DatastoreGroomer(threading.Thread):
"""Scans the entire database for each application."""
# The amount of seconds between polling to get the groomer lock.
# Each datastore server does this poll, so it happens the number
# of datastore servers within this lock period.
LOCK_POLL_PERIOD = 4 * 60 * 60 # <- 4 hours
# Retry sleep on datastore error in seconds.
DB_ERROR_PERIOD = 30
# The number of entities retrieved in a datastore request.
BATCH_SIZE = 100
# Any kind that is of __*__ is private and should not have stats.
PRIVATE_KINDS = "__(.*)__"
# Any kind that is of _*_ is protected and should not have stats.
PROTECTED_KINDS = "_(.*)_"
# The amount of time in seconds before we want to clean up task name holders.
TASK_NAME_TIMEOUT = 24 * 60 * 60
# The amount of time before logs are considered too old.
LOG_STORAGE_TIMEOUT = 24 * 60 * 60 * 7
# Do not generate stats for AppScale internal apps.
APPSCALE_APPLICATIONS = ["apichecker", "appscaledashboard"]
# A sentinel value to signify that this app does not have composite indexes.
NO_COMPOSITES = "NO_COMPS_INDEXES_HERE"
# The amount of time in seconds dashboard data should be kept around for.
DASHBOARD_DATA_TIMEOUT = 60 * 60
# The dashboard types we want to clean up after.
DASHBOARD_DATA_MODELS = [InstanceInfo, ServerStatus, RequestInfo]
# The number of dashboard entities to grab at a time. Makes the cleanup
# process have an upper limit on each run.
DASHBOARD_BATCH = 1000
# The path in ZooKeeper where the groomer state is stored.
GROOMER_STATE_PATH = "/appscale/groomer_state"
# The characters used to separate values when storing the groomer state.
GROOMER_STATE_DELIMITER = "||"
# The ID for the task to clean up entities.
CLEAN_ENTITIES_TASK = "entities"
# The ID for the task to clean up ascending indices.
CLEAN_ASC_INDICES_TASK = "asc-indices"
# The ID for the task to clean up descending indices.
CLEAN_DSC_INDICES_TASK = "dsc-indices"
# The ID for the task to clean up kind indices.
CLEAN_KIND_INDICES_TASK = "kind-indices"
# The ID for the task to clean up old logs.
CLEAN_LOGS_TASK = "logs"
# The ID for the task to clean up old tasks.
CLEAN_TASKS_TASK = "tasks"
# The ID for the task to clean up old dashboard items.
CLEAN_DASHBOARD_TASK = "dashboard"
# Log progress every time this many seconds have passed.
LOG_PROGRESS_FREQUENCY = 60 * 5
def __init__(self, zoo_keeper, table_name, ds_path):
"""Constructor.
Args:
zk: ZooKeeper client.
table_name: The database used (ie, cassandra)
ds_path: The connection path to the datastore_server.
"""
log_format = logging.Formatter(
"%(asctime)s %(levelname)s %(filename)s: " "%(lineno)s %(message)s"
)
logging.getLogger().handlers[0].setFormatter(log_format)
logging.info("Logging started")
threading.Thread.__init__(self)
self.zoo_keeper = zoo_keeper
self.table_name = table_name
self.db_access = None
self.ds_access = None
self.datastore_path = ds_path
self.stats = {}
self.namespace_info = {}
self.num_deletes = 0
self.composite_index_cache = {}
self.entities_checked = 0
self.journal_entries_cleaned = 0
self.index_entries_checked = 0
self.index_entries_delete_failures = 0
self.index_entries_cleaned = 0
self.last_logged = time.time()
self.groomer_state = []
def stop(self):
"""Stops the groomer thread."""
self.zoo_keeper.close()
    def run(self):
        """Starts the main loop of the groomer thread.

        Loops forever: tries to take the cluster-wide groomer lock, runs a
        grooming pass while holding it, then sleeps a random interval before
        trying again so concurrent datastore servers stagger their attempts.
        """
        while True:
            logging.debug("Trying to get groomer lock.")
            if self.get_groomer_lock():
                logging.info("Got the groomer lock.")
                self.run_groomer()
                # Always attempt to release the lock; both ZK exception types
                # are logged rather than propagated so the loop survives.
                try:
                    self.zoo_keeper.release_lock_with_path(zk.DS_GROOM_LOCK_PATH)
                except zk.ZKTransactionException as zk_exception:
                    logging.error(
                        "Unable to release zk lock {0}.".format(str(zk_exception))
                    )
                except zk.ZKInternalException as zk_exception:
                    logging.error(
                        "Unable to release zk lock {0}.".format(str(zk_exception))
                    )
            else:
                logging.info("Did not get the groomer lock.")
            # Randomized sleep (1s .. LOCK_POLL_PERIOD) spreads lock attempts
            # across the datastore servers.
            sleep_time = random.randint(1, self.LOCK_POLL_PERIOD)
            logging.info("Sleeping for {:.1f} minutes.".format(sleep_time / 60.0))
            time.sleep(sleep_time)
def get_groomer_lock(self):
"""Tries to acquire the lock to the datastore groomer.
Returns:
True on success, False otherwise.
"""
return self.zoo_keeper.get_lock_with_path(zk.DS_GROOM_LOCK_PATH)
def get_entity_batch(self, last_key):
"""Gets a batch of entites to operate on.
Args:
last_key: The last key from a previous query.
Returns:
A list of entities.
"""
return self.db_access.range_query(
dbconstants.APP_ENTITY_TABLE,
dbconstants.APP_ENTITY_SCHEMA,
last_key,
"",
self.BATCH_SIZE,
start_inclusive=False,
)
def reset_statistics(self):
"""Reinitializes statistics."""
self.stats = {}
self.namespace_info = {}
self.num_deletes = 0
self.journal_entries_cleaned = 0
def remove_deprecated_dashboard_data(self, model_type):
"""Remove entities that do not have timestamps in Dashboard data.
AppScale 2.3 and earlier lacked a timestamp attribute.
Args:
model_type: A class type for a ndb model.
"""
query = model_type.query()
entities = query.fetch(self.DASHBOARD_BATCH)
counter = 0
for entity in entities:
if not hasattr(entity, "timestamp"):
entity.key.delete()
counter += 1
if counter > 0:
logging.warning(
"Removed {0} deprecated {1} dashboard entities".format(
counter, entity._get_kind()
)
)
    def remove_old_dashboard_data(self):
        """Removes old statistics from the AppScale dashboard application.

        Walks each model type in DASHBOARD_DATA_MODELS, deleting entities
        older than DASHBOARD_DATA_TIMEOUT in BATCH_SIZE pages. Progress is
        checkpointed into the groomer state so an interrupted run resumes
        from the last model/cursor instead of starting over.
        """
        last_cursor = None
        last_model = None
        # If we have state information beyond what function to use,
        # load the last seen model and cursor if available.
        if (
            len(self.groomer_state) > 1
            and self.groomer_state[0] == self.CLEAN_DASHBOARD_TASK
        ):
            last_model = self.DASHBOARD_DATA_MODELS[int(self.groomer_state[1])]
            if len(self.groomer_state) > 2:
                last_cursor = Cursor(self.groomer_state[2])
        # Point the datastore stubs at the dashboard application.
        self.register_db_accessor(constants.DASHBOARD_APP_ID)
        timeout = datetime.datetime.utcnow() - datetime.timedelta(
            seconds=self.DASHBOARD_DATA_TIMEOUT
        )
        for model_number in range(len(self.DASHBOARD_DATA_MODELS)):
            model_type = self.DASHBOARD_DATA_MODELS[model_number]
            # Skip models already fully processed in an interrupted run.
            if last_model and model_type != last_model:
                continue
            counter = 0
            while True:
                query = model_type.query().filter(model_type.timestamp < timeout)
                entities, next_cursor, more = query.fetch_page(
                    self.BATCH_SIZE, start_cursor=last_cursor
                )
                for entity in entities:
                    entity.key.delete()
                    counter += 1
                # Throttled progress logging.
                if time.time() > self.last_logged + self.LOG_PROGRESS_FREQUENCY:
                    logging.info(
                        "Removed {} {} entities.".format(
                            counter, model_type.__class__.__name__
                        )
                    )
                    self.last_logged = time.time()
                if more:
                    # Persist the cursor so a restart resumes mid-model.
                    last_cursor = next_cursor
                    self.update_groomer_state(
                        [
                            self.CLEAN_DASHBOARD_TASK,
                            str(model_number),
                            last_cursor.urlsafe(),
                        ]
                    )
                else:
                    break
            if model_number != len(self.DASHBOARD_DATA_MODELS) - 1:
                # Checkpoint that the next model is up; reset per-model state.
                self.update_groomer_state(
                    [self.CLEAN_DASHBOARD_TASK, str(model_number + 1)]
                )
                last_model = None
                last_cursor = None
            if counter > 0:
                logging.info(
                    "Removed {0} {1} dashboard entities".format(counter, model_type)
                )
            # Do a scan of all entities and remove any that
            # do not have timestamps for AppScale versions 2.3 and before.
            # This may take some time on the initial run, but subsequent runs
            # should be quick given a low dashboard data timeout.
            self.remove_deprecated_dashboard_data(model_type)
        return
    def clean_journal_entries(self, txn_id, key):
        """Remove journal entries that are no longer needed. Assumes
        transaction numbers are only increasing.

        Args:
            txn_id: An int of the transaction number to delete up to.
            key: A str, the entity table key for which we are deleting.
        Returns:
            True on success, False otherwise.
        """
        # Transaction 0 means there is no journal history to prune.
        if txn_id == 0:
            return True
        start_row = datastore_server.DatastoreDistributed.get_journal_key(key, 0)
        end_row = datastore_server.DatastoreDistributed.get_journal_key(
            key, int(txn_id) - 1
        )
        last_key = start_row
        keys_to_delete = []
        while True:
            try:
                # Delete-driven pagination: rows removed by batch_delete no
                # longer appear, so re-querying from the same start key
                # advances through the range.
                results = self.db_access.range_query(
                    dbconstants.JOURNAL_TABLE,
                    dbconstants.JOURNAL_SCHEMA,
                    last_key,
                    end_row,
                    self.BATCH_SIZE,
                    start_inclusive=False,
                    end_inclusive=True,
                )
                if len(results) == 0:
                    return True
                keys_to_delete = []
                for item in results:
                    keys_to_delete.append(list(item.keys())[0])
                self.db_access.batch_delete(dbconstants.JOURNAL_TABLE, keys_to_delete)
                self.journal_entries_cleaned += len(keys_to_delete)
            except dbconstants.AppScaleDBConnectionError as db_error:
                logging.error(
                    "Error hard deleting keys {0} --> {1}".format(
                        keys_to_delete, db_error
                    )
                )
                logging.error("Backing off!")
                time.sleep(self.DB_ERROR_PERIOD)
                return False
            except Exception as exception:
                # Catch-all so a single bad key cannot crash the groomer
                # thread; the error is logged and this pass gives up.
                logging.error("Caught unexcepted exception {0}".format(exception))
                logging.error("Backing off!")
                time.sleep(self.DB_ERROR_PERIOD)
                return False
def hard_delete_row(self, row_key):
"""Does a hard delete on a given row key to the entity
table.
Args:
row_key: A str representing the row key to delete.
Returns:
True on success, False otherwise.
"""
try:
self.db_access.batch_delete(dbconstants.APP_ENTITY_TABLE, [row_key])
except dbconstants.AppScaleDBConnectionError as db_error:
logging.error("Error hard deleting key {0}-->{1}".format(row_key, db_error))
return False
except Exception as exception:
logging.error("Caught unexcepted exception {0}".format(exception))
return False
return True
    def load_composite_cache(self, app_id):
        """Load the composite index cache for an application ID.

        Args:
            app_id: A str, the application ID.
        Returns:
            True if the application has composites. False otherwise.
        """
        start_key = datastore_server.DatastoreDistributed.get_meta_data_key(
            app_id, "index", ""
        )
        end_key = datastore_server.DatastoreDistributed.get_meta_data_key(
            app_id, "index", dbconstants.TERMINATING_STRING
        )
        # NOTE(review): METADATA_TABLE is passed as both the table name and
        # the column-name list -- a schema constant looks intended for the
        # second argument; confirm against range_query's signature.
        results = self.db_access.range_query(
            dbconstants.METADATA_TABLE,
            dbconstants.METADATA_TABLE,
            start_key,
            end_key,
            dbconstants.MAX_NUMBER_OF_COMPOSITE_INDEXES,
        )
        # Collect the serialized index protos out of the row dicts.
        list_result = []
        for list_item in results:
            for _, value in list_item.items():
                list_result.append(value["data"])
        # Default to the sentinel; overwritten below if any indexes exist.
        self.composite_index_cache[app_id] = self.NO_COMPOSITES
        kind_index_dictionary = {}
        # Group the parsed composite indexes by entity kind.
        for index in list_result:
            new_index = entity_pb.CompositeIndex()
            new_index.ParseFromString(index)
            kind = new_index.definition().entity_type()
            if kind in kind_index_dictionary:
                kind_index_dictionary[kind].append(new_index)
            else:
                kind_index_dictionary[kind] = [new_index]
        if kind_index_dictionary:
            self.composite_index_cache[app_id] = kind_index_dictionary
            return True
        return False
    def acquire_lock_for_key(self, app_id, key, retries, retry_time):
        """Acquires a lock for a given entity key.

        Args:
            app_id: The application ID.
            key: A string containing an entity key.
            retries: An integer specifying the number of times to retry.
            retry_time: How many seconds to wait before each retry.
        Returns:
            A transaction ID.
        Raises:
            ZKTransactionException if unable to acquire a lock from ZooKeeper.
        """
        # Locks are taken on the entity group's root key, not the entity key.
        root_key = key.split(dbconstants.KIND_SEPARATOR)[0]
        root_key += dbconstants.KIND_SEPARATOR
        txn_id = self.zoo_keeper.get_transaction_id(app_id, is_xg=False)
        try:
            self.zoo_keeper.acquire_lock(app_id, txn_id, root_key)
        except zk.ZKTransactionException as zkte:
            logging.warning(
                "Concurrent transaction exception for app id {} with "
                "info {}".format(app_id, str(zkte))
            )
            if retries > 0:
                logging.info(
                    "Trying again to acquire lock info {} with retry #{}".format(
                        str(zkte), retries
                    )
                )
                time.sleep(retry_time)
                # Recursive retry; each attempt opens a fresh transaction.
                return self.acquire_lock_for_key(
                    app_id=app_id, key=key, retries=retries - 1, retry_time=retry_time
                )
            # Out of retries: invalidate this transaction before re-raising.
            self.zoo_keeper.notify_failed_transaction(app_id, txn_id)
            raise zkte
        return txn_id
    def release_lock_for_key(self, app_id, key, txn_id, retries, retry_time):
        """Releases a lock for a given entity key.

        Args:
            app_id: The application ID.
            key: A string containing an entity key.
            txn_id: A transaction ID.
            retries: An integer specifying the number of times to retry.
            retry_time: How many seconds to wait before each retry.
        """
        # The root key is computed for symmetry with acquire_lock_for_key,
        # though release_lock itself only takes the transaction ID.
        root_key = key.split(dbconstants.KIND_SEPARATOR)[0]
        root_key += dbconstants.KIND_SEPARATOR
        try:
            self.zoo_keeper.release_lock(app_id, txn_id)
        except zk.ZKTransactionException as zkte:
            logging.warning(str(zkte))
            if retries > 0:
                logging.info(
                    "Trying again to release lock {} with retry #{}".format(
                        txn_id, retries
                    )
                )
                time.sleep(retry_time)
                self.release_lock_for_key(
                    app_id=app_id,
                    key=key,
                    txn_id=txn_id,
                    retries=retries - 1,
                    retry_time=retry_time,
                )
            else:
                # Could not release after all retries; mark the transaction
                # failed so it is eventually cleaned up.
                self.zoo_keeper.notify_failed_transaction(app_id, txn_id)
    def fetch_entity_dict_for_references(self, references):
        """Fetches a dictionary of valid entities for a list of references.

        Args:
            references: A list of index references to entities.
        Returns:
            A dictionary of validated entities, keyed by entity-table key.
        """
        keys = []
        for item in references:
            keys.append(list(item.values())[0][self.ds_access.INDEX_REFERENCE_COLUMN])
        # De-duplicate: multiple index entries can reference the same entity.
        keys = list(set(keys))
        entities = self.db_access.batch_get_entity(
            dbconstants.APP_ENTITY_TABLE, keys, dbconstants.APP_ENTITY_SCHEMA
        )
        # The datastore needs to know the app ID. The indices could be scattered
        # across apps.
        entities_by_app = {}
        for key in entities:
            app = key.split(self.ds_access._SEPARATOR)[0]
            if app not in entities_by_app:
                entities_by_app[app] = {}
            entities_by_app[app][key] = entities[key]
        entities = {}
        # Validate per app: drop entries that fail journal validation or are
        # tombstones, then flatten back into a single key -> entity dict.
        for app in entities_by_app:
            app_entities = entities_by_app[app]
            app_entities = self.ds_access.validated_result(app, app_entities)
            app_entities = self.ds_access.remove_tombstoned_entities(app_entities)
            for key in keys:
                if key not in app_entities:
                    continue
                if dbconstants.APP_ENTITY_SCHEMA[0] not in app_entities[key]:
                    continue
                entities[key] = app_entities[key][dbconstants.APP_ENTITY_SCHEMA[0]]
        return entities
def lock_and_delete_indexes(self, references, direction, entity_key):
"""For a list of index entries that have the same entity, lock the entity
and delete the indexes.
Since another process can update an entity after we've determined that
an index entry is invalid, we need to re-check the index entries after
locking their entity key.
Args:
references: A list of references to an entity.
direction: The direction of the index.
entity_key: A string containing the entity key.
"""
if direction == datastore_pb.Query_Order.ASCENDING:
table_name = dbconstants.ASC_PROPERTY_TABLE
else:
table_name = dbconstants.DSC_PROPERTY_TABLE
app = entity_key.split(self.ds_access._SEPARATOR)[0]
try:
txn_id = self.acquire_lock_for_key(
app_id=app,
key=entity_key,
retries=self.ds_access.NON_TRANS_LOCK_RETRY_COUNT,
retry_time=self.ds_access.LOCK_RETRY_TIME,
)
except zk.ZKTransactionException:
self.index_entries_delete_failures += 1
return
entities = self.fetch_entity_dict_for_references(references)
refs_to_delete = []
for reference in references:
index_elements = list(reference.keys())[0].split(self.ds_access._SEPARATOR)
prop_name = index_elements[self.ds_access.PROP_NAME_IN_SINGLE_PROP_INDEX]
if not self.ds_access._DatastoreDistributed__valid_index_entry(
reference, entities, direction, prop_name
):
refs_to_delete.append(list(reference.keys())[0])
logging.debug(
"Removing {} indexes starting with {}".format(
len(refs_to_delete), [refs_to_delete[0]]
)
)
try:
self.db_access.batch_delete(
table_name, refs_to_delete, column_names=dbconstants.PROPERTY_SCHEMA
)
self.index_entries_cleaned += len(refs_to_delete)
except Exception:
logging.exception("Unable to delete indexes")
self.index_entries_delete_failures += 1
self.release_lock_for_key(
app_id=app,
key=entity_key,
txn_id=txn_id,
retries=self.ds_access.NON_TRANS_LOCK_RETRY_COUNT,
retry_time=self.ds_access.LOCK_RETRY_TIME,
)
def lock_and_delete_kind_index(self, reference):
"""For a list of index entries that have the same entity, lock the entity
and delete the indexes.
Since another process can update an entity after we've determined that
an index entry is invalid, we need to re-check the index entries after
locking their entity key.
Args:
reference: A dictionary containing a kind reference.
"""
table_name = dbconstants.APP_KIND_TABLE
entity_key = list(reference.values())[0].values()[0]
app = entity_key.split(self.ds_access._SEPARATOR)[0]
try:
txn_id = self.acquire_lock_for_key(
app_id=app,
key=entity_key,
retries=self.ds_access.NON_TRANS_LOCK_RETRY_COUNT,
retry_time=self.ds_access.LOCK_RETRY_TIME,
)
except zk.ZKTransactionException:
self.index_entries_delete_failures += 1
return
entities = self.fetch_entity_dict_for_references([reference])
if entity_key not in entities:
index_to_delete = list(reference.keys())[0]
logging.debug("Removing {}".format([index_to_delete]))
try:
self.db_access.batch_delete(
table_name,
[index_to_delete],
column_names=dbconstants.APP_KIND_SCHEMA,
)
self.index_entries_cleaned += 1
except dbconstants.AppScaleDBConnectionError:
logging.exception("Unable to delete index.")
self.index_entries_delete_failures += 1
self.release_lock_for_key(
app_id=app,
key=entity_key,
txn_id=txn_id,
retries=self.ds_access.NON_TRANS_LOCK_RETRY_COUNT,
retry_time=self.ds_access.LOCK_RETRY_TIME,
)
    def clean_up_indexes(self, direction):
        """Deletes invalid single property index entries.

        This is needed because we do not delete index entries when updating or
        deleting entities. With time, this results in queries taking an
        increasing amount of time.

        Args:
            direction: The direction of the index.
        """
        if direction == datastore_pb.Query_Order.ASCENDING:
            table_name = dbconstants.ASC_PROPERTY_TABLE
            task_id = self.CLEAN_ASC_INDICES_TASK
        else:
            table_name = dbconstants.DSC_PROPERTY_TABLE
            task_id = self.CLEAN_DSC_INDICES_TASK
        # If we have state information beyond what function to use,
        # load the last seen start key.
        if len(self.groomer_state) > 1 and self.groomer_state[0] == task_id:
            start_key = self.groomer_state[1]
        else:
            start_key = ""
        end_key = dbconstants.TERMINATING_STRING
        while True:
            references = self.db_access.range_query(
                table_name=table_name,
                column_names=dbconstants.PROPERTY_SCHEMA,
                start_key=start_key,
                end_key=end_key,
                limit=self.BATCH_SIZE,
                start_inclusive=False,
            )
            if len(references) == 0:
                break
            self.index_entries_checked += len(references)
            # Throttled progress logging.
            if time.time() > self.last_logged + self.LOG_PROGRESS_FREQUENCY:
                logging.info(
                    "Checked {} index entries".format(self.index_entries_checked)
                )
                self.last_logged = time.time()
            first_ref = list(references[0].keys())[0]
            logging.debug(
                "Fetched {} total refs, starting with {}, direction: {}".format(
                    self.index_entries_checked, [first_ref], direction
                )
            )
            # Advance the pagination key; identical consecutive keys would
            # mean the query is not progressing.
            last_start_key = start_key
            start_key = list(references[-1].keys())[0]
            if start_key == last_start_key:
                raise dbconstants.AppScaleDBError(
                    "An infinite loop was detected while fetching references."
                )
            entities = self.fetch_entity_dict_for_references(references)
            # Group invalid references by entity key so we can minimize locks.
            invalid_refs = {}
            for reference in references:
                # The property name is element 3 of the separator-delimited
                # index row key.
                prop_name = list(reference.keys())[0].split(self.ds_access._SEPARATOR)[
                    3
                ]
                if not self.ds_access._DatastoreDistributed__valid_index_entry(
                    reference, entities, direction, prop_name
                ):
                    entity_key = list(reference.values())[0][
                        self.ds_access.INDEX_REFERENCE_COLUMN
                    ]
                    if entity_key not in invalid_refs:
                        invalid_refs[entity_key] = []
                    invalid_refs[entity_key].append(reference)
            for entity_key in invalid_refs:
                self.lock_and_delete_indexes(
                    invalid_refs[entity_key], direction, entity_key
                )
            self.update_groomer_state([task_id, start_key])
def clean_up_kind_indices(self):
"""Deletes invalid kind index entries.
This is needed because the datastore does not delete kind index entries
when deleting entities.
"""
table_name = dbconstants.APP_KIND_TABLE
task_id = self.CLEAN_KIND_INDICES_TASK
start_key = ""
end_key = dbconstants.TERMINATING_STRING
if len(self.groomer_state) > 1:
start_key = self.groomer_state[1]
while True:
references = self.db_access.range_query(
table_name=table_name,
column_names=dbconstants.APP_KIND_SCHEMA,
start_key=start_key,
end_key=end_key,
limit=self.BATCH_SIZE,
start_inclusive=False,
)
if len(references) == 0:
break
self.index_entries_checked += len(references)
if time.time() > self.last_logged + self.LOG_PROGRESS_FREQUENCY:
logging.info(
"Checked {} index entries".format(self.index_entries_checked)
)
self.last_logged = time.time()
first_ref = list(references[0].keys())[0]
logging.debug(
"Fetched {} kind indices, starting with {}".format(
len(references), [first_ref]
)
)
last_start_key = start_key
start_key = list(references[-1].keys())[0]
if start_key == last_start_key:
raise dbconstants.AppScaleDBError(
"An infinite loop was detected while fetching references."
)
entities = self.fetch_entity_dict_for_references(references)
for reference in references:
entity_key = list(reference.values())[0].values()[0]
if entity_key not in entities:
self.lock_and_delete_kind_index(reference)
self.update_groomer_state([task_id, start_key])
def clean_up_composite_indexes(self):
"""Deletes old composite indexes and bad references.
Returns:
True on success, False otherwise.
"""
return True
def get_composite_indexes(self, app_id, kind):
"""Fetches the composite indexes for a kind.
Args:
app_id: The application ID.
kind: A string, the kind for which we need composite indexes.
Returns:
A list of composite indexes.
"""
if not kind:
return []
if app_id in self.composite_index_cache:
if self.composite_index_cache[app_id] == self.NO_COMPOSITES:
return []
elif kind in self.composite_index_cache[app_id]:
return self.composite_index_cache[app_id][kind]
else:
return []
else:
if self.load_composite_cache(app_id):
if kind in self.composite_index_cache[app_id]:
return self.composite_index_cache[kind]
return []
def delete_indexes(self, entity):
"""Deletes indexes for a given entity.
Args:
entity: An EntityProto.
"""
return
def delete_composite_indexes(self, entity, composites):
"""Deletes composite indexes for an entity.
Args:
entity: An EntityProto.
composites: A list of datastore_pb.CompositeIndexes composite indexes.
"""
row_keys = datastore_server.DatastoreDistributed.get_composite_indexes_rows(
[entity], composites
)
self.db_access.batch_delete(
dbconstants.COMPOSITE_TABLE,
row_keys,
column_names=dbconstants.COMPOSITE_SCHEMA,
)
def fix_badlisted_entity(self, key, version):
"""Places the correct entity given the current one is from a blacklisted
transaction.
Args:
key: The key to the entity table.
version: The bad version of the entity.
Returns:
True on success, False otherwise.
"""
app_prefix = entity_utils.get_prefix_from_entity_key(key)
root_key = entity_utils.get_root_key_from_entity_key(key)
# TODO watch out for the race condition of doing a GET then a PUT.
try:
txn_id = self.zoo_keeper.get_transaction_id(app_prefix)
if self.zoo_keeper.acquire_lock(app_prefix, txn_id, root_key):
valid_id = self.zoo_keeper.get_valid_transaction_id(
app_prefix, version, key
)
# Insert the entity along with regular indexes and composites.
ds_distributed = self.register_db_accessor(app_prefix)
bad_key = datastore_server.DatastoreDistributed.get_journal_key(
key, version
)
good_key = datastore_server.DatastoreDistributed.get_journal_key(
key, valid_id
)
# Fetch the journal and replace the bad entity.
good_entry = entity_utils.fetch_journal_entry(self.db_access, good_key)
bad_entry = entity_utils.fetch_journal_entry(self.db_access, bad_key)
# Get the kind to lookup composite indexes.
kind = None
if good_entry:
kind = datastore_server.DatastoreDistributed.get_entity_kind(
good_entry.key()
)
elif bad_entry:
kind = datastore_server.DatastoreDistributed.get_entity_kind(
bad_entry.key()
)
# Fetch latest composites for this entity
composites = self.get_composite_indexes(app_prefix, kind)
# Remove previous regular indexes and composites if it's not a
# TOMBSTONE.
if bad_entry:
self.delete_indexes(bad_entry)
self.delete_composite_indexes(bad_entry, composites)
# Overwrite the entity table with the correct version.
# Insert into entity table, regular indexes, and composites.
if good_entry:
# TODO
# self.db_access.batch_put_entities(...)
# self.insert_indexes(good_entry)
# self.insert_composite_indexes(good_entry, composites)
pass
else:
# TODO
# self.db_access.batch_delete_entities(...)
pass
del ds_distributed
else:
success = False
except zk.ZKTransactionException as zk_exception:
logging.error("Caught exception {0}".format(zk_exception))
success = False
except zk.ZKInternalException as zk_exception:
logging.error("Caught exception {0}".format(zk_exception))
success = False
except dbconstants.AppScaleDBConnectionError as db_exception:
logging.error("Caught exception {0}".format(db_exception))
success = False
finally:
if not success:
if not self.zoo_keeper.notify_failed_transaction(app_prefix, txn_id):
logging.error(
"Unable to invalidate txn for {0} with txnid: {1}".format(
app_prefix, txn_id
)
)
try:
self.zoo_keeper.release_lock(app_prefix, txn_id)
except zk.ZKTransactionException as zk_exception:
# There was an exception releasing the lock, but
# the replacement has already happened.
pass
except zk.ZKInternalException as zk_exception:
pass
return True
    def process_tombstone(self, key, entity, version):
        """Processes any entities which have been soft deleted.
        Does an actual delete to reclaim disk space.

        Args:
            key: The key to the entity table.
            entity: The entity in string serialized form.
            version: The version of the entity in the datastore.
        Returns:
            True if a hard delete occurred, False otherwise.
        """
        success = False
        app_prefix = entity_utils.get_prefix_from_entity_key(key)
        root_key = entity_utils.get_root_key_from_entity_key(key)
        # Blacklisted versions belong to failed transactions and are skipped
        # (reported as handled) rather than hard-deleted.
        try:
            if self.zoo_keeper.is_blacklisted(app_prefix, version):
                logging.error(
                    "Found a blacklisted item for version {0} on key {1}".format(
                        version, key
                    )
                )
                return True
                # NOTE(review): the call below is unreachable dead code kept
                # behind the return, matching the TODO.
                # TODO actually fix the badlisted entity
                return self.fix_badlisted_entity(key, version)
        except zk.ZKTransactionException as zk_exception:
            logging.error("Caught exception {0}.\nBacking off!".format(zk_exception))
            time.sleep(self.DB_ERROR_PERIOD)
            return False
        except zk.ZKInternalException as zk_exception:
            logging.error("Caught exception {0}.\nBacking off!".format(zk_exception))
            time.sleep(self.DB_ERROR_PERIOD)
            return False
        txn_id = 0
        try:
            txn_id = self.zoo_keeper.get_transaction_id(app_prefix)
        except zk.ZKTransactionException as zk_exception:
            logging.error("Exception tossed: {0}".format(zk_exception))
            logging.error("Backing off!")
            time.sleep(self.DB_ERROR_PERIOD)
            return False
        except zk.ZKInternalException as zk_exception:
            logging.error("Exception tossed: {0}".format(zk_exception))
            logging.error("Backing off!")
            time.sleep(self.DB_ERROR_PERIOD)
            return False
        # Hard-delete under the entity-group lock, then prune the journal up
        # to and including this transaction.
        try:
            if self.zoo_keeper.acquire_lock(app_prefix, txn_id, root_key):
                success = self.hard_delete_row(key)
                if success:
                    # Increment the txn ID by one because we want to delete this current
                    # entry as well.
                    success = self.clean_journal_entries(txn_id + 1, key)
            else:
                success = False
        except zk.ZKTransactionException as zk_exception:
            logging.error("Exception tossed: {0}".format(zk_exception))
            logging.error("Backing off!")
            time.sleep(self.DB_ERROR_PERIOD)
            success = False
        except zk.ZKInternalException as zk_exception:
            logging.error("Exception tossed: {0}".format(zk_exception))
            logging.error("Backing off!")
            time.sleep(self.DB_ERROR_PERIOD)
            success = False
        finally:
            # On failure, mark the transaction failed and best-effort release
            # the lock; ZK errors here are logged and ignored because the
            # hard delete may already have happened.
            if not success:
                try:
                    if not self.zoo_keeper.notify_failed_transaction(
                        app_prefix, txn_id
                    ):
                        logging.error(
                            "Unable to invalidate txn for {0} with txnid: {1}".format(
                                app_prefix, txn_id
                            )
                        )
                    self.zoo_keeper.release_lock(app_prefix, txn_id)
                except zk.ZKTransactionException as zk_exception:
                    logging.error(
                        "Caught exception: {0}\nIgnoring...".format(zk_exception)
                    )
                    # There was an exception releasing the lock, but
                    # the hard delete has already happened.
                except zk.ZKInternalException as zk_exception:
                    logging.error(
                        "Caught exception: {0}\nIgnoring...".format(zk_exception)
                    )
        if success:
            try:
                self.zoo_keeper.release_lock(app_prefix, txn_id)
            except Exception as exception:
                logging.error("Unable to release lock: {0}".format(exception))
            self.num_deletes += 1
        logging.debug("Deleting tombstone for key {0}: {1}".format(key, success))
        return success
def initialize_kind(self, app_id, kind):
    """Ensure a statistics bucket exists for (app_id, kind).

    Args:
      app_id: The application ID.
      kind: A string representing an entity kind.
    """
    # setdefault creates the per-app dict on first sight of this app.
    per_app = self.stats.setdefault(app_id, {kind: {"size": 0, "number": 0}})
    if kind not in per_app:
        per_app[kind] = {"size": 0, "number": 0}
def initialize_namespace(self, app_id, namespace):
    """Puts a namespace into the namespace object if it does not already exist.

    Args:
      app_id: The application ID.
      namespace: A string representing a namespace.
    """
    if app_id not in self.namespace_info:
        self.namespace_info[app_id] = {namespace: {"size": 0, "number": 0}}
    if namespace not in self.namespace_info[app_id]:
        # Bug fix: this previously replaced the entire per-app dict (dropping
        # every previously-seen namespace) and then, in a duplicated check,
        # wrote the bucket into self.stats instead of self.namespace_info.
        # Mirror initialize_kind: add just the missing namespace bucket.
        self.namespace_info[app_id][namespace] = {"size": 0, "number": 0}
def process_statistics(self, key, entity, size):
    """Processes an entity and adds to the global statistics.

    Args:
      key: The key to the entity table.
      entity: EntityProto entity.
      size: A int of the size of the entity.
    Returns:
      True on success, False otherwise.
    """
    kind = datastore_server.DatastoreDistributed.get_entity_kind(entity.key())
    namespace = entity.key().name_space()
    # An entity without a kind is malformed; report failure.
    if not kind:
        logging.warning("Entity did not have a kind {0}".format(entity))
        return False
    # Protected/private kinds are deliberately excluded from stats but are
    # not errors, so the caller sees success.
    if re.match(self.PROTECTED_KINDS, kind):
        return True
    if re.match(self.PRIVATE_KINDS, kind):
        return True
    app_id = entity.key().app()
    if not app_id:
        logging.warning("Entity of kind {0} did not have an app id".format(kind))
        return False
    # Do not generate statistics for applications which are internal to
    # AppScale.
    if app_id in self.APPSCALE_APPLICATIONS:
        return True
    # Make sure both accumulators have buckets, then tally size and count.
    self.initialize_kind(app_id, kind)
    self.initialize_namespace(app_id, namespace)
    self.namespace_info[app_id][namespace]["size"] += size
    self.namespace_info[app_id][namespace]["number"] += 1
    self.stats[app_id][kind]["size"] += size
    self.stats[app_id][kind]["number"] += 1
    return True
def txn_blacklist_cleanup(self):
    """Clean up old transactions and removed unused references
    to reap storage.

    Returns:
      True on success, False otherwise.
    """
    # TODO implement
    # Currently a no-op placeholder; always reports success.
    return True
def verify_entity(self, entity, key, txn_id):
    """Verify that the entity is not blacklisted. Clean up old journal
    entries if it is valid.

    Args:
      entity: The entity to verify (currently unused by the body; kept for
        interface compatibility).
      key: The key to the entity table.
      txn_id: An int, a transaction ID.
    Returns:
      True on success, False otherwise.
    """
    app_prefix = entity_utils.get_prefix_from_entity_key(key)
    try:
        if not self.zoo_keeper.is_blacklisted(app_prefix, txn_id):
            self.clean_journal_entries(txn_id, key)
        else:
            logging.error(
                "Found a blacklisted item for version {0} on key {1}".format(
                    txn_id, key))
            # Deliberately returns success here; the repair call below is
            # intentionally unreachable until the TODO is resolved.
            return True
            # TODO fix the badlisted entity.
            return self.fix_badlisted_entity(key, txn_id)
    except zk.ZKTransactionException as zk_exception:
        # Treat ZooKeeper errors as transient: back off and report success so
        # the grooming pass continues.
        logging.error("Caught exception {0}, backing off!".format(zk_exception))
        time.sleep(self.DB_ERROR_PERIOD)
        return True
    except zk.ZKInternalException as zk_exception:
        logging.error("Caught exception: {0}, backing off!".format(zk_exception))
        time.sleep(self.DB_ERROR_PERIOD)
        return True
    return True
def process_entity(self, entity):
    """Processes an entity by updating statistics, indexes, and removes
    tombstones.

    Args:
      entity: The entity to operate on — a single-item dict mapping the row
        key to its column dict (APP_ENTITY_SCHEMA columns).
    Returns:
      True on success, False otherwise.
    """
    logging.debug("Process entity {0}".format(str(entity)))
    key = list(entity.keys())[0]
    # APP_ENTITY_SCHEMA[0] holds the serialized entity, [1] its version.
    one_entity = entity[key][dbconstants.APP_ENTITY_SCHEMA[0]]
    version = entity[key][dbconstants.APP_ENTITY_SCHEMA[1]]
    logging.debug("Entity value: {0}".format(entity))
    # Tombstoned rows take a separate hard-delete path.
    if one_entity == datastore_server.TOMBSTONE:
        return self.process_tombstone(key, one_entity, version)
    ent_proto = entity_pb.EntityProto()
    ent_proto.ParseFromString(one_entity)
    # NOTE(review): return values of verify_entity/process_statistics are
    # ignored; failures there do not affect this method's result.
    self.verify_entity(ent_proto, key, version)
    self.process_statistics(key, ent_proto, len(one_entity))
    return True
def create_namespace_entry(self, namespace, size, number, timestamp):
    """Puts a namespace into the datastore.

    Args:
      namespace: A string, the namespace.
      size: An int representing the number of bytes taken by a namespace.
      number: The total number of entities in a namespace.
      timestamp: A datetime.datetime object.
    Returns:
      True on success, False otherwise.
    """
    entities_to_write = []
    namespace_stat = stats.NamespaceStat(
        subject_namespace=namespace, bytes=size, count=number,
        timestamp=timestamp)
    entities_to_write.append(namespace_stat)
    # All application are assumed to have the default namespace, so only
    # non-default namespaces get an explicit metadata entry.
    if namespace != "":
        namespace_entry = metadata.Namespace(key_name=namespace)
        entities_to_write.append(namespace_entry)
    try:
        db.put(entities_to_write)
    except datastore_errors.InternalError as internal_error:
        logging.error("Error inserting namespace info: {0}.".format(internal_error))
        return False
    logging.debug("Done creating namespace stats")
    return True
def create_kind_stat_entry(self, kind, size, number, timestamp):
    """Puts a kind statistic into the datastore.

    Args:
      kind: The entity kind.
      size: An int representing the number of bytes taken by entity kind.
      number: The total number of entities.
      timestamp: A datetime.datetime object.
    Returns:
      True on success, False otherwise.
    """
    # Write the statistic and its kind-metadata marker in one batch.
    batch = [
        stats.KindStat(kind_name=kind, bytes=size, count=number,
                       timestamp=timestamp),
        metadata.Kind(key_name=kind),
    ]
    try:
        db.put(batch)
    except datastore_errors.InternalError as error:
        logging.error("Error inserting kind stat: {0}.".format(error))
        return False
    logging.debug("Done creating kind stat")
    return True
def create_global_stat_entry(self, app_id, size, number, timestamp):
    """Puts a global statistic into the datastore.

    Args:
      app_id: The application identifier.
      size: The number of bytes of all entities.
      number: The total number of entities of an application.
      timestamp: A datetime.datetime object.
    Returns:
      True on success, False otherwise.
    """
    entry = stats.GlobalStat(key_name=app_id, bytes=size, count=number,
                             timestamp=timestamp)
    try:
        db.put(entry)
    except datastore_errors.InternalError as error:
        logging.error("Error inserting global stat: {0}.".format(error))
        return False
    logging.debug("Done creating global stat")
    return True
def remove_old_tasks_entities(self):
    """Queries for old tasks and removes the entity which tells
    us whether a named task was enqueued.

    Returns:
      True on success.
    """
    # If we have state information beyond what function to use,
    # load the last seen cursor.
    if (len(self.groomer_state) > 1
            and self.groomer_state[0] == self.CLEAN_TASKS_TASK):
        last_cursor = Cursor(self.groomer_state[1])
    else:
        last_cursor = None
    self.register_db_accessor(constants.DASHBOARD_APP_ID)
    timeout = datetime.datetime.utcnow() - datetime.timedelta(
        seconds=self.TASK_NAME_TIMEOUT)
    counter = 0
    logging.debug("The current time is {0}".format(datetime.datetime.utcnow()))
    logging.debug("The timeout time is {0}".format(timeout))
    while True:
        # Page through TaskName entities older than the timeout.
        query = TaskName.all()
        if last_cursor:
            query.with_cursor(last_cursor)
        query.filter("timestamp <", timeout)
        entities = query.fetch(self.BATCH_SIZE)
        if len(entities) == 0:
            break
        last_cursor = query.cursor()
        for entity in entities:
            logging.debug("Removing task name {0}".format(entity.timestamp))
            entity.delete()
            counter += 1
        if time.time() > self.last_logged + self.LOG_PROGRESS_FREQUENCY:
            logging.info("Removed {} task entities.".format(counter))
            # Bug fix: record *when* we last logged. Previously this assigned
            # the constant LOG_PROGRESS_FREQUENCY, which made the throttle
            # condition true on every batch (compare the identical throttles
            # in clean_up_entities and remove_old_logs).
            self.last_logged = time.time()
        # Checkpoint the cursor so a restarted groomer resumes here.
        self.update_groomer_state([self.CLEAN_TASKS_TASK, last_cursor])
    logging.info("Removed {0} task name entities".format(counter))
    return True
def clean_up_entities(self):
    """Walks the entire entity table in batches, processing each entity
    (stats, tombstones, journal cleanup) and checkpointing progress.

    Database errors are treated as transient: the batch is retried after
    a back-off sleep rather than aborting the pass.
    """
    # If we have state information beyond what function to use,
    # load the last seen key.
    if (len(self.groomer_state) > 1
            and self.groomer_state[0] == self.CLEAN_ENTITIES_TASK):
        last_key = self.groomer_state[1]
    else:
        last_key = ""
    while True:
        try:
            logging.debug("Fetching {} entities".format(self.BATCH_SIZE))
            entities = self.get_entity_batch(last_key)
            if not entities:
                break
            for entity in entities:
                self.process_entity(entity)
            # Resume point: the row key of the last entity in this batch.
            last_key = list(entities[-1].keys())[0]
            self.entities_checked += len(entities)
            # Throttled progress logging.
            if time.time() > self.last_logged + self.LOG_PROGRESS_FREQUENCY:
                logging.info("Checked {} entities".format(self.entities_checked))
                self.last_logged = time.time()
            self.update_groomer_state([self.CLEAN_ENTITIES_TASK, last_key])
        except datastore_errors.Error as error:
            # Transient DB failure: back off and retry the same batch.
            logging.error("Error getting a batch: {0}".format(error))
            time.sleep(self.DB_ERROR_PERIOD)
        except dbconstants.AppScaleDBConnectionError as connection_error:
            logging.error("Error getting a batch: {0}".format(connection_error))
            time.sleep(self.DB_ERROR_PERIOD)
def register_db_accessor(self, app_id):
    """Gets a distributed datastore object to interact with
    the datastore for a certain application.

    Side effects: replaces the process-wide datastore_v3 and memcache API
    proxy stubs and mutates os.environ (APPLICATION_ID, APPNAME,
    AUTH_DOMAIN), so subsequent db/ndb calls in this process target app_id.

    Args:
      app_id: The application ID.
    Returns:
      A distributed_datastore.DatastoreDistributed object.
    """
    ds_distributed = datastore_distributed.DatastoreDistributed(
        app_id, self.datastore_path, require_indexes=False)
    apiproxy_stub_map.apiproxy.RegisterStub("datastore_v3", ds_distributed)
    apiproxy_stub_map.apiproxy.RegisterStub(
        "memcache", memcache_distributed.MemcacheService())
    os.environ["APPLICATION_ID"] = app_id
    os.environ["APPNAME"] = app_id
    os.environ["AUTH_DOMAIN"] = "appscale.com"
    return ds_distributed
def remove_old_logs(self, log_timeout):
    """Removes old logs.

    Args:
      log_timeout: The timeout value in seconds; falsy means remove all
        request log lines regardless of age.
    Returns:
      True on success, False otherwise.
    """
    # If we have state information beyond what function to use,
    # load the last seen cursor.
    if (len(self.groomer_state) > 1
            and self.groomer_state[0] == self.CLEAN_LOGS_TASK):
        last_cursor = Cursor(self.groomer_state[1])
    else:
        last_cursor = None
    self.register_db_accessor(constants.DASHBOARD_APP_ID)
    if log_timeout:
        timeout = datetime.datetime.utcnow() - datetime.timedelta(
            seconds=log_timeout)
        query = RequestLogLine.query(RequestLogLine.timestamp < timeout)
        logging.debug("The timeout time is {0}".format(timeout))
    else:
        query = RequestLogLine.query()
    counter = 0
    logging.debug("The current time is {0}".format(datetime.datetime.utcnow()))
    while True:
        # Page through matching log lines, deleting each.
        entities, next_cursor, more = query.fetch_page(
            self.BATCH_SIZE, start_cursor=last_cursor)
        for entity in entities:
            logging.debug("Removing {0}".format(entity))
            entity.key.delete()
            counter += 1
        # Throttled progress logging.
        if time.time() > self.last_logged + self.LOG_PROGRESS_FREQUENCY:
            logging.info("Removed {} log entries.".format(counter))
            self.last_logged = time.time()
        if more:
            # Checkpoint the cursor so a restarted groomer resumes here.
            last_cursor = next_cursor
            self.update_groomer_state(
                [self.CLEAN_LOGS_TASK, last_cursor.urlsafe()])
        else:
            break
    logging.info("Removed {0} log entries.".format(counter))
    return True
def remove_old_statistics(self):
    """Does a range query on the current batch of statistics and
    deletes them.
    """
    # TODO only remove statistics older than 30 days.
    for app_id in list(self.stats.keys()):
        self.register_db_accessor(app_id)
        # Drop every per-kind statistic for this application.
        kind_stats = stats.KindStat.all().run()
        logging.debug("Result from kind stat query: {0}".format(str(kind_stats)))
        for stat_entity in kind_stats:
            logging.debug("Removing kind {0}".format(stat_entity))
            stat_entity.delete()
        # Then drop the application-wide aggregates.
        global_stats = stats.GlobalStat.all().run()
        logging.debug("Result from global stat query: {0}".format(str(global_stats)))
        for stat_entity in global_stats:
            logging.debug("Removing global {0}".format(stat_entity))
            stat_entity.delete()
        logging.debug("Done removing old stats for app {0}".format(app_id))
def update_namespaces(self, timestamp):
    """Puts the namespace information into the datastore for applications to
    access.

    Args:
      timestamp: A datetime time stamp to know which stat items belong
        together.
    Returns:
      True if there were no errors, False otherwise.
    """
    for app_id in list(self.namespace_info.keys()):
        # Point the API stubs at this application before writing.
        ds_distributed = self.register_db_accessor(app_id)
        namespaces = list(self.namespace_info[app_id].keys())
        for namespace in namespaces:
            size = self.namespace_info[app_id][namespace]["size"]
            number = self.namespace_info[app_id][namespace]["number"]
            # Abort the whole pass on the first failed write.
            if not self.create_namespace_entry(namespace, size, number, timestamp):
                return False
        logging.info(
            "Namespace for {0} are {1}".format(app_id, self.namespace_info[app_id]))
        del ds_distributed
    return True
def update_statistics(self, timestamp):
    """Puts the statistics into the datastore for applications
    to access.

    Args:
      timestamp: A datetime time stamp to know which stat items belong
        together.
    Returns:
      True if there were no errors, False otherwise.
    """
    for app_id in list(self.stats.keys()):
        # Point the API stubs at this application before writing.
        ds_distributed = self.register_db_accessor(app_id)
        total_size = 0
        total_number = 0
        kinds = list(self.stats[app_id].keys())
        for kind in kinds:
            size = self.stats[app_id][kind]["size"]
            number = self.stats[app_id][kind]["number"]
            # Accumulate app-wide totals while writing per-kind stats.
            total_size += size
            total_number += number
            # Abort the whole pass on the first failed write.
            if not self.create_kind_stat_entry(kind, size, number, timestamp):
                return False
        if not self.create_global_stat_entry(
                app_id, total_size, total_number, timestamp):
            return False
        logging.info(
            "Kind stats for {0} are {1}".format(app_id, self.stats[app_id]))
        logging.info(
            "Global stats for {0} are total size of {1} with "
            "{2} entities".format(app_id, total_size, total_number))
        logging.info("Number of hard deletes: {0}".format(self.num_deletes))
        del ds_distributed
    return True
def update_groomer_state(self, state):
    """Updates the groomer's internal state and persists the state to
    ZooKeeper.

    Args:
      state: A list of strings representing the ID of the task to resume along
        with any additional data about the task.
    """
    serialized = self.GROOMER_STATE_DELIMITER.join(state)
    try:
        self.zoo_keeper.update_node(self.GROOMER_STATE_PATH, serialized)
    except zk.ZKInternalException as zk_internal_error:
        # A failed checkpoint write must not crash the groomer; log it and
        # carry on with the in-memory state only.
        logging.exception(zk_internal_error)
    self.groomer_state = state
def run_groomer(self):
    """Runs the grooming process. Loops on the entire dataset sequentially
    and updates stats, indexes, and transactions.

    The pass is a fixed pipeline of tasks; persisted groomer state lets an
    interrupted run resume at the task (and position) where it stopped.
    """
    self.db_access = appscale_datastore_batch.DatastoreFactory.getDatastore(
        self.table_name)
    self.ds_access = datastore_server.DatastoreDistributed(
        datastore_batch=self.db_access, zookeeper=self.zoo_keeper)
    logging.info("Groomer started")
    start = time.time()
    self.reset_statistics()
    self.composite_index_cache = {}
    # Ordered task pipeline; each entry is resumable via its "id".
    tasks = [
        {
            "id": self.CLEAN_ENTITIES_TASK,
            "description": "clean up entities",
            "function": self.clean_up_entities,
            "args": [],
        },
        {
            "id": self.CLEAN_ASC_INDICES_TASK,
            "description": "clean up ascending indices",
            "function": self.clean_up_indexes,
            "args": [datastore_pb.Query_Order.ASCENDING],
        },
        {
            "id": self.CLEAN_DSC_INDICES_TASK,
            "description": "clean up descending indices",
            "function": self.clean_up_indexes,
            "args": [datastore_pb.Query_Order.DESCENDING],
        },
        {
            "id": self.CLEAN_KIND_INDICES_TASK,
            "description": "clean up kind indices",
            "function": self.clean_up_kind_indices,
            "args": [],
        },
        {
            "id": self.CLEAN_LOGS_TASK,
            "description": "clean up old logs",
            "function": self.remove_old_logs,
            "args": [self.LOG_STORAGE_TIMEOUT],
        },
        {
            "id": self.CLEAN_TASKS_TASK,
            "description": "clean up old tasks",
            "function": self.remove_old_tasks_entities,
            "args": [],
        },
        {
            "id": self.CLEAN_DASHBOARD_TASK,
            "description": "clean up old dashboard items",
            "function": self.remove_old_dashboard_data,
            "args": [],
        },
    ]
    # Load any persisted resume state from ZooKeeper.
    groomer_state = self.zoo_keeper.get_node(self.GROOMER_STATE_PATH)
    logging.info("groomer_state: {}".format(groomer_state))
    if groomer_state:
        self.update_groomer_state(
            groomer_state[0].split(self.GROOMER_STATE_DELIMITER))
    for task_number in range(len(tasks)):
        task = tasks[task_number]
        # Skip tasks that precede the resume point.
        if (len(self.groomer_state) > 0
                and self.groomer_state[0] != ""
                and self.groomer_state[0] != task["id"]):
            continue
        logging.info("Starting to {}".format(task["description"]))
        try:
            task["function"](*task["args"])
            # Advance the checkpoint to the next task in the pipeline.
            if task_number != len(tasks) - 1:
                next_task = tasks[task_number + 1]
                self.update_groomer_state([next_task["id"]])
        except Exception as exception:
            # A failing task is logged but does not stop the remaining tasks.
            logging.error(
                "Exception encountered while trying to {}:".format(
                    task["description"]))
            logging.exception(exception)
    # All tasks done: clear the persisted resume state.
    self.update_groomer_state([])
    timestamp = datetime.datetime.utcnow()
    if not self.update_statistics(timestamp):
        logging.error("There was an error updating the statistics")
    if not self.update_namespaces(timestamp):
        logging.error("There was an error updating the namespaces")
    del self.db_access
    del self.ds_access
    time_taken = time.time() - start
    logging.info(
        "Groomer cleaned {0} journal entries".format(self.journal_entries_cleaned))
    logging.info(
        "Groomer checked {0} index entries".format(self.index_entries_checked))
    logging.info(
        "Groomer cleaned {0} index entries".format(self.index_entries_cleaned))
    if self.index_entries_delete_failures > 0:
        logging.info(
            "Groomer failed to remove {0} index entries".format(
                self.index_entries_delete_failures))
    logging.info("Groomer took {0} seconds".format(str(time_taken)))
def main():
    """This main function allows you to run the groomer manually.

    Acquires the cluster-wide groomer lock, runs one grooming pass, and
    always releases the lock and closes the ZooKeeper connection afterwards.
    """
    zk_connection_locations = appscale_info.get_zk_locations_string()
    zookeeper = zk.ZKTransaction(host=zk_connection_locations, start_gc=False)
    db_info = appscale_info.get_db_info()
    table = db_info[":table"]
    master = appscale_info.get_db_master_ip()
    datastore_path = "{0}:8888".format(master)
    ds_groomer = DatastoreGroomer(zookeeper, table, datastore_path)
    logging.debug("Trying to get groomer lock.")
    if ds_groomer.get_groomer_lock():
        logging.info("Got the groomer lock.")
        # Any failure inside the pass is logged; the lock is still released.
        try:
            ds_groomer.run_groomer()
        except Exception as exception:
            logging.exception(
                "Encountered exception {} while running the groomer.".format(
                    str(exception)))
        try:
            ds_groomer.zoo_keeper.release_lock_with_path(zk.DS_GROOM_LOCK_PATH)
        except zk.ZKTransactionException as zk_exception:
            logging.error("Unable to release zk lock {0}.".format(str(zk_exception)))
        except zk.ZKInternalException as zk_exception:
            logging.error("Unable to release zk lock {0}.".format(str(zk_exception)))
        finally:
            zookeeper.close()
    else:
        # Another groomer instance is already running.
        logging.info("Did not get the groomer lock.")
# Script entry point: run one grooming pass standalone.
if __name__ == "__main__":
    main()
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
# Programmer: Navraj Chohan <nlake44@gmail.com>
import os
import sys
import time
import unittest
from flexmock import flexmock
import kazoo.client
import kazoo.exceptions
import kazoo.protocol
import kazoo.protocol.states
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
from dbconstants import *
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
from zkappscale import zktransaction as zk
from zkappscale.zktransaction import ZKTransactionException
class TestZookeeperTransaction(unittest.TestCase):
""" """
def setUp(self):
    """Initializes the fixture values shared by every test."""
    self.handle = None
    self.appid = "appid"
def test_increment_and_get_counter(self):
    """increment_and_get_counter should return the (previous, new) pair."""
    # mock out getTransactionRootPath
    flexmock(zk.ZKTransaction)
    zk.ZKTransaction.should_receive("get_transaction_prefix_path").with_args(
        self.appid).and_return("/rootpath")
    # mock out initializing a ZK connection
    fake_zookeeper = flexmock(
        name="fake_zoo",
        create="create",
        delete_async="delete_async",
        connected=lambda: True)
    fake_zookeeper.should_receive("start")
    fake_zookeeper.should_receive("retry").and_return(None)
    # Counter mock: current value 1, incrementing yields 2.
    fake_counter = flexmock(name="fake_counter", value="value")
    fake_counter.value = 1
    fake_counter.should_receive("__add__").and_return(2)
    fake_zookeeper.should_receive("Counter").and_return(fake_counter)
    # mock out deleting the zero id we get the first time around
    flexmock(kazoo.client)
    kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
    # assert, make sure we got back our id
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertEqual((0, 1), transaction.increment_and_get_counter(self.appid, 1))
def test_create_sequence_node(self):
    """create_sequence_node should discard a zero-suffixed path and retry."""
    # mock out getTransactionRootPath
    flexmock(zk.ZKTransaction)
    zk.ZKTransaction.should_receive("get_transaction_prefix_path").with_args(
        self.appid).and_return("/rootpath")
    # mock out initializing a ZK connection
    fake_zookeeper = flexmock(
        name="fake_zoo", create="create", delete="delete", connected=lambda: True)
    fake_zookeeper.should_receive("start")
    # mock out zookeeper.create for txn id: first call yields the reserved
    # zero id, second call yields the usable id 1.
    path_to_create = "/rootpath/" + self.appid
    zero_path = path_to_create + "/0"
    nonzero_path = path_to_create + "/1"
    fake_zookeeper.should_receive("retry").with_args(
        "create",
        str,
        value=str,
        acl=None,
        makepath=bool,
        sequence=bool,
        ephemeral=bool,
    ).and_return(zero_path).and_return(nonzero_path)
    # mock out deleting the zero id we get the first time around
    fake_zookeeper.should_receive("retry").with_args("delete", zero_path)
    flexmock(kazoo.client)
    kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
    # assert, make sure we got back our id
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertEqual(
        1, transaction.create_sequence_node("/rootpath/" + self.appid, "now"))
def test_create_node(self):
    """create_node should return None after issuing a ZK create."""
    # mock out getTransactionRootPath
    flexmock(zk.ZKTransaction)
    zk.ZKTransaction.should_receive("get_transaction_prefix_path").with_args(
        self.appid).and_return("/rootpath")
    # mock out initializing a ZK connection
    fake_zookeeper = flexmock(
        name="fake_zoo", create="create", connected=lambda: True)
    fake_zookeeper.should_receive("start")
    fake_zookeeper.should_receive("retry").with_args(
        "create",
        str,
        value=str,
        acl=None,
        makepath=bool,
        sequence=bool,
        ephemeral=bool,
    )
    flexmock(kazoo.client)
    kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
    # mock out zookeeper.create for txn id
    # NOTE(review): path_to_create is never used below — likely vestigial.
    path_to_create = "/rootpath/" + self.appid
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertEqual(
        None, transaction.create_node("/rootpath/" + self.appid, "now"))
def test_get_transaction_id(self):
    """get_transaction_id should mint a sequence node and mark XG state."""
    # mock out getTransactionRootPath
    flexmock(zk.ZKTransaction)
    zk.ZKTransaction.should_receive("get_transaction_prefix_path").with_args(
        self.appid).and_return("/rootpath/" + self.appid)
    path_to_create = "/rootpath/" + self.appid + "/" + zk.APP_TX_PREFIX
    zk.ZKTransaction.should_receive("get_txn_path_before_getting_id").with_args(
        self.appid).and_return(path_to_create)
    # mock out time.time so the node payload is deterministic ("1000")
    flexmock(time)
    time.should_receive("time").and_return(1000)
    # mock out initializing a ZK connection
    fake_zookeeper = flexmock(name="fake_zoo", connected=lambda: True)
    fake_zookeeper.should_receive("start")
    fake_zookeeper.should_receive("retry")
    flexmock(kazoo.client)
    kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
    # mock out making the txn id
    zk.ZKTransaction.should_receive("create_sequence_node").with_args(
        path_to_create, "1000").and_return(1)
    # mock out zookeeper.create for is_xg
    xg_path = path_to_create + "/1/" + zk.XG_PREFIX
    zk.ZKTransaction.should_receive("get_xg_path").and_return(xg_path)
    zk.ZKTransaction.should_receive("create_node").with_args(xg_path, "1000")
    # assert, make sure we got back our id
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertEqual(1, transaction.get_transaction_id(self.appid, is_xg=True))
def test_get_txn_path_before_getting_id(self):
    """The pre-id txn path should join root, tx path, and tx prefix."""
    # mock out initializing a ZK connection
    flexmock(zk.ZKTransaction)
    fake_zookeeper = flexmock(name="fake_zoo")
    fake_zookeeper.should_receive("start")
    fake_zookeeper.should_receive("retry")
    flexmock(kazoo.client)
    kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
    zk.ZKTransaction.should_receive("get_app_root_path").and_return("app_root_path")
    expected = zk.PATH_SEPARATOR.join(
        ["app_root_path", zk.APP_TX_PATH, zk.APP_TX_PREFIX])
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertEqual(
        expected, transaction.get_txn_path_before_getting_id(self.appid))
def test_get_xg_path(self):
    """The XG path should embed the zero-padded transaction ID."""
    # mock out initializing a ZK connection
    flexmock(zk.ZKTransaction)
    fake_zookeeper = flexmock(name="fake_zoo")
    fake_zookeeper.should_receive("start")
    fake_zookeeper.should_receive("retry")
    flexmock(kazoo.client)
    kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
    tx_id = 100
    # Transaction IDs are rendered as a 10-digit zero-padded suffix.
    tx_str = zk.APP_TX_PREFIX + "%010d" % tx_id
    zk.ZKTransaction.should_receive("get_app_root_path").and_return("app_root_path")
    expected = zk.PATH_SEPARATOR.join(
        ["app_root_path", zk.APP_TX_PATH, tx_str, zk.XG_PREFIX])
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertEqual(expected, transaction.get_xg_path("xxx", 100))
def test_is_in_transaction(self):
    """is_in_transaction should mirror node existence and raise when
    the transaction is blacklisted."""
    # shared mocks
    flexmock(zk.ZKTransaction)
    zk.ZKTransaction.should_receive("get_transaction_path").and_return(
        "/transaction/path")
    fake_zookeeper = flexmock(
        name="fake_zoo", exists="exists", connected=lambda: True)
    fake_zookeeper.should_receive("start")
    flexmock(kazoo.client)
    kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
    # test when the transaction is running
    zk.ZKTransaction.should_receive("is_blacklisted").and_return(False)
    fake_zookeeper.should_receive("retry").with_args("exists", str).and_return(True)
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertEqual(True, transaction.is_in_transaction(self.appid, 1))
    # and when it's not
    zk.ZKTransaction.should_receive("is_blacklisted").and_return(False)
    fake_zookeeper.should_receive("retry").with_args("exists", str).and_return(
        False)
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertEqual(False, transaction.is_in_transaction(self.appid, 1))
    # and when it's blacklisted
    zk.ZKTransaction.should_receive("is_blacklisted").and_return(True)
    # NOTE(review): fake_transaction is created but the assertion below uses
    # the earlier `transaction` instance — possibly unintentional.
    fake_transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertRaises(
        zk.ZKTransactionException, transaction.is_in_transaction, self.appid, 1)
def test_acquire_lock(self):
    """acquire_lock paths: fresh lock, already-held lock, non-XG conflict
    (raises), and XG cross-group acquisition."""
    # mock out waitForConnect
    flexmock(zk.ZKTransaction)
    zk.ZKTransaction.should_receive("get_lock_root_path").and_return(
        "/lock/root/path")
    zk.ZKTransaction.should_receive("get_transaction_prefix_path").and_return(
        "/rootpath/" + self.appid)
    fake_zookeeper = flexmock(name="fake_zoo", get="get", connected=lambda: True)
    fake_zookeeper.should_receive("start")
    fake_zookeeper.should_receive("retry")
    flexmock(kazoo.client)
    kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
    # first, test out getting a lock for a regular transaction, that we don't
    # already have the lock for
    zk.ZKTransaction.should_receive("is_in_transaction").and_return(False)
    zk.ZKTransaction.should_receive("acquire_additional_lock").and_return(True)
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertEqual(True, transaction.acquire_lock(self.appid, "txid", "somekey"))
    # next, test when we're in a transaction and we already have the lock
    zk.ZKTransaction.should_receive("is_in_transaction").and_return(True)
    zk.ZKTransaction.should_receive("get_transaction_lock_list_path").and_return(
        "/rootpath/" + self.appid + "/tx1")
    # The held lock list matches the requested lock root path.
    fake_zookeeper.should_receive("retry").with_args("get", str).and_return(
        ["/lock/root/path"])
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertEqual(True, transaction.acquire_lock(self.appid, "txid", "somekey"))
    # next, test when we're in a non-XG transaction and we're not in the lock
    # root path
    zk.ZKTransaction.should_receive("is_in_transaction").and_return(True)
    zk.ZKTransaction.should_receive("get_transaction_lock_list_path").and_return(
        "/rootpath/" + self.appid + "/tx1")
    fake_zookeeper.should_receive("retry").with_args("get", str).and_return(
        ["/lock/root/path2"])
    zk.ZKTransaction.should_receive("is_xg").and_return(False)
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertRaises(
        zk.ZKTransactionException,
        transaction.acquire_lock,
        self.appid,
        "txid",
        "somekey",
    )
    # next, test when we're in a XG transaction and we're not in the lock
    # root path
    zk.ZKTransaction.should_receive("is_in_transaction").and_return(True)
    zk.ZKTransaction.should_receive("get_transaction_lock_list_path").and_return(
        "/rootpath/" + self.appid + "/tx1")
    fake_zookeeper.should_receive("retry").with_args("get", str).and_return(
        ["/lock/root/path2"])
    zk.ZKTransaction.should_receive("is_xg").and_return(True)
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertEqual(True, transaction.acquire_lock(self.appid, "txid", "somekey"))
def test_acquire_additional_lock(self):
    """acquire_additional_lock: success with/without node creation, the
    MAX_GROUPS_FOR_XG cap, and collision with an existing node."""
    # mock out waitForConnect
    flexmock(zk.ZKTransaction)
    zk.ZKTransaction.should_receive("check_transaction")
    zk.ZKTransaction.should_receive("get_transaction_path").and_return("/txn/path")
    zk.ZKTransaction.should_receive("get_lock_root_path").and_return(
        "/lock/root/path")
    zk.ZKTransaction.should_receive("get_transaction_prefix_path").and_return(
        "/rootpath/" + self.appid)
    fake_zookeeper = flexmock(
        name="fake_zoo",
        create="create",
        create_async="create_async",
        get="get",
        set_async="set_async",
        connected=lambda: True)
    fake_zookeeper.should_receive("start")
    fake_zookeeper.should_receive("retry").with_args(
        "create",
        str,
        makepath=bool,
        sequence=bool,
        ephemeral=bool,
        value=str,
        acl=None,
    ).and_return("/some/lock/path")
    fake_zookeeper.should_receive("retry").with_args(
        "create_async",
        str,
        value=str,
        acl=None,
        ephemeral=bool,
        makepath=bool,
        sequence=bool,
    )
    fake_zookeeper.should_receive("retry").with_args(
        "create_async",
        str,
        value=str,
        acl=str,
        ephemeral=bool,
        makepath=bool,
        sequence=bool,
    )
    # Existing lock list held by the transaction (below the XG cap).
    lock_list = ["path1", "path2", "path3"]
    lock_list_str = zk.LOCK_LIST_SEPARATOR.join(lock_list)
    fake_zookeeper.should_receive("retry").with_args("get", str).and_return(
        [lock_list_str])
    fake_zookeeper.should_receive("retry").with_args("set_async", str, str)
    flexmock(kazoo.client)
    kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertEqual(
        True,
        transaction.acquire_additional_lock(self.appid, "txid", "somekey", False))
    # Test for when we want to create a new ZK node for the lock path
    self.assertEqual(
        True,
        transaction.acquire_additional_lock(self.appid, "txid", "somekey", True))
    # Test for existing max groups
    lock_list = ["path" + str(num + 1) for num in range(zk.MAX_GROUPS_FOR_XG)]
    lock_list_str = zk.LOCK_LIST_SEPARATOR.join(lock_list)
    fake_zookeeper.should_receive("retry").with_args("get", str).and_return(
        [lock_list_str])
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertRaises(
        zk.ZKTransactionException,
        transaction.acquire_additional_lock,
        self.appid,
        "txid",
        "somekey",
        False,
    )
    # Test for when there is a node which already exists.
    fake_zookeeper.should_receive("retry").with_args(
        "create", str, str, None, bool, bool, bool
    ).and_raise(kazoo.exceptions.NodeExistsError)
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertRaises(
        zk.ZKTransactionException,
        transaction.acquire_additional_lock,
        self.appid,
        "txid",
        "somekey",
        False,
    )
def test_check_transaction(self):
    """check_transaction: valid txn passes; blacklisted or missing raises."""
    # mock out getTransactionRootPath
    flexmock(zk.ZKTransaction)
    zk.ZKTransaction.should_receive("get_transaction_prefix_path").with_args(
        self.appid).and_return("/rootpath")
    zk.ZKTransaction.should_receive("is_blacklisted").and_return(False)
    # mock out initializing a ZK connection
    fake_zookeeper = flexmock(
        name="fake_zoo", exists="exists", connected=lambda: True)
    fake_zookeeper.should_receive("start")
    fake_zookeeper.should_receive("retry").with_args("exists", str).and_return(True)
    flexmock(kazoo.client)
    kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertEqual(True, transaction.check_transaction(self.appid, 1))
    # Check to make sure it raises exception for blacklisted transactions.
    zk.ZKTransaction.should_receive("is_blacklisted").and_return(True)
    self.assertRaises(
        zk.ZKTransactionException, transaction.check_transaction, self.appid, 1)
    # Non-blacklisted but missing txn node must also raise.
    zk.ZKTransaction.should_receive("is_blacklisted").and_return(False)
    fake_zookeeper.should_receive("retry").with_args("exists", str).and_return(
        False)
    self.assertRaises(
        zk.ZKTransactionException, transaction.check_transaction, self.appid, 1)
def test_is_xg(self):
    """is_xg should report True when the XG marker node exists."""
    # mock out initializing a ZK connection
    fake_zookeeper = flexmock(
        name="fake_zoo", exists="exists", connected=lambda: True)
    fake_zookeeper.should_receive("start")
    fake_zookeeper.should_receive("retry").with_args("exists", str).and_return(True)
    flexmock(kazoo.client)
    kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
    transaction = zk.ZKTransaction(host="something", start_gc=False)
    self.assertEqual(True, transaction.is_xg(self.appid, 1))
    def test_release_lock(self):
        """release_lock succeeds for both regular and XG transactions, and
        raises ZKTransactionException when the lock node is missing."""
        # mock out getTransactionRootPath
        flexmock(zk.ZKTransaction)
        zk.ZKTransaction.should_receive("check_transaction")
        zk.ZKTransaction.should_receive("get_transaction_path").and_return("/rootpath")
        zk.ZKTransaction.should_receive("get_transaction_lock_list_path").and_return(
            "/rootpath"
        )
        zk.ZKTransaction.should_receive("is_xg").and_return(False)
        # mock out initializing a ZK connection
        fake_zookeeper = flexmock(
            name="fake_zoo",
            exists="exists",
            get="get",
            delete="delete",
            delete_async="delete_async",
            get_children="get_children",
            connected=lambda: True,
        )
        fake_zookeeper.should_receive("start")
        fake_zookeeper.should_receive("retry").with_args("exists", str).and_return(True)
        fake_zookeeper.should_receive("retry").with_args("get", str).and_return(
            ["/1/2/3"]
        )
        fake_zookeeper.should_receive("retry").with_args("delete_async", str)
        fake_zookeeper.should_receive("retry").with_args("delete", str)
        fake_zookeeper.should_receive("retry").with_args(
            "get_children", str
        ).and_return(["1", "2"])
        flexmock(kazoo.client)
        kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
        transaction = zk.ZKTransaction(host="something", start_gc=False)
        self.assertEqual(True, transaction.release_lock(self.appid, 1))
        # Same call must also succeed when the transaction is cross-group.
        zk.ZKTransaction.should_receive("is_xg").and_return(True)
        self.assertEqual(True, transaction.release_lock(self.appid, 1))
        # Check to make sure it raises exception for blacklisted transactions.
        zk.ZKTransaction.should_receive("is_xg").and_return(False)
        fake_zookeeper.should_receive("retry").with_args("get", str).and_raise(
            kazoo.exceptions.NoNodeError
        )
        self.assertRaises(
            zk.ZKTransactionException, transaction.release_lock, self.appid, 1
        )
    def test_is_blacklisted(self):
        """is_blacklisted returns True when the txn id appears among the
        children of the blacklist root node."""
        # mock out getTransactionRootPath
        flexmock(zk.ZKTransaction)
        zk.ZKTransaction.should_receive("get_blacklist_root_path").and_return(
            "bl_root_path"
        )
        # mock out initializing a ZK connection
        fake_zookeeper = flexmock(
            name="fake_zoo",
            create="create",
            exists="exists",
            get_children="get_children",
            connected=lambda: True,
        )
        fake_zookeeper.should_receive("start")
        fake_zookeeper.should_receive("retry").with_args(
            "create", str, str, None, bool, bool, bool
        ).and_return()
        fake_zookeeper.should_receive("retry").with_args("exists", str).and_return(True)
        fake_zookeeper.should_receive("retry").with_args(
            "get_children", str
        ).and_return(["1", "2"])
        flexmock(kazoo.client)
        kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
        transaction = zk.ZKTransaction(host="something", start_gc=False)
        self.assertEqual(True, transaction.is_blacklisted(self.appid, 1))
    def test_register_updated_key(self):
        """register_updated_key succeeds when the txn node exists and raises
        ZKTransactionException when it does not."""
        # mock out getTransactionRootPath
        flexmock(zk.ZKTransaction)
        zk.ZKTransaction.should_receive("get_valid_transaction_path").and_return(
            "/txn/path"
        )
        zk.ZKTransaction.should_receive("get_transaction_path").and_return("/txn/path")
        zk.ZKTransaction.should_receive("get_blacklist_root_path").and_return(
            "bl_root_path"
        )
        # mock out initializing a ZK connection
        fake_zookeeper = flexmock(
            name="fake_zoo",
            exists="exists",
            set_async="set_async",
            connected=lambda: True,
        )
        fake_zookeeper.should_receive("start")
        fake_zookeeper.should_receive("retry").with_args("exists", str).and_return(True)
        fake_zookeeper.should_receive("retry").with_args("set_async", str, str)
        flexmock(kazoo.client)
        kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
        transaction = zk.ZKTransaction(host="something", start_gc=False)
        self.assertEqual(
            True, transaction.register_updated_key(self.appid, "1", "2", "somekey")
        )
        fake_zookeeper.should_receive("retry").with_args("exists", str).and_return(
            False
        )
        # NOTE(review): bare ZKTransactionException here vs
        # zk.ZKTransactionException elsewhere -- confirm it is imported
        # directly at module level.
        self.assertRaises(
            ZKTransactionException,
            transaction.register_updated_key,
            self.appid,
            "1",
            "2",
            "somekey",
        )
    def test_try_garbage_collection(self):
        """try_garbage_collection runs GC only when the stored last-run
        timestamp is old enough (or the timestamp node is absent)."""
        # mock out getTransactionRootPath
        flexmock(zk.ZKTransaction)
        zk.ZKTransaction.should_receive("update_node")
        # mock out initializing a ZK connection
        fake_zookeeper = flexmock(
            name="fake_zoo",
            exists="exists",
            get="get",
            get_children="get_children",
            create="create",
            delete="delete",
        )
        fake_zookeeper.should_receive("start")
        fake_zookeeper.should_receive("retry").with_args("exists", str).and_return(True)
        fake_zookeeper.should_receive("retry").with_args("get", str).and_return(
            [str(time.time() + 10000)]
        )
        fake_zookeeper.should_receive("retry").with_args(
            "get_children", str
        ).and_return(["1", "2", "3"])
        fake_zookeeper.should_receive("retry").with_args(
            "create", str, value=str, acl=None, ephemeral=bool
        )
        fake_zookeeper.should_receive("retry").with_args("delete", str)
        flexmock(kazoo.client)
        kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
        # Put the last time we ran GC way into the future.
        transaction = zk.ZKTransaction(host="something", start_gc=False)
        self.assertEqual(
            False, transaction.try_garbage_collection(self.appid, "/some/path")
        )
        # Make it so we recently ran the GC
        fake_zookeeper.should_receive("retry").with_args("get", str).and_return(
            [str(time.time())]
        )
        self.assertEqual(
            False, transaction.try_garbage_collection(self.appid, "/some/path")
        )
        # Make it so we ran the GC a long time ago.
        fake_zookeeper.should_receive("retry").with_args("get", str).and_return(
            [str(time.time() - 1000)]
        )
        self.assertEqual(
            True, transaction.try_garbage_collection(self.appid, "/some/path")
        )
        # No node means we have not run the GC before, so run it.
        fake_zookeeper.should_receive("retry").with_args("get", str).and_raise(
            kazoo.exceptions.NoNodeError
        )
        self.assertEqual(
            True, transaction.try_garbage_collection(self.appid, "/some/path")
        )
    def test_notify_failed_transaction(self):
        """Placeholder: notify_failed_transaction has no coverage yet."""
        pass
        # TODO
    def test_execute_garbage_collection(self):
        """execute_garbage_collection completes without error against a
        fully mocked ZooKeeper client (smoke test, no assertions)."""
        # mock out getTransactionRootPath
        flexmock(zk.ZKTransaction)
        zk.ZKTransaction.should_receive("notify_failed_transaction")
        # mock out initializing a ZK connection
        fake_zookeeper = flexmock(
            name="fake_zoo", exists="exists", get="get", get_children="get_children"
        )
        fake_zookeeper.should_receive("start")
        fake_zookeeper.should_receive("retry").with_args("exists", str).and_return(True)
        fake_zookeeper.should_receive("retry").with_args("get", str).and_return(
            [str(time.time() + 10000)]
        )
        fake_zookeeper.should_receive("retry").with_args(
            "get_children", str
        ).and_return(["1", "2", "3"])
        flexmock(kazoo.client)
        kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
        transaction = zk.ZKTransaction(host="something", start_gc=False)
        transaction.execute_garbage_collection(self.appid, "some/path")
    def test_get_lock_with_path(self):
        """get_lock_with_path returns True when the lock node is created and
        False when it already exists (NodeExistsError)."""
        flexmock(zk.ZKTransaction)
        # mock out initializing a ZK connection
        fake_zookeeper = flexmock(name="fake_zoo", create="create")
        fake_zookeeper.should_receive("start")
        fake_zookeeper.should_receive("retry").with_args(
            "create", str, value=str, acl=None, ephemeral=bool
        ).and_return(True)
        flexmock(kazoo.client)
        kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
        transaction = zk.ZKTransaction(host="something", start_gc=False)
        self.assertEqual(True, transaction.get_lock_with_path("path"))
        fake_zookeeper.should_receive("retry").with_args(
            "create", str, value=str, acl=None, ephemeral=bool
        ).and_raise(kazoo.exceptions.NodeExistsError)
        self.assertEqual(False, transaction.get_lock_with_path("some/path"))
    def test_release_lock_with_path(self):
        """release_lock_with_path returns True on delete and raises when the
        lock node is already gone (NoNodeError)."""
        flexmock(zk.ZKTransaction)
        # mock out initializing a ZK connection
        fake_zookeeper = flexmock(name="fake_zoo", delete="delete")
        fake_zookeeper.should_receive("start")
        fake_zookeeper.should_receive("retry").with_args("delete", str)
        flexmock(kazoo.client)
        kazoo.client.should_receive("KazooClient").and_return(fake_zookeeper)
        transaction = zk.ZKTransaction(host="something", start_gc=False)
        self.assertEqual(True, transaction.release_lock_with_path("some/path"))
        fake_zookeeper.should_receive("retry").with_args("delete", str).and_raise(
            kazoo.exceptions.NoNodeError
        )
        # NOTE(review): bare ZKTransactionException here vs
        # zk.ZKTransactionException elsewhere -- confirm the direct import.
        self.assertRaises(
            ZKTransactionException, transaction.release_lock_with_path, "some/path"
        )
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
<|endoftext|> |
<|endoftext|>import cgi
import datetime
import wsgiref.handlers
from google.appengine.ext import webapp
class MainPage(webapp.RequestHandler):
    """Request handler that renders a minimal static greeting page."""

    def get(self):
        # Emit the page fragments in document order; each one goes
        # straight to the response stream, exactly as the handler would
        # with individual write calls.
        for fragment in ("<html><body>", "<p>Hello</p>", "</body></html>"):
            self.response.out.write(fragment)
# WSGI application mapping URL paths to their request handlers.
application = webapp.WSGIApplication(
    [
        ("/", MainPage),
    ],
    debug=True,  # render tracebacks in the response on errors
)
def main():
    """Serve the WSGI application through the CGI handler."""
    wsgiref.handlers.CGIHandler().run(application)
# CGI entry point when executed directly.
if __name__ == "__main__":
    main()
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
from google.appengine._internal.django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write(
"Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n"
% __file__
)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
<|endoftext|> |
<|endoftext|>class FileProxyMixin(object):
"""
A mixin class used to forward file methods to an underlaying file
object. The internal file object has to be called "file"::
class FileProxy(FileProxyMixin):
def __init__(self, file):
self.file = file
"""
encoding = property(lambda self: self.file.encoding)
fileno = property(lambda self: self.file.fileno)
flush = property(lambda self: self.file.flush)
isatty = property(lambda self: self.file.isatty)
newlines = property(lambda self: self.file.newlines)
read = property(lambda self: self.file.read)
readinto = property(lambda self: self.file.readinto)
readline = property(lambda self: self.file.readline)
readlines = property(lambda self: self.file.readlines)
seek = property(lambda self: self.file.seek)
softspace = property(lambda self: self.file.softspace)
tell = property(lambda self: self.file.tell)
truncate = property(lambda self: self.file.truncate)
write = property(lambda self: self.file.write)
writelines = property(lambda self: self.file.writelines)
xreadlines = property(lambda self: self.file.__iter__)
def __iter__(self):
return iter(self.file)
<|endoftext|> |
<|endoftext|>import fnmatch
import glob
import os
import re
import sys
from itertools import dropwhile
from optparse import make_option
from subprocess import PIPE, Popen
from google.appengine._internal.django.core.management.base import (
CommandError,
BaseCommand,
)
from google.appengine._internal.django.utils.text import get_text_list
# Matches C++-style "//" comment starts (used to turn JS sources into
# pseudo-Python so xgettext can parse them).
pythonize_re = re.compile(r"(?:^|\n)\s*//")
# Captures the full "Plural-Forms" header line of a PO file.
plural_forms_re = re.compile(
    r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL
)
def handle_extensions(extensions=("html",)):
    """
    Organize multiple extensions that are separated with commas or passed by
    using --extension/-e multiple times.

    For example: running 'django-admin makemessages -e js,txt -e xhtml -a'
    would result in an extension set: {'.js', '.txt', '.xhtml'}

    Returns a set of dot-prefixed extensions, never including '.py'.

    >>> sorted(handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py']))
    ['.html', '.js']
    >>> sorted(handle_extensions(['.html, txt,.tpl']))
    ['.html', '.tpl', '.txt']
    """
    ext_list = []
    for ext in extensions:
        # Each argument may itself carry a comma-separated list, possibly
        # with stray spaces.
        ext_list.extend(ext.replace(" ", "").split(","))
    # Normalize every entry to start with a dot.
    ext_list = [ext if ext.startswith(".") else ".%s" % ext for ext in ext_list]
    # we don't want *.py files here because of the way non-*.py files
    # are handled in make_messages() (they are copied to file.ext.py files to
    # trick xgettext to parse them as Python files)
    return set(ext for ext in ext_list if ext != ".py")
def _popen(cmd):
"""
Friendly wrapper around Popen for Windows
"""
p = Popen(
cmd,
shell=True,
stdout=PIPE,
stderr=PIPE,
close_fds=os.name != "nt",
universal_newlines=True,
)
return p.communicate()
def walk(root, topdown=True, onerror=None, followlinks=False):
    """
    A version of os.walk that can follow symlinks for Python < 2.6.

    Yields the same (dirpath, dirnames, filenames) triples as os.walk;
    when *followlinks* is true, symlinked directories are additionally
    walked (one level of link-following, matching the original helper).
    """
    for entry in os.walk(root, topdown, onerror):
        yield entry
        if not followlinks:
            continue
        dirpath, dirnames = entry[0], entry[1]
        for name in dirnames:
            candidate = os.path.join(dirpath, name)
            if os.path.islink(candidate):
                for linked_entry in walk(candidate):
                    yield linked_entry
def is_ignored(path, ignore_patterns):
    """
    Helper function to check if the given path should be ignored or not.

    A path is ignored when it matches (case-sensitively) any glob-style
    pattern in *ignore_patterns*.
    """
    return any(
        fnmatch.fnmatchcase(path, pattern) for pattern in ignore_patterns
    )
def find_files(root, ignore_patterns, verbosity, symlinks=False):
    """
    Helper function to get all files in the given root.

    Returns a sorted list of (dirpath, filename) pairs for every file
    under *root* whose normalized path does not match any pattern in
    *ignore_patterns*. With verbosity > 1, ignored files are reported
    on stdout.
    """
    all_files = []
    # FIX: the original walked "." unconditionally, silently ignoring the
    # *root* parameter; walk *root* so the documented contract holds.
    # (The only visible caller passes ".", so behavior there is unchanged.)
    for dirpath, dirnames, filenames in walk(root, followlinks=symlinks):
        for f in filenames:
            norm_filepath = os.path.normpath(os.path.join(dirpath, f))
            if is_ignored(norm_filepath, ignore_patterns):
                if verbosity > 1:
                    sys.stdout.write("ignoring file %s in %s\n" % (f, dirpath))
            else:
                all_files.append((dirpath, f))
    all_files.sort()
    return all_files
def copy_plural_forms(msgs, locale, domain, verbosity):
    """
    Copies plural forms header contents from a Django catalog of locale to
    the msgs string, inserting it at the right place. msgs should be the
    contents of a newly created .po file.
    """
    import django
    django_dir = os.path.normpath(os.path.join(os.path.dirname(django.__file__)))
    # For djangojs fall back to the 'django' catalog if no djangojs one exists.
    if domain == "djangojs":
        domains = ("djangojs", "django")
    else:
        domains = ("django",)
    for domain in domains:
        django_po = os.path.join(
            django_dir, "conf", "locale", locale, "LC_MESSAGES", "%s.po" % domain
        )
        if os.path.exists(django_po):
            m = plural_forms_re.search(open(django_po, "rU").read())
            if m:
                if verbosity > 1:
                    sys.stderr.write("copying plural forms: %s\n" % m.group("value"))
                lines = []
                seen = False
                for line in msgs.split("\n"):
                    # Insert the copied header at the first blank line,
                    # i.e. right after the PO header block.
                    if not line and not seen:
                        line = "%s\n" % m.group("value")
                        seen = True
                    lines.append(line)
                msgs = "\n".join(lines)
                break
    return msgs
def make_messages(
    locale=None,
    domain="django",
    verbosity="1",
    all=False,
    extensions=None,
    symlinks=False,
    ignore_patterns=[],
):
    """
    Extract translatable strings and create/update .po message files.

    Uses the locale directory from the Django SVN tree or an application/
    project to process all source files for the given *locale* (or every
    available locale when *all* is true), for message *domain* 'django'
    or 'djangojs'. Shells out to the GNU gettext tools (xgettext,
    msguniq, msgmerge).

    NOTE(review): ``all`` shadows the builtin and ``ignore_patterns=[]``
    is a mutable default argument -- the Command caller always passes
    both explicitly, but keep this in mind when calling directly.
    """
    # Need to ensure that the i18n framework is enabled
    from google.appengine._internal.django.conf import settings
    if settings.configured:
        settings.USE_I18N = True
    else:
        settings.configure(USE_I18N=True)
    from google.appengine._internal.django.utils.translation import templatize
    invoked_for_django = False
    # Locate the locale directory: "conf/locale" inside the Django tree,
    # otherwise a project/application-level "locale" directory.
    if os.path.isdir(os.path.join("conf", "locale")):
        localedir = os.path.abspath(os.path.join("conf", "locale"))
        invoked_for_django = True
    elif os.path.isdir("locale"):
        localedir = os.path.abspath("locale")
    else:
        raise CommandError(
            "This script should be run from the Django SVN tree or your project or app tree. If you did indeed run it from the SVN checkout or your project or application, maybe you are just missing the conf/locale (in the django tree) or locale (for project and application) directory? It is not created automatically, you have to create it by hand if you want to enable i18n for your project or application."
        )
    if domain not in ("django", "djangojs"):
        raise CommandError(
            "currently makemessages only supports domains 'django' and 'djangojs'"
        )
    if (locale is None and not all) or domain is None:
        # backwards compatible error message
        if not sys.argv[0].endswith("make-messages.py"):
            message = "Type '%s help %s' for usage.\n" % (
                os.path.basename(sys.argv[0]),
                sys.argv[1],
            )
        else:
            message = (
                "usage: make-messages.py -l <language>\n or: make-messages.py -a\n"
            )
        raise CommandError(message)
    # We require gettext version 0.15 or newer.
    output = _popen("xgettext --version")[0]
    match = re.search(r"(?P<major>\d+)\.(?P<minor>\d+)", output)
    if match:
        xversion = (int(match.group("major")), int(match.group("minor")))
        if xversion < (0, 15):
            raise CommandError(
                "Django internationalization requires GNU gettext 0.15 or newer. You are using version %s, please upgrade your gettext toolset."
                % match.group()
            )
    # Build the list of locales to process.
    languages = []
    if locale is not None:
        languages.append(locale)
    elif all:
        locale_dirs = list(filter(os.path.isdir, glob.glob("%s/*" % localedir)))
        languages = [os.path.basename(l) for l in locale_dirs]
    for locale in languages:
        if verbosity > 0:
            print("processing language", locale)
        basedir = os.path.join(localedir, locale, "LC_MESSAGES")
        if not os.path.isdir(basedir):
            os.makedirs(basedir)
        pofile = os.path.join(basedir, "%s.po" % domain)
        potfile = os.path.join(basedir, "%s.pot" % domain)
        # Start from a fresh .pot template each run.
        if os.path.exists(potfile):
            os.unlink(potfile)
        for dirpath, file in find_files(
            ".", ignore_patterns, verbosity, symlinks=symlinks
        ):
            file_base, file_ext = os.path.splitext(file)
            if domain == "djangojs" and file_ext in extensions:
                if verbosity > 1:
                    sys.stdout.write("processing file %s in %s\n" % (file, dirpath))
                # Rewrite "//" comments so xgettext (in Perl mode) can
                # parse the JS source; work on a temporary *.py copy.
                src = open(os.path.join(dirpath, file), "rU").read()
                src = pythonize_re.sub("\n#", src)
                thefile = "%s.py" % file
                f = open(os.path.join(dirpath, thefile), "w")
                try:
                    f.write(src)
                finally:
                    f.close()
                cmd = (
                    'xgettext -d %s -L Perl --keyword=gettext_noop --keyword=gettext_lazy --keyword=ngettext_lazy:1,2 --from-code UTF-8 -o - "%s"'
                    % (domain, os.path.join(dirpath, thefile))
                )
                msgs, errors = _popen(cmd)
                if errors:
                    raise CommandError(
                        "errors happened while running xgettext on %s\n%s"
                        % (file, errors)
                    )
                # Point the "#: " source references back at the real file,
                # not the temporary *.py copy.
                old = "#: " + os.path.join(dirpath, thefile)[2:]
                new = "#: " + os.path.join(dirpath, file)[2:]
                msgs = msgs.replace(old, new)
                if os.path.exists(potfile):
                    # Strip the header
                    msgs = "\n".join(dropwhile(len, msgs.split("\n")))
                else:
                    msgs = msgs.replace("charset=CHARSET", "charset=UTF-8")
                if msgs:
                    f = open(potfile, "ab")
                    try:
                        f.write(msgs)
                    finally:
                        f.close()
                os.unlink(os.path.join(dirpath, thefile))
            elif domain == "django" and (file_ext == ".py" or file_ext in extensions):
                thefile = file
                if file_ext in extensions:
                    # Templates are converted to Python via templatize()
                    # and written to a temporary *.py file for xgettext.
                    src = open(os.path.join(dirpath, file), "rU").read()
                    thefile = "%s.py" % file
                    try:
                        f = open(os.path.join(dirpath, thefile), "w")
                        try:
                            f.write(templatize(src))
                        finally:
                            f.close()
                    except SyntaxError as msg:
                        msg = "%s (file: %s)" % (msg, os.path.join(dirpath, file))
                        raise SyntaxError(msg)
                if verbosity > 1:
                    sys.stdout.write("processing file %s in %s\n" % (file, dirpath))
                cmd = (
                    'xgettext -d %s -L Python --keyword=gettext_noop --keyword=gettext_lazy --keyword=ngettext_lazy:1,2 --keyword=ugettext_noop --keyword=ugettext_lazy --keyword=ungettext_lazy:1,2 --from-code UTF-8 -o - "%s"'
                    % (domain, os.path.join(dirpath, thefile))
                )
                msgs, errors = _popen(cmd)
                if errors:
                    raise CommandError(
                        "errors happened while running xgettext on %s\n%s"
                        % (file, errors)
                    )
                if thefile != file:
                    old = "#: " + os.path.join(dirpath, thefile)[2:]
                    new = "#: " + os.path.join(dirpath, file)[2:]
                    msgs = msgs.replace(old, new)
                if os.path.exists(potfile):
                    # Strip the header
                    msgs = "\n".join(dropwhile(len, msgs.split("\n")))
                else:
                    msgs = msgs.replace("charset=CHARSET", "charset=UTF-8")
                if msgs:
                    f = open(potfile, "ab")
                    try:
                        f.write(msgs)
                    finally:
                        f.close()
                if thefile != file:
                    os.unlink(os.path.join(dirpath, thefile))
        # Deduplicate the template, merge into an existing .po (or copy
        # plural forms for a brand-new catalog), then drop the template.
        if os.path.exists(potfile):
            msgs, errors = _popen('msguniq --to-code=utf-8 "%s"' % potfile)
            if errors:
                raise CommandError("errors happened while running msguniq\n%s" % errors)
            f = open(potfile, "w")
            try:
                f.write(msgs)
            finally:
                f.close()
            if os.path.exists(pofile):
                msgs, errors = _popen('msgmerge -q "%s" "%s"' % (pofile, potfile))
                if errors:
                    raise CommandError(
                        "errors happened while running msgmerge\n%s" % errors
                    )
            elif not invoked_for_django:
                msgs = copy_plural_forms(msgs, locale, domain, verbosity)
            f = open(pofile, "wb")
            try:
                f.write(msgs)
            finally:
                f.close()
            os.unlink(potfile)
class Command(BaseCommand):
    """Management command wrapper around make_messages().

    Parses the command-line options and delegates the actual string
    extraction to make_messages().
    """
    option_list = BaseCommand.option_list + (
        make_option(
            "--locale",
            "-l",
            default=None,
            dest="locale",
            help="Creates or updates the message files only for the given locale (e.g. pt_BR).",
        ),
        make_option(
            "--domain",
            "-d",
            default="django",
            dest="domain",
            help='The domain of the message files (default: "django").',
        ),
        make_option(
            "--all",
            "-a",
            action="store_true",
            dest="all",
            default=False,
            help="Reexamines all source code and templates for new translation strings and updates all message files for all available languages.",
        ),
        make_option(
            "--extension",
            "-e",
            dest="extensions",
            help='The file extension(s) to examine (default: ".html", separate multiple extensions with commas, or use -e multiple times)',
            action="append",
        ),
        make_option(
            "--symlinks",
            "-s",
            action="store_true",
            dest="symlinks",
            default=False,
            help="Follows symlinks to directories when examining source code and templates for translation strings.",
        ),
        make_option(
            "--ignore",
            "-i",
            action="append",
            dest="ignore_patterns",
            default=[],
            metavar="PATTERN",
            help="Ignore files or directories matching this glob-style pattern. Use multiple times to ignore more.",
        ),
        make_option(
            "--no-default-ignore",
            action="store_false",
            dest="use_default_ignore_patterns",
            default=True,
            help="Don't ignore the common glob-style patterns 'CVS', '.*' and '*~'.",
        ),
    )
    help = "Runs over the entire source tree of the current directory and pulls out all strings marked for translation. It creates (or updates) a message file in the conf/locale (in the django tree) or locale (for project and application) directory."
    requires_model_validation = False
    can_import_settings = False
    def handle(self, *args, **options):
        """Validate options and invoke make_messages()."""
        if len(args) != 0:
            raise CommandError("Command doesn't accept any arguments")
        locale = options.get("locale")
        domain = options.get("domain")
        verbosity = int(options.get("verbosity"))
        process_all = options.get("all")
        extensions = options.get("extensions")
        symlinks = options.get("symlinks")
        ignore_patterns = options.get("ignore_patterns")
        if options.get("use_default_ignore_patterns"):
            ignore_patterns += ["CVS", ".*", "*~"]
        # Deduplicate user-supplied and default ignore patterns.
        ignore_patterns = list(set(ignore_patterns))
        # Default extensions depend on the domain: .js for djangojs,
        # .html otherwise.
        if domain == "djangojs":
            extensions = handle_extensions(extensions or ["js"])
        else:
            extensions = handle_extensions(extensions or ["html"])
        if verbosity > 1:
            sys.stdout.write(
                "examining files with the extensions: %s\n"
                % get_text_list(list(extensions), "and")
            )
        make_messages(
            locale,
            domain,
            verbosity,
            process_all,
            extensions,
            symlinks,
            ignore_patterns,
        )
<|endoftext|> |
<|endoftext|>"""
XML serializer.
"""
from google.appengine._internal.django.conf import settings
from google.appengine._internal.django.core.serializers import base
from google.appengine._internal.django.db import models, DEFAULT_DB_ALIAS
from google.appengine._internal.django.utils.xmlutils import SimplerXMLGenerator
from google.appengine._internal.django.utils.encoding import smart_unicode
from xml.dom import pulldom
class Serializer(base.Serializer):
    """
    Serializes a QuerySet to XML.
    """
    def indent(self, level):
        # Emit indentation whitespace only when the 'indent' option is set.
        if self.options.get("indent", None) is not None:
            self.xml.ignorableWhitespace(
                "\n" + " " * self.options.get("indent", None) * level
            )
    def start_serialization(self):
        """
        Start serialization -- open the XML document and the root element.
        """
        self.xml = SimplerXMLGenerator(
            self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET)
        )
        self.xml.startDocument()
        self.xml.startElement("django-objects", {"version": "1.0"})
    def end_serialization(self):
        """
        End serialization -- end the document.
        """
        self.indent(0)
        self.xml.endElement("django-objects")
        self.xml.endDocument()
    def start_object(self, obj):
        """
        Called as each object is handled.
        """
        if not hasattr(obj, "_meta"):
            raise base.SerializationError(
                "Non-model object (%s) encountered during serialization" % type(obj)
            )
        self.indent(1)
        obj_pk = obj._get_pk_val()
        # Objects without a primary key get no "pk" attribute at all.
        if obj_pk is None:
            attrs = {
                "model": smart_unicode(obj._meta),
            }
        else:
            attrs = {
                "pk": smart_unicode(obj._get_pk_val()),
                "model": smart_unicode(obj._meta),
            }
        self.xml.startElement("object", attrs)
    def end_object(self, obj):
        """
        Called after handling all fields for an object.
        """
        self.indent(1)
        self.xml.endElement("object")
    def handle_field(self, obj, field):
        """
        Called to handle each field on an object (except for ForeignKeys and
        ManyToManyFields)
        """
        self.indent(2)
        self.xml.startElement(
            "field", {"name": field.name, "type": field.get_internal_type()}
        )
        # Get a "string version" of the object's data.
        if getattr(obj, field.name) is not None:
            self.xml.characters(field.value_to_string(obj))
        else:
            # NULL values are serialized as an empty <None/> child element.
            self.xml.addQuickElement("None")
        self.xml.endElement("field")
    def handle_fk_field(self, obj, field):
        """
        Called to handle a ForeignKey (we need to treat them slightly
        differently from regular fields).
        """
        self._start_relational_field(field)
        related = getattr(obj, field.name)
        if related is not None:
            if self.use_natural_keys and hasattr(related, "natural_key"):
                # If related object has a natural key, use it
                related = related.natural_key()
                # Iterable natural keys are rolled out as subelements
                for key_value in related:
                    self.xml.startElement("natural", {})
                    self.xml.characters(smart_unicode(key_value))
                    self.xml.endElement("natural")
            else:
                if field.rel.field_name == related._meta.pk.name:
                    # Related to remote object via primary key
                    related = related._get_pk_val()
                else:
                    # Related to remote object via other field
                    related = getattr(related, field.rel.field_name)
                self.xml.characters(smart_unicode(related))
        else:
            self.xml.addQuickElement("None")
        self.xml.endElement("field")
    def handle_m2m_field(self, obj, field):
        """
        Called to handle a ManyToManyField. Related objects are only
        serialized as references to the object's PK (i.e. the related *data*
        is not dumped, just the relation).
        """
        # Skip M2M relations that go through an explicit intermediate model.
        if field.rel.through._meta.auto_created:
            self._start_relational_field(field)
            if self.use_natural_keys and hasattr(field.rel.to, "natural_key"):
                # If the objects in the m2m have a natural key, use it
                def handle_m2m(value):
                    natural = value.natural_key()
                    # Iterable natural keys are rolled out as subelements
                    self.xml.startElement("object", {})
                    for key_value in natural:
                        self.xml.startElement("natural", {})
                        self.xml.characters(smart_unicode(key_value))
                        self.xml.endElement("natural")
                    self.xml.endElement("object")
            else:

                def handle_m2m(value):
                    self.xml.addQuickElement(
                        "object", attrs={"pk": smart_unicode(value._get_pk_val())}
                    )
            for relobj in getattr(obj, field.name).iterator():
                handle_m2m(relobj)
            self.xml.endElement("field")
    def _start_relational_field(self, field):
        """
        Helper to output the <field> element for relational fields
        """
        self.indent(2)
        self.xml.startElement(
            "field",
            {
                "name": field.name,
                "rel": field.rel.__class__.__name__,
                "to": smart_unicode(field.rel.to._meta),
            },
        )
class Deserializer(base.Deserializer):
    """
    Deserialize XML.
    """
    def __init__(self, stream_or_string, **options):
        super(Deserializer, self).__init__(stream_or_string, **options)
        # Pull-based DOM parsing lets us expand one <object> at a time.
        self.event_stream = pulldom.parse(self.stream)
        self.db = options.pop("using", DEFAULT_DB_ALIAS)
    def __next__(self):
        # Advance to the next <object> element and deserialize it.
        for event, node in self.event_stream:
            if event == "START_ELEMENT" and node.nodeName == "object":
                self.event_stream.expandNode(node)
                return self._handle_object(node)
        raise StopIteration
    def _handle_object(self, node):
        """
        Convert an <object> node to a DeserializedObject.
        """
        # Look up the model using the model loading mechanism. If this fails,
        # bail.
        Model = self._get_model_from_node(node, "model")
        # Start building a data dictionary from the object.
        # If the node is missing the pk set it to None
        if node.hasAttribute("pk"):
            pk = node.getAttribute("pk")
        else:
            pk = None
        data = {Model._meta.pk.attname: Model._meta.pk.to_python(pk)}
        # Also start building a dict of m2m data (this is saved as
        # {m2m_accessor_attribute : [list_of_related_objects]})
        m2m_data = {}
        # Deseralize each field.
        for field_node in node.getElementsByTagName("field"):
            # If the field is missing the name attribute, bail (are you
            # sensing a pattern here?)
            field_name = field_node.getAttribute("name")
            if not field_name:
                raise base.DeserializationError(
                    "<field> node is missing the 'name' attribute"
                )
            # Get the field from the Model. This will raise a
            # FieldDoesNotExist if, well, the field doesn't exist, which will
            # be propagated correctly.
            field = Model._meta.get_field(field_name)
            # As is usually the case, relation fields get the special treatment.
            if field.rel and isinstance(field.rel, models.ManyToManyRel):
                m2m_data[field.name] = self._handle_m2m_field_node(field_node, field)
            elif field.rel and isinstance(field.rel, models.ManyToOneRel):
                data[field.attname] = self._handle_fk_field_node(field_node, field)
            else:
                # A <None/> child marks a NULL value.
                if field_node.getElementsByTagName("None"):
                    value = None
                else:
                    value = field.to_python(getInnerText(field_node).strip())
                data[field.name] = value
        # Return a DeserializedObject so that the m2m data has a place to live.
        return base.DeserializedObject(Model(**data), m2m_data)
    def _handle_fk_field_node(self, node, field):
        """
        Handle a <field> node for a ForeignKey
        """
        # Check if there is a child node named 'None', returning None if so.
        if node.getElementsByTagName("None"):
            return None
        else:
            if hasattr(field.rel.to._default_manager, "get_by_natural_key"):
                keys = node.getElementsByTagName("natural")
                if keys:
                    # If there are 'natural' subelements, it must be a natural key
                    field_value = [getInnerText(k).strip() for k in keys]
                    obj = field.rel.to._default_manager.db_manager(
                        self.db
                    ).get_by_natural_key(*field_value)
                    obj_pk = getattr(obj, field.rel.field_name)
                    # If this is a natural foreign key to an object that
                    # has a FK/O2O as the foreign key, use the FK value
                    if field.rel.to._meta.pk.rel:
                        obj_pk = obj_pk.pk
                else:
                    # Otherwise, treat like a normal PK
                    field_value = getInnerText(node).strip()
                    obj_pk = field.rel.to._meta.get_field(
                        field.rel.field_name
                    ).to_python(field_value)
                return obj_pk
            else:
                field_value = getInnerText(node).strip()
                return field.rel.to._meta.get_field(field.rel.field_name).to_python(
                    field_value
                )
    def _handle_m2m_field_node(self, node, field):
        """
        Handle a <field> node for a ManyToManyField.
        """
        if hasattr(field.rel.to._default_manager, "get_by_natural_key"):

            def m2m_convert(n):
                keys = n.getElementsByTagName("natural")
                if keys:
                    # If there are 'natural' subelements, it must be a natural key
                    field_value = [getInnerText(k).strip() for k in keys]
                    obj_pk = (
                        field.rel.to._default_manager.db_manager(self.db)
                        .get_by_natural_key(*field_value)
                        .pk
                    )
                else:
                    # Otherwise, treat like a normal PK value.
                    obj_pk = field.rel.to._meta.pk.to_python(n.getAttribute("pk"))
                return obj_pk
        else:
            m2m_convert = lambda n: field.rel.to._meta.pk.to_python(
                n.getAttribute("pk")
            )
        return [m2m_convert(c) for c in node.getElementsByTagName("object")]
    def _get_model_from_node(self, node, attr):
        """
        Helper to look up a model from a <object model=...> or a <field
        rel=... to=...> node.
        """
        model_identifier = node.getAttribute(attr)
        if not model_identifier:
            raise base.DeserializationError(
                "<%s> node is missing the required '%s' attribute"
                % (node.nodeName, attr)
            )
        try:
            Model = models.get_model(*model_identifier.split("."))
        except TypeError:
            Model = None
        if Model is None:
            raise base.DeserializationError(
                "<%s> node has invalid model identifier: '%s'"
                % (node.nodeName, model_identifier)
            )
        return Model
def getInnerText(node):
    """
    Get all the inner text of a DOM node (recursively).

    Text and CDATA children contribute their data directly; element
    children contribute their own inner text. Every other node type
    (comments, processing instructions, ...) is ignored.
    """
    # inspired by http://mail.python.org/pipermail/xml-sig/2005-March/011022.html
    pieces = []
    for child in node.childNodes:
        if child.nodeType in (child.TEXT_NODE, child.CDATA_SECTION_NODE):
            pieces.append(child.data)
        elif child.nodeType == child.ELEMENT_NODE:
            pieces.append(getInnerText(child))
    return "".join(pieces)
<|endoftext|> |
<|endoftext|>import os
import sys
# Platform-specific daemonization: real double-fork on POSIX, simulated
# redirection elsewhere (e.g. Windows).
if os.name == "posix":
    def become_daemon(
        our_home_dir=".", out_log="/dev/null", err_log="/dev/null", umask=0o22
    ):
        "Robustly turn into a UNIX daemon, running in our_home_dir."
        # First fork
        try:
            if os.fork() > 0:
                sys.exit(0)  # kill off parent
        except OSError as e:
            sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
            sys.exit(1)
        # Detach from the controlling terminal and normalize the process
        # environment before the second fork.
        os.setsid()
        os.chdir(our_home_dir)
        os.umask(umask)
        # Second fork
        try:
            if os.fork() > 0:
                os._exit(0)
        except OSError as e:
            sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
            os._exit(1)
        # Redirect the standard file descriptors to the given log files.
        # NOTE(review): open(..., "a+", 0) (unbuffered text mode) is only
        # valid on Python 2 -- confirm the target runtime before reuse.
        si = open("/dev/null", "r")
        so = open(out_log, "a+", 0)
        se = open(err_log, "a+", 0)
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())
        # Set custom file descriptors so that they get proper buffering.
        sys.stdout, sys.stderr = so, se
else:
    def become_daemon(our_home_dir=".", out_log=None, err_log=None, umask=0o22):
        """
        If we're not running under a POSIX system, just simulate the daemon
        mode by doing redirections and directory changing.
        """
        os.chdir(our_home_dir)
        os.umask(umask)
        sys.stdin.close()
        sys.stdout.close()
        sys.stderr.close()
        if err_log:
            sys.stderr = open(err_log, "a", 0)
        else:
            sys.stderr = NullDevice()
        if out_log:
            sys.stdout = open(out_log, "a", 0)
        else:
            sys.stdout = NullDevice()
class NullDevice:
    """File-like sink that silently discards all writes (like /dev/null)."""

    def write(self, s):
        # Intentionally drop the data on the floor.
        pass
<|endoftext|> |
<|endoftext|>"""
Code used in a couple of places to work with the current thread's environment.
Current users include i18n and request prefix handling.
"""
try:
    import threading

    # Bind the canonical PEP 8 function; ``threading.currentThread`` is the
    # same callable under a deprecated camelCase alias that was removed in
    # Python 3.12, so referencing it directly would break on new interpreters.
    currentThread = threading.current_thread
except ImportError:

    def currentThread():
        """Fallback identifier when the interpreter has no thread support."""
        return "no threading"
<|endoftext|> |
<|endoftext|>"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = "$Id: Types.py,v 1.19 2005/02/22 04:29:43 warnes Exp $"
from version import __version__
import collections
import base64
import cgi
import urllib.request, urllib.parse, urllib.error
import copy
import re
import time
from types import *
# SOAPpy modules
from .Errors import *
from NS import NS
from Utilities import encodeHexString, cleanDate
from .Config import Config
###############################################################################
# Utility functions
###############################################################################
def isPrivate(name):
    """Return True when *name* is private by SOAP-mapping convention.

    A name is private when it starts with an underscore. Unlike the previous
    ``name[0] == "_"``, this does not raise IndexError on the empty string;
    "" is treated as public.
    """
    return name.startswith("_")
def isPublic(name):
    """Return True when *name* is public (does not start with underscore).

    Robust against the empty string, which the previous ``name[0] != "_"``
    rejected with IndexError; "" counts as public.
    """
    return not name.startswith("_")
###############################################################################
# Types and Wrappers
###############################################################################
class anyType:
    """Base class for all SOAP/XSD typed values.

    Subclasses override _checkValueSpace() to validate their value space and
    _marshalData() to serialize it; this class carries the shared plumbing:
    element name/namespace, SOAP attributes, and the marshalling cache.
    """

    # Namespace URIs under which this type's name is considered valid.
    _validURIs = (NS.XSD, NS.XSD2, NS.XSD3, NS.ENC)
    def __init__(self, data=None, name=None, typed=1, attrs=None):
        # Abstract base: concrete subclasses must be used instead.
        if self.__class__ == anyType:
            raise Error("anyType can't be instantiated directly")
        # name may be a (namespace, localname) pair or a bare local name.
        if type(name) in (ListType, TupleType):
            self._ns, self._name = name
        else:
            self._ns = self._validURIs[0]
            self._name = name
        self._typed = typed
        self._attrs = {}
        # Memoized marshalled text; invalidated when attributes change.
        self._cache = None
        self._type = self._typeName()
        self._data = self._checkValueSpace(data)
        if attrs != None:
            self._setAttrs(attrs)
    def __str__(self):
        if hasattr(self, "_name") and self._name:
            return "<%s %s at %d>" % (self.__class__, self._name, id(self))
        return "<%s at %d>" % (self.__class__, id(self))
    __repr__ = __str__
    def _checkValueSpace(self, data):
        # Default: accept anything; subclasses validate/normalize.
        return data
    def _marshalData(self):
        return str(self._data)
    def _marshalAttrs(self, ns_map, builder):
        # Serialize self._attrs as XML attribute text using builder's
        # namespace generator.
        # NOTE(review): cgi.escape was removed in Python 3.8 — this needs
        # html.escape on modern interpreters; confirm target version.
        a = ""
        for attr, value in list(self._attrs.items()):
            ns, n = builder.genns(ns_map, attr[0])
            a += n + ' %s%s="%s"' % (ns, attr[1], cgi.escape(str(value), 1))
        return a
    def _fixAttr(self, attr):
        # Normalize an attribute key to a (namespace-or-None, name) tuple.
        if type(attr) in (StringType, UnicodeType):
            attr = (None, attr)
        elif type(attr) == ListType:
            attr = tuple(attr)
        elif type(attr) != TupleType:
            raise AttributeError("invalid attribute type")
        if len(attr) != 2:
            raise AttributeError("invalid attribute length")
        if type(attr[0]) not in (NoneType, StringType, UnicodeType):
            raise AttributeError("invalid attribute namespace URI type")
        return attr
    def _getAttr(self, attr):
        # Missing attributes read as None rather than raising.
        attr = self._fixAttr(attr)
        try:
            return self._attrs[attr]
        except:
            return None
    def _setAttr(self, attr, value):
        attr = self._fixAttr(attr)
        if type(value) is StringType:
            value = str(value)
        self._attrs[attr] = value
    def _setAttrs(self, attrs):
        # Accepts a flat [key, value, key, value, ...] sequence, a dict,
        # or another anyType whose attributes are copied.
        if type(attrs) in (ListType, TupleType):
            for i in range(0, len(attrs), 2):
                self._setAttr(attrs[i], attrs[i + 1])
            return
        if type(attrs) == DictType:
            d = attrs
        elif isinstance(attrs, anyType):
            d = attrs._attrs
        else:
            raise AttributeError("invalid attribute type")
        for attr, value in list(d.items()):
            self._setAttr(attr, value)
    def _setMustUnderstand(self, val):
        # SOAP envelope mustUnderstand flag helpers.
        self._setAttr((NS.ENV, "mustUnderstand"), val)
    def _getMustUnderstand(self):
        return self._getAttr((NS.ENV, "mustUnderstand"))
    def _setActor(self, val):
        # SOAP envelope actor attribute helpers.
        self._setAttr((NS.ENV, "actor"), val)
    def _getActor(self):
        return self._getAttr((NS.ENV, "actor"))
    def _typeName(self):
        # Derive the XSD type name by stripping the "Type" suffix from the
        # Python class name (e.g. stringType -> "string").
        return self.__class__.__name__[:-4]
    def _validNamespaceURI(self, URI, strict):
        # Untyped values accept any namespace (return None = no type info).
        if not hasattr(self, "_typed") or not self._typed:
            return None
        if URI in self._validURIs:
            return URI
        if not strict:
            return self._ns
        raise AttributeError("not a valid namespace for type %s" % self._type)
class voidType(anyType):
    # Marker type for an empty/absent value; inherits all behavior.
    pass
class stringType(anyType):
    """xsd:string -- requires a (unicode) string value."""

    def _checkValueSpace(self, data):
        """Validate that *data* is a non-None string."""
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (StringType, UnicodeType):
            # Fix: dropped the stray trailing colon from the message so it
            # matches every sibling type's "invalid %s type" wording.
            raise AttributeError("invalid %s type" % self._type)
        return data
class untypedType(stringType):
    # A string value serialized without any xsi:type annotation
    # (typed=0 suppresses type information during marshalling).
    def __init__(self, data=None, name=None, attrs=None):
        stringType.__init__(self, data, name, 0, attrs)
# Thin XSD string-derived types: each shares stringType's value space and
# differs only in the serialized type name derived from the class name.
class IDType(stringType):
    pass
class NCNameType(stringType):
    pass
class NameType(stringType):
    pass
class ENTITYType(stringType):
    pass
class IDREFType(stringType):
    pass
class languageType(stringType):
    pass
class NMTOKENType(stringType):
    pass
class QNameType(stringType):
    pass
class tokenType(anyType):
    """xsd:token -- a string with no tabs/newlines, no leading/trailing
    spaces, and no internal runs of spaces."""

    _validURIs = (NS.XSD2, NS.XSD3)
    # Pattern of forbidden content; compiled lazily on first use (the class
    # attribute starts as a plain string and is replaced by a compiled regex).
    __invalidre = "[\n\t]|^ | $| "
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError("invalid %s type" % self._type)
        if type(self.__invalidre) == StringType:
            self.__invalidre = re.compile(self.__invalidre)
        if self.__invalidre.search(data):
            raise ValueError("invalid %s value" % self._type)
        return data
class normalizedStringType(anyType):
    """xsd:normalizedString -- a string with no newline, carriage-return or
    tab characters."""

    _validURIs = (NS.XSD3,)
    # Forbidden characters; compiled lazily on first use.
    __invalidre = "[\n\r\t]"
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError("invalid %s type" % self._type)
        if type(self.__invalidre) == StringType:
            self.__invalidre = re.compile(self.__invalidre)
        if self.__invalidre.search(data):
            raise ValueError("invalid %s value" % self._type)
        return data
class CDATAType(normalizedStringType):
    # 1999-schema name for the same value space as normalizedString.
    _validURIs = (NS.XSD2,)
class booleanType(anyType):
    """xsd:boolean -- input is normalized to the integer 0 or 1."""

    def __int__(self):
        return self._data

    # Python 2 truth hook, kept for backward compatibility.
    __nonzero__ = __int__

    def __bool__(self):
        # Fix: Python 3 requires __bool__ to return an actual bool; the old
        # __nonzero__-only definition made truth-testing fall back to the
        # default (always true) and returning 0/1 here would raise TypeError.
        return bool(self._data)

    def _marshalData(self):
        # _data is always 0 or 1, so it indexes the literal table directly.
        return ["false", "true"][self._data]

    def _checkValueSpace(self, data):
        """Map accepted lexical and numeric forms onto 0 or 1."""
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if data in (0, "0", "false", ""):
            return 0
        if data in (1, "1", "true"):
            return 1
        raise ValueError("invalid %s value" % self._type)
class decimalType(anyType):
    """xsd:decimal -- accepts any int or float value."""

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (IntType, LongType, FloatType):
            # NOTE(review): raises Error where sibling types raise ValueError;
            # callers may depend on this, so it is documented rather than changed.
            raise Error("invalid %s value" % self._type)
        return data
class floatType(anyType):
    """xsd:float -- numeric value within IEEE single-precision range."""

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        # NOTE: NaN passes this check (both comparisons are False for NaN).
        if (
            type(data) not in (IntType, LongType, FloatType)
            or data < -3.4028234663852886e38
            or data > 3.4028234663852886e38
        ):
            raise ValueError("invalid %s value: %s" % (self._type, repr(data)))
        return data
    def _marshalData(self):
        return "%.18g" % self._data  # More precision
class doubleType(anyType):
    """xsd:double -- numeric value within IEEE double-precision range."""

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        # NOTE: NaN passes this check (both comparisons are False for NaN).
        if (
            type(data) not in (IntType, LongType, FloatType)
            or data < -1.7976931348623158e308
            or data > 1.7976931348623157e308
        ):
            raise ValueError("invalid %s value: %s" % (self._type, repr(data)))
        return data
    def _marshalData(self):
        return "%.18g" % self._data  # More precision
class durationType(anyType):
    """xsd:duration -- up to six components (Y, M, D, H, M, S).

    Value is stored as a 6-tuple; only the last nonzero component may carry
    a decimal fraction and only the first may be negative.
    """

    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        try:
            # A tuple or a scalar is OK, but make them into a list
            if type(data) == TupleType:
                data = list(data)
            elif type(data) != ListType:
                data = [data]
            if len(data) > 6:
                raise Exception("too many values")
            # Now check the types of all the components, and find
            # the first nonzero element along the way.
            f = -1
            for i in range(len(data)):
                if data[i] == None:
                    data[i] = 0
                    continue
                if type(data[i]) not in (IntType, LongType, FloatType):
                    raise Exception("element %d a bad type" % i)
                if data[i] and f == -1:
                    f = i
            # If they're all 0, just use zero seconds.
            if f == -1:
                self._cache = "PT0S"
                return (0,) * 6
            # Make sure only the last nonzero element has a decimal fraction
            # and only the first element is negative.
            d = -1
            for i in range(f, len(data)):
                if data[i]:
                    if d != -1:
                        raise Exception(
                            "all except the last nonzero element must be " "integers"
                        )
                    if data[i] < 0 and i > f:
                        raise Exception(
                            "only the first nonzero element can be negative"
                        )
                    elif data[i] != int(data[i]):
                        d = i
            # Pad the list on the left if necessary.
            if len(data) < 6:
                n = 6 - len(data)
                f += n
                d += n
                data = [0] * n + data
            # Save index of the first nonzero element and the decimal
            # element for _marshalData.
            self.__firstnonzero = f
            self.__decimal = d
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return tuple(data)
    def _marshalData(self):
        # Render as ISO 8601 duration, e.g. "P1Y2MT3.5S"; the "T" separator
        # is emitted before the first time (H/M/S) component.
        if self._cache == None:
            d = self._data
            t = 0
            if d[self.__firstnonzero] < 0:
                s = "-P"
            else:
                s = "P"
            t = 0
            for i in range(self.__firstnonzero, len(d)):
                if d[i]:
                    if i > 2 and not t:
                        s += "T"
                        t = 1
                    if self.__decimal == i:
                        s += "%g" % abs(d[i])
                    else:
                        s += "%d" % int(abs(d[i]))
                    s += ["Y", "M", "D", "H", "M", "S"][i]
            self._cache = s
        return self._cache
class timeDurationType(durationType):
    # 1999/2000-schema name for the same value space as duration.
    _validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class dateTimeType(anyType):
    """xsd:dateTime -- stored as a UTC (year, month, day, hour, min, sec)
    tuple; defaults to the current time when data is None."""

    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.time()
            if type(data) in (IntType, LongType):
                data = list(time.gmtime(data)[:6])
            elif type(data) == FloatType:
                # Preserve fractional seconds from a float timestamp.
                f = data - int(data)
                data = list(time.gmtime(int(data))[:6])
                data[5] += f
            elif type(data) in (ListType, TupleType):
                if len(data) < 6:
                    raise Exception("not enough values")
                if len(data) > 9:
                    raise Exception("too many values")
                data = list(data[:6])
                cleanDate(data)
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return tuple(data)
    def _marshalData(self):
        # ISO 8601 "YYYY-MM-DDThh:mm:ss[.fff]Z", negative years prefixed "-".
        if self._cache == None:
            d = self._data
            s = "%04d-%02d-%02dT%02d:%02d:%02d" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = "-" + s
            f = d[5] - int(d[5])
            if f != 0:
                s += ("%g" % f)[1:]
            s += "Z"
            self._cache = s
        return self._cache
class recurringInstantType(anyType):
    """xsd:recurringInstant (1999 schema) -- a dateTime in which leftmost
    components may be None to express recurrence (rendered as dashes)."""

    _validURIs = (NS.XSD,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = list(time.gmtime(time.time())[:6])
            if type(data) in (IntType, LongType):
                data = list(time.gmtime(data)[:6])
            elif type(data) == FloatType:
                # Preserve fractional seconds from a float timestamp.
                f = data - int(data)
                data = list(time.gmtime(int(data))[:6])
                data[5] += f
            elif type(data) in (ListType, TupleType):
                if len(data) < 1:
                    raise Exception("not enough values")
                if len(data) > 9:
                    raise Exception("too many values")
                data = list(data[:6])
                if len(data) < 6:
                    data += [0] * (6 - len(data))
                # Find the first non-None element; only a leftmost prefix of
                # the tuple is allowed to be None.
                f = len(data)
                for i in range(f):
                    if data[i] == None:
                        if f < i:
                            raise Exception("only leftmost elements can be none")
                    else:
                        f = i
                        break
                cleanDate(data, f)
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return tuple(data)
    def _marshalData(self):
        # Render missing components as "-"/"--" per the recurringInstant
        # lexical form; a negative component makes the whole value negative.
        if self._cache == None:
            d = self._data
            e = list(d)
            neg = ""
            if not e[0]:
                e[0] = "--"
            else:
                if e[0] < 0:
                    neg = "-"
                    e[0] = abs(e[0])
                if e[0] < 100:
                    e[0] = "-" + "%02d" % e[0]
                else:
                    e[0] = "%04d" % e[0]
            for i in range(1, len(e)):
                if e[i] == None or (i < 3 and e[i] == 0):
                    e[i] = "-"
                else:
                    if e[i] < 0:
                        neg = "-"
                        e[i] = abs(e[i])
                    e[i] = "%02d" % e[i]
            if d[5]:
                f = abs(d[5] - int(d[5]))
                if f:
                    e[5] += ("%g" % f)[1:]
            s = "%s%s-%s-%sT%s:%s:%sZ" % ((neg,) + tuple(e))
            self._cache = s
        return self._cache
# Older schema names for the dateTime value space.
class timeInstantType(dateTimeType):
    _validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class timePeriodType(dateTimeType):
    _validURIs = (NS.XSD2, NS.ENC)
class timeType(anyType):
    """xsd:time -- stored as a UTC (hour, minute, second) tuple; defaults
    to the current time when data is None."""

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[3:6]
            elif type(data) == FloatType:
                # Preserve fractional seconds from a float timestamp.
                f = data - int(data)
                data = list(time.gmtime(int(data))[3:6])
                data[2] += f
            elif type(data) in (IntType, LongType):
                data = time.gmtime(data)[3:6]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # Full time-tuple: keep only (hour, minute, second).
                    data = data[3:6]
                elif len(data) > 3:
                    raise Exception("too many values")
                data = [None, None, None] + list(data)
                if len(data) < 6:
                    data += [0] * (6 - len(data))
                cleanDate(data, 3)
                data = data[3:]
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return tuple(data)
    def _marshalData(self):
        # "hh:mm:ss[.fff]Z" in UTC.
        if self._cache == None:
            d = self._data
            s = ""
            s = time.strftime("%H:%M:%S", (0, 0, 0) + d + (0, 0, -1))
            f = d[2] - int(d[2])
            if f != 0:
                s += ("%g" % f)[1:]
            s += "Z"
            self._cache = s
        return self._cache
class dateType(anyType):
    """xsd:date -- stored as a UTC (year, month, day) tuple; defaults to
    the current date when data is None."""

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[0:3]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # Full time-tuple: keep only (year, month, day).
                    data = data[0:3]
                elif len(data) > 3:
                    raise Exception("too many values")
                data = list(data)
                if len(data) < 3:
                    # Missing month/day default to 1.
                    data += [1, 1, 1][len(data) :]
                data += [0, 0, 0]
                cleanDate(data)
                data = data[:3]
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return tuple(data)
    def _marshalData(self):
        # "YYYY-MM-DDZ", negative years prefixed "-".
        if self._cache == None:
            d = self._data
            s = "%04d-%02d-%02dZ" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = "-" + s
            self._cache = s
        return self._cache
class gYearMonthType(anyType):
    """xsd:gYearMonth -- stored as a (year, month) tuple; defaults to the
    current UTC year/month when data is None."""

    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:2]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[0:2]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # Full time-tuple: keep only (year, month).
                    data = data[0:2]
                elif len(data) > 2:
                    raise Exception("too many values")
                data = list(data)
                if len(data) < 2:
                    # A missing month defaults to 1.
                    data += [1, 1][len(data) :]
                data += [1, 0, 0, 0]
                cleanDate(data)
                data = data[:2]
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return tuple(data)
    def _marshalData(self):
        # "YYYY-MMZ", negative years prefixed "-".
        if self._cache == None:
            d = self._data
            s = "%04d-%02dZ" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = "-" + s
            self._cache = s
        return self._cache
class gYearType(anyType):
    """xsd:gYear -- a (possibly negative) Gregorian year."""

    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        """Coerce *data* (None, number, or time-tuple) to an integral year."""
        try:
            if data is None:
                data = time.gmtime(time.time())[0:1]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:1]
                elif len(data) < 1:
                    raise Exception("too few values")
                elif len(data) > 1:
                    raise Exception("too many values")
                if type(data[0]) == FloatType:
                    # Fix: 2to3 left a dead try/except whose two branches were
                    # identical (the original fell back to long()); accept
                    # only whole-number floats.
                    s = int(data[0])
                    if s != data[0]:
                        raise Exception("not integral")
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception("bad type")
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return data[0]

    def _marshalData(self):
        # "YYYYZ", negative years prefixed "-".
        if self._cache is None:
            d = self._data
            s = "%04dZ" % abs(d)
            if d < 0:
                s = "-" + s
            self._cache = s
        return self._cache
class centuryType(anyType):
    """Obsolete XSD 'century' type -- the year divided by 100 (19 for 1999)."""

    _validURIs = (NS.XSD2, NS.ENC)

    def _checkValueSpace(self, data):
        """Coerce *data* (None, number, or time-tuple) to an integral century."""
        try:
            if data is None:
                # Fix: the old code divided the (year,) TUPLE by 100, an
                # unconditional TypeError; take the year's century instead.
                data = [time.gmtime(time.time())[0] // 100]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # Same tuple-division bug fixed for full time-tuples.
                    data = [data[0] // 100]
                elif len(data) < 1:
                    raise Exception("too few values")
                elif len(data) > 1:
                    raise Exception("too many values")
                if type(data[0]) == FloatType:
                    # Accept only whole-number floats (dead 2to3 try/except
                    # around this conversion removed).
                    s = int(data[0])
                    if s != data[0]:
                        raise Exception("not integral")
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception("bad type")
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return data[0]

    def _marshalData(self):
        # "CCZ", negative centuries prefixed "-".
        if self._cache is None:
            d = self._data
            s = "%02dZ" % abs(d)
            if d < 0:
                s = "-" + s
            self._cache = s
        return self._cache
class yearType(gYearType):
    # 1999/2000-schema name for the gYear value space.
    _validURIs = (NS.XSD2, NS.ENC)
class gMonthDayType(anyType):
    """xsd:gMonthDay -- stored as a (month, day) tuple; defaults to the
    current UTC month/day when data is None."""

    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[1:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[1:3]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:2]
                elif len(data) > 2:
                    raise Exception("too many values")
                data = list(data)
                if len(data) < 2:
                    # A missing day defaults to 1.
                    data += [1, 1][len(data) :]
                data = [0] + data + [0, 0, 0]
                cleanDate(data, 1)
                data = data[1:3]
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return tuple(data)
    def _marshalData(self):
        # "--MM-DDZ" per the gMonthDay lexical form.
        if self._cache == None:
            self._cache = "--%02d-%02dZ" % self._data
        return self._cache
class recurringDateType(gMonthDayType):
    # 1999/2000-schema name for the gMonthDay value space.
    _validURIs = (NS.XSD2, NS.ENC)
class gMonthType(anyType):
    """xsd:gMonth -- a Gregorian month number in 1..12."""

    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        """Coerce *data* (None, number, or time-tuple) to a month in 1..12."""
        try:
            if data is None:
                data = time.gmtime(time.time())[1:2]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[1:2]
                elif len(data) < 1:
                    raise Exception("too few values")
                elif len(data) > 1:
                    raise Exception("too many values")
                if type(data[0]) == FloatType:
                    # Fix: removed the dead 2to3 try/except whose branches
                    # were identical; accept only whole-number floats.
                    s = int(data[0])
                    if s != data[0]:
                        raise Exception("not integral")
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception("bad type")
                if data[0] < 1 or data[0] > 12:
                    raise Exception("bad value")
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return data[0]

    def _marshalData(self):
        # "--MM--Z" per the gMonth lexical form.
        if self._cache is None:
            self._cache = "--%02d--Z" % self._data
        return self._cache
class monthType(gMonthType):
    # 1999/2000-schema name for the gMonth value space.
    _validURIs = (NS.XSD2, NS.ENC)
class gDayType(anyType):
    """xsd:gDay -- a Gregorian day-of-month number in 1..31."""

    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        """Coerce *data* (None, number, or time-tuple) to a day in 1..31."""
        try:
            if data is None:
                data = time.gmtime(time.time())[2:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[2:3]
                elif len(data) < 1:
                    raise Exception("too few values")
                elif len(data) > 1:
                    raise Exception("too many values")
                if type(data[0]) == FloatType:
                    # Fix: removed the dead 2to3 try/except whose branches
                    # were identical; accept only whole-number floats.
                    s = int(data[0])
                    if s != data[0]:
                        raise Exception("not integral")
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception("bad type")
                if data[0] < 1 or data[0] > 31:
                    raise Exception("bad value")
            else:
                raise Exception("invalid type")
        except Exception as e:
            raise ValueError("invalid %s value - %s" % (self._type, e))
        return data[0]

    def _marshalData(self):
        # "---DDZ" per the gDay lexical form.
        if self._cache is None:
            self._cache = "---%02dZ" % self._data
        return self._cache
class recurringDayType(gDayType):
    # 1999/2000-schema name for the gDay value space.
    _validURIs = (NS.XSD2, NS.ENC)
class hexBinaryType(anyType):
    """xsd:hexBinary -- string data marshalled as hex digits."""

    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError("invalid %s type" % self._type)
        return data
    def _marshalData(self):
        # Hex encoding is delegated to the Utilities helper.
        if self._cache == None:
            self._cache = encodeHexString(self._data)
        return self._cache
class base64BinaryType(anyType):
    """xsd:base64Binary -- string data marshalled as base64 text."""

    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError("invalid %s type" % self._type)
        return data
    def _marshalData(self):
        # NOTE(review): base64.encodestring was removed in Python 3.9 and in
        # any case requires bytes, not str — needs base64.encodebytes plus an
        # encode step on modern interpreters; confirm target version.
        if self._cache == None:
            self._cache = base64.encodestring(self._data)
        return self._cache
class base64Type(base64BinaryType):
    # SOAP-encoding name for the base64Binary value space.
    _validURIs = (NS.ENC,)
class binaryType(anyType):
    """1999-schema binary type whose wire encoding ("base64" or "hex") is
    selected by an 'encoding' attribute on the element."""

    _validURIs = (NS.XSD, NS.ENC)
    def __init__(self, data, name=None, typed=1, encoding="base64", attrs=None):
        anyType.__init__(self, data, name, typed, attrs)
        self._setAttr("encoding", encoding)
    def _marshalData(self):
        # NOTE(review): base64.encodestring was removed in Python 3.9 —
        # confirm target interpreter version.
        if self._cache == None:
            if self._getAttr((None, "encoding")) == "base64":
                self._cache = base64.encodestring(self._data)
            else:
                self._cache = encodeHexString(self._data)
        return self._cache
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError("invalid %s type" % self._type)
        return data
    def _setAttr(self, attr, value):
        # Intercept 'encoding' to validate it and invalidate the cache,
        # since changing the encoding changes the marshalled form.
        attr = self._fixAttr(attr)
        if attr[1] == "encoding":
            if attr[0] != None or value not in ("base64", "hex"):
                raise AttributeError("invalid encoding")
            self._cache = None
        anyType._setAttr(self, attr, value)
class anyURIType(anyType):
    """xsd:anyURI -- string marshalled with URL percent-quoting."""

    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError("invalid %s type" % self._type)
        return data
    def _marshalData(self):
        if self._cache == None:
            self._cache = urllib.parse.quote(self._data)
        return self._cache
# Older schema names for the anyURI value space.
class uriType(anyURIType):
    _validURIs = (NS.XSD,)
class uriReferenceType(anyURIType):
    _validURIs = (NS.XSD2,)
class NOTATIONType(anyType):
    # Abstract per the XSD spec: NOTATION must be subclassed, never used
    # directly.
    def __init__(self, data, name=None, typed=1, attrs=None):
        if self.__class__ == NOTATIONType:
            raise Error("a NOTATION can't be instantiated directly")
        anyType.__init__(self, data, name, typed, attrs)
class ENTITIESType(anyType):
    """xsd:ENTITIES -- a whitespace-separated list of string tokens."""

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError("must supply initial %s value" % self._type)
        # A single string becomes a one-element tuple.
        if type(data) in (StringType, UnicodeType):
            return (data,)
        # Otherwise require a sequence containing only strings.
        if type(data) not in (ListType, TupleType) or [
            x for x in data if type(x) not in (StringType, UnicodeType)
        ]:
            raise AttributeError("invalid %s type" % self._type)
        return data
    def _marshalData(self):
        # The lexical form is the tokens joined by single spaces.
        return " ".join(self._data)
# List types sharing the ENTITIES value space (space-separated tokens).
class IDREFSType(ENTITIESType):
    pass
class NMTOKENSType(ENTITIESType):
    pass
class integerType(anyType):
    """xsd:integer -- an arbitrary-precision integer."""

    def _checkValueSpace(self, data):
        # A concrete integer value is required.
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) in (IntType, LongType):
            return data
        raise ValueError("invalid %s value" % self._type)
class nonPositiveIntegerType(anyType):
    """xsd:nonPositiveInteger -- an integer <= 0."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) in (IntType, LongType) and data <= 0:
            return data
        raise ValueError("invalid %s value" % self._type)
class non_Positive_IntegerType(nonPositiveIntegerType):
    # 1999-schema spelling; the wire type name uses hyphens, which cannot
    # appear in a Python class name, hence the _typeName override.
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return "non-positive-integer"
class negativeIntegerType(anyType):
    """xsd:negativeInteger -- an integer < 0."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) in (IntType, LongType) and data < 0:
            return data
        raise ValueError("invalid %s value" % self._type)
class negative_IntegerType(negativeIntegerType):
    # 1999-schema spelling; wire type name uses hyphens.
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return "negative-integer"
class longType(anyType):
    """xsd:long -- integer within the 64-bit signed range."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if (
            type(data) in (IntType, LongType)
            and -9223372036854775808 <= data <= 9223372036854775807
        ):
            return data
        raise ValueError("invalid %s value" % self._type)
class intType(anyType):
    """xsd:int -- integer within the 32-bit signed range."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) in (IntType, LongType) and -2147483648 <= data <= 2147483647:
            return data
        raise ValueError("invalid %s value" % self._type)
class shortType(anyType):
    """xsd:short -- integer within the 16-bit signed range."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) in (IntType, LongType) and -32768 <= data <= 32767:
            return data
        raise ValueError("invalid %s value" % self._type)
class byteType(anyType):
    """xsd:byte -- integer within the 8-bit signed range."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) in (IntType, LongType) and -128 <= data <= 127:
            return data
        raise ValueError("invalid %s value" % self._type)
class nonNegativeIntegerType(anyType):
    """xsd:nonNegativeInteger -- an integer >= 0."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) in (IntType, LongType) and data >= 0:
            return data
        raise ValueError("invalid %s value" % self._type)
class non_Negative_IntegerType(nonNegativeIntegerType):
    # 1999-schema spelling; wire type name uses hyphens.
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return "non-negative-integer"
class unsignedLongType(anyType):
    """xsd:unsignedLong -- integer within the 64-bit unsigned range."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if (
            type(data) in (IntType, LongType)
            and 0 <= data <= 18446744073709551615
        ):
            return data
        raise ValueError("invalid %s value" % self._type)
class unsignedIntType(anyType):
    """xsd:unsignedInt -- integer within the 32-bit unsigned range."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) in (IntType, LongType) and 0 <= data <= 4294967295:
            return data
        raise ValueError("invalid %s value" % self._type)
class unsignedShortType(anyType):
    """xsd:unsignedShort -- integer within the 16-bit unsigned range."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) in (IntType, LongType) and 0 <= data <= 65535:
            return data
        raise ValueError("invalid %s value" % self._type)
class unsignedByteType(anyType):
    """xsd:unsignedByte -- integer within the 8-bit unsigned range."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) in (IntType, LongType) and 0 <= data <= 255:
            return data
        raise ValueError("invalid %s value" % self._type)
class positiveIntegerType(anyType):
    """xsd:positiveInteger -- an integer > 0."""

    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data is None:
            raise ValueError("must supply initial %s value" % self._type)
        if type(data) in (IntType, LongType) and data > 0:
            return data
        raise ValueError("invalid %s value" % self._type)
class positive_IntegerType(positiveIntegerType):
    # 1999-schema spelling; wire type name uses hyphens.
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return "positive-integer"
# Now compound types
class compoundType(anyType):
    """Base for struct-like SOAP values: named members stored as instance
    attributes, with _keyord preserving insertion order.

    Abstract: use structType/headerType/bodyType (or arrayType) instead.
    """

    def __init__(self, data=None, name=None, typed=1, attrs=None):
        if self.__class__ == compoundType:
            raise Error("a compound can't be instantiated directly")
        anyType.__init__(self, data, name, typed, attrs)
        self._keyord = []
        # A dict seeds the member attributes directly.
        if type(data) == DictType:
            self.__dict__.update(data)

    def _aslist(self, item=None):
        """Return member values in insertion order (or just member *item*)."""
        if item is not None:
            return self.__dict__[self._keyord[item]]
        else:
            return [self.__dict__[x] for x in self._keyord]

    def _asdict(self, item=None, encoding=Config.dict_encoding):
        """Return members as a dict (or just member *item*).

        NOTE(review): keys are byte strings (`str.encode`) -- a Python 2
        convention preserved for compatibility.
        """
        if item is not None:
            if type(item) in (UnicodeType, StringType):
                item = item.encode(encoding)
            return self.__dict__[item]
        else:
            retval = {}
            def fun(x):
                retval[x.encode(encoding)] = self.__dict__[x]
            if hasattr(self, "_keyord"):
                list(map(fun, self._keyord))
            else:
                # No ordering info: fall back to all public attributes.
                for name in dir(self):
                    if isPublic(name):
                        retval[name] = getattr(self, name)
            return retval

    def __getitem__(self, item):
        # Integer index -> positional member; anything else -> by name.
        if type(item) == IntType:
            return self.__dict__[self._keyord[item]]
        else:
            return getattr(self, item)

    def __len__(self):
        return len(self._keyord)

    def __bool__(self):
        # Fix: Python 3 requires __bool__ to return a bool; returning the
        # int 1 raised "TypeError: __bool__ should return bool".
        # Compounds are always truthy, even when empty (len() == 0).
        return True

    def _keys(self):
        return [x for x in list(self.__dict__.keys()) if x[0] != "_"]

    def _addItem(self, name, value, attrs=None):
        # A repeated name promotes the member to a list and appends.
        if name in self._keyord:
            if type(self.__dict__[name]) != ListType:
                self.__dict__[name] = [self.__dict__[name]]
            self.__dict__[name].append(value)
        else:
            self.__dict__[name] = value
            self._keyord.append(name)

    def _placeItem(self, name, value, pos, subpos=0, attrs=None):
        # Overwrite the member (or one element of a list member) and record
        # the name at ordering position *pos*.
        if subpos == 0 and type(self.__dict__[name]) != ListType:
            self.__dict__[name] = value
        else:
            self.__dict__[name][subpos] = value
        self._keyord[pos] = name

    def _getItemAsList(self, name, default=None):
        """Return member *name* coerced to a list; *default* (or a fresh
        empty list) when the member is absent.

        Fix: the old signature used a mutable ``default=[]`` that was
        returned to callers, so mutations leaked into later calls.
        """
        if default is None:
            default = []
        try:
            d = self.__dict__[name]
        except:
            return default
        if type(d) == ListType:
            return d
        return [d]

    def __str__(self):
        return anyType.__str__(self) + ": " + str(self._asdict())

    def __repr__(self):
        return self.__str__()
class structType(compoundType):
    # Generic SOAP struct: a compound value addressed by member name.
    pass
class headerType(structType):
    # The SOAP envelope Header element; its name is fixed.
    _validURIs = (NS.ENV,)
    def __init__(self, data=None, typed=1, attrs=None):
        structType.__init__(self, data, "Header", typed, attrs)
class bodyType(structType):
    # The SOAP envelope Body element; its name is fixed.
    _validURIs = (NS.ENV,)
    def __init__(self, data=None, typed=1, attrs=None):
        structType.__init__(self, data, "Body", typed, attrs)
class arrayType(collections.UserList, compoundType):
    """A SOAP array: compound data addressed by position.

    Element storage is the (possibly nested) python list in ``self.data``.
    Supports fixed dimension sizes (``asize``) and sparse arrays, whose
    elements carry an explicit SOAP-ENC ``position`` attribute.
    """

    def __init__(
        self,
        data=None,
        name=None,
        attrs=None,
        offset=0,
        rank=None,
        asize=0,
        elemsname=None,
    ):
        """Build an array either from existing sequence data or empty.

        Args:
            data: optional list/tuple of initial elements
            name: element name
            attrs: XML attributes
            offset: linear index at which the first parsed element lands
            rank: array rank (number of dimensions), or None
            asize: dimension sizes, an int or comma-separated string
            elemsname: name used for member elements (default "item")
        """
        if data:
            if type(data) not in (ListType, TupleType):
                raise Error("Data must be a sequence")
            collections.UserList.__init__(self, data)
            compoundType.__init__(self, data, name, 0, attrs)
        self._elemsname = elemsname or "item"
        if data is None:
            self._rank = rank
            # According to 5.4.2.2 in the SOAP spec, each element in a
            # sparse array must have a position. _posstate keeps track of
            # whether we've seen a position or not. It's possible values
            # are:
            # -1 No elements have been added, so the state is indeterminate
            # 0 An element without a position has been added, so no
            # elements can have positions
            # 1 An element with a position has been added, so all elements
            # must have positions
            self._posstate = -1
            self._full = 0
            if asize in ("", None):
                asize = "0"
            # Dimension sizes, stored innermost-first (reversed) so the
            # modulo/divide carry arithmetic below is straightforward.
            self._dims = [int(x) for x in str(asize).split(",")]
            self._dims.reverse()  # It's easier to work with this way
            self._poss = [0] * len(self._dims)  # This will end up
            # reversed too
            for i in range(len(self._dims)):
                if self._dims[i] < 0 or self._dims[i] == 0 and len(self._dims) > 1:
                    raise TypeError("invalid Array dimensions")
                if offset > 0:
                    # Decompose the linear offset into per-dimension positions.
                    self._poss[i] = offset % self._dims[i]
                    offset = int(offset / self._dims[i])
                # Don't break out of the loop if offset is 0 so we test all the
                # dimensions for > 0.
            if offset:
                raise AttributeError("invalid Array offset")
            # Pre-build the nested list structure for the declared dimensions.
            a = [None] * self._dims[0]
            for i in range(1, len(self._dims)):
                b = []
                for j in range(self._dims[i]):
                    b.append(copy.deepcopy(a))
                a = b
            self.data = a

    def _aslist(self, item=None):
        """Return the underlying list, or a single element by index."""
        if item is not None:
            return self.data[int(item)]
        else:
            return self.data

    def _asdict(self, item=None, encoding=Config.dict_encoding):
        """Return the data keyed by stringified index, or a single element.

        NOTE(review): keys are encoded to bytes (a Python 2 leftover);
        callers expecting text keys must decode them -- confirm.
        """
        if item is not None:
            if type(item) in (UnicodeType, StringType):
                item = item.encode(encoding)
            return self.data[int(item)]
        else:
            # Dict comprehension replaces the old fun/map construction.
            return {
                str(x).encode(encoding): self.data[x]
                for x in range(len(self.data))
            }

    def __getitem__(self, item):
        """Index by position; fall back to attribute access for names."""
        try:
            return self.data[int(item)]
        except ValueError:
            return getattr(self, item)

    def __len__(self):
        return len(self.data)

    def __bool__(self):
        # Arrays are deliberately always truthy, even when empty (mirrors
        # the original "return 1").  Python 3 requires __bool__ to return
        # a bool; returning the old integer 1 raised TypeError whenever an
        # array was used in a boolean context.
        return True

    def __str__(self):
        return anyType.__str__(self) + ": " + str(self._aslist())

    def _keys(self):
        # Public attribute names only (no leading underscore).
        return [x for x in list(self.__dict__.keys()) if x[0] != "_"]

    def _addItem(self, name, value, attrs):
        """Append an element, honoring sparse-array position attributes."""
        if self._full:
            raise ValueError("Array is full")
        pos = attrs.get((NS.ENC, "position"))
        if pos is not None:
            if self._posstate == 0:
                raise AttributeError(
                    "all elements in a sparse Array must have a " "position attribute"
                )
            self._posstate = 1
            try:
                if pos[0] == "[" and pos[-1] == "]":
                    pos = [int(x) for x in pos[1:-1].split(",")]
                    pos.reverse()
                    if len(pos) == 1:
                        # Single linear position: decompose per dimension.
                        pos = pos[0]
                        curpos = [0] * len(self._dims)
                        for i in range(len(self._dims)):
                            curpos[i] = pos % self._dims[i]
                            pos = int(pos / self._dims[i])
                            if pos == 0:
                                break
                        if pos:
                            raise Exception
                    elif len(pos) != len(self._dims):
                        raise Exception
                    else:
                        for i in range(len(self._dims)):
                            if pos[i] >= self._dims[i]:
                                raise Exception
                        curpos = pos
                else:
                    raise Exception
            except:
                raise AttributeError("invalid Array element position %s" % str(pos))
        else:
            if self._posstate == 1:
                raise AttributeError(
                    "only elements in a sparse Array may have a " "position attribute"
                )
            self._posstate = 0
            curpos = self._poss
        a = self.data
        for i in range(len(self._dims) - 1, 0, -1):
            a = a[curpos[i]]
        if curpos[0] >= len(a):
            # Pad with None up to and including index curpos[0].  The old
            # code added len(a) - curpos[0] + 1 entries, which is zero or
            # negative whenever the gap is more than one slot.
            a += [None] * (curpos[0] - len(a) + 1)
        a[curpos[0]] = value
        if pos is None:
            # Sequential append: advance the multi-dimensional cursor with
            # carry into the next dimension.
            self._poss[0] += 1
            for i in range(len(self._dims) - 1):
                if self._poss[i] < self._dims[i]:
                    break
                self._poss[i] = 0
                self._poss[i + 1] += 1
        if self._dims[-1] and self._poss[-1] >= self._dims[-1]:
            # self._full = 1
            # FIXME: why is this occurring?
            pass

    def _placeItem(self, name, value, pos, subpos, attrs=None):
        """Place ``value`` at linear index ``pos``, growing rows as needed."""
        curpos = [0] * len(self._dims)
        for i in range(len(self._dims)):
            if self._dims[i] == 0:
                # Unbounded dimension: the linear index is used directly.
                curpos[0] = pos
                break
            curpos[i] = pos % self._dims[i]
            pos = int(pos / self._dims[i])
            if pos == 0:
                break
        if self._dims[i] != 0 and pos:
            raise Error("array index out of range")
        a = self.data
        for i in range(len(self._dims) - 1, 0, -1):
            a = a[curpos[i]]
        if curpos[0] >= len(a):
            # Same padding fix as in _addItem.
            a += [None] * (curpos[0] - len(a) + 1)
        a[curpos[0]] = value
class typedArrayType(arrayType):
    """An arrayType carrying explicit element type information."""

    def __init__(
        self,
        data=None,
        name=None,
        typed=None,
        attrs=None,
        offset=0,
        rank=None,
        asize=0,
        elemsname=None,
        complexType=0,
    ):
        super().__init__(data, name, attrs, offset, rank, asize, elemsname)
        # `typed` holds the element type; `complexType` records whether that
        # type is complex.  The array itself is always flagged as typed.
        self._type = typed
        self._complexType = complexType
        self._typed = 1
class faultType(structType, Error):
    """A SOAP Fault, usable both as a struct and as a python exception."""

    def __init__(self, faultcode="", faultstring="", detail=None):
        self.faultcode = faultcode
        self.faultstring = faultstring
        # `detail` is only stored when supplied, so every reader must use
        # getattr() rather than assume the attribute exists.
        if detail is not None:
            self.detail = detail
        structType.__init__(self, None, 0)

    def _setDetail(self, detail=None):
        """Set the fault detail, or remove it when called with None."""
        if detail is not None:
            self.detail = detail
        else:
            try:
                del self.detail
            except AttributeError:
                pass

    def __repr__(self):
        if getattr(self, "detail", None) is not None:
            return "<Fault %s: %s: %s>" % (
                self.faultcode,
                self.faultstring,
                self.detail,
            )
        else:
            return "<Fault %s: %s>" % (self.faultcode, self.faultstring)

    __str__ = __repr__

    def __call__(self):
        """Return (faultcode, faultstring, detail).

        Bug fix: the original read self.detail directly, raising
        AttributeError for faults constructed without a detail (the guard
        in __repr__ shows missing detail is an expected state).
        """
        return (self.faultcode, self.faultstring, getattr(self, "detail", None))
class SOAPException(Exception):
    """Generic SOAP fault raised when no more specific exception applies."""

    def __init__(self, code="", string="", detail=None):
        self.code = code
        self.string = string
        self.detail = detail
        # `value` bundles everything for __str__ and legacy callers.
        self.value = ("SOAPpy SOAP Exception", code, string, detail)

    def __str__(self):
        return repr(self.value)
class RequiredHeaderMismatch(Exception):
    """Raised when a mustUnderstand SOAP header could not be honored."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
class MethodNotFound(Exception):
    """Raised when the server reports the requested method is unknown.

    The server detail string has the form "<method>:<description>"; it is
    split into .value (the method) and .detail (the description).
    """

    def __init__(self, value):
        # Split on the first colon only: the description may itself contain
        # colons (e.g. URLs), which previously raised ValueError on unpack.
        (val, detail) = value.split(":", 1)
        self.value = val
        self.detail = detail

    def __str__(self):
        # Bug fix: repr() takes one argument; the original passed two,
        # which raised TypeError whenever the exception was stringified.
        return repr((self.value, self.detail))
class AuthorizationFailed(Exception):
    """Raised when the server rejects the caller's credentials."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
class MethodFailed(Exception):
    """Raised when the server reports that the invoked method failed."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
#######
# Convert complex SOAPpy objects to native python equivalents
#######
def simplify(object, level=0):
    """
    Convert the SOAPpy objects and their contents to simple python types.

    This function recursively converts the passed 'container' object,
    and all public subobjects. (Private subobjects have names that
    start with '_'.)

    Conversions:
    - faultType --> raise python exception
    - arrayType --> array
    - compoundType --> dictionary
    """
    # Recursion guard: give up past ten levels of nesting.
    if level > 10:
        return object
    if isinstance(object, faultType):
        # Map well-known fault strings onto specific exception types.
        if object.faultstring == "Required Header Misunderstood":
            raise RequiredHeaderMismatch(object.detail)
        elif object.faultstring == "Method Not Found":
            raise MethodNotFound(object.detail)
        elif object.faultstring == "Authorization Failed":
            raise AuthorizationFailed(object.detail)
        elif object.faultstring == "Method Failed":
            raise MethodFailed(object.detail)
        else:
            se = SOAPException(object.faultcode, object.faultstring, object.detail)
            raise se
    elif isinstance(object, arrayType):
        data = object._aslist()
        for k in range(len(data)):
            data[k] = simplify(data[k], level=level + 1)
        return data
    elif isinstance(object, compoundType) or isinstance(object, structType):
        data = object._asdict()
        for k in list(data.keys()):
            if isPublic(k):
                data[k] = simplify(data[k], level=level + 1)
        return data
    elif type(object) == DictType:
        for k in list(object.keys()):
            if isPublic(k):
                # Bug fix: propagate the recursion depth (it was reset to 0,
                # defeating the level guard for nested plain dicts).
                object[k] = simplify(object[k], level=level + 1)
        return object
    elif type(object) == list:
        for k in range(len(object)):
            # Bug fix: propagate the recursion depth here as well.
            object[k] = simplify(object[k], level=level + 1)
        return object
    else:
        return object
def simplify_contents(object, level=0):
    """
    Convert the contents of SOAPpy objects to simple python types.

    This function recursively converts the sub-objects contained in a
    'container' object to simple python types.

    Conversions:
    - faultType --> raise python exception
    - arrayType --> array
    - compoundType --> dictionary
    """
    # Recursion guard: give up past ten levels of nesting.
    if level > 10:
        return object
    if isinstance(object, faultType):
        # Simplify the fault's public members in place, then raise it.
        for k in object._keys():
            if isPublic(k):
                setattr(object, k, simplify(object[k], level=level + 1))
        raise object
    elif isinstance(object, arrayType):
        data = object._aslist()
        for k in range(len(data)):
            object[k] = simplify(data[k], level=level + 1)
    elif isinstance(object, structType):
        data = object._asdict()
        for k in list(data.keys()):
            if isPublic(k):
                setattr(object, k, simplify(data[k], level=level + 1))
    elif isinstance(object, compoundType):
        data = object._asdict()
        for k in list(data.keys()):
            if isPublic(k):
                object[k] = simplify(data[k], level=level + 1)
    elif type(object) == DictType:
        for k in list(object.keys()):
            if isPublic(k):
                # Bug fix: propagate the recursion depth (it was reset to 0,
                # defeating the level guard for nested plain dicts).
                object[k] = simplify(object[k], level=level + 1)
    elif type(object) == list:
        for k in range(len(object)):
            # Bug fix: propagate the recursion depth here as well.
            object[k] = simplify(object[k], level=level + 1)
    return object
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The Python datastore API used by app developers.
Defines Entity, Query, and Iterator classes, as well as methods for all of the
datastore's calls. Also defines conversions between the Python classes and
their PB counterparts.
The datastore errors are defined in the datastore_errors module. That module is
only required to avoid circular imports. datastore imports datastore_types,
which needs BadValueError, so it can't be defined in datastore.
"""
import heapq
import itertools
import logging
import os
import re
import sys
import threading
import traceback
from xml.sax import saxutils
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import capabilities
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_rpc
from google.appengine.datastore import entity_pb
# Limits and defaults for datastore operations.
MAX_ALLOWABLE_QUERIES = 30  # presumably the cap on combined queries -- TODO confirm
MAXIMUM_RESULTS = 1000
DEFAULT_TRANSACTION_RETRIES = 3
# Capability sets used to check datastore read/write availability.
READ_CAPABILITY = capabilities.CapabilitySet("datastore_v3")
WRITE_CAPABILITY = capabilities.CapabilitySet("datastore_v3", capabilities=["write"])
_MAX_INDEXED_PROPERTIES = 20000
_MAX_ID_BATCH_SIZE = datastore_rpc._MAX_ID_BATCH_SIZE
# Convenience re-exports so callers can use datastore.Key etc. directly.
Key = datastore_types.Key
typename = datastore_types.typename
STRONG_CONSISTENCY = datastore_rpc.Configuration.STRONG_CONSISTENCY
EVENTUAL_CONSISTENCY = datastore_rpc.Configuration.EVENTUAL_CONSISTENCY
_MAX_INT_32 = 2**31 - 1
def NormalizeAndTypeCheck(arg, types):
    """Normalizes and type checks the given argument.

    Args:
      arg: an instance or iterable of the given type(s)
      types: allowed type or tuple of types

    Returns:
      A (list, bool) tuple. The list is a normalized, shallow copy of the
      argument. The boolean is True if the argument was a sequence, False
      if it was a single object.

    Raises:
      AssertionError: types includes list or tuple.
      BadArgumentError: arg is not an instance or sequence of one of the given
        types.
    """
    if not isinstance(types, (list, tuple)):
        types = (types,)
    assert list not in types and tuple not in types

    if isinstance(arg, types):
        # A single allowed value: wrap it in a list.
        return [arg], False

    # Strings are iterable but are never treated as sequences of values.
    if isinstance(arg, str):
        raise datastore_errors.BadArgumentError(
            "Expected an instance or iterable of %s; received %s (a %s)."
            % (types, arg, typename(arg))
        )

    try:
        arg_list = list(arg)
    except TypeError:
        raise datastore_errors.BadArgumentError(
            "Expected an instance or iterable of %s; received %s (a %s)."
            % (types, arg, typename(arg))
        )

    for val in arg_list:
        if not isinstance(val, types):
            raise datastore_errors.BadArgumentError(
                "Expected one of %s; received %s (a %s)."
                % (types, val, typename(val))
            )
    return arg_list, True
def NormalizeAndTypeCheckKeys(keys):
    """Normalizes and type checks that the given argument is a valid key or keys.

    A wrapper around NormalizeAndTypeCheck() that accepts strings, Keys, and
    Entities, and normalizes to Keys.

    Args:
      keys: a Key or sequence of Keys

    Returns:
      A (list of Keys, bool) tuple. See NormalizeAndTypeCheck.

    Raises:
      BadArgumentError: arg is not an instance or sequence of one of the given
        types.
    """
    normalized, multiple = NormalizeAndTypeCheck(keys, (str, Entity, Key))
    # Each entry may still be a string or an Entity; coerce to complete Keys.
    return ([_GetCompleteKeyOrError(key) for key in normalized], multiple)
def _GetConfigFromKwargs(
    kwargs, convert_rpc=False, config_class=datastore_rpc.Configuration
):
    """Get a Configuration object from the keyword arguments.

    This is purely an internal helper for the various public APIs below
    such as Get().

    Args:
      kwargs: A dict containing the keyword arguments passed to a public API.
      convert_rpc: Whether a passed rpc should be converted to a config or
        passed on directly.
      config_class: The config class that should be generated.

    Returns:
      A UserRPC instance, or a Configuration instance, or None.

    Raises:
      TypeError if unexpected keyword arguments are present.
    """
    if not kwargs:
        return None
    rpc = kwargs.pop("rpc", None)
    if rpc is not None:
        if not isinstance(rpc, apiproxy_stub_map.UserRPC):
            raise datastore_errors.BadArgumentError(
                "rpc= argument should be None or a UserRPC instance"
            )
        # rpc= and config= are mutually exclusive ways to configure a call.
        if "config" in kwargs:
            raise datastore_errors.BadArgumentError(
                "Expected rpc= or config= argument but not both"
            )
        if not convert_rpc:
            # Pass the UserRPC straight through; it must then be the only
            # keyword argument.
            if kwargs:
                raise datastore_errors.BadArgumentError(
                    "Unexpected keyword arguments: %s" % ", ".join(kwargs)
                )
            return rpc
        # Convert the UserRPC into an equivalent Configuration and fall
        # through to the config_class construction below.
        read_policy = getattr(rpc, "read_policy", None)
        kwargs["config"] = datastore_rpc.Configuration(
            deadline=rpc.deadline,
            read_policy=read_policy,
            config=_GetConnection().config,
        )
    return config_class(**kwargs)
class _BaseIndex(object):
    """Base class describing a composite datastore index.

    Exposes the index lifecycle states (BUILDING/SERVING/DELETING/ERROR)
    and the sort-direction constants (ASCENDING/DESCENDING).
    """

    BUILDING, SERVING, DELETING, ERROR = list(range(4))
    ASCENDING = datastore_query.PropertyOrder.ASCENDING
    DESCENDING = datastore_query.PropertyOrder.DESCENDING

    def __init__(self, index_id, kind, has_ancestor, properties):
        """Construct a datastore index instance.

        Args:
          index_id: Required long; Uniquely identifies the index
          kind: Required string; Specifies the kind of the entities to index
          has_ancestor: Required boolean; indicates if the index supports a query
            that filters entities by the entity group parent
          properties: Required list of (string, int) tuples; The entity properties
            to index. First item in a tuple is the property name and the second
            item is the sorting direction (ASCENDING|DESCENDING).
            The order of the properties is based on the order in the index.
        """
        argument_error = datastore_errors.BadArgumentError
        datastore_types.ValidateInteger(
            index_id, "index_id", argument_error, zero_ok=True
        )
        datastore_types.ValidateString(kind, "kind", argument_error, empty_ok=True)
        if not isinstance(properties, (list, tuple)):
            raise argument_error("properties must be a list or a tuple")
        for idx, index_property in enumerate(properties):
            # Each property spec must be a ("name", direction) pair.
            if not isinstance(index_property, (list, tuple)):
                raise argument_error("property[%d] must be a list or a tuple" % idx)
            if len(index_property) != 2:
                raise argument_error(
                    "property[%d] length should be 2 but was %d"
                    % (idx, len(index_property))
                )
            datastore_types.ValidateString(
                index_property[0], "property name", argument_error
            )
            _BaseIndex.__ValidateEnum(
                index_property[1], (self.ASCENDING, self.DESCENDING), "sort direction"
            )
        self.__id = int(index_id)
        self.__kind = kind
        self.__has_ancestor = bool(has_ancestor)
        self.__properties = properties

    @staticmethod
    def __ValidateEnum(
        value,
        accepted_values,
        name="value",
        exception=datastore_errors.BadArgumentError,
    ):
        # Validates that `value` is an integer and one of accepted_values.
        datastore_types.ValidateInteger(value, name, exception)
        if not value in accepted_values:
            raise exception(
                "%s should be one of %s but was %d"
                % (name, str(accepted_values), value)
            )

    def _Id(self):
        """Returns the index id, a long."""
        return self.__id

    def _Kind(self):
        """Returns the index kind, a string. Empty string ('') if none."""
        return self.__kind

    def _HasAncestor(self):
        """Indicates if this is an ancestor index, a boolean."""
        return self.__has_ancestor

    def _Properties(self):
        """Returns the index properties. a tuple of
        (index name as a string, [ASCENDING|DESCENDING]) tuples.
        """
        return self.__properties

    # Equality and hashing are based solely on the index id.
    # NOTE(review): comparing against a non-_BaseIndex raises AttributeError
    # instead of returning NotImplemented -- confirm that is intended.
    def __eq__(self, other):
        return self.__id == other.__id

    def __ne__(self, other):
        return self.__id != other.__id

    def __hash__(self):
        return hash(self.__id)
class Index(_BaseIndex):
    """A datastore index."""

    # Expose the _BaseIndex accessors under their public names.
    Id = _BaseIndex._Id
    Kind = _BaseIndex._Kind
    HasAncestor = _BaseIndex._HasAncestor
    Properties = _BaseIndex._Properties
class DatastoreAdapter(datastore_rpc.AbstractAdapter):
    """Adapter between datatypes defined here (Entity etc.) and protobufs.

    See the base class in datastore_rpc.py for more docs.
    """

    # Protobuf composite-index state -> Index state constant.
    index_state_mappings = {
        entity_pb.CompositeIndex.ERROR: Index.ERROR,
        entity_pb.CompositeIndex.DELETED: Index.DELETING,
        entity_pb.CompositeIndex.READ_WRITE: Index.SERVING,
        entity_pb.CompositeIndex.WRITE_ONLY: Index.BUILDING,
    }
    # Protobuf sort direction -> Index direction constant.
    index_direction_mappings = {
        entity_pb.Index_Property.ASCENDING: Index.ASCENDING,
        entity_pb.Index_Property.DESCENDING: Index.DESCENDING,
    }

    def key_to_pb(self, key):
        # Key wraps a Reference protobuf; hand back the wrapped pb.
        return key._Key__reference

    def pb_to_key(self, pb):
        return Key._FromPb(pb)

    def entity_to_pb(self, entity):
        return entity._ToPb()

    def pb_to_entity(self, pb):
        return Entity._FromPb(pb)

    def pb_to_index(self, pb):
        """Convert a CompositeIndex protobuf into an (Index, state) pair."""
        index_def = pb.definition()
        properties = [
            (
                property.name(),
                DatastoreAdapter.index_direction_mappings.get(property.direction()),
            )
            for property in index_def.property_list()
        ]
        index = Index(
            pb.id(), index_def.entity_type(), index_def.ancestor(), properties
        )
        state = DatastoreAdapter.index_state_mappings.get(pb.state())
        return index, state
# Module-level singletons for per-thread connection management.
_adapter = DatastoreAdapter()  # shared protobuf <-> Entity adapter
_thread_local = threading.local()  # holds each thread's Connection
_ENV_KEY = "__DATASTORE_CONNECTION_INITIALIZED__"  # os.environ flag set once a connection exists
def _GetConnection():
    """Retrieve a datastore connection local to the thread."""
    connection = None
    if os.getenv(_ENV_KEY):
        # A connection has been created somewhere in this process; reuse
        # this thread's connection if it already has one.
        connection = getattr(_thread_local, "connection", None)
    if connection is None:
        connection = datastore_rpc.Connection(adapter=_adapter)
        _SetConnection(connection)
    return connection
def _SetConnection(connection):
    """Sets the datastore connection local to the thread."""
    _thread_local.connection = connection
    # Record (via the environment, visible process-wide) that at least one
    # connection has been initialized; _GetConnection checks this flag.
    os.environ[_ENV_KEY] = "1"
def _MakeSyncCall(service, call, request, response, config=None):
    """The APIProxy entry point for a synchronous API call.

    Args:
      service: For backwards compatibility, must be 'datastore_v3'.
      call: String representing which function to call.
      request: Protocol buffer for the request.
      response: Protocol buffer for the response.
      config: Optional Configuration to use for this request.

    Returns:
      Response protocol buffer. Caller should always use returned value
      which may or may not be same as passed in 'response'.

    Raises:
      apiproxy_errors.Error or a subclass.
    """
    conn = _GetConnection()
    if isinstance(request, datastore_pb.Query):
        # Queries need the connection's read policy and transaction applied
        # to the request before it is sent.
        conn._set_request_read_policy(request, config)
        conn._set_request_transaction(request)
    rpc = conn.make_rpc_call(config, call, request, response)
    # Blocks until the RPC completes; raises on failure.
    conn.check_rpc_success(rpc)
    return response
def CreateRPC(service="datastore_v3", deadline=None, callback=None, read_policy=None):
    """Create an rpc for use in configuring datastore calls.

    NOTE: This function exists for backwards compatibility. Please use
    CreateConfig() instead. NOTE: the latter uses 'on_completion',
    which is a function taking an argument, whereas CreateRPC uses
    'callback' which is a function without arguments.

    Args:
      service: Optional string; for backwards compatibility, must be
        'datastore_v3'.
      deadline: Optional int or float, deadline for calls in seconds.
      callback: Optional callable, a callback triggered when this rpc
        completes; takes no arguments.
      read_policy: Optional read policy; set to EVENTUAL_CONSISTENCY to
        enable eventually consistent reads (i.e. reads that may be
        satisfied from an older version of the datastore in some cases).
        The default read policy may have to wait until in-flight
        transactions are committed.

    Returns:
      A UserRPC instance.
    """
    assert service == "datastore_v3"
    # Only a deadline requires building a Configuration.
    config = (
        datastore_rpc.Configuration(deadline=deadline)
        if deadline is not None
        else None
    )
    rpc = _GetConnection().create_rpc(config)
    rpc.callback = callback
    if read_policy is not None:
        rpc.read_policy = read_policy
    return rpc
def CreateConfig(**kwds):
    """Create a Configuration object for use in configuring datastore calls.

    This configuration can be passed to most datastore calls using the
    'config=...' argument.

    Args:
      deadline: Optional deadline; default None (which means the
        system default deadline will be used, typically 5 seconds).
      on_completion: Optional callback function; default None. If
        specified, it will be called with a UserRPC object as argument
        when an RPC completes.
      read_policy: Optional read policy; set to EVENTUAL_CONSISTENCY to
        enable eventually consistent reads (i.e. reads that may be
        satisfied from an older version of the datastore in some cases).
        The default read policy may have to wait until in-flight
        transactions are committed.
      **kwds: Other keyword arguments as long as they are supported by
        datastore_rpc.Configuration().

    Returns:
      A datastore_rpc.Configuration instance.
    """
    # Thin veneer over datastore_rpc.Configuration; see its docs for the
    # full set of supported options.
    return datastore_rpc.Configuration(**kwds)
def CreateTransactionOptions(**kwds):
    """Create a configuration object for use in configuring transactions.

    This configuration can be passed as run_in_transaction_option's first
    argument.

    Args:
      deadline: Optional deadline; default None (which means the
        system default deadline will be used, typically 5 seconds).
      on_completion: Optional callback function; default None. If
        specified, it will be called with a UserRPC object as argument
        when an RPC completes.
      xg: set to true to allow cross-group transactions (high replication
        datastore only)
      retries: set the number of retries for a transaction
      **kwds: Other keyword arguments as long as they are supported by
        datastore_rpc.TransactionOptions().

    Returns:
      A datastore_rpc.TransactionOptions instance.
    """
    # Thin veneer over datastore_rpc.TransactionOptions.
    return datastore_rpc.TransactionOptions(**kwds)
def PutAsync(entities, **kwargs):
    """Asynchronously store one or more entities in the datastore.

    Identical to datastore.Put() except returns an asynchronous object. Call
    get_result() on the return value to block on the call and get the results.
    """
    extra_hook = kwargs.pop("extra_hook", None)
    config = _GetConfigFromKwargs(kwargs)
    # Puts are writes; an eventual-consistency read policy is meaningless.
    if getattr(config, "read_policy", None) == EVENTUAL_CONSISTENCY:
        raise datastore_errors.BadRequestError(
            "read_policy is only supported on read operations."
        )
    entities, multiple = NormalizeAndTypeCheck(entities, Entity)
    for entity in entities:
        if entity.is_projection():
            raise datastore_errors.BadRequestError(
                "Cannot put a partial entity: %s" % entity
            )
        if not entity.kind() or not entity.app():
            raise datastore_errors.BadRequestError(
                "App and kind must not be empty, in entity: %s" % entity
            )

    def local_extra_hook(keys):
        # Sanity-check the RPC result and copy any newly assigned ids back
        # into the entities that were put.
        num_keys = len(keys)
        num_entities = len(entities)
        if num_keys != num_entities:
            raise datastore_errors.InternalError(
                "Put accepted %d entities but returned %d keys."
                % (num_entities, num_keys)
            )
        for entity, key in zip(entities, keys):
            if entity._Entity__key._Key__reference != key._Key__reference:
                # Only an incomplete key may legitimately differ from what
                # was sent; copy the server-assigned reference back.
                assert not entity._Entity__key.has_id_or_name()
                entity._Entity__key._Key__reference.CopyFrom(key._Key__reference)
        if multiple:
            result = keys
        else:
            result = keys[0]
        if extra_hook:
            return extra_hook(result)
        return result

    return _GetConnection().async_put(config, entities, local_extra_hook)
def Put(entities, **kwargs):
    """Store one or more entities in the datastore.

    The entities may be new or previously existing. For new entities, Put() will
    fill in the app id and key assigned by the datastore.

    If the argument is a single Entity, a single Key will be returned. If the
    argument is a list of Entity, a list of Keys will be returned.

    Args:
      entities: Entity or list of Entities
      config: Optional Configuration to use for this request, must be specified
        as a keyword argument.

    Returns:
      Key or list of Keys

    Raises:
      TransactionFailedError, if the Put could not be committed.
    """
    # Synchronous wrapper around PutAsync.
    return PutAsync(entities, **kwargs).get_result()
def GetAsync(keys, **kwargs):
    """Asynchronously retrieves one or more entities from the datastore.

    Identical to datastore.Get() except returns an asynchronous object. Call
    get_result() on the return value to block on the call and get the results.
    """
    extra_hook = kwargs.pop("extra_hook", None)
    config = _GetConfigFromKwargs(kwargs)
    keys, multiple = NormalizeAndTypeCheckKeys(keys)

    def local_extra_hook(entities):
        # Single-key Get unwraps the one-element list and treats a missing
        # entity as an error; multi-key Get keeps None placeholders.
        if multiple:
            result = entities
        else:
            if not entities or entities[0] is None:
                raise datastore_errors.EntityNotFoundError()
            result = entities[0]
        if extra_hook:
            return extra_hook(result)
        return result

    return _GetConnection().async_get(config, keys, local_extra_hook)
def Get(keys, **kwargs):
    """Retrieves one or more entities from the datastore.

    Retrieves the entity or entities with the given key(s) from the datastore
    and returns them as fully populated Entity objects, as defined below. If
    there is an error, raises a subclass of datastore_errors.Error.

    If keys is a single key or string, an Entity will be returned, or
    EntityNotFoundError will be raised if no existing entity matches the key.

    However, if keys is a list or tuple, a list of entities will be returned
    that corresponds to the sequence of keys. It will include entities for keys
    that were found and None placeholders for keys that were not found.

    Args:
      keys: Key or string or list of Keys or strings
      config: Optional Configuration to use for this request, must be specified
        as a keyword argument.

    Returns:
      Entity or list of Entity objects
    """
    # Synchronous wrapper around GetAsync.
    return GetAsync(keys, **kwargs).get_result()
def GetIndexesAsync(**kwargs):
    """Asynchronously retrieves the application indexes and their states.

    Identical to GetIndexes() except returns an asynchronous object. Call
    get_result() on the return value to block on the call and get the results.
    """
    extra_hook = kwargs.pop("extra_hook", None)
    config = _GetConfigFromKwargs(kwargs)

    def local_extra_hook(result):
        # Apply the caller's hook when given; otherwise pass through.
        return extra_hook(result) if extra_hook else result

    return _GetConnection().async_get_indexes(config, local_extra_hook)
def GetIndexes(**kwargs):
    """Retrieves the application indexes and their states.

    Args:
      config: Optional Configuration to use for this request, must be specified
        as a keyword argument.

    Returns:
      A list of (Index, Index.[BUILDING|SERVING|DELETING|ERROR]) tuples.
      An index can be in the following states:
        Index.BUILDING: Index is being built and therefore can not serve queries
        Index.SERVING: Index is ready to service queries
        Index.DELETING: Index is being deleted
        Index.ERROR: Index encountered an error in the BUILDING state
    """
    # Synchronous wrapper around GetIndexesAsync.
    return GetIndexesAsync(**kwargs).get_result()
def DeleteAsync(keys, **kwargs):
    """Asynchronously deletes one or more entities from the datastore.

    Identical to datastore.Delete() except returns an asynchronous object. Call
    get_result() on the return value to block on the call.
    """
    config = _GetConfigFromKwargs(kwargs)
    # Deletes are writes; an eventual-consistency read policy is meaningless.
    if getattr(config, "read_policy", None) == EVENTUAL_CONSISTENCY:
        raise datastore_errors.BadRequestError(
            "read_policy is only supported on read operations."
        )
    normalized_keys, _ = NormalizeAndTypeCheckKeys(keys)
    return _GetConnection().async_delete(config, normalized_keys)
def Delete(keys, **kwargs):
    """Deletes one or more entities from the datastore. Use with care!

    Deletes the given entity(ies) from the datastore. You can only delete
    entities from your app. If there is an error, raises a subclass of
    datastore_errors.Error.

    Args:
      # the primary key(s) of the entity(ies) to delete
      keys: Key or string or list of Keys or strings
      config: Optional Configuration to use for this request, must be specified
        as a keyword argument.

    Raises:
      TransactionFailedError, if the Delete could not be committed.
    """
    # Synchronous wrapper around DeleteAsync.
    return DeleteAsync(keys, **kwargs).get_result()
class Entity(dict):
"""A datastore entity.
Includes read-only accessors for app id, kind, and primary key. Also
provides dictionary-style access to properties.
"""
__projection = False
    def __init__(
        self,
        kind,
        parent=None,
        _app=None,
        name=None,
        id=None,
        unindexed_properties=[],
        namespace=None,
        **kwds
    ):
        """Constructor. Takes the kind and transaction root, which cannot be
        changed after the entity is constructed, and an optional parent. Raises
        BadArgumentError or BadKeyError if kind is invalid or parent is not an
        existing Entity or Key in the datastore.

        Args:
          # this entity's kind
          kind: string
          # if provided, this entity's parent. Its key must be complete.
          parent: Entity or Key
          # if provided, this entity's name.
          name: string
          # if provided, this entity's id.
          id: integer
          # if provided, a sequence of property names that should not be indexed
          # by the built-in single property indices.
          unindexed_properties: list or tuple of strings
          namespace: string
          # if provided, overrides the default namespace_manager setting.
        """
        # NOTE(review): the mutable default for unindexed_properties is only
        # read (set_unindexed_properties builds a new frozenset), never
        # mutated, so the shared-default pitfall does not apply here.
        # Build the key's underlying protobuf Reference incrementally.
        ref = entity_pb.Reference()
        _app = datastore_types.ResolveAppId(_app)
        ref.set_app(_app)
        # _namespace is a legacy alias for namespace; at most one may be set.
        _namespace = kwds.pop("_namespace", None)
        if kwds:
            raise datastore_errors.BadArgumentError(
                "Excess keyword arguments " + repr(kwds)
            )
        if namespace is None:
            namespace = _namespace
        elif _namespace is not None:
            raise datastore_errors.BadArgumentError(
                "Must not set both _namespace and namespace parameters."
            )
        datastore_types.ValidateString(kind, "kind", datastore_errors.BadArgumentError)
        if parent is not None:
            parent = _GetCompleteKeyOrError(parent)
            # The child must live in the same app (and namespace) as its parent.
            if _app != parent.app():
                raise datastore_errors.BadArgumentError(
                    " %s doesn't match parent's app %s" % (_app, parent.app())
                )
            if namespace is None:
                namespace = parent.namespace()
            elif namespace != parent.namespace():
                raise datastore_errors.BadArgumentError(
                    " %s doesn't match parent's namespace %s"
                    % (namespace, parent.namespace())
                )
            ref.CopyFrom(parent._Key__reference)
        namespace = datastore_types.ResolveNamespace(namespace)
        datastore_types.SetNamespace(ref, namespace)
        # Append this entity's own path element (kind plus optional name/id).
        last_path = ref.mutable_path().add_element()
        last_path.set_type(kind.encode("utf-8"))
        # name and id are mutually exclusive ways to complete the key.
        if name is not None and id is not None:
            raise datastore_errors.BadArgumentError(
                "Cannot set both name and id on an Entity"
            )
        if name is not None:
            datastore_types.ValidateString(name, "name")
            last_path.set_name(name.encode("utf-8"))
        if id is not None:
            datastore_types.ValidateInteger(id, "id")
            last_path.set_id(id)
        self.set_unindexed_properties(unindexed_properties)
        self.__key = Key._FromPb(ref)
    def app(self):
        """Returns the name of the application that created this entity, a
        string or None if not set.
        """
        # Delegates to the entity's Key, which owns all identity fields.
        return self.__key.app()
    def namespace(self):
        """Returns the namespace of this entity, a string or None."""
        # Delegates to the entity's Key.
        return self.__key.namespace()
    def kind(self):
        """Returns this entity's kind, a string."""
        # Delegates to the entity's Key.
        return self.__key.kind()
    def is_saved(self):
        """Returns if this entity has been saved to the datastore."""
        # A saved key has exactly one of name/id on its final path element
        # (hence the XOR) and reports a complete id-or-name.
        last_path = self.__key._Key__reference.path().element_list()[-1]
        return (
            last_path.has_name() ^ last_path.has_id()
        ) and self.__key.has_id_or_name()
    def is_projection(self):
        """Returns if this entity is a projection from full entity.

        Projected entities:
        - may not contain all properties from the original entity;
        - only contain single values for lists;
        - may not contain values with the same type as the original entity.
        """
        # Defaults to the class attribute False; presumably flipped by the
        # internal constructors for projection query results -- not visible here.
        return self.__projection
    def key(self):
        """Returns this entity's primary key, a Key instance."""
        return self.__key
    def parent(self):
        """Returns this entity's parent, as a Key. If this entity has no parent,
        returns None.
        """
        # Delegates to the Key's parent() accessor.
        return self.key().parent()
    def entity_group(self):
        """Returns this entity's entity group as a Key.

        Note that the returned Key will be incomplete if this is a root entity
        and its key is incomplete.
        """
        return self.key().entity_group()
    def unindexed_properties(self):
        """Returns this entity's unindexed properties, as a frozenset of strings."""
        # NOTE(review): falls back to a list (not a frozenset) when the
        # attribute is missing, contradicting the docstring -- confirm.
        return getattr(self, "_Entity__unindexed_properties", [])
    def set_unindexed_properties(self, unindexed_properties):
        """Sets the property names that should not be indexed.

        Accepts any sequence of valid property-name strings.  A bare string
        is rejected because NormalizeAndTypeCheck reports it as non-multiple.
        """
        unindexed_properties, multiple = NormalizeAndTypeCheck(
            unindexed_properties, str
        )
        if not multiple:
            raise datastore_errors.BadArgumentError(
                "unindexed_properties must be a sequence; received %s (a %s)."
                % (unindexed_properties, typename(unindexed_properties))
            )
        for prop in unindexed_properties:
            datastore_types.ValidateProperty(prop, None)
        self.__unindexed_properties = frozenset(unindexed_properties)
    def __setitem__(self, name, value):
        """Implements the [] operator. Used to set property value(s).

        If the property name is the empty string or not a string, raises
        BadPropertyError. If the value is not a supported type, raises
        BadValueError.
        """
        # Validate name and value before delegating to plain dict storage.
        datastore_types.ValidateProperty(name, value)
        dict.__setitem__(self, name, value)
    def setdefault(self, name, value):
        """If the property exists, returns its value. Otherwise sets it to value.

        If the property name is the empty string or not a string, raises
        BadPropertyError. If the value is not a supported type, raises
        BadValueError.
        """
        # Validate even when the property already exists (matches __setitem__).
        datastore_types.ValidateProperty(name, value)
        return dict.setdefault(self, name, value)
def update(self, other):
"""Updates this entity's properties from the values in other.
If any property name is the empty string or not a string, raises
BadPropertyError. If any value is not a supported type, raises
BadValueError.
"""
for name, value in list(other.items()):
self.__setitem__(name, value)
    def copy(self):
        """The copy method is not supported."""
        # Deliberately disabled: dict.copy would not produce a valid Entity.
        raise NotImplementedError("Entity does not support the copy() method.")
    def ToXml(self):
        """Returns an XML representation of this entity. Atom and gd:namespace
        properties are converted to XML according to their respective schemas. For
        more information, see:
        http://www.atomenabled.org/developers/syndication/
        http://code.google.com/apis/gdata/common-elements.html

        This is *not* optimized. It shouldn't be used anywhere near code that's
        performance-critical.

        Returns:
          str: an XML fragment describing this entity.
        """
        # quoteattr() both escapes the value and wraps it in quotes.
        xml = "<entity kind=%s" % saxutils.quoteattr(self.kind())
        if self.__key.has_id_or_name():
            # Only a complete key is worth serializing.
            xml += " key=%s" % saxutils.quoteattr(str(self.__key))
        xml += ">"
        if self.__key.has_id_or_name():
            xml += "\n <key>%s</key>" % self.__key.ToTagUri()
        properties = list(self.keys())
        if properties:
            # Sort for deterministic output.
            properties.sort()
            xml += "\n " + "\n ".join(self._PropertiesToXml(properties))
        xml += "\n</entity>\n"
        return xml
    def _PropertiesToXml(self, properties):
        """Returns a list of the XML representations of each of the given
        properties. Ignores properties that don't exist in this entity.

        Args:
          properties: string or list of strings

        Returns:
          list of strings
        """
        xml_properties = []
        for propname in properties:
            if propname not in self:
                # Silently skip properties this entity doesn't have.
                continue
            propname_xml = saxutils.quoteattr(propname)
            values = self[propname]
            if not isinstance(values, list):
                values = [values]
            # NOTE(review): assumes a multi-valued property is non-empty;
            # values[0] raises IndexError on an empty list — confirm callers
            # never store [].
            proptype = datastore_types.PropertyTypeName(values[0])
            proptype_xml = saxutils.quoteattr(proptype)
            escaped_values = self._XmlEscapeValues(propname)
            open_tag = "<property name=%s type=%s>" % (propname_xml, proptype_xml)
            close_tag = "</property>"
            # One <property> element per value.
            xml_properties += [open_tag + val + close_tag for val in escaped_values]
        return xml_properties
def _XmlEscapeValues(self, property):
"""Returns a list of the XML-escaped string values for the given property.
Raises an AssertionError if the property doesn't exist.
Arg:
property: string
Returns:
list of strings
"""
assert property in self
xml = []
values = self[property]
if not isinstance(values, list):
values = [values]
for val in values:
if hasattr(val, "ToXml"):
xml.append(val.ToXml())
else:
if val is None:
xml.append("")
else:
xml.append(saxutils.escape(str(val)))
return xml
def ToPb(self):
"""Converts this Entity to its protocol buffer representation.
Returns:
entity_pb.Entity
"""
return self._ToPb(False)
def _ToPb(self, mark_key_as_saved=True):
"""Converts this Entity to its protocol buffer representation. Not
intended to be used by application developers.
Returns:
entity_pb.Entity
"""
pb = entity_pb.EntityProto()
pb.mutable_key().CopyFrom(self.key()._ToPb())
last_path = pb.key().path().element_list()[-1]
if mark_key_as_saved and last_path.has_name() and last_path.has_id():
last_path.clear_id()
group = pb.mutable_entity_group()
if self.__key.has_id_or_name():
root = pb.key().path().element(0)
group.add_element().CopyFrom(root)
properties = list(self.items())
properties.sort()
for name, values in properties:
properties = datastore_types.ToPropertyPb(name, values)
if not isinstance(properties, list):
properties = [properties]
for prop in properties:
if (
prop.has_meaning()
and prop.meaning() in datastore_types._RAW_PROPERTY_MEANINGS
) or name in self.unindexed_properties():
pb.raw_property_list().append(prop)
else:
pb.property_list().append(prop)
if pb.property_size() > _MAX_INDEXED_PROPERTIES:
raise datastore_errors.BadRequestError(
"Too many indexed properties for entity %r." % self.key()
)
return pb
@staticmethod
def FromPb(pb, validate_reserved_properties=True, default_kind="<not specified>"):
"""Static factory method. Returns the Entity representation of the
given protocol buffer (datastore_pb.Entity).
Args:
pb: datastore_pb.Entity or str encoding of a datastore_pb.Entity
validate_reserved_properties: deprecated
default_kind: str, the kind to use if the pb has no key.
Returns:
Entity: the Entity representation of pb
"""
if isinstance(pb, str):
real_pb = entity_pb.EntityProto()
real_pb.ParsePartialFromString(pb)
pb = real_pb
return Entity._FromPb(pb, require_valid_key=False, default_kind=default_kind)
@staticmethod
def _FromPb(pb, require_valid_key=True, default_kind="<not specified>"):
"""Static factory method. Returns the Entity representation of the
given protocol buffer (datastore_pb.Entity). Not intended to be used by
application developers.
The Entity PB's key must be complete. If it isn't, an AssertionError is
raised.
Args:
# a protocol buffer Entity
pb: datastore_pb.Entity
default_kind: str, the kind to use if the pb has no key.
Returns:
# the Entity representation of the argument
Entity
"""
if not pb.key().path().element_size():
pb.mutable_key().CopyFrom(Key.from_path(default_kind, 0)._ToPb())
last_path = pb.key().path().element_list()[-1]
if require_valid_key:
assert last_path.has_id() ^ last_path.has_name()
if last_path.has_id():
assert last_path.id() != 0
else:
assert last_path.has_name()
assert last_path.name()
unindexed_properties = [str(p.name(), "utf-8") for p in pb.raw_property_list()]
if pb.key().has_name_space():
namespace = pb.key().name_space()
else:
namespace = ""
e = Entity(
str(last_path.type(), "utf-8"),
unindexed_properties=unindexed_properties,
_app=pb.key().app(),
namespace=namespace,
)
ref = e.__key._Key__reference
ref.CopyFrom(pb.key())
temporary_values = {}
for prop_list in (pb.property_list(), pb.raw_property_list()):
for prop in prop_list:
if prop.meaning() == entity_pb.Property.INDEX_VALUE:
e.__projection = True
try:
value = datastore_types.FromPropertyPb(prop)
except (AssertionError, AttributeError, TypeError, ValueError) as e:
raise datastore_errors.Error(
"Property %s is corrupt in the datastore:\n%s"
% (prop.name(), traceback.format_exc())
)
multiple = prop.multiple()
if multiple:
value = [value]
name = prop.name()
cur_value = temporary_values.get(name)
if cur_value is None:
temporary_values[name] = value
elif not multiple or not isinstance(cur_value, list):
raise datastore_errors.Error(
"Property %s is corrupt in the datastore; it has multiple "
"values, but is not marked as multiply valued." % name
)
else:
cur_value.extend(value)
for name, value in temporary_values.items():
decoded_name = str(name, "utf-8")
datastore_types.ValidateReadProperty(decoded_name, value)
dict.__setitem__(e, decoded_name, value)
return e
class Query(dict):
"""A datastore query.
(Instead of this, consider using appengine.ext.gql.Query! It provides a
query language interface on top of the same functionality.)
Queries are used to retrieve entities that match certain criteria, including
app id, kind, and property filters. Results may also be sorted by properties.
App id and kind are required. Only entities from the given app, of the given
type, are returned. If an ancestor is set, with Ancestor(), only entities
with that ancestor are returned.
Property filters are used to provide criteria based on individual property
values. A filter compares a specific property in each entity to a given
value or list of possible values.
An entity is returned if its property values match *all* of the query's
filters. In other words, filters are combined with AND, not OR. If an
entity does not have a value for a property used in a filter, it is not
returned.
Property filters map filter strings of the form '<property name> <operator>'
to filter values. Use dictionary accessors to set property filters, like so:
> query = Query('Person')
> query['name ='] = 'Ryan'
> query['age >='] = 21
This query returns all Person entities where the name property is 'Ryan'
and the age property is at least 21.
Another way to build this query is:
> query = Query('Person')
> query.update({'name =': 'Ryan', 'age >=': 21})
The supported operators are =, >, <, >=, and <=. Only one inequality
filter may be used per query. Any number of equals filters may be used in
a single Query.
A filter value may be a list or tuple of values. This is interpreted as
multiple filters with the same filter string and different values, all ANDed
together. For example, this query returns everyone with the tags "google"
and "app engine":
> Query('Person', {'tag =': ('google', 'app engine')})
Result entities can be returned in different orders. Use the Order()
method to specify properties that results will be sorted by, and in which
direction.
Note that filters and orderings may be provided at any time before the query
is run. When the query is fully specified, Run() runs the query and returns
an iterator. The query results can be accessed through the iterator.
A query object may be reused after it's been run. Its filters and
orderings can be changed to create a modified query.
If you know how many result entities you need, use Get() to fetch them:
> query = Query('Person', {'age >': 21})
> for person in query.Get(4):
> print 'I have four pints left. Have one on me, %s!' % person['name']
If you don't know how many results you need, or if you need them all, you
can get an iterator over the results by calling Run():
> for person in Query('Person', {'age >': 21}).Run():
> print 'Have a pint on me, %s!' % person['name']
Get() is more efficient than Run(), so use Get() whenever possible.
Finally, the Count() method returns the number of result entities matched by
the query. The returned count is cached; successive Count() calls will not
re-scan the datastore unless the query is changed.
"""
ASCENDING = datastore_query.PropertyOrder.ASCENDING
DESCENDING = datastore_query.PropertyOrder.DESCENDING
ORDER_FIRST = datastore_query.QueryOptions.ORDER_FIRST
ANCESTOR_FIRST = datastore_query.QueryOptions.ANCESTOR_FIRST
FILTER_FIRST = datastore_query.QueryOptions.FILTER_FIRST
OPERATORS = {"==": datastore_query.PropertyFilter._OPERATORS["="]}
OPERATORS.update(datastore_query.PropertyFilter._OPERATORS)
INEQUALITY_OPERATORS = datastore_query.PropertyFilter._INEQUALITY_OPERATORS
UPPERBOUND_INEQUALITY_OPERATORS = frozenset(["<", "<="])
FILTER_REGEX = re.compile(
"^\s*([^\s]+)(\s+(%s)\s*)?$" % "|".join(OPERATORS), re.IGNORECASE | re.UNICODE
)
__kind = None
__app = None
__namespace = None
__orderings = None
__ancestor_pb = None
__distinct = False
__group_by = None
__index_list_source = None
__cursor_source = None
__compiled_query_source = None
__filter_order = None
__filter_counter = 0
__inequality_prop = None
__inequality_count = 0
    def __init__(
        self,
        kind=None,
        filters={},
        _app=None,
        keys_only=False,
        compile=True,
        cursor=None,
        namespace=None,
        end_cursor=None,
        projection=None,
        distinct=None,
        _namespace=None,
    ):
        """Constructor.

        Raises BadArgumentError if kind is not a string. Raises BadValueError or
        BadFilterError if filters is not a dictionary of valid filters.

        Args:
          namespace: string, the namespace to query.
          kind: string, the kind of entities to query, or None.
          filters: dict, initial set of filters.
          keys_only: boolean, if keys should be returned instead of entities.
          projection: iterable of property names to project.
          distinct: boolean, if projection should be distinct.
          compile: boolean, if the query should generate cursors.
          cursor: datastore_query.Cursor, the start cursor to use.
          end_cursor: datastore_query.Cursor, the end cursor to use.
          _namespace: deprecated, use namespace instead.
        """
        # NOTE: filters={} is a shared mutable default; it is only read (via
        # update() below) and never mutated here, so it is safe in practice.
        if namespace is None:
            namespace = _namespace
        elif _namespace is not None:
            raise datastore_errors.BadArgumentError(
                "Must not set both _namespace and namespace parameters."
            )
        if kind is not None:
            datastore_types.ValidateString(
                kind, "kind", datastore_errors.BadArgumentError
            )
        self.__kind = kind
        self.__orderings = []
        self.__filter_order = {}
        # Installs the initial filters through __setitem__, which validates
        # each one.
        self.update(filters)
        self.__app = datastore_types.ResolveAppId(_app)
        self.__namespace = datastore_types.ResolveNamespace(namespace)
        self.__query_options = datastore_query.QueryOptions(
            keys_only=keys_only,
            produce_cursors=compile,
            start_cursor=cursor,
            end_cursor=end_cursor,
            projection=projection,
        )
        if distinct:
            if not self.__query_options.projection:
                raise datastore_errors.BadQueryError(
                    "cannot specify distinct without a projection"
                )
            self.__distinct = True
            # Distinct queries group on exactly the projected properties.
            self.__group_by = self.__query_options.projection
    def Order(self, *orderings):
        """Specify how the query results should be sorted.

        Result entities will be sorted by the first property argument, then by the
        second, and so on. For example, this:

        > query = Query('Person')
        > query.Order('bday', ('age', Query.DESCENDING))

        sorts everyone in order of their birthday, starting with January 1.
        People with the same birthday are sorted by age, oldest to youngest.

        The direction for each sort property may be provided; if omitted, it
        defaults to ascending.

        Order() may be called multiple times. Each call resets the sort order
        from scratch.

        If an inequality filter exists in this Query it must be the first property
        passed to Order. Any number of sort orders may be used after the
        inequality filter property. Without inequality filters, any number of
        filters with different orders may be specified.

        Entities with multiple values for an order property are sorted by their
        lowest value.

        Note that a sort order implies an existence filter! In other words,
        Entities without the sort order property are filtered out, and *not*
        included in the query results.

        If the sort order property has different types in different entities - ie,
        if bob['id'] is an int and fred['id'] is a string - the entities will be
        grouped first by the property type, then sorted within type. No attempt is
        made to compare property values across types.

        Raises BadArgumentError if any argument is of the wrong format.

        Args:
          # the properties to sort by, in sort order. each argument may be either a
          # string or (string, direction) 2-tuple.

        Returns:
          # this query
          Query
        """
        orderings = list(orderings)
        for order, i in zip(orderings, list(range(len(orderings)))):
            # Each entry is either "prop" or a (prop, direction[, ...]) tuple.
            if not (
                isinstance(order, str)
                or (isinstance(order, tuple) and len(order) in [2, 3])
            ):
                raise datastore_errors.BadArgumentError(
                    "Order() expects strings or 2- or 3-tuples; received %s (a %s). "
                    % (order, typename(order))
                )
            if isinstance(order, str):
                # Normalize a bare property name to a 1-tuple.
                order = (order,)
            datastore_types.ValidateString(
                order[0], "sort order property", datastore_errors.BadArgumentError
            )
            property = order[0]
            direction = order[-1]
            if direction not in (Query.ASCENDING, Query.DESCENDING):
                if len(order) == 3:
                    # A 3-tuple must carry an explicit valid direction.
                    raise datastore_errors.BadArgumentError(
                        "Order() expects Query.ASCENDING or DESCENDING; received %s"
                        % str(direction)
                    )
                direction = Query.ASCENDING
            if self.__kind is None and (
                property != datastore_types.KEY_SPECIAL_PROPERTY
                or direction != Query.ASCENDING
            ):
                raise datastore_errors.BadArgumentError(
                    "Only %s ascending orders are supported on kindless queries"
                    % datastore_types.KEY_SPECIAL_PROPERTY
                )
            # Store the normalized (property, direction) pair.
            orderings[i] = (property, direction)
        if (
            orderings
            and self.__inequality_prop
            and orderings[0][0] != self.__inequality_prop
        ):
            raise datastore_errors.BadArgumentError(
                "First ordering property must be the same as inequality filter "
                "property, if specified for this query; received %s, expected %s"
                % (orderings[0][0], self.__inequality_prop)
            )
        self.__orderings = orderings
        return self
def Hint(self, hint):
"""Sets a hint for how this query should run.
The query hint gives us information about how best to execute your query.
Currently, we can only do one index scan, so the query hint should be used
to indicates which index we should scan against.
Use FILTER_FIRST if your first filter will only match a few results. In
this case, it will be most efficient to scan against the index for this
property, load the results into memory, and apply the remaining filters
and sort orders there.
Similarly, use ANCESTOR_FIRST if the query's ancestor only has a few
descendants. In this case, it will be most efficient to scan all entities
below the ancestor and load them into memory first.
Use ORDER_FIRST if the query has a sort order and the result set is large
or you only plan to fetch the first few results. In that case, we
shouldn't try to load all of the results into memory; instead, we should
scan the index for this property, which is in sorted order.
Note that hints are currently ignored in the v3 datastore!
Arg:
one of datastore.Query.[ORDER_FIRST, ANCESTOR_FIRST, FILTER_FIRST]
Returns:
# this query
Query
"""
if hint is not self.__query_options.hint:
self.__query_options = datastore_query.QueryOptions(
hint=hint, config=self.__query_options
)
return self
def Ancestor(self, ancestor):
"""Sets an ancestor for this query.
This restricts the query to only return result entities that are descended
from a given entity. In other words, all of the results will have the
ancestor as their parent, or parent's parent, or etc.
Raises BadArgumentError or BadKeyError if parent is not an existing Entity
or Key in the datastore.
Args:
# the key must be complete
ancestor: Entity or Key
Returns:
# this query
Query
"""
self.__ancestor_pb = _GetCompleteKeyOrError(ancestor)._ToPb()
return self
def IsKeysOnly(self):
"""Returns True if this query is keys only, false otherwise."""
return self.__query_options.keys_only
def GetQueryOptions(self):
"""Returns a datastore_query.QueryOptions for the current instance."""
return self.__query_options
def GetQuery(self):
"""Returns a datastore_query.Query for the current instance."""
return datastore_query.Query(
app=self.__app,
namespace=self.__namespace,
kind=self.__kind,
ancestor=self.__ancestor_pb,
filter_predicate=self.GetFilterPredicate(),
order=self.GetOrder(),
group_by=self.__group_by,
)
def GetOrder(self):
"""Gets a datastore_query.Order for the current instance.
Returns:
datastore_query.Order or None if there are no sort orders set on the
current Query.
"""
orders = [
datastore_query.PropertyOrder(property, direction)
for property, direction in self.__orderings
]
if orders:
return datastore_query.CompositeOrder(orders)
return None
def GetFilterPredicate(self):
"""Returns a datastore_query.FilterPredicate for the current instance.
Returns:
datastore_query.FilterPredicate or None if no filters are set on the
current Query.
"""
ordered_filters = [(i, f) for f, i in self.__filter_order.items()]
ordered_filters.sort()
property_filters = []
for _, filter_str in ordered_filters:
if filter_str not in self:
continue
values = self[filter_str]
match = self._CheckFilter(filter_str, values)
name = match.group(1)
op = match.group(3)
if op is None or op == "==":
op = "="
property_filters.append(datastore_query.make_filter(name, op, values))
if property_filters:
return datastore_query.CompositeFilter(
datastore_query.CompositeFilter.AND, property_filters
)
return None
def GetDistinct(self):
"""Returns True if the current instance is distinct.
Returns:
A boolean indicating if the distinct flag is set.
"""
return self.__distinct
def GetIndexList(self):
"""Get the index list from the last run of this query.
Returns:
A list of indexes used by the last run of this query.
Raises:
AssertionError: The query has not yet been run.
"""
index_list_function = self.__index_list_source
if index_list_function:
return index_list_function()
raise AssertionError(
"No index list available because this query has not " "been executed"
)
def GetCursor(self):
"""Get the cursor from the last run of this query.
The source of this cursor varies depending on what the last call was:
- Run: A cursor that points immediately after the last result pulled off
the returned iterator.
- Get: A cursor that points immediately after the last result in the
returned list.
- Count: A cursor that points immediately after the last result counted.
Returns:
A datastore_query.Cursor object that can be used in subsequent query
requests.
Raises:
AssertionError: The query has not yet been run or cannot be compiled.
"""
cursor_function = self.__cursor_source
if cursor_function:
cursor = cursor_function()
if cursor:
return cursor
raise AssertionError(
"No cursor available, either this query has not "
"been executed or there is no compilation "
"available for this kind of query"
)
def GetBatcher(self, config=None):
"""Runs this query and returns a datastore_query.Batcher.
This is not intended to be used by application developers. Use Get()
instead!
Args:
config: Optional Configuration to use for this request.
Returns:
# an iterator that provides access to the query results
Iterator
"""
query_options = self.GetQueryOptions().merge(config)
if self.__distinct and query_options.projection != self.__group_by:
raise datastore_errors.BadArgumentError(
"cannot override projection when distinct is set"
)
return self.GetQuery().run(_GetConnection(), query_options)
def Run(self, **kwargs):
"""Runs this query.
If a filter string is invalid, raises BadFilterError. If a filter value is
invalid, raises BadValueError. If an IN filter is provided, and a sort
order on another property is provided, raises BadQueryError.
If you know in advance how many results you want, use limit=#. It's
more efficient.
Args:
kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().
Returns:
# an iterator that provides access to the query results
Iterator
"""
config = _GetConfigFromKwargs(
kwargs, convert_rpc=True, config_class=datastore_query.QueryOptions
)
itr = Iterator(self.GetBatcher(config=config))
self.__index_list_source = itr.GetIndexList
self.__cursor_source = itr.cursor
self.__compiled_query_source = itr._compiled_query
return itr
def Get(self, limit, offset=0, **kwargs):
"""Deprecated, use list(Run(...)) instead.
Args:
limit: int or long representing the maximum number of entities to return.
offset: int or long representing the number of entities to skip
kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().
Returns:
# a list of entities
[Entity, ...]
"""
if limit is None:
kwargs.setdefault("batch_size", _MAX_INT_32)
return list(self.Run(limit=limit, offset=offset, **kwargs))
    def Count(self, limit=1000, **kwargs):
        """Returns the number of entities that this query matches.

        Args:
          limit, a number or None. If there are more results than this, stop short
            and just return this number. Providing this argument makes the count
            operation more efficient.
          config: Optional Configuration to use for this request.

        Returns:
          The number of results.
        """
        original_offset = kwargs.pop("offset", 0)
        if limit is None:
            offset = _MAX_INT_32
        else:
            offset = min(limit + original_offset, _MAX_INT_32)
        # Counting is implemented as a skip-everything query: limit=0 with a
        # large offset makes the backend report how many results it skipped.
        kwargs["limit"] = 0
        kwargs["offset"] = offset
        config = _GetConfigFromKwargs(
            kwargs, convert_rpc=True, config_class=datastore_query.QueryOptions
        )
        batch = next(self.GetBatcher(config=config))
        self.__index_list_source = lambda: [index for index, state in batch.index_list]
        self.__cursor_source = lambda: batch.cursor(0)
        self.__compiled_query_source = lambda: batch._compiled_query
        # skipped_results includes the caller-requested offset; subtract it.
        return max(0, batch.skipped_results - original_offset)
def __iter__(self):
raise NotImplementedError(
"Query objects should not be used as iterators. Call Run() first."
)
def __getstate__(self):
state = self.__dict__.copy()
state["_Query__index_list_source"] = None
state["_Query__cursor_source"] = None
state["_Query__compiled_query_source"] = None
return state
    def __setstate__(self, state):
        """Pickle support: restore state, upgrading pickles that predate
        the consolidated QueryOptions attribute."""
        if "_Query__query_options" not in state:
            # Old pickles stored the option fields individually; rebuild a
            # QueryOptions from them and discard the legacy keys.
            state["_Query__query_options"] = datastore_query.QueryOptions(
                keys_only=state.pop("_Query__keys_only"),
                produce_cursors=state.pop("_Query__compile"),
                start_cursor=state.pop("_Query__cursor"),
                end_cursor=state.pop("_Query__end_cursor"),
            )
        self.__dict__ = state
    def __setitem__(self, filter, value):
        """Implements the [] operator. Used to set filters.

        If the filter string is empty or not a string, raises BadFilterError. If
        the value is not a supported type, raises BadValueError.
        """
        if isinstance(value, tuple):
            value = list(value)
        # " " is a placeholder: only the *value* is validated here; the
        # property name itself is validated by _CheckFilter below.
        datastore_types.ValidateProperty(" ", value)
        match = self._CheckFilter(filter, value)
        property = match.group(1)
        operator = match.group(3)
        dict.__setitem__(self, filter, value)
        if (
            operator in self.INEQUALITY_OPERATORS
            and property != datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY
        ):
            # Track the single property that may carry inequality filters.
            if self.__inequality_prop is None:
                self.__inequality_prop = property
            else:
                assert self.__inequality_prop == property
            self.__inequality_count += 1
        if filter not in self.__filter_order:
            # Remember insertion order so filters are applied predictably.
            self.__filter_order[filter] = self.__filter_counter
            self.__filter_counter += 1
    def setdefault(self, filter, value):
        """If the filter exists, returns its value. Otherwise sets it to value.

        If the property name is the empty string or not a string, raises
        BadPropertyError. If the value is not a supported type, raises
        BadValueError.
        """
        datastore_types.ValidateProperty(" ", value)
        self._CheckFilter(filter, value)
        # NOTE(review): unlike __setitem__, this path updates neither
        # __filter_order nor the inequality bookkeeping — confirm intended.
        return dict.setdefault(self, filter, value)
    def __delitem__(self, filter):
        """Implements the del [] operator. Used to remove filters."""
        dict.__delitem__(self, filter)
        del self.__filter_order[filter]
        # The filter string was validated when it was added, so it must match.
        match = Query.FILTER_REGEX.match(filter)
        property = match.group(1)
        operator = match.group(3)
        if operator in self.INEQUALITY_OPERATORS:
            # Unwind the inequality bookkeeping done in __setitem__.
            assert self.__inequality_count >= 1
            assert property == self.__inequality_prop
            self.__inequality_count -= 1
            if self.__inequality_count == 0:
                self.__inequality_prop = None
def update(self, other):
"""Updates this query's filters from the ones in other.
If any filter string is invalid, raises BadFilterError. If any value is
not a supported type, raises BadValueError.
"""
for filter, value in list(other.items()):
self.__setitem__(filter, value)
def copy(self):
"""The copy method is not supported."""
raise NotImplementedError("Query does not support the copy() method.")
    def _CheckFilter(self, filter, values):
        """Type check a filter string and list of values.

        Raises BadFilterError if the filter string is empty, not a string, or
        invalid. Raises BadValueError if the value type is not supported.

        Args:
          filter: String containing the filter text.
          values: List of associated filter values.

        Returns:
          re.MatchObject (never None) that matches the 'filter'. Group 1 is the
          property name, group 3 is the operator. (Group 2 is unused.)
        """
        try:
            match = Query.FILTER_REGEX.match(filter)
            if not match:
                raise datastore_errors.BadFilterError(
                    "Could not parse filter string: %s" % str(filter)
                )
        except TypeError:
            # filter was not a string at all.
            raise datastore_errors.BadFilterError(
                "Could not parse filter string: %s" % str(filter)
            )
        property = match.group(1)
        operator = match.group(3)
        if operator is None:
            # A bare property name means equality.
            operator = "="
        if isinstance(values, tuple):
            values = list(values)
        elif not isinstance(values, list):
            values = [values]
        if isinstance(values[0], datastore_types._RAW_PROPERTY_TYPES):
            raise datastore_errors.BadValueError(
                "Filtering on %s properties is not supported." % typename(values[0])
            )
        if (
            operator in self.INEQUALITY_OPERATORS
            and property != datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY
        ):
            # Enforce the single-inequality-property rule, plus its
            # interaction with the first sort order.
            if self.__inequality_prop and property != self.__inequality_prop:
                raise datastore_errors.BadFilterError(
                    "Only one property per query may have inequality filters (%s)."
                    % ", ".join(self.INEQUALITY_OPERATORS)
                )
            elif len(self.__orderings) >= 1 and self.__orderings[0][0] != property:
                raise datastore_errors.BadFilterError(
                    "Inequality operators (%s) must be on the same property as the "
                    "first sort order, if any sort orders are supplied"
                    % ", ".join(self.INEQUALITY_OPERATORS)
                )
        if (
            self.__kind is None
            and property != datastore_types.KEY_SPECIAL_PROPERTY
            and property != datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY
        ):
            raise datastore_errors.BadFilterError(
                "Only %s filters are allowed on kindless queries."
                % datastore_types.KEY_SPECIAL_PROPERTY
            )
        if property == datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY:
            if self.__kind:
                raise datastore_errors.BadFilterError(
                    "Only kindless queries can have %s filters."
                    % datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY
                )
            if not operator in self.UPPERBOUND_INEQUALITY_OPERATORS:
                raise datastore_errors.BadFilterError(
                    "Only %s operators are supported with %s filters."
                    % (
                        self.UPPERBOUND_INEQUALITY_OPERATORS,
                        datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY,
                    )
                )
        if property in datastore_types._SPECIAL_PROPERTIES:
            if property == datastore_types.KEY_SPECIAL_PROPERTY:
                # __key__ filters must compare against Key values.
                for value in values:
                    if not isinstance(value, Key):
                        raise datastore_errors.BadFilterError(
                            "%s filter value must be a Key; received %s (a %s)"
                            % (
                                datastore_types.KEY_SPECIAL_PROPERTY,
                                value,
                                typename(value),
                            )
                        )
        return match
    def _Run(
        self, limit=None, offset=None, prefetch_count=None, next_count=None, **kwargs
    ):
        """Deprecated, use Run() instead.

        Maps the legacy prefetch_count/next_count arguments onto Run()'s
        prefetch_size/batch_size options.
        """
        return self.Run(
            limit=limit,
            offset=offset,
            prefetch_size=prefetch_count,
            batch_size=next_count,
            **kwargs
        )
def _ToPb(self, limit=None, offset=None, count=None):
query_options = datastore_query.QueryOptions(
config=self.GetQueryOptions(), limit=limit, offset=offset, batch_size=count
)
return self.GetQuery()._to_pb(_GetConnection(), query_options)
def _GetCompiledQuery(self):
"""Returns the internal-only pb representation of the last query run.
Do not use.
Raises:
AssertionError: Query not compiled or not yet executed.
"""
compiled_query_function = self.__compiled_query_source
if compiled_query_function:
compiled_query = compiled_query_function()
if compiled_query:
return compiled_query
raise AssertionError(
"No compiled query available, either this query has "
"not been executed or there is no compilation "
"available for this kind of query"
)
GetCompiledQuery = _GetCompiledQuery
GetCompiledCursor = GetCursor
def AllocateIdsAsync(model_key, size=None, **kwargs):
    """Asynchronously allocates a range of IDs.

    Identical to datastore.AllocateIds() except it returns an asynchronous
    object. Call get_result() on the return value to block on the call and
    get the results.

    Args:
      model_key: Key or string naming the ID sequence to allocate from.
      size: integer, number of IDs to allocate.
      **kwargs: may include 'max' (upper bound of the range to allocate)
        and configuration options.

    Returns:
      An async RPC object; get_result() yields (start, end).
    """
    # Renamed local from `max` to avoid shadowing the builtin.
    max_id = kwargs.pop("max", None)
    config = _GetConfigFromKwargs(kwargs)
    if getattr(config, "read_policy", None) == EVENTUAL_CONSISTENCY:
        raise datastore_errors.BadRequestError(
            "read_policy is only supported on read operations."
        )
    keys, _ = NormalizeAndTypeCheckKeys(model_key)
    if len(keys) > 1:
        raise datastore_errors.BadArgumentError(
            "Cannot allocate IDs for more than one model key at a time"
        )
    rpc = _GetConnection().async_allocate_ids(config, keys[0], size, max_id)
    return rpc
def AllocateIds(model_key, size=None, **kwargs):
    """Allocates a range of IDs of size or with max for the given key.

    The allocated IDs will never be automatically assigned to new entities.
    You can only allocate IDs for model keys from your app. On error, a
    subclass of datastore_errors.Error is raised.

    Exactly one of size and max must be provided. With size, a range of that
    size is returned. With max, the largest range of IDs that is safe to use
    with an upper bound of max is returned (possibly empty). Use max only to
    reserve an existing numeric id range (e.g. bulk loading entities that
    already have IDs); otherwise prefer size.

    Args:
      model_key: Key or string to serve as a model specifying the ID sequence
        in which to allocate IDs
      size: integer, number of IDs to allocate.
      max: integer, upper bound of the range of IDs to allocate.
      config: Optional Configuration to use for this request.

    Returns:
      (start, end) of the allocated range, inclusive.
    """
    return AllocateIdsAsync(model_key, size, **kwargs).get_result()
class MultiQuery(Query):
"""Class representing a query which requires multiple datastore queries.
This class is actually a subclass of datastore.Query as it is intended to act
like a normal Query object (supporting the same interface).
Does not support keys only queries, since it needs whole entities in order
to merge sort them. (That's not true if there are no sort orders, or if the
sort order is on __key__, but allowing keys only queries in those cases, but
not in others, would be confusing.)
"""
    def __init__(self, bound_queries, orderings):
        """Constructor.

        Args:
          bound_queries: list of Query objects whose results will be merged;
            limited to MAX_ALLOWABLE_QUERIES entries.
          orderings: sort orders used to merge-sort the subquery results.

        Raises:
          datastore_errors.BadArgumentError: too many subqueries.
          datastore_errors.BadQueryError: the subqueries have mismatched
            projections, or a subquery is keys-only.
        """
        if len(bound_queries) > MAX_ALLOWABLE_QUERIES:
            raise datastore_errors.BadArgumentError(
                "Cannot satisfy query -- too many subqueries (max: %d, got %d)."
                " Probable cause: too many IN/!= filters in query."
                % (MAX_ALLOWABLE_QUERIES, len(bound_queries))
            )
        # Every subquery must agree with the first one's projection.
        projection = bound_queries and bound_queries[0].GetQueryOptions().projection
        for query in bound_queries:
            if projection != query.GetQueryOptions().projection:
                raise datastore_errors.BadQueryError(
                    "All queries must have the same projection."
                )
            if query.IsKeysOnly():
                # Merge-sorting requires whole entities.
                raise datastore_errors.BadQueryError(
                    "MultiQuery does not support keys_only."
                )
        # NOTE(review): Query.__init__ is deliberately not called here, so
        # the base class state is left at its class-level defaults — confirm.
        self.__projection = projection
        self.__bound_queries = bound_queries
        self.__orderings = orderings
        # Cursors are not supported across merged subqueries.
        self.__compile = False
def __str__(self):
res = "MultiQuery: "
for query in self.__bound_queries:
res = "%s %s" % (res, str(query))
return res
def Get(self, limit, offset=0, **kwargs):
"""Deprecated, use list(Run(...)) instead.
Args:
limit: int or long representing the maximum number of entities to return.
offset: int or long representing the number of entities to skip
kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().
Returns:
A list of entities with at most "limit" entries (less if the query
completes before reading limit values).
"""
if limit is None:
kwargs.setdefault("batch_size", _MAX_INT_32)
return list(self.Run(limit=limit, offset=offset, **kwargs))
class SortOrderEntity(object):
"""Allow entity comparisons using provided orderings.
The iterator passed to the constructor is eventually consumed via
calls to GetNext(), which generate new SortOrderEntity s with the
same orderings.
"""
def __init__(self, entity_iterator, orderings):
"""Ctor.
Args:
entity_iterator: an iterator of entities which will be wrapped.
orderings: an iterable of (identifier, order) pairs. order
should be either Query.ASCENDING or Query.DESCENDING.
"""
self.__entity_iterator = entity_iterator
self.__entity = None
self.__min_max_value_cache = {}
try:
self.__entity = next(entity_iterator)
except StopIteration:
pass
else:
self.__orderings = orderings
def __str__(self):
return str(self.__entity)
def GetEntity(self):
"""Gets the wrapped entity."""
return self.__entity
def GetNext(self):
"""Wrap and return the next entity.
The entity is retrieved from the iterator given at construction time.
"""
return MultiQuery.SortOrderEntity(self.__entity_iterator, self.__orderings)
def CmpProperties(self, that):
"""Compare two entities and return their relative order.
Compares self to that based on the current sort orderings and the
key orders between them. Returns negative, 0, or positive depending on
whether self is less, equal to, or greater than that. This
comparison returns as if all values were to be placed in ascending order
(highest value last). Only uses the sort orderings to compare (ignores
keys).
Args:
that: SortOrderEntity
Returns:
Negative if self < that
Zero if self == that
Positive if self > that
"""
if not self.__entity:
return cmp(self.__entity, that.__entity)
for identifier, order in self.__orderings:
value1 = self.__GetValueForId(self, identifier, order)
value2 = self.__GetValueForId(that, identifier, order)
result = cmp(value1, value2)
if order == Query.DESCENDING:
result = -result
if result:
return result
return 0
def __GetValueForId(self, sort_order_entity, identifier, sort_order):
value = _GetPropertyValue(sort_order_entity.__entity, identifier)
if isinstance(value, list):
entity_key = sort_order_entity.__entity.key()
if (entity_key, identifier) in self.__min_max_value_cache:
value = self.__min_max_value_cache[(entity_key, identifier)]
elif sort_order == Query.DESCENDING:
value = min(value)
else:
value = max(value)
self.__min_max_value_cache[(entity_key, identifier)] = value
return value
def __cmp__(self, that):
"""Compare self to that w.r.t. values defined in the sort order.
Compare an entity with another, using sort-order first, then the key
order to break ties. This can be used in a heap to have faster min-value
lookup.
Args:
that: other entity to compare to
Returns:
negative: if self is less than that in sort order
zero: if self is equal to that in sort order
positive: if self is greater than that in sort order
"""
property_compare = self.CmpProperties(that)
if property_compare:
return property_compare
else:
return cmp(self.__entity.key(), that.__entity.key())
def _ExtractBounds(self, config):
"""This function extracts the range of results to consider.
Since MultiQuery dedupes in memory, we must apply the offset and limit in
memory. The results that should be considered are
results[lower_bound:upper_bound].
We also pass the offset=0 and limit=upper_bound to the base queries to
optimize performance.
Args:
config: The base datastore_query.QueryOptions.
Returns:
a tuple consisting of the lower_bound and upper_bound to impose in memory
and the config to use with each bound query. The upper_bound may be None.
"""
if config is None:
return 0, None, None
lower_bound = config.offset or 0
upper_bound = config.limit
if lower_bound:
if upper_bound is not None:
upper_bound = min(lower_bound + upper_bound, _MAX_INT_32)
config = datastore_query.QueryOptions(
offset=0, limit=upper_bound, config=config
)
return lower_bound, upper_bound, config
def __GetProjectionOverride(self, config):
"""Returns a tuple of (original projection, projeciton override).
If projection is None, there is no projection. If override is None,
projection is sufficent for this query.
"""
projection = datastore_query.QueryOptions.projection(config)
if projection is None:
projection = self.__projection
else:
projection = projection
if not projection:
return None, None
override = set()
for prop, _ in self.__orderings:
if prop not in projection:
override.add(prop)
if not override:
return projection, None
return projection, projection + tuple(override)
def Run(self, **kwargs):
"""Return an iterable output with all results in order.
Merge sort the results. First create a list of iterators, then walk
though them and yield results in order.
Args:
kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().
Returns:
An iterator for the result set.
"""
config = _GetConfigFromKwargs(
kwargs, convert_rpc=True, config_class=datastore_query.QueryOptions
)
if config and config.keys_only:
raise datastore_errors.BadRequestError(
"keys only queries are not supported by multi-query."
)
lower_bound, upper_bound, config = self._ExtractBounds(config)
projection, override = self.__GetProjectionOverride(config)
if override:
config = datastore_query.QueryOptions(projection=override, config=config)
results = []
count = 1
log_level = logging.DEBUG - 1
for bound_query in self.__bound_queries:
logging.log(log_level, "Running query #%i" % count)
results.append(bound_query.Run(config=config))
count += 1
def GetDedupeKey(sort_order_entity):
if projection:
return (
sort_order_entity.GetEntity().key(),
frozenset(iter(sort_order_entity.GetEntity().items())),
)
else:
return sort_order_entity.GetEntity().key()
def IterateResults(results):
"""Iterator function to return all results in sorted order.
Iterate over the array of results, yielding the next element, in
sorted order. This function is destructive (results will be empty
when the operation is complete).
Args:
results: list of result iterators to merge and iterate through
Yields:
The next result in sorted order.
"""
result_heap = []
for result in results:
heap_value = MultiQuery.SortOrderEntity(result, self.__orderings)
if heap_value.GetEntity():
heapq.heappush(result_heap, heap_value)
used_keys = set()
while result_heap:
if upper_bound is not None and len(used_keys) >= upper_bound:
break
top_result = heapq.heappop(result_heap)
dedupe_key = GetDedupeKey(top_result)
if dedupe_key not in used_keys:
result = top_result.GetEntity()
if override:
for key in list(result.keys()):
if key not in projection:
del result[key]
yield result
else:
pass
used_keys.add(dedupe_key)
results_to_push = []
while result_heap:
next = heapq.heappop(result_heap)
if dedupe_key != GetDedupeKey(next):
results_to_push.append(next)
break
else:
results_to_push.append(next.GetNext())
results_to_push.append(top_result.GetNext())
for popped_result in results_to_push:
if popped_result.GetEntity():
heapq.heappush(result_heap, popped_result)
it = IterateResults(results)
try:
for _ in range(lower_bound):
next(it)
except StopIteration:
pass
return it
def Count(self, limit=1000, **kwargs):
"""Return the number of matched entities for this query.
Will return the de-duplicated count of results. Will call the more
efficient Get() function if a limit is given.
Args:
limit: maximum number of entries to count (for any result > limit, return
limit).
config: Optional Configuration to use for this request.
Returns:
count of the number of entries returned.
"""
kwargs["limit"] = limit
config = _GetConfigFromKwargs(
kwargs, convert_rpc=True, config_class=datastore_query.QueryOptions
)
projection, override = self.__GetProjectionOverride(config)
if not projection:
config = datastore_query.QueryOptions(keys_only=True, config=config)
elif override:
config = datastore_query.QueryOptions(projection=override, config=config)
lower_bound, upper_bound, config = self._ExtractBounds(config)
used_keys = set()
for bound_query in self.__bound_queries:
for result in bound_query.Run(config=config):
if projection:
dedupe_key = (result.key(), tuple(result.items()))
else:
dedupe_key = result
used_keys.add(dedupe_key)
if upper_bound and len(used_keys) >= upper_bound:
return upper_bound - lower_bound
return max(0, len(used_keys) - lower_bound)
def GetIndexList(self):
raise AssertionError(
"No index_list available for a MultiQuery (queries "
'using "IN" or "!=" operators)'
)
def GetCursor(self):
raise AssertionError(
"No cursor available for a MultiQuery (queries "
'using "IN" or "!=" operators)'
)
def _GetCompiledQuery(self):
"""Internal only, do not use."""
raise AssertionError(
"No compilation available for a MultiQuery (queries "
'using "IN" or "!=" operators)'
)
def __setitem__(self, query_filter, value):
"""Add a new filter by setting it on all subqueries.
If any of the setting operations raise an exception, the ones
that succeeded are undone and the exception is propagated
upward.
Args:
query_filter: a string of the form "property operand".
value: the value that the given property is compared against.
"""
saved_items = []
for index, query in enumerate(self.__bound_queries):
saved_items.append(query.get(query_filter, None))
try:
query[query_filter] = value
except:
for q, old_value in zip(self.__bound_queries[:index], saved_items):
if old_value is not None:
q[query_filter] = old_value
else:
del q[query_filter]
raise
def __delitem__(self, query_filter):
"""Delete a filter by deleting it from all subqueries.
If a KeyError is raised during the attempt, it is ignored, unless
every subquery raised a KeyError. If any other exception is
raised, any deletes will be rolled back.
Args:
query_filter: the filter to delete.
Raises:
KeyError: No subquery had an entry containing query_filter.
"""
subquery_count = len(self.__bound_queries)
keyerror_count = 0
saved_items = []
for index, query in enumerate(self.__bound_queries):
try:
saved_items.append(query.get(query_filter, None))
del query[query_filter]
except KeyError:
keyerror_count += 1
except:
for q, old_value in zip(self.__bound_queries[:index], saved_items):
if old_value is not None:
q[query_filter] = old_value
raise
if keyerror_count == subquery_count:
raise KeyError(query_filter)
def __iter__(self):
return iter(self.__bound_queries)
GetCompiledCursor = GetCursor
GetCompiledQuery = _GetCompiledQuery
def RunInTransaction(function, *args, **kwargs):
    """Runs a function inside a datastore transaction.

    Equivalent to RunInTransactionOptions with default options: the
    user-provided function is retried the default number of times on
    transaction collisions.

    Args:
      function: a function to be run inside the transaction on all remaining
        arguments
      *args: positional arguments for function.
      **kwargs: keyword arguments for function.

    Returns:
      the function's return value, if any

    Raises:
      TransactionFailedError, if the transaction could not be committed.
    """
    return RunInTransactionOptions(None, function, *args, **kwargs)
def RunInTransactionCustomRetries(retries, function, *args, **kwargs):
    """Runs a function inside a datastore transaction with custom retries.

    Runs the user-provided function inside a transaction, retrying the
    specified number of times on collision.

    Args:
      retries: number of retries (not counting the initial try)
      function: a function to be run inside the transaction on all remaining
        arguments
      *args: positional arguments for function.
      **kwargs: keyword arguments for function.

    Returns:
      the function's return value, if any

    Raises:
      TransactionFailedError, if the transaction could not be committed.
    """
    retry_options = datastore_rpc.TransactionOptions(retries=retries)
    return RunInTransactionOptions(retry_options, function, *args, **kwargs)
def RunInTransactionOptions(options, function, *args, **kwargs):
    """Runs a function inside a datastore transaction.
    Runs the user-provided function inside a full-featured, ACID datastore
    transaction. Every Put, Get, and Delete call in the function is made within
    the transaction. All entities involved in these calls must belong to the
    same entity group. Queries are supported as long as they specify an
    ancestor belonging to the same entity group.
    The trailing arguments are passed to the function as positional arguments.
    If the function returns a value, that value will be returned by
    RunInTransaction. Otherwise, it will return None.
    The function may raise any exception to roll back the transaction instead of
    committing it. If this happens, the transaction will be rolled back and the
    exception will be re-raised up to RunInTransaction's caller.
    If you want to roll back intentionally, but don't have an appropriate
    exception to raise, you can raise an instance of datastore_errors.Rollback.
    It will cause a rollback, but will *not* be re-raised up to the caller.
    The function may be run more than once, so it should be idempotent. It
    should avoid side effects, and it shouldn't have *any* side effects that
    aren't safe to occur multiple times. This includes modifying the arguments,
    since they persist across invocations of the function. However, this doesn't
    include Put, Get, and Delete calls, of course.
    Example usage:
    > def decrement(key, amount=1):
    >   counter = datastore.Get(key)
    >   counter['count'] -= amount
    >   if counter['count'] < 0:    # don't let the counter go negative
    >     raise datastore_errors.Rollback()
    >   datastore.Put(counter)
    >
    > counter = datastore.Query('Counter', {'name': 'foo'})
    > datastore.RunInTransaction(decrement, counter.key(), amount=5)
    Transactions satisfy the traditional ACID properties. They are:
    - Atomic. All of a transaction's operations are executed or none of them are.
    - Consistent. The datastore's state is consistent before and after a
      transaction, whether it committed or rolled back. Invariants such as
      "every entity has a primary key" are preserved.
    - Isolated. Transactions operate on a snapshot of the datastore. Other
      datastore operations do not see intermediated effects of the transaction;
      they only see its effects after it has committed.
    - Durable. On commit, all writes are persisted to the datastore.
    Nested transactions are not supported.
    Args:
      options: TransactionOptions specifying options (number of retries, etc) for
        this transaction
      function: a function to be run inside the transaction on all remaining
        arguments
      *args: positional arguments for function.
      **kwargs: keyword arguments for function.
    Returns:
      the function's return value, if any
    Raises:
      TransactionFailedError, if the transaction could not be committed.
    """
    options = datastore_rpc.TransactionOptions(options)
    if IsInTransaction():
        if options.propagation in (None, datastore_rpc.TransactionOptions.NESTED):
            # Default (None) propagation behaves like NESTED here, and nesting
            # is not supported.
            raise datastore_errors.BadRequestError(
                "Nested transactions are not supported."
            )
        elif options.propagation is datastore_rpc.TransactionOptions.INDEPENDENT:
            # Pause the current transaction: restore the pre-transaction
            # connection, run a fresh transaction, then re-install the
            # original transactional connection.
            txn_connection = _GetConnection()
            _SetConnection(_thread_local.old_connection)
            try:
                return RunInTransactionOptions(options, function, *args, **kwargs)
            finally:
                _SetConnection(txn_connection)
        # Remaining propagation modes join the existing transaction: just run
        # the function on the current connection.
        return function(*args, **kwargs)
    if options.propagation is datastore_rpc.TransactionOptions.MANDATORY:
        raise datastore_errors.BadRequestError("Requires an existing transaction.")
    retries = options.retries
    if retries is None:
        retries = DEFAULT_TRANSACTION_RETRIES
    # Remember the non-transactional connection so nested INDEPENDENT /
    # NonTransactional calls can temporarily restore it.
    _thread_local.old_connection = _GetConnection()
    for _ in range(0, retries + 1):
        new_connection = _thread_local.old_connection.new_transaction(options)
        _SetConnection(new_connection)
        try:
            ok, result = _DoOneTry(new_connection, function, args, kwargs)
            if ok:
                return result
        finally:
            # Always restore the caller's connection, whether the attempt
            # committed, collided, or raised.
            _SetConnection(_thread_local.old_connection)
    raise datastore_errors.TransactionFailedError(
        "The transaction could not be committed. Please try again."
    )
def _DoOneTry(new_connection, function, args, kwargs):
"""Helper to call a function in a transaction, once.
Args:
new_connection: The new, transactional, connection object.
function: The function to call.
*args: Tuple of positional arguments.
**kwargs: Dict of keyword arguments.
"""
try:
result = function(*args, **kwargs)
except:
original_exception = sys.exc_info()
try:
new_connection.rollback()
except Exception:
logging.exception("Exception sending Rollback:")
type, value, trace = original_exception
if isinstance(value, datastore_errors.Rollback):
return True, None
else:
raise type(value).with_traceback(trace)
else:
if new_connection.commit():
return True, result
else:
logging.warning("Transaction collision. Retrying... %s", "")
return False, None
def _MaybeSetupTransaction(request, keys):
    """Begin a transaction, if necessary, and populate it in the request.

    This API exists for internal backwards compatibility, primarily with
    api/taskqueue/taskqueue.py.

    Args:
      request: A protobuf with a mutable_transaction() method.
      keys: Unused.

    Returns:
      A transaction if we're inside a transaction, otherwise None
    """
    connection = _GetConnection()
    return connection._set_request_transaction(request)
def IsInTransaction():
    """Determine whether already running in transaction.

    Returns:
      True if already running in transaction, else False.
    """
    current_connection = _GetConnection()
    return isinstance(current_connection, datastore_rpc.TransactionalConnection)
def Transactional(_func=None, **kwargs):
    """A decorator that makes sure a function is run in a transaction.

    Defaults propagation to datastore_rpc.TransactionOptions.ALLOWED, which means
    any existing transaction will be used in place of creating a new one.

    WARNING: Reading from the datastore while in a transaction will not see any
    changes made in the same transaction. If the function being decorated relies
    on seeing all changes made in the calling scope, set
    propagation=datastore_rpc.TransactionOptions.NESTED.

    Args:
      _func: do not use.
      **kwargs: TransactionOptions configuration options.

    Returns:
      A wrapper for the given function that creates a new transaction if needed.
    """
    # Local import keeps the module's import surface unchanged.
    import functools

    if _func is not None:
        # Used as a bare decorator (@Transactional with no arguments).
        return Transactional()(_func)
    if not kwargs.pop("require_new", None):
        # Unless a new transaction is explicitly required, join an existing
        # one when present.
        kwargs.setdefault("propagation", datastore_rpc.TransactionOptions.ALLOWED)
    options = datastore_rpc.TransactionOptions(**kwargs)

    def outer_wrapper(func):
        @functools.wraps(func)  # preserve the wrapped function's metadata
        def inner_wrapper(*args, **kwds):
            return RunInTransactionOptions(options, func, *args, **kwds)

        return inner_wrapper

    return outer_wrapper
@datastore_rpc._positional(1)
def NonTransactional(_func=None, allow_existing=True):
    """A decorator that ensures a function is run outside a transaction.

    If there is an existing transaction (and allow_existing=True), the existing
    transaction is paused while the function is executed.

    Args:
      _func: do not use
      allow_existing: If false, throw an exception if called from within a
        transaction

    Returns:
      A wrapper for the decorated function that ensures it runs outside a
      transaction.
    """
    # Local import keeps the module's import surface unchanged.
    import functools

    if _func is not None:
        # Used as a bare decorator (@NonTransactional with no arguments).
        return NonTransactional()(_func)

    def outer_wrapper(func):
        @functools.wraps(func)  # preserve the wrapped function's metadata
        def inner_wrapper(*args, **kwds):
            if not IsInTransaction():
                return func(*args, **kwds)
            if not allow_existing:
                raise datastore_errors.BadRequestError(
                    "Function cannot be called from within a transaction."
                )
            # Pause the current transaction: swap in the pre-transaction
            # connection, run the function, then restore the transactional one.
            txn_connection = _GetConnection()
            _SetConnection(_thread_local.old_connection)
            try:
                return func(*args, **kwds)
            finally:
                _SetConnection(txn_connection)

        return inner_wrapper

    return outer_wrapper
def _GetCompleteKeyOrError(arg):
    """Expects an Entity or a Key, and returns the corresponding Key.

    Args:
      arg: Entity, Key, or a string encoding of a Key.

    Returns:
      Key

    Raises:
      datastore_errors.BadArgumentError: arg is not an Entity, Key or string.
      datastore_errors.BadKeyError: the key is incomplete (no id or name).
    """
    if isinstance(arg, Key):
        key = arg
    elif isinstance(arg, str):
        # Strings are treated as encoded keys.
        key = Key(arg)
    elif isinstance(arg, Entity):
        key = arg.key()
    else:
        # (Previously guarded by a redundant "not isinstance(arg, Key)" test,
        # which is always true on this branch.)
        raise datastore_errors.BadArgumentError(
            "Expects argument to be an Entity or Key; received %s (a %s)."
            % (arg, typename(arg))
        )
    assert isinstance(key, Key)
    if not key.has_id_or_name():
        raise datastore_errors.BadKeyError("Key %r is not complete." % key)
    return key
def _GetPropertyValue(entity, property):
    """Returns an entity's value for a given property name.

    Handles special properties like __key__ as well as normal properties.

    Args:
      entity: datastore.Entity
      property: str; the property name

    Returns:
      property value. For __key__, a datastore_types.Key.

    Raises:
      KeyError, if the entity does not have the given property.
    """
    if property not in datastore_types._SPECIAL_PROPERTIES:
        # Ordinary properties are plain dict-style lookups on the entity.
        return entity[property]
    if property == datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY:
        # The unapplied-log timestamp is never exposed through this helper.
        raise KeyError(property)
    assert property == datastore_types.KEY_SPECIAL_PROPERTY
    return entity.key()
def _AddOrAppend(dictionary, key, value):
"""Adds the value to the existing values in the dictionary, if any.
If dictionary[key] doesn't exist, sets dictionary[key] to value.
If dictionary[key] is not a list, sets dictionary[key] to [old_value, value].
If dictionary[key] is a list, appends value to that list.
Args:
dictionary: a dict
key, value: anything
"""
if key in dictionary:
existing_value = dictionary[key]
if isinstance(existing_value, list):
existing_value.append(value)
else:
dictionary[key] = [existing_value, value]
else:
dictionary[key] = value
class Iterator(datastore_query.ResultsIterator):
    """Thin wrapper of datastore_query.ResultsIterator.

    Deprecated, do not use, only for backwards compatability.
    """

    def _Next(self, count=None):
        # Default batch size of 20 matches the historical iterator behavior.
        limit = 20 if count is None else count
        batch = []
        for entity in self:
            if len(batch) >= limit:
                break
            batch.append(entity)
        return batch

    def GetCompiledCursor(self, query):
        # *query* is accepted for interface compatibility only.
        return self.cursor()

    def GetIndexList(self):
        """Returns the list of indexes used to perform the query."""
        index_state_pairs = super(Iterator, self).index_list()
        return [index for index, _ in index_state_pairs]

    # Backwards-compatible aliases for the older method names.
    _Get = _Next
    index_list = GetIndexList
# Backwards-compatible aliases for names that predate the datastore_rpc-based
# implementation; older SDK modules may still reference them.
DatastoreRPC = apiproxy_stub_map.UserRPC
GetRpcFromKwargs = _GetConfigFromKwargs
_CurrentTransactionKey = IsInTransaction
_ToDatastoreError = datastore_rpc._ToDatastoreError
_DatastoreExceptionFromErrorCodeAndDetail = (
    datastore_rpc._DatastoreExceptionFromErrorCodeAndDetail
)
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PageSpeed configuration tools.
Library for parsing pagespeed configuration data from app.yaml and working
with these in memory.
"""
import google
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
# Validation patterns for values in the pagespeed stanza of app.yaml.
_URL_BLACKLIST_REGEX = r"http(s)?://\S{0,499}"  # full URL, capped length
_REWRITER_NAME_REGEX = r"[a-zA-Z0-9_]+"  # a PageSpeed rewriter identifier
_DOMAINS_TO_REWRITE_REGEX = r"(http(s)?://)?[-a-zA-Z0-9_.*]+(:\d+)?"
# Attribute names used as keys in the parsed configuration.
URL_BLACKLIST = "url_blacklist"
ENABLED_REWRITERS = "enabled_rewriters"
DISABLED_REWRITERS = "disabled_rewriters"
DOMAINS_TO_REWRITE = "domains_to_rewrite"
class MalformedPagespeedConfiguration(Exception):
    """Configuration file for PageSpeed API is malformed.

    Raised by LoadPagespeedEntry when the yaml parses but does not describe a
    single valid pagespeed configuration section.
    """
class PagespeedEntry(validation.Validated):
    """Describes the format of a pagespeed configuration from a yaml file.
    URL blacklist entries are patterns (with '?' and '*' as wildcards). Any URLs
    that match a pattern on the blacklist will not be optimized by PageSpeed.
    Rewriter names are strings (like 'CombineCss' or 'RemoveComments') describing
    individual PageSpeed rewriters. A full list of valid rewriter names can be
    found in the PageSpeed documentation.
    The domains-to-rewrite list is a whitelist of domain name patterns with '*' as
    a wildcard, optionally starting with 'http://' or 'https://'. If no protocol
    is given, 'http://' is assumed. A resource will only be rewritten if it is on
    the same domain as the HTML that references it, or if its domain is on the
    domains-to-rewrite list.
    """
    # Every attribute is optional; each value is a list of strings validated
    # against the module-level regexes.
    ATTRIBUTES = {
        URL_BLACKLIST: validation.Optional(
            validation.Repeated(validation.Regex(_URL_BLACKLIST_REGEX))
        ),
        ENABLED_REWRITERS: validation.Optional(
            validation.Repeated(validation.Regex(_REWRITER_NAME_REGEX))
        ),
        DISABLED_REWRITERS: validation.Optional(
            validation.Repeated(validation.Regex(_REWRITER_NAME_REGEX))
        ),
        DOMAINS_TO_REWRITE: validation.Optional(
            validation.Repeated(validation.Regex(_DOMAINS_TO_REWRITE_REGEX))
        ),
    }
def LoadPagespeedEntry(pagespeed_entry, open_fn=None):
    """Load a yaml file or string and return a PagespeedEntry.

    Args:
      pagespeed_entry: The contents of a pagespeed entry from a yaml file
        as a string, or an open file object.
      open_fn: Function for opening files. Unused.

    Returns:
      A PagespeedEntry instance which represents the contents of the parsed yaml.

    Raises:
      yaml_errors.EventError: An error occurred while parsing the yaml.
      MalformedPagespeedConfiguration: The configuration is parseable but invalid.
    """
    entry_builder = yaml_object.ObjectBuilder(PagespeedEntry)
    entry_handler = yaml_builder.BuilderHandler(entry_builder)
    yaml_listener.EventListener(entry_handler).Parse(pagespeed_entry)
    parsed_entries = entry_handler.GetResults()
    if not parsed_entries:
        # An empty document yields a default (all-None) entry.
        return PagespeedEntry()
    if len(parsed_entries) > 1:
        raise MalformedPagespeedConfiguration(
            "Multiple configuration sections in the yaml"
        )
    return parsed_entries[0]
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Search API module."""
from search import AtomField
from search import Cursor
from search import DateField
from search import DeleteError
from search import DeleteResult
from search import Document
from search import DOCUMENT_ID_FIELD_NAME
from search import Error
from search import ExpressionError
from search import Field
from search import FieldExpression
from search import GeoField
from search import GeoPoint
from search import get_indexes
from search import GetResponse
from search import HtmlField
from search import Index
from search import InternalError
from search import InvalidRequest
from search import LANGUAGE_FIELD_NAME
from search import MatchScorer
from search import MAXIMUM_DOCUMENT_ID_LENGTH
from search import MAXIMUM_DOCUMENTS_PER_PUT_REQUEST
from search import MAXIMUM_DOCUMENTS_RETURNED_PER_SEARCH
from search import MAXIMUM_EXPRESSION_LENGTH
from search import MAXIMUM_FIELD_ATOM_LENGTH
from search import MAXIMUM_FIELD_NAME_LENGTH
from search import MAXIMUM_FIELD_VALUE_LENGTH
from search import MAXIMUM_FIELDS_RETURNED_PER_SEARCH
from search import MAXIMUM_GET_INDEXES_OFFSET
from search import MAXIMUM_INDEX_NAME_LENGTH
from search import MAXIMUM_INDEXES_RETURNED_PER_GET_REQUEST
from search import MAXIMUM_NUMBER_FOUND_ACCURACY
from search import MAXIMUM_QUERY_LENGTH
from search import MAXIMUM_SEARCH_OFFSET
from search import MAXIMUM_SORTED_DOCUMENTS
from search import NumberField
from search import OperationResult
from search import PutError
from search import PutResult
from search import Query
from search import QueryError
from search import QueryOptions
from search import RANK_FIELD_NAME
from search import RescoringMatchScorer
from search import SCORE_FIELD_NAME
from search import ScoredDocument
from search import SearchResults
from search import SortExpression
from search import SortOptions
from search import TextField
from search import TIMESTAMP_FIELD_NAME
from search import TransientError
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub version of the Task Queue API.
This stub stores tasks and runs them via dev_appserver's AddEvent capability.
It also validates the tasks by checking their queue name against the queue.yaml.
As well as implementing Task Queue API functions, the stub exposes various other
functions that are used by the dev_appserver's admin console to display the
application's queues and tasks.
"""
# Nothing is exported publicly; the stub is consumed via the apiproxy.
__all__ = []
import base64
import bisect
import calendar
import datetime
import logging
import os
import random
import string
import threading
import time
import taskqueue_service_pb
import taskqueue
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import queueinfo
from google.appengine.api import request_info
from google.appengine.api.taskqueue import taskqueue
from google.appengine.runtime import apiproxy_errors
# Default queue rate/bucket parameters used for queues without explicit config.
DEFAULT_RATE = "5.00/s"
# Float form of DEFAULT_RATE; keep the two in sync.
DEFAULT_RATE_FLOAT = 5.0
DEFAULT_BUCKET_SIZE = 5
# Maximum amount of time into the future a task may be scheduled.
MAX_ETA = datetime.timedelta(days=30)
# Size limits: 1 MiB for pull tasks, 100 KiB for push tasks, 32 MiB/request.
MAX_PULL_TASK_SIZE_BYTES = 2**20
MAX_PUSH_TASK_SIZE_BYTES = 100 * (2**10)
MAX_TASK_SIZE = MAX_PUSH_TASK_SIZE_BYTES
MAX_REQUEST_SIZE = 32 << 20
# Headers managed by the stub itself; user copies are filtered out when task
# headers are rebuilt (see QueryTasksResponseToDict).
BUILT_IN_HEADERS = set(
    [
        "x-appengine-queuename",
        "x-appengine-taskname",
        "x-appengine-taskexecutioncount",
        "x-appengine-taskpreviousresponse",
        "x-appengine-taskretrycount",
        "x-appengine-tasketa",
        "x-appengine-development-payload",
        "content-length",
    ]
)
# Name of the queue every application gets automatically.
DEFAULT_QUEUE_NAME = "default"
# Overflows float parsing into +inf; used as an "unbounded" sentinel.
INF = 1e500
QUEUE_MODE = taskqueue_service_pb.TaskQueueMode
# Queues that exist without being declared in queue.yaml.
# Tuple layout is presumably (rate, bucket_size, rate_string) — confirm
# against the queue construction code that consumes this mapping.
AUTOMATIC_QUEUES = {
    DEFAULT_QUEUE_NAME: (0.2, DEFAULT_BUCKET_SIZE, DEFAULT_RATE),
    "__cron": (1, 1, "1/s"),
}
def _GetAppId(request):
"""Returns the app id to use for the given request.
Args:
request: A protocol buffer that has an app_id field.
Returns:
A string containing the app id or None if no app id was specified.
"""
if request.has_app_id():
return request.app_id()
else:
return None
def _SecToUsec(t):
"""Converts a time in seconds since the epoch to usec since the epoch.
Args:
t: Time in seconds since the unix epoch
Returns:
An integer containing the number of usec since the unix epoch.
"""
return int(t * 1e6)
def _UsecToSec(t):
"""Converts a time in usec since the epoch to seconds since the epoch.
Args:
t: Time in usec since the unix epoch
Returns:
A float containing the number of seconds since the unix epoch.
"""
return t / 1e6
def _FormatEta(eta_usec):
"""Formats a task ETA as a date string in UTC."""
eta = datetime.datetime.utcfromtimestamp(_UsecToSec(eta_usec))
return eta.strftime("%Y/%m/%d %H:%M:%S")
def _TruncDelta(timedelta):
"""Strips the microseconds field from a timedelta.
Args:
timedelta: a datetime.timedelta.
Returns:
A datetime.timedelta with the microseconds field not filled.
"""
return datetime.timedelta(days=timedelta.days, seconds=timedelta.seconds)
def _EtaDelta(eta_usec, now):
"""Formats a task ETA as a relative time string."""
eta = datetime.datetime.utcfromtimestamp(_UsecToSec(eta_usec))
if eta > now:
return "%s from now" % _TruncDelta(eta - now)
else:
return "%s ago" % _TruncDelta(now - eta)
def QueryTasksResponseToDict(queue_name, task_response, now):
    """Converts a TaskQueueQueryTasksResponse_Task protobuf group into a dict.

    Args:
        queue_name: The name of the queue this task came from.
        task_response: An instance of TaskQueueQueryTasksResponse_Task.
        now: A datetime.datetime object containing the current time in UTC.

    Returns:
        A dict containing the fields used by the dev appserver's admin console.

    Raises:
        ValueError: A task response contains an unknown HTTP method type.
    """
    task = {}
    task["name"] = task_response.task_name()
    task["queue_name"] = queue_name
    task["url"] = task_response.url()
    # Map the protobuf method enum to its HTTP verb string.
    method = task_response.method()
    if method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.GET:
        task["method"] = "GET"
    elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.POST:
        task["method"] = "POST"
    elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.HEAD:
        task["method"] = "HEAD"
    elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.PUT:
        task["method"] = "PUT"
    elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.DELETE:
        task["method"] = "DELETE"
    else:
        raise ValueError("Unexpected method: %d" % method)
    task["eta"] = _FormatEta(task_response.eta_usec())
    task["eta_usec"] = task_response.eta_usec()
    task["eta_delta"] = _EtaDelta(task_response.eta_usec(), now)
    # The body is stored/transported base64-encoded.
    task["body"] = base64.b64encode(task_response.body())
    # Drop any stub-generated headers from the stored task; they are
    # re-added below with fresh values.
    headers = [
        (header.key(), header.value())
        for header in task_response.header_list()
        if header.key().lower() not in BUILT_IN_HEADERS
    ]
    headers.append(("X-AppEngine-QueueName", queue_name))
    headers.append(("X-AppEngine-TaskName", task_response.task_name()))
    headers.append(("X-AppEngine-TaskRetryCount", str(task_response.retry_count())))
    headers.append(("X-AppEngine-TaskETA", str(_UsecToSec(task_response.eta_usec()))))
    headers.append(("X-AppEngine-Development-Payload", "1"))
    # NOTE(review): this is the length of the base64-encoded body, not the
    # raw payload — confirm the admin console expects the encoded length.
    headers.append(("Content-Length", str(len(task["body"]))))
    # Only supply a default Content-Type if the task did not set one.
    if "content-type" not in frozenset(key.lower() for key, _ in headers):
        headers.append(("Content-Type", "application/octet-stream"))
    headers.append(
        ("X-AppEngine-TaskExecutionCount", str(task_response.execution_count()))
    )
    # Expose the previous run's HTTP status, when one has been recorded.
    if task_response.has_runlog() and task_response.runlog().has_response_code():
        headers.append(
            (
                "X-AppEngine-TaskPreviousResponse",
                str(task_response.runlog().response_code()),
            )
        )
    task["headers"] = headers
    return task
class _Group(object):
    """A taskqueue group.

    This class contains all of the queues for an application.
    """

    def __init__(
        self,
        queue_yaml_parser=None,
        app_id=None,
        _all_queues_valid=False,
        _update_newest_eta=None,
        _testing_validate_state=False,
    ):
        """Constructor.

        Args:
            queue_yaml_parser: A function that takes no parameters and returns
                the parsed results of the queue.yaml file. If this queue is not
                based on a queue.yaml file use None.
            app_id: The app id this Group is representing or None if it is the
                currently running application.
            _all_queues_valid: Automatically generate queues on first access.
            _update_newest_eta: Callable for automatically executing tasks.
                Takes the ETA of the task in seconds since the epoch, the
                queue_name and a task name. May be None if automatic task
                running is disabled.
            _testing_validate_state: Should this _Group and all of its _Queues
                validate their state after each operation? This should only be
                used during testing of the taskqueue_stub.
        """
        # Maps queue name -> _Queue instance. A value of None marks a
        # tombstoned (deleted) queue; see DeleteQueue_Rpc.
        self._queues = {}
        self._queue_yaml_parser = queue_yaml_parser
        self._all_queues_valid = _all_queues_valid
        # Counter backing _ChooseTaskName's generated names.
        self._next_task_id = 1
        self._app_id = app_id
        if _update_newest_eta is None:
            # No-op stand-in so callers can invoke the callback unconditionally.
            self._update_newest_eta = lambda x: None
        else:
            self._update_newest_eta = _update_newest_eta
        self._testing_validate_state = _testing_validate_state

    def GetQueuesAsDicts(self):
        """Gets all the applications's queues.

        Returns:
            A list of dictionaries, where each dictionary contains one queue's
            attributes. E.g.:
                [{'name': 'some-queue',
                  'max_rate': '1/s',
                  'bucket_size': 5,
                  'oldest_task': '2009/02/02 05:37:42',
                  'eta_delta': '0:00:06.342511 ago',
                  'tasks_in_queue': 12,
                  'acl': ['user1@gmail.com']}, ...]
            The list of queues always includes the default queue.
        """
        self._ReloadQueuesFromYaml()
        now = datetime.datetime.utcnow()
        # NOTE(review): this assumes no tombstoned (None) entries are present;
        # a queue deleted via DeleteQueue_Rpc would raise AttributeError here
        # — confirm tombstones cannot reach this path.
        queues = []
        for queue_name, queue in sorted(self._queues.items()):
            queue_dict = {}
            queues.append(queue_dict)
            queue_dict["name"] = queue_name
            queue_dict["bucket_size"] = queue.bucket_capacity
            if queue.user_specified_rate is not None:
                queue_dict["max_rate"] = queue.user_specified_rate
            else:
                queue_dict["max_rate"] = ""
            if queue.queue_mode == QUEUE_MODE.PULL:
                queue_dict["mode"] = "pull"
            else:
                queue_dict["mode"] = "push"
            queue_dict["acl"] = queue.acl
            # Oldest() returns the smallest ETA in usec, or a falsy value when
            # the queue is empty.
            if queue.Oldest():
                queue_dict["oldest_task"] = _FormatEta(queue.Oldest())
                queue_dict["eta_delta"] = _EtaDelta(queue.Oldest(), now)
            else:
                queue_dict["oldest_task"] = ""
                queue_dict["eta_delta"] = ""
            queue_dict["tasks_in_queue"] = queue.Count()
            # Only the retry fields actually present on the proto are exposed.
            if queue.retry_parameters:
                retry_proto = queue.retry_parameters
                retry_dict = {}
                if retry_proto.has_retry_limit():
                    retry_dict["retry_limit"] = retry_proto.retry_limit()
                if retry_proto.has_age_limit_sec():
                    retry_dict["age_limit_sec"] = retry_proto.age_limit_sec()
                if retry_proto.has_min_backoff_sec():
                    retry_dict["min_backoff_sec"] = retry_proto.min_backoff_sec()
                if retry_proto.has_max_backoff_sec():
                    retry_dict["max_backoff_sec"] = retry_proto.max_backoff_sec()
                if retry_proto.has_max_doublings():
                    retry_dict["max_doublings"] = retry_proto.max_doublings()
                queue_dict["retry_parameters"] = retry_dict
        return queues

    def HasQueue(self, queue_name):
        """Check if the specified queue_name references a valid queue.

        Args:
            queue_name: The name of the queue to check.

        Returns:
            True if the queue exists, False otherwise.
        """
        self._ReloadQueuesFromYaml()
        # Tombstoned queues are present in the dict with a None value and are
        # reported as not existing.
        return queue_name in self._queues and (self._queues[queue_name] is not None)

    def GetQueue(self, queue_name):
        """Gets the _Queue instance for the specified queue.

        Args:
            queue_name: The name of the queue to fetch.

        Returns:
            The _Queue instance for the specified queue.

        Raises:
            KeyError if the queue does not exist.
        """
        self._ReloadQueuesFromYaml()
        return self._queues[queue_name]

    def GetNextPushTask(self):
        """Finds the task with the lowest eta.

        Returns:
            A tuple containing the queue and task instance for the task with
            the lowest eta, or (None, None) if there are no tasks.
        """
        min_eta = INF
        result = None, None
        for queue in self._queues.values():
            # Pull queues are never executed automatically.
            if queue.queue_mode == QUEUE_MODE.PULL:
                continue
            task = queue.OldestTask()
            if not task:
                continue
            if task.eta_usec() < min_eta:
                result = queue, task
                min_eta = task.eta_usec()
        return result

    def _ConstructQueue(self, queue_name, *args, **kwargs):
        # The validate-state flag is owned by this _Group and forwarded to
        # every _Queue it creates; callers may not override it.
        if "_testing_validate_state" in kwargs:
            raise TypeError(
                "_testing_validate_state should not be passed to _ConstructQueue"
            )
        kwargs["_testing_validate_state"] = self._testing_validate_state
        self._queues[queue_name] = _Queue(queue_name, *args, **kwargs)

    def _ConstructAutomaticQueue(self, queue_name):
        # Known automatic queues get their canned (rate, bucket, rate-string)
        # settings; anything else is only allowed when all queue names are
        # treated as valid.
        if queue_name in AUTOMATIC_QUEUES:
            self._ConstructQueue(queue_name, *AUTOMATIC_QUEUES[queue_name])
        else:
            assert self._all_queues_valid
            self._ConstructQueue(queue_name)

    def _ReloadQueuesFromYaml(self):
        """Update the queue map with the contents of the queue.yaml file.

        This function will remove queues that no longer exist in the
        queue.yaml file.

        If no queue yaml parser has been defined, this function is a no-op.
        """
        if not self._queue_yaml_parser:
            return
        queue_info = self._queue_yaml_parser()
        if queue_info and queue_info.queue:
            queues = queue_info.queue
        else:
            queues = []
        old_queues = set(self._queues)
        new_queues = set()
        for entry in queues:
            queue_name = entry.name
            new_queues.add(queue_name)
            retry_parameters = None
            if entry.bucket_size:
                bucket_size = entry.bucket_size
            else:
                bucket_size = DEFAULT_BUCKET_SIZE
            if entry.retry_parameters:
                retry_parameters = queueinfo.TranslateRetryParameters(
                    entry.retry_parameters
                )
            # Warn on rate/mode combinations that don't make sense, but keep
            # loading the queue regardless.
            if entry.mode == "pull":
                mode = QUEUE_MODE.PULL
                if entry.rate is not None:
                    logging.warning(
                        "Refill rate must not be specified for pull-based queue. "
                        "Please check queue.yaml file."
                    )
            else:
                mode = QUEUE_MODE.PUSH
                if entry.rate is None:
                    logging.warning(
                        "Refill rate must be specified for push-based queue. "
                        "Please check queue.yaml file."
                    )
            max_rate = entry.rate
            if entry.acl is not None:
                acl = taskqueue_service_pb.TaskQueueAcl()
                for acl_entry in entry.acl:
                    acl.add_user_email(acl_entry.user_email)
            else:
                acl = None
            # Create the queue (also resurrecting tombstoned entries, which
            # are stored as None), or update an existing one in place so its
            # tasks are preserved.
            if self._queues.get(queue_name) is None:
                self._ConstructQueue(
                    queue_name,
                    bucket_capacity=bucket_size,
                    user_specified_rate=max_rate,
                    queue_mode=mode,
                    acl=acl,
                    retry_parameters=retry_parameters,
                )
            else:
                queue = self._queues[queue_name]
                queue.bucket_size = bucket_size
                queue.user_specified_rate = max_rate
                queue.acl = acl
                queue.queue_mode = mode
                queue.retry_parameters = retry_parameters
                if mode == QUEUE_MODE.PUSH:
                    # Re-arm automatic execution for the oldest pending task.
                    eta = queue.Oldest()
                    if eta:
                        self._update_newest_eta(_UsecToSec(eta))
        if DEFAULT_QUEUE_NAME not in self._queues:
            self._ConstructAutomaticQueue(DEFAULT_QUEUE_NAME)
        # The default queue is always kept, even if absent from queue.yaml.
        new_queues.add(DEFAULT_QUEUE_NAME)
        if not self._all_queues_valid:
            # Drop queues that disappeared from queue.yaml.
            for queue_name in old_queues - new_queues:
                del self._queues[queue_name]

    def _ValidateQueueName(self, queue_name):
        """Tests if the specified queue exists and creates it if needed.

        This function replicates the behaviour of the taskqueue service by
        automatically creating the 'automatic' queues when they are first
        accessed.

        Args:
            queue_name: The name queue of the queue to check.

        Returns:
            If there are no problems, returns TaskQueueServiceError.OK.
            Otherwise returns the correct constant from TaskQueueServiceError.
        """
        if not queue_name:
            return taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_NAME
        elif queue_name not in self._queues:
            if queue_name in AUTOMATIC_QUEUES or self._all_queues_valid:
                self._ConstructAutomaticQueue(queue_name)
            else:
                return taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE
        elif self._queues[queue_name] is None:
            # Present with a None value == deleted (tombstoned) queue.
            return taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE
        return taskqueue_service_pb.TaskQueueServiceError.OK

    def _CheckQueueForRpc(self, queue_name):
        """Ensures the specified queue exists and creates it if needed.

        This function replicates the behaviour of the taskqueue service by
        automatically creating the 'automatic' queues when they are first
        accessed.

        Args:
            queue_name: The name queue of the queue to check

        Raises:
            ApplicationError: If the queue name is invalid, tombstoned or does
                not exist.
        """
        self._ReloadQueuesFromYaml()
        response = self._ValidateQueueName(queue_name)
        if response != taskqueue_service_pb.TaskQueueServiceError.OK:
            raise apiproxy_errors.ApplicationError(response)

    def _ChooseTaskName(self):
        """Returns a string containing a unique task name."""
        self._next_task_id += 1
        return "task%d" % (self._next_task_id - 1)

    def _VerifyTaskQueueAddRequest(self, request, now):
        """Checks that a TaskQueueAddRequest is valid.

        Checks that a TaskQueueAddRequest specifies a valid eta and a valid
        queue.

        Args:
            request: The taskqueue_service_pb.TaskQueueAddRequest to validate.
            now: A datetime.datetime object containing the current time in UTC.

        Returns:
            A taskqueue_service_pb.TaskQueueServiceError indicating any
            problems with the request or
            taskqueue_service_pb.TaskQueueServiceError.OK if it is valid.
        """
        if request.eta_usec() < 0:
            return taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA
        eta = datetime.datetime.utcfromtimestamp(_UsecToSec(request.eta_usec()))
        # ETAs too far in the future are rejected.
        max_eta = now + MAX_ETA
        if eta > max_eta:
            return taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA
        queue_name_response = self._ValidateQueueName(request.queue_name())
        if queue_name_response != taskqueue_service_pb.TaskQueueServiceError.OK:
            return queue_name_response
        # Cron-scheduled tasks may only be added for an explicit app id.
        if request.has_crontimetable() and self._app_id is None:
            return taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED
        # Pull and push tasks have different size ceilings.
        if request.mode() == QUEUE_MODE.PULL:
            max_task_size_bytes = MAX_PULL_TASK_SIZE_BYTES
        else:
            max_task_size_bytes = MAX_PUSH_TASK_SIZE_BYTES
        if request.ByteSize() > max_task_size_bytes:
            return taskqueue_service_pb.TaskQueueServiceError.TASK_TOO_LARGE
        return taskqueue_service_pb.TaskQueueServiceError.OK

    def BulkAdd_Rpc(self, request, response):
        """Add many tasks to a queue using a single request.

        Args:
            request: The taskqueue_service_pb.TaskQueueBulkAddRequest. See
                taskqueue_service.proto.
            response: The taskqueue_service_pb.TaskQueueBulkAddResponse. See
                taskqueue_service.proto.
        """
        self._ReloadQueuesFromYaml()
        # All add_requests in a bulk add target the same queue; the first one
        # is representative.
        if not request.add_request(0).queue_name():
            raise apiproxy_errors.ApplicationError(
                taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE
            )
        error_found = False
        # Tracks (by identity) which task results correspond to stub-chosen
        # names, so chosen_task_name can be filled in at the end.
        task_results_with_chosen_names = set()
        now = datetime.datetime.utcfromtimestamp(time.time())
        # Phase 1: validate every add_request; mark valid ones SKIPPED for now.
        for add_request in request.add_request_list():
            task_result = response.add_taskresult()
            result = self._VerifyTaskQueueAddRequest(add_request, now)
            if result == taskqueue_service_pb.TaskQueueServiceError.OK:
                if not add_request.task_name():
                    chosen_name = self._ChooseTaskName()
                    add_request.set_task_name(chosen_name)
                    task_results_with_chosen_names.add(id(task_result))
                task_result.set_result(
                    taskqueue_service_pb.TaskQueueServiceError.SKIPPED
                )
            else:
                error_found = True
                task_result.set_result(result)
        # Any validation failure aborts the whole batch before phase 2.
        if error_found:
            return
        # Phase 2: hand the batch to the datastore (transactional) or to the
        # queue directly.
        if request.add_request(0).has_transaction():
            self._TransactionalBulkAdd(request)
        else:
            self._NonTransactionalBulkAdd(request, response, now)
        # Phase 3: promote remaining SKIPPED markers to OK and report any
        # stub-generated names back to the caller.
        for add_request, task_result in zip(
            request.add_request_list(), response.taskresult_list()
        ):
            if (
                task_result.result()
                == taskqueue_service_pb.TaskQueueServiceError.SKIPPED
            ):
                task_result.set_result(taskqueue_service_pb.TaskQueueServiceError.OK)
            if id(task_result) in task_results_with_chosen_names:
                task_result.set_chosen_task_name(add_request.task_name())

    def _TransactionalBulkAdd(self, request):
        """Uses datastore.AddActions to associate tasks with a transaction.

        Args:
            request: The taskqueue_service_pb.TaskQueueBulkAddRequest
                containing the tasks to add. N.B. all tasks in the request
                have been validated and assigned unique names.
        """
        try:
            apiproxy_stub_map.MakeSyncCall(
                "datastore_v3", "AddActions", request, api_base_pb.VoidProto()
            )
        except apiproxy_errors.ApplicationError as e:
            # Datastore error codes are offset into the taskqueue error space.
            raise apiproxy_errors.ApplicationError(
                e.application_error
                + taskqueue_service_pb.TaskQueueServiceError.DATASTORE_ERROR,
                e.error_detail,
            )

    def _NonTransactionalBulkAdd(self, request, response, now):
        """Adds tasks to the appropriate _Queue instance.

        Args:
            request: The taskqueue_service_pb.TaskQueueBulkAddRequest
                containing the tasks to add. N.B. all tasks in the request
                have been validated and those with empty names have been
                assigned unique names.
            response: The taskqueue_service_pb.TaskQueueBulkAddResponse to
                populate with the results. N.B. the chosen_task_name field in
                the response will not be filled-in.
            now: A datetime.datetime object containing the current time in UTC.
        """
        queue_mode = request.add_request(0).mode()
        queue_name = request.add_request(0).queue_name()
        store = self._queues[queue_name]
        # The request's mode must match the target queue's mode.
        if store.queue_mode != queue_mode:
            raise apiproxy_errors.ApplicationError(
                taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_MODE
            )
        for add_request, task_result in zip(
            request.add_request_list(), response.taskresult_list()
        ):
            try:
                store.Add(add_request, now)
            except apiproxy_errors.ApplicationError as e:
                task_result.set_result(e.application_error)
            else:
                task_result.set_result(taskqueue_service_pb.TaskQueueServiceError.OK)
                # If this task is now the oldest in a push queue, re-arm the
                # automatic-execution callback.
                if (
                    store.queue_mode == QUEUE_MODE.PUSH
                    and store.Oldest() == add_request.eta_usec()
                ):
                    self._update_newest_eta(_UsecToSec(add_request.eta_usec()))

    def UpdateQueue_Rpc(self, request, response):
        """Implementation of the UpdateQueue RPC.

        Args:
            request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest.
            response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse.
        """
        queue_name = request.queue_name()
        # NOTE(review): this rebinds the `response` parameter to an error code,
        # discarding the response proto. Harmless today only because
        # _Queue.UpdateQueue_Rpc ignores its response argument — confirm, and
        # consider renaming the local.
        response = self._ValidateQueueName(queue_name)
        is_unknown_queue = (
            response == taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE
        )
        if response != taskqueue_service_pb.TaskQueueServiceError.OK and (
            not is_unknown_queue
        ):
            raise apiproxy_errors.ApplicationError(response)
        if is_unknown_queue:
            # UpdateQueue creates queues that don't exist yet.
            self._queues[queue_name] = _Queue(request.queue_name())
            # When emulating another app, seed the new queue with fake tasks.
            if self._app_id is not None:
                self._queues[queue_name].Populate(random.randint(10, 100))
        self._queues[queue_name].UpdateQueue_Rpc(request, response)

    def FetchQueues_Rpc(self, request, response):
        """Implementation of the FetchQueues RPC.

        Args:
            request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest.
            response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse.
        """
        self._ReloadQueuesFromYaml()
        for queue_name in sorted(self._queues):
            # NOTE(review): the strict `>` lets one extra queue through
            # (max_rows + 1 can be returned) — confirm whether this off-by-one
            # is intended.
            if response.queue_size() > request.max_rows():
                break
            # Skip tombstoned queues.
            if self._queues[queue_name] is None:
                continue
            self._queues[queue_name].FetchQueues_Rpc(request, response)

    def FetchQueueStats_Rpc(self, request, response):
        """Implementation of the FetchQueueStats rpc which returns 'random' data.

        This implementation loads some stats from the task store, the rest are
        random numbers.

        Args:
            request: A taskqueue_service_pb.TaskQueueFetchQueueStatsRequest.
            response: A taskqueue_service_pb.TaskQueueFetchQueueStatsResponse.
        """
        for queue_name in request.queue_name_list():
            stats = response.add_queuestats()
            if queue_name not in self._queues:
                # Unknown queues report no tasks and a sentinel ETA.
                stats.set_num_tasks(0)
                stats.set_oldest_eta_usec(-1)
                continue
            store = self._queues[queue_name]
            stats.set_num_tasks(store.Count())
            if stats.num_tasks() == 0:
                stats.set_oldest_eta_usec(-1)
            else:
                stats.set_oldest_eta_usec(store.Oldest())
            # 90% of the time, attach fabricated scanner statistics.
            if random.randint(0, 9) > 0:
                scanner_info = stats.mutable_scanner_info()
                scanner_info.set_executed_last_minute(random.randint(0, 10))
                scanner_info.set_executed_last_hour(
                    scanner_info.executed_last_minute() + random.randint(0, 100)
                )
                scanner_info.set_sampling_duration_seconds(random.random() * 10000.0)
                scanner_info.set_requests_in_flight(random.randint(0, 10))

    def QueryTasks_Rpc(self, request, response):
        """Implementation of the QueryTasks RPC.

        Args:
            request: A taskqueue_service_pb.TaskQueueQueryTasksRequest.
            response: A taskqueue_service_pb.TaskQueueQueryTasksResponse.
        """
        self._CheckQueueForRpc(request.queue_name())
        self._queues[request.queue_name()].QueryTasks_Rpc(request, response)

    def FetchTask_Rpc(self, request, response):
        """Implementation of the FetchTask RPC.

        Args:
            request: A taskqueue_service_pb.TaskQueueFetchTaskRequest.
            response: A taskqueue_service_pb.TaskQueueFetchTaskResponse.
        """
        self._ReloadQueuesFromYaml()
        self._CheckQueueForRpc(request.queue_name())
        self._queues[request.queue_name()].FetchTask_Rpc(request, response)

    def Delete_Rpc(self, request, response):
        """Implementation of the Delete RPC.

        Deletes tasks from the task store.

        Args:
            request: A taskqueue_service_pb.TaskQueueDeleteRequest.
            response: A taskqueue_service_pb.TaskQueueDeleteResponse.
        """
        self._ReloadQueuesFromYaml()

        # Emits the same result code once per requested task name.
        def _AddResultForAll(result):
            for _ in request.task_name_list():
                response.add_result(result)

        if request.queue_name() not in self._queues:
            _AddResultForAll(taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
        elif self._queues[request.queue_name()] is None:
            _AddResultForAll(
                taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE
            )
        else:
            self._queues[request.queue_name()].Delete_Rpc(request, response)

    def DeleteQueue_Rpc(self, request, response):
        """Implementation of the DeleteQueue RPC.

        Tombstones the queue.

        Args:
            request: A taskqueue_service_pb.TaskQueueDeleteQueueRequest.
            response: A taskqueue_service_pb.TaskQueueDeleteQueueResponse.
        """
        self._CheckQueueForRpc(request.queue_name())
        # None is the tombstone marker; the key is kept so later accesses can
        # distinguish "deleted" from "never existed".
        self._queues[request.queue_name()] = None

    def PauseQueue_Rpc(self, request, response):
        """Implementation of the PauseQueue RPC.

        Args:
            request: A taskqueue_service_pb.TaskQueuePauseQueueRequest.
            response: A taskqueue_service_pb.TaskQueuePauseQueueResponse.
        """
        self._CheckQueueForRpc(request.queue_name())
        self._queues[request.queue_name()].paused = request.pause()

    def PurgeQueue_Rpc(self, request, response):
        """Implementation of the PurgeQueue RPC.

        Args:
            request: A taskqueue_service_pb.TaskQueuePurgeQueueRequest.
            response: A taskqueue_service_pb.TaskQueuePurgeQueueResponse.
        """
        self._CheckQueueForRpc(request.queue_name())
        self._queues[request.queue_name()].PurgeQueue()

    def QueryAndOwnTasks_Rpc(self, request, response):
        """Implementation of the QueryAndOwnTasks RPC.

        Args:
            request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest.
            response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse.
        """
        self._CheckQueueForRpc(request.queue_name())
        self._queues[request.queue_name()].QueryAndOwnTasks_Rpc(request, response)

    def ModifyTaskLease_Rpc(self, request, response):
        """Implementation of the ModifyTaskLease RPC.

        Args:
            request: A taskqueue_service_pb.TaskQueueModifyTaskLeaseRequest.
            response: A taskqueue_service_pb.TaskQueueModifyTaskLeaseResponse.
        """
        self._CheckQueueForRpc(request.queue_name())
        self._queues[request.queue_name()].ModifyTaskLease_Rpc(request, response)
class Retry(object):
    """Task retry calculator.

    Decides whether a task may be retried and how long to back off before the
    next attempt. Retry parameters come from the task itself when present,
    else from its queue, else from service defaults.
    """

    _default_params = taskqueue_service_pb.TaskQueueRetryParameters()

    def __init__(self, task, queue):
        """Initializer.

        Args:
            task: A taskqueue_service_pb.TaskQueueQueryTasksResponse_Task
                instance. May be None.
            queue: A _Queue instance. May be None.
        """
        if task is not None and task.has_retry_parameters():
            params = task.retry_parameters()
        elif queue is not None and queue.retry_parameters is not None:
            params = queue.retry_parameters
        else:
            params = self._default_params
        self._params = params

    def CanRetry(self, retry_count, age_usec):
        """Computes whether a task can be retried.

        Args:
            retry_count: An integer specifying which retry this is.
            age_usec: An integer specifying the microseconds since the first
                try.

        Returns:
            True if a task is eligible for retrying.
        """
        has_count_limit = self._params.has_retry_limit()
        has_age_limit = self._params.has_age_limit_sec()
        age_sec = _UsecToSec(age_usec)
        # With both limits set, the task keeps retrying until BOTH are
        # exceeded (either limit still being within range permits a retry).
        if has_count_limit and has_age_limit:
            return (
                self._params.retry_limit() >= retry_count
                or self._params.age_limit_sec() >= age_sec
            )
        if has_count_limit:
            return self._params.retry_limit() >= retry_count
        if has_age_limit:
            return self._params.age_limit_sec() >= age_sec
        # No limits configured: retry forever.
        return True

    def CalculateBackoffUsec(self, retry_count):
        """Calculates time before the specified retry.

        Args:
            retry_count: An integer specifying which retry this is.

        Returns:
            The number of microseconds before a task should be retried.
        """
        # Exponential phase for the first max_doublings retries, then linear.
        doublings = min(retry_count - 1, self._params.max_doublings())
        linear_steps = retry_count - doublings
        backoff_usec = _SecToUsec(self._params.min_backoff_sec())
        if doublings > 0:
            # Cap the exponent to avoid absurd intermediate values.
            backoff_usec *= 2 ** (min(1023, doublings))
        if linear_steps > 1:
            backoff_usec *= linear_steps
        return int(min(_SecToUsec(self._params.max_backoff_sec()), backoff_usec))
class _Queue(object):
"""A Taskqueue Queue.
This class contains all of the properties of a queue and a sorted list of
tasks.
"""
    def __init__(
        self,
        queue_name,
        bucket_refill_per_second=DEFAULT_RATE_FLOAT,
        bucket_capacity=DEFAULT_BUCKET_SIZE,
        user_specified_rate=DEFAULT_RATE,
        retry_parameters=None,
        max_concurrent_requests=None,
        paused=False,
        queue_mode=QUEUE_MODE.PUSH,
        acl=None,
        _testing_validate_state=None,
    ):
        """Constructor.

        Args:
            queue_name: The name of this queue.
            bucket_refill_per_second: Token-bucket refill rate.
            bucket_capacity: Token-bucket capacity.
            user_specified_rate: The rate string from queue.yaml, or None.
            retry_parameters: A TaskQueueRetryParameters proto, or None.
            max_concurrent_requests: Concurrency limit, or None.
            paused: Whether the queue starts paused.
            queue_mode: QUEUE_MODE.PUSH or QUEUE_MODE.PULL.
            acl: A TaskQueueAcl proto, or None.
            _testing_validate_state: When true, VerifyIndexes runs after every
                locked operation (testing only).
        """
        self.queue_name = queue_name
        self.bucket_refill_per_second = bucket_refill_per_second
        self.bucket_capacity = bucket_capacity
        self.user_specified_rate = user_specified_rate
        self.retry_parameters = retry_parameters
        self.max_concurrent_requests = max_concurrent_requests
        self.paused = paused
        self.queue_mode = queue_mode
        self.acl = acl
        self._testing_validate_state = _testing_validate_state
        # Every task name ever used, including deleted ones; lets lookups
        # report TOMBSTONED_TASK rather than UNKNOWN_TASK.
        self.task_name_archive = set()
        # Three parallel sorted indexes over the same task set (see
        # VerifyIndexes for the invariants):
        #   _sorted_by_name: (name, task)
        #   _sorted_by_eta:  (eta_usec, name, task)
        #   _sorted_by_tag:  (tag, eta_usec, name, task) — tagged tasks only
        self._sorted_by_name = []
        self._sorted_by_eta = []
        self._sorted_by_tag = []
        # Guards all index mutations; acquired via the _WithLock decorator.
        self._lock = threading.Lock()
    def VerifyIndexes(self):
        """Ensures that all three indexes are in a valid state.

        This method is used by internal tests and should not need to be called
        in any other circumstances.

        Raises:
            AssertionError: if the indexes are not in a valid state.
        """
        # Each index must be sorted.
        assert self._IsInOrder(self._sorted_by_name)
        assert self._IsInOrder(self._sorted_by_eta)
        assert self._IsInOrder(self._sorted_by_tag)
        tasks_by_name = set()
        tasks_with_tags = set()
        # The name index is the source of truth: unique names, and each
        # entry's key must match its task.
        for name, task in self._sorted_by_name:
            assert name == task.task_name()
            assert name not in tasks_by_name
            tasks_by_name.add(name)
            if task.has_tag():
                tasks_with_tags.add(name)
        tasks_by_eta = set()
        # The eta index must cover exactly the same tasks as the name index.
        for eta, name, task in self._sorted_by_eta:
            assert name == task.task_name()
            assert eta == task.eta_usec()
            assert name not in tasks_by_eta
            tasks_by_eta.add(name)
        assert tasks_by_eta == tasks_by_name
        tasks_by_tag = set()
        # The tag index must cover exactly the tagged subset.
        for tag, eta, name, task in self._sorted_by_tag:
            assert name == task.task_name()
            assert eta == task.eta_usec()
            assert task.has_tag() and task.tag()
            assert tag == task.tag()
            assert name not in tasks_by_tag
            tasks_by_tag.add(name)
        assert tasks_by_tag == tasks_with_tags
@staticmethod
def _IsInOrder(l):
"""Determine if the specified list is in ascending order.
Args:
l: The list to check
Returns:
True if the list is in order, False otherwise
"""
sorted_list = sorted(l)
return l == sorted_list
def _WithLock(f):
"""Runs the decorated function within self._lock.
Args:
f: The function to be delegated to. Must be a member function (take self
as the first parameter).
Returns:
The result of f.
"""
def _Inner(self, *args, **kwargs):
with self._lock:
ret = f(self, *args, **kwargs)
if self._testing_validate_state:
self.VerifyIndexes()
return ret
_Inner.__doc__ = f.__doc__
return _Inner
@_WithLock
def UpdateQueue_Rpc(self, request, response):
"""Implementation of the UpdateQueue RPC.
Args:
request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest.
response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse.
"""
assert request.queue_name() == self.queue_name
self.bucket_refill_per_second = request.bucket_refill_per_second()
self.bucket_capacity = request.bucket_capacity()
if request.has_user_specified_rate():
self.user_specified_rate = request.user_specified_rate()
else:
self.user_specified_rate = None
if request.has_retry_parameters():
self.retry_parameters = request.retry_parameters()
else:
self.retry_parameters = None
if request.has_max_concurrent_requests():
self.max_concurrent_requests = request.max_concurrent_requests()
else:
self.max_concurrent_requests = None
self.queue_mode = request.mode()
if request.has_acl():
self.acl = request.acl()
else:
self.acl = None
@_WithLock
def FetchQueues_Rpc(self, request, response):
"""Fills out a queue message on the provided TaskQueueFetchQueuesResponse.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse.
"""
response_queue = response.add_queue()
response_queue.set_queue_name(self.queue_name)
response_queue.set_bucket_refill_per_second(self.bucket_refill_per_second)
response_queue.set_bucket_capacity(self.bucket_capacity)
if self.user_specified_rate is not None:
response_queue.set_user_specified_rate(self.user_specified_rate)
if self.max_concurrent_requests is not None:
response_queue.set_max_concurrent_requests(self.max_concurrent_requests)
if self.retry_parameters is not None:
response_queue.retry_parameters().CopyFrom(self.retry_parameters)
response_queue.set_paused(self.paused)
if self.queue_mode is not None:
response_queue.set_mode(self.queue_mode)
if self.acl is not None:
response_queue.mutable_acl().CopyFrom(self.acl)
@_WithLock
def QueryTasks_Rpc(self, request, response):
"""Implementation of the QueryTasks RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryTasksResponse.
"""
assert not request.has_start_tag()
if request.has_start_eta_usec():
tasks = self._LookupNoAcquireLock(
request.max_rows(),
name=request.start_task_name(),
eta=request.start_eta_usec(),
)
else:
tasks = self._LookupNoAcquireLock(
request.max_rows(), name=request.start_task_name()
)
for task in tasks:
response.add_task().MergeFrom(task)
@_WithLock
def FetchTask_Rpc(self, request, response):
"""Implementation of the FetchTask RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchTaskRequest.
response: A taskqueue_service_pb.TaskQueueFetchTaskResponse.
"""
task_name = request.task_name()
pos = self._LocateTaskByName(task_name)
if pos is None:
if task_name in self.task_name_archive:
error = taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK
else:
error = taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK
raise apiproxy_errors.ApplicationError(error)
_, task = self._sorted_by_name[pos]
response.mutable_task().add_task().CopyFrom(task)
@_WithLock
def Delete_Rpc(self, request, response):
"""Implementation of the Delete RPC.
Deletes tasks from the task store. We mimic a 1/20 chance of a
TRANSIENT_ERROR when the request has an app_id.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteRequest.
response: A taskqueue_service_pb.TaskQueueDeleteResponse.
"""
for taskname in request.task_name_list():
if request.has_app_id() and random.random() <= 0.05:
response.add_result(
taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR
)
else:
response.add_result(self._DeleteNoAcquireLock(taskname))
    def _QueryAndOwnTasksGetTaskList(
        self, max_rows, group_by_tag, now_eta_usec, tag=None
    ):
        """Selects up to max_rows leasable tasks (ETA <= now).

        Caller must already hold self._lock (no @_WithLock here: the method
        recurses and is invoked from already-locked RPC handlers). None is
        used as a -infinity sentinel inside the scan keys, since None sorts
        before any real value in these index tuples.
        """
        assert self._lock.locked()
        if group_by_tag and tag:
            # Explicit tag: scan only that tag's slice of the
            # (tag, eta, name, task) index.
            return self._IndexScan(
                self._sorted_by_tag,
                start_key=(
                    tag,
                    None,
                    None,
                ),
                end_key=(
                    tag,
                    now_eta_usec,
                    None,
                ),
                max_rows=max_rows,
            )
        elif group_by_tag:
            # Group-by-tag with no explicit tag: the oldest available task
            # decides which tag group gets leased.
            tasks = self._IndexScan(
                self._sorted_by_eta,
                start_key=(
                    None,
                    None,
                ),
                end_key=(
                    now_eta_usec,
                    None,
                ),
                max_rows=max_rows,
            )
            if not tasks:
                return []
            if tasks[0].has_tag():
                tag = tasks[0].tag()
                # Re-run as an explicit-tag query (first branch above).
                return self._QueryAndOwnTasksGetTaskList(
                    max_rows, True, now_eta_usec, tag
                )
            else:
                # Oldest task is untagged: lease only untagged tasks.
                return [task for task in tasks if not task.has_tag()]
        else:
            # No grouping: lease strictly by ETA order.
            return self._IndexScan(
                self._sorted_by_eta,
                start_key=(
                    None,
                    None,
                ),
                end_key=(
                    now_eta_usec,
                    None,
                ),
                max_rows=max_rows,
            )
    @_WithLock
    def QueryAndOwnTasks_Rpc(self, request, response):
        """Implementation of the QueryAndOwnTasks RPC (pull-queue leasing).

        Args:
            request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest.
            response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse.

        Raises:
            ApplicationError: INVALID_QUEUE_MODE for push queues, or
                INVALID_REQUEST for bad lease/max_tasks/tag arguments.
        """
        # Leasing is only defined for pull queues.
        if self.queue_mode != QUEUE_MODE.PULL:
            raise apiproxy_errors.ApplicationError(
                taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_MODE
            )
        lease_seconds = request.lease_seconds()
        if lease_seconds < 0:
            raise apiproxy_errors.ApplicationError(
                taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST
            )
        max_tasks = request.max_tasks()
        if max_tasks <= 0:
            raise apiproxy_errors.ApplicationError(
                taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST
            )
        # A tag filter only makes sense together with group_by_tag.
        if request.has_tag() and not request.group_by_tag():
            raise apiproxy_errors.ApplicationError(
                taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST,
                "Tag specified, but group_by_tag was not.",
            )
        now_eta_usec = _SecToUsec(time.time())
        tasks = self._QueryAndOwnTasksGetTaskList(
            max_tasks, request.group_by_tag(), now_eta_usec, request.tag()
        )
        tasks_to_delete = []
        for task in tasks:
            retry = Retry(task, self)
            # A lease counts as an attempt; tasks whose retry budget is
            # exhausted are dropped instead of leased.
            if not retry.CanRetry(task.retry_count() + 1, 0):
                logging.warning(
                    "Task %s in queue %s cannot be leased again after %d leases.",
                    task.task_name(),
                    self.queue_name,
                    task.retry_count(),
                )
                tasks_to_delete.append(task)
                continue
            # Leasing == pushing the task's ETA into the future by the lease
            # duration, which hides it from other consumers until it expires.
            self._PostponeTaskNoAcquireLock(
                task, now_eta_usec + _SecToUsec(lease_seconds)
            )
            task_response = response.add_task()
            task_response.set_task_name(task.task_name())
            task_response.set_eta_usec(task.eta_usec())
            task_response.set_retry_count(task.retry_count())
            if task.has_tag():
                task_response.set_tag(task.tag())
            task_response.set_body(task.body())
        # Deletion is deferred so it doesn't disturb the lease loop above.
        for task in tasks_to_delete:
            self._DeleteNoAcquireLock(task.task_name())
    @_WithLock
    def ModifyTaskLease_Rpc(self, request, response):
        """Implementation of the ModifyTaskLease RPC (extend/shorten a lease).

        The caller proves ownership of the lease by echoing back the task's
        current ETA; a mismatched or past ETA means the lease has expired.

        Args:
            request: A taskqueue_service_pb.TaskQueueModifyTaskLeaseRequest.
            response: A taskqueue_service_pb.TaskQueueModifyTaskLeaseResponse.

        Raises:
            ApplicationError: on wrong queue mode, paused queue, bad lease
                duration, unknown/tombstoned task, or an expired lease.
        """
        if self.queue_mode != QUEUE_MODE.PULL:
            raise apiproxy_errors.ApplicationError(
                taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_MODE
            )
        if self.paused:
            raise apiproxy_errors.ApplicationError(
                taskqueue_service_pb.TaskQueueServiceError.QUEUE_PAUSED
            )
        lease_seconds = request.lease_seconds()
        if lease_seconds < 0:
            raise apiproxy_errors.ApplicationError(
                taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST
            )
        pos = self._LocateTaskByName(request.task_name())
        if pos is None:
            # Deleted tasks and never-existing tasks get distinct errors.
            if request.task_name() in self.task_name_archive:
                raise apiproxy_errors.ApplicationError(
                    taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK
                )
            else:
                raise apiproxy_errors.ApplicationError(
                    taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK
                )
        _, task = self._sorted_by_name[pos]
        # The ETA acts as the lease token: it must match exactly...
        if task.eta_usec() != request.eta_usec():
            raise apiproxy_errors.ApplicationError(
                taskqueue_service_pb.TaskQueueServiceError.TASK_LEASE_EXPIRED
            )
        now_usec = _SecToUsec(time.time())
        # ...and must still be in the future (an ETA in the past means the
        # lease already lapsed).
        if task.eta_usec() < now_usec:
            raise apiproxy_errors.ApplicationError(
                taskqueue_service_pb.TaskQueueServiceError.TASK_LEASE_EXPIRED
            )
        # Extend the lease without counting it as a retry.
        future_eta_usec = now_usec + _SecToUsec(lease_seconds)
        self._PostponeTaskNoAcquireLock(task, future_eta_usec, increase_retries=False)
        response.set_updated_eta_usec(future_eta_usec)
@_WithLock
def IncRetryCount(self, task_name):
    """Bump the retry count of the named task by one.

    Args:
      task_name: The name of the task to update. The task must exist.
    """
    task_pos = self._LocateTaskByName(task_name)
    assert (
        task_pos is not None
    ), "Task does not exist when trying to increase retry count."
    self._IncRetryCount(self._sorted_by_name[task_pos][1])
def _IncRetryCount(self, task):
    # Internal worker: the caller must already hold self._lock.
    assert self._lock.locked()
    # Retry and execution counters advance together in this stub.
    task.set_retry_count(task.retry_count() + 1)
    task.set_execution_count(task.execution_count() + 1)
@_WithLock
def GetTasksAsDicts(self):
    """Gets all of the tasks in this queue, in eta order.

    Returns:
      A list of dictionaries, where each dictionary contains one task's
      attributes. E.g.
        [{'name': 'task-123',
          'queue_name': 'default',
          'url': '/update',
          'method': 'GET',
          'eta': '2009/02/02 05:37:42',
          'eta_delta': '0:00:06.342511 ago',
          'body': '',
          'headers': [('user-header', 'some-value'),
                      ('X-AppEngine-QueueName', 'update-queue'),
                      ('X-AppEngine-TaskName', 'task-123'),
                      ('X-AppEngine-TaskExecutionCount', '1'),
                      ('X-AppEngine-TaskRetryCount', '1'),
                      ('X-AppEngine-TaskETA', '1234567890.123456'),
                      ('X-AppEngine-Development-Payload', '1'),
                      ('X-AppEngine-TaskPreviousResponse', '300'),
                      ('Content-Length', 0),
                      ('Content-Type', 'application/octet-stream')]}, ...]

    Raises:
      ValueError: A task request contains an unknown HTTP method type.
    """
    tasks = []
    # A single 'now' is captured so all eta_delta values share a reference.
    now = datetime.datetime.utcnow()
    for _, _, task_response in self._sorted_by_eta:
        tasks.append(QueryTasksResponseToDict(self.queue_name, task_response, now))
    return tasks
@_WithLock
def GetTaskAsDict(self, task_name):
    """Gets a specific task from this queue.

    Args:
      task_name: The name of the task to look up.

    Returns:
      A dictionary containing one task's attributes (same shape as one
      element of GetTasksAsDicts), or None when no task with that exact
      name is stored. E.g.
        {'name': 'task-123',
         'queue_name': 'default',
         'url': '/update',
         'method': 'GET',
         'eta': '2009/02/02 05:37:42',
         'eta_delta': '0:00:06.342511 ago',
         'body': '',
         'headers': [('user-header', 'some-value'),
                     ('X-AppEngine-QueueName', 'update-queue'),
                     ('X-AppEngine-TaskName', 'task-123'),
                     ('X-AppEngine-TaskExecutionCount', '1'),
                     ('X-AppEngine-TaskRetryCount', '1'),
                     ('X-AppEngine-TaskETA', '1234567890.123456'),
                     ('X-AppEngine-Development-Payload', '1'),
                     ('X-AppEngine-TaskPreviousResponse', '300'),
                     ('Content-Length', 0),
                     ('Content-Type', 'application/octet-stream')]}

    Raises:
      ValueError: A task request contains an unknown HTTP method type.
    """
    task_responses = self._LookupNoAcquireLock(maximum=1, name=task_name)
    if not task_responses:
        return
    (task_response,) = task_responses
    # Lookup returns tasks sorted from 'name' onward, so the first hit may
    # be a different task; require an exact name match.
    if task_response.task_name() != task_name:
        return
    now = datetime.datetime.utcnow()
    return QueryTasksResponseToDict(self.queue_name, task_response, now)
@_WithLock
def PurgeQueue(self):
    """Removes all content from the queue by resetting every index."""
    # Note: task_name_archive is intentionally left intact, so purged names
    # remain tombstoned.
    self._sorted_by_name, self._sorted_by_eta, self._sorted_by_tag = [], [], []
@_WithLock
def _GetTasks(self):
    """Helper method for tests returning all tasks sorted by eta.

    Returns:
      A list of taskqueue_service_pb.TaskQueueQueryTasksResponse_Task objects
      sorted by eta.
    """
    # The decorator acquires the lock; delegate to the no-lock worker.
    return self._GetTasksNoAcquireLock()
def _GetTasksNoAcquireLock(self):
    """Return every stored task in eta order; caller must hold the lock.

    Returns:
      A list of taskqueue_service_pb.TaskQueueQueryTasksResponse_Task objects
      sorted by eta.
    """
    assert self._lock.locked()
    # Each _sorted_by_eta entry is an (eta, task_name, task) triple.
    return [entry[2] for entry in self._sorted_by_eta]
def _InsertTask(self, task):
    """Insert a task into the store, keeps lists sorted.

    The task is added to all applicable indexes (_sorted_by_eta,
    _sorted_by_tag when tagged, _sorted_by_name) and its name is recorded
    in the tombstone archive. Caller must hold the lock.

    Args:
      task: the new task, a TaskQueueQueryTasksResponse_Task.
    """
    assert self._lock.locked()
    eta = task.eta_usec()
    name = task.task_name()
    # insort keeps each index sorted without a full re-sort.
    bisect.insort_left(self._sorted_by_eta, (eta, name, task))
    if task.has_tag():
        bisect.insort_left(self._sorted_by_tag, (task.tag(), eta, name, task))
    bisect.insort_left(self._sorted_by_name, (name, task))
    self.task_name_archive.add(name)
@_WithLock
def RunTaskNow(self, task):
    """Change the eta of a task to now.

    Args:
      task: The TaskQueueQueryTasksResponse_Task to run now. This must be
        stored in this queue (otherwise an AssertionError is raised).
    """
    # Eta 0 sorts before any real timestamp, so the task becomes the next
    # candidate for execution; running early is not counted as a retry.
    self._PostponeTaskNoAcquireLock(task, 0, increase_retries=False)
@_WithLock
def PostponeTask(self, task, new_eta_usec):
    """Postpone the task to a future time and increment the retry count.

    Args:
      task: The TaskQueueQueryTasksResponse_Task to postpone. This must be
        stored in this queue (otherwise an AssertionError is raised).
      new_eta_usec: The new eta to set on the task. This must be greater
        than the current eta on the task.
    """
    assert new_eta_usec > task.eta_usec()
    self._PostponeTaskNoAcquireLock(task, new_eta_usec)
def _PostponeTaskNoAcquireLock(self, task, new_eta_usec, increase_retries=True):
    """Move a stored task to a new eta; caller must hold the lock.

    Args:
      task: The stored TaskQueueQueryTasksResponse_Task to reschedule.
      new_eta_usec: The eta, in usec, the task is rescheduled to.
      increase_retries: When True, also bump the retry/execution counters.
    """
    assert self._lock.locked()
    if increase_retries:
        self._IncRetryCount(task)
    name = task.task_name()
    eta = task.eta_usec()
    # The eta-keyed index entries must be removed BEFORE the eta changes,
    # otherwise they could never be found again.
    assert self._RemoveTaskFromIndex(self._sorted_by_eta, (eta, name, None), task)
    if task.has_tag():
        assert self._RemoveTaskFromIndex(
            self._sorted_by_tag, (task.tag(), eta, name, None), task
        )
    self._PostponeTaskInsertOnly(task, new_eta_usec)
def _PostponeTaskInsertOnly(self, task, new_eta_usec):
    """Set a new eta on the task and re-insert it into the eta/tag indexes.

    The task must already have been removed from those indexes (see
    _PostponeTaskNoAcquireLock); _sorted_by_name is name-keyed and is
    unaffected by an eta change. Caller must hold the lock.

    Args:
      task: The TaskQueueQueryTasksResponse_Task to re-index.
      new_eta_usec: The eta, in usec, to store on the task.
    """
    assert self._lock.locked()
    task.set_eta_usec(new_eta_usec)
    name = task.task_name()
    bisect.insort_left(self._sorted_by_eta, (new_eta_usec, name, task))
    if task.has_tag():
        tag = task.tag()
        bisect.insort_left(self._sorted_by_tag, (tag, new_eta_usec, name, task))
@_WithLock
def Lookup(self, maximum, name=None, eta=None):
    """Lookup a number of sorted tasks from the store.

    If 'eta' is specified, the tasks are looked up in a list sorted by 'eta',
    then 'name'. Otherwise they are sorted by 'name'. We need to be able to
    sort by 'eta' and 'name' because tasks can have identical eta. If you had
    20 tasks with the same ETA, you wouldn't be able to page past them, since
    the 'next eta' would give the first one again. Names are unique, though.

    Args:
      maximum: the maximum number of tasks to return.
      name: a task name to start with.
      eta: an eta to start with.

    Returns:
      A list of up to 'maximum' tasks.

    Raises:
      ValueError: if the task store gets corrupted.
    """
    return self._LookupNoAcquireLock(maximum, name, eta)
def _IndexScan(self, index, start_key, end_key=None, max_rows=None):
    """Return the result of a 'scan' over the given index.

    The scan is inclusive of start_key and exclusive of end_key. It returns
    at most max_rows from the index. Caller must hold the lock.

    Args:
      index: One of the index lists, eg self._sorted_by_tag.
      start_key: The key to start at.
      end_key: Optional end key.
      max_rows: The maximum number of rows to yield.

    Returns:
      a list of up to 'max_rows' TaskQueueQueryTasksResponse_Task instances
      from the given index, in sorted order.
    """
    assert self._lock.locked()
    start_pos = bisect.bisect_left(index, start_key)
    # INF acts as "no limit" until a real bound is computed below.
    end_pos = INF
    if end_key is not None:
        end_pos = bisect.bisect_left(index, end_key)
    if max_rows is not None:
        end_pos = min(end_pos, start_pos + max_rows)
    # Clamp to the actual index length last, so INF/overlong bounds are safe.
    end_pos = min(end_pos, len(index))
    tasks = []
    for pos in range(start_pos, end_pos):
        # The task object is always the last element of an index tuple.
        tasks.append(index[pos][-1])
    return tasks
def _LookupNoAcquireLock(self, maximum, name=None, eta=None, tag=None):
    """Worker for Lookup; scans the appropriate index. Caller holds the lock.

    Args:
      maximum: the maximum number of tasks to return.
      name: a task name to start with.
      eta: an eta to start with (selects the eta-sorted index).
      tag: a tag to filter by (selects the tag-sorted index).

    Returns:
      A list of up to 'maximum' tasks.
    """
    assert self._lock.locked()
    if tag is not None:
        return self._IndexScan(
            self._sorted_by_tag,
            start_key=(
                tag,
                eta,
                name,
            ),
            # "tag\x00" is the smallest string sorting after every key that
            # begins with 'tag', so the scan stops at the end of this tag.
            end_key=(
                "%s\x00" % tag,
                None,
                None,
            ),
            max_rows=maximum,
        )
    elif eta is not None:
        return self._IndexScan(
            self._sorted_by_eta,
            start_key=(
                eta,
                name,
            ),
            max_rows=maximum,
        )
    else:
        # (name,) sorts before any (name, task) tuple with the same name.
        return self._IndexScan(
            self._sorted_by_name, start_key=(name,), max_rows=maximum
        )
@_WithLock
def Count(self):
    """Returns the number of tasks in the store.

    _sorted_by_name holds exactly one entry per stored task, so its length
    is the task count.
    """
    return len(self._sorted_by_name)
@_WithLock
def OldestTask(self):
    """Return the task with the smallest eta, or None when the store is empty."""
    if not self._sorted_by_eta:
        return None
    # Index entries are (eta, name, task); the first entry has the oldest eta.
    return self._sorted_by_eta[0][2]
@_WithLock
def Oldest(self):
    """Return the smallest eta in the store, or None when the store is empty."""
    head = self._sorted_by_eta[0] if self._sorted_by_eta else None
    return head[0] if head is not None else None
def _LocateTaskByName(self, task_name):
    """Find the position of a task in the _sorted_by_name index.

    Caller must hold the lock.

    Args:
      task_name: Name of task to be located.

    Returns:
      Index of the task within _sorted_by_name if it exists, None otherwise.
    """
    assert self._lock.locked()
    # Probing with the 1-tuple (task_name,) lands on the first entry whose
    # name is >= task_name (shorter tuples sort before longer ones).
    candidate = bisect.bisect_left(self._sorted_by_name, (task_name,))
    if candidate >= len(self._sorted_by_name):
        return None
    if self._sorted_by_name[candidate][0] != task_name:
        return None
    return candidate
@_WithLock
def Add(self, request, now):
    """Inserts a new task into the store.

    Args:
      request: A taskqueue_service_pb.TaskQueueAddRequest.
      now: A datetime.datetime object containing the current time in UTC.

    Raises:
      apiproxy_errors.ApplicationError: If a task with the same name is
        already in the store, or the task is tombstoned.
    """
    # Reject both live duplicates and previously-deleted (tombstoned) names.
    if self._LocateTaskByName(request.task_name()) is not None:
        raise apiproxy_errors.ApplicationError(
            taskqueue_service_pb.TaskQueueServiceError.TASK_ALREADY_EXISTS
        )
    if request.task_name() in self.task_name_archive:
        raise apiproxy_errors.ApplicationError(
            taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK
        )
    now_sec = calendar.timegm(now.utctimetuple())
    # Build the stored task record by copying over the request's fields.
    task = taskqueue_service_pb.TaskQueueQueryTasksResponse_Task()
    task.set_task_name(request.task_name())
    task.set_eta_usec(request.eta_usec())
    task.set_creation_time_usec(_SecToUsec(now_sec))
    task.set_retry_count(0)
    task.set_method(request.method())
    if request.has_url():
        task.set_url(request.url())
    for keyvalue in request.header_list():
        header = task.add_header()
        header.set_key(keyvalue.key())
        header.set_value(keyvalue.value())
    if request.has_description():
        task.set_description(request.description())
    if request.has_body():
        task.set_body(request.body())
    if request.has_crontimetable():
        task.mutable_crontimetable().set_schedule(
            request.crontimetable().schedule()
        )
        task.mutable_crontimetable().set_timezone(
            request.crontimetable().timezone()
        )
    if request.has_retry_parameters():
        task.mutable_retry_parameters().CopyFrom(request.retry_parameters())
    if request.has_tag():
        task.set_tag(request.tag())
    self._InsertTask(task)
@_WithLock
def Delete(self, name):
    """Deletes a task from the store by name.

    Args:
      name: the name of the task to delete.

    Returns:
      TaskQueueServiceError.UNKNOWN_TASK: if the task is unknown.
      TaskQueueServiceError.INTERNAL_ERROR: if the store is corrupted.
      TaskQueueServiceError.TOMBSTONED: if the task was deleted.
      TaskQueueServiceError.OK: otherwise.
    """
    # The decorator acquires the lock; delegate to the no-lock worker.
    return self._DeleteNoAcquireLock(name)
def _RemoveTaskFromIndex(self, index, index_tuple, task):
    """Remove a task from the specified index.

    Caller must hold the lock.

    Args:
      index: The index list that needs to be mutated.
      index_tuple: The tuple to search for in the index.
      task: The task instance that is expected to be stored at this location.

    Returns:
      True if the task was successfully removed from the index, False
      otherwise.
    """
    assert self._lock.locked()
    pos = bisect.bisect_left(index, index_tuple)
    # BUGFIX: bisect_left returns len(index) when index_tuple sorts after
    # every entry; the original code then raised IndexError on index[pos]
    # instead of reporting the failed lookup to the caller.
    if pos >= len(index):
        logging.debug("Expected %s, found nothing at insertion point", task)
        return False
    if index[pos][-1] is not task:
        logging.debug("Expected %s, found %s", task, index[pos][-1])
        return False
    index.pop(pos)
    return True
def _DeleteNoAcquireLock(self, name):
    """Delete a named task from every index; caller must hold the lock.

    The name stays in task_name_archive, so it remains tombstoned.

    Args:
      name: the name of the task to delete.

    Returns:
      TaskQueueServiceError.UNKNOWN_TASK: if the task is unknown.
      TaskQueueServiceError.INTERNAL_ERROR: if the store is corrupted.
      TaskQueueServiceError.TOMBSTONED_TASK: if the task was already deleted.
      TaskQueueServiceError.OK: otherwise.
    """
    assert self._lock.locked()
    pos = self._LocateTaskByName(name)
    if pos is None:
        # Distinguish "was deleted" (tombstoned) from "never existed".
        if name in self.task_name_archive:
            return taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK
        else:
            return taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK
    old_task = self._sorted_by_name.pop(pos)[-1]
    eta = old_task.eta_usec()
    # BUGFIX: the enum member is spelled INTERNAL_ERROR (cf. its use in
    # _Dynamic_ForceRun); the previous "INTERNAL_ERRROR" spelling would
    # raise AttributeError on this corruption-reporting path.
    if not self._RemoveTaskFromIndex(
        self._sorted_by_eta, (eta, name, None), old_task
    ):
        return taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR
    if old_task.has_tag():
        tag = old_task.tag()
        if not self._RemoveTaskFromIndex(
            self._sorted_by_tag, (tag, eta, name, None), old_task
        ):
            return taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR
    return taskqueue_service_pb.TaskQueueServiceError.OK
@_WithLock
def Populate(self, num_tasks):
    """Populates the store with a number of tasks.

    Intended for development/demo use: each generated task gets a random
    name, eta, url, method, retry count and (sometimes) headers.

    Args:
      num_tasks: the number of tasks to insert.
    """

    def RandomTask():
        """Creates a new task and randomly populates values."""
        assert self._lock.locked()
        task = taskqueue_service_pb.TaskQueueQueryTasksResponse_Task()
        task.set_task_name(
            "".join(random.choice(string.ascii_lowercase) for x in range(20))
        )
        # Eta anywhere from 10s in the past to 10min in the future.
        task.set_eta_usec(
            now_usec + random.randint(_SecToUsec(-10), _SecToUsec(600))
        )
        # Creation time is always at or before both 'now' and the eta.
        task.set_creation_time_usec(
            min(now_usec, task.eta_usec()) - random.randint(0, _SecToUsec(20))
        )
        task.set_url(random.choice(["/a", "/b", "/c", "/d"]))
        # ~20% of tasks are POSTs with a 2KB body, the rest are GETs.
        if random.random() < 0.2:
            task.set_method(
                taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.POST
            )
            task.set_body("A" * 2000)
        else:
            task.set_method(
                taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.GET
            )
        # Skews toward 0 retries; max() clips the negative draws.
        retry_count = max(0, random.randint(-10, 5))
        task.set_retry_count(retry_count)
        task.set_execution_count(retry_count)
        # ~30% of tasks get 1-4 random headers (without replacement).
        if random.random() < 0.3:
            random_headers = [
                ("nexus", "one"),
                ("foo", "bar"),
                ("content-type", "text/plain"),
                ("from", "user@email.com"),
            ]
            for _ in range(random.randint(1, 4)):
                elem = random.randint(0, len(random_headers) - 1)
                key, value = random_headers.pop(elem)
                header_proto = task.add_header()
                header_proto.set_key(key)
                header_proto.set_value(value)
        return task

    now_usec = _SecToUsec(time.time())
    for _ in range(num_tasks):
        self._InsertTask(RandomTask())
class _TaskExecutor(object):
    """Executor for a task object.

    Converts a TaskQueueQueryTasksResponse_Task into a http request, then uses
    the httplib library to send it to the http server.
    """

    def __init__(self, default_host, request_data):
        """Constructor.

        Args:
          default_host: a string to use as the host/port to connect to if the
            host header is not specified in the task.
          request_data: A request_info.RequestInfo instance used to look up
            state associated with the request that generated an API call.
        """
        self._default_host = default_host
        self._request_data = request_data

    def _HeadersFromTask(self, task, queue):
        """Constructs the http headers for the given task.

        This function will remove special headers (values in BUILT_IN_HEADERS)
        and add the taskqueue headers.

        Args:
          task: The task, a TaskQueueQueryTasksResponse_Task instance.
          queue: The queue that this task belongs to, an _Queue instance.

        Returns:
          A tuple of (header_dict, headers), where:
            header_dict: A mapping from lowercase header name to a list of
              values.
            headers: a list of tuples containing the http header and value.
              There may be multiple entries with the same key.
        """
        headers = []
        header_dict = {}
        for header in task.header_list():
            header_key_lower = header.key().lower()
            # Reserved X-AppEngine-* headers are regenerated below rather
            # than passed through from the stored task.
            if header_key_lower not in BUILT_IN_HEADERS:
                headers.append((header.key(), header.value()))
                header_dict.setdefault(header_key_lower, []).append(header.value())
        headers.append(("X-AppEngine-QueueName", queue.queue_name))
        headers.append(("X-AppEngine-TaskName", task.task_name()))
        headers.append(("X-AppEngine-TaskRetryCount", str(task.retry_count())))
        headers.append(("X-AppEngine-TaskETA", str(_UsecToSec(task.eta_usec()))))
        headers.append(("X-AppEngine-Fake-Is-Admin", "1"))
        headers.append(("Content-Length", str(len(task.body()))))
        # Only default the content type when the task did not provide one.
        if "content-type" not in header_dict:
            headers.append(("Content-Type", "application/octet-stream"))
        headers.append(("X-AppEngine-TaskExecutionCount", str(task.execution_count())))
        if task.has_runlog() and task.runlog().has_response_code():
            headers.append(
                ("X-AppEngine-TaskPreviousResponse", str(task.runlog().response_code()))
            )
        return header_dict, headers

    def ExecuteTask(self, task, queue):
        """Construct a http request from the task and dispatch it.

        Args:
          task: The task to convert to a http request and then send. An
            instance of taskqueue_service_pb.TaskQueueQueryTasksResponse_Task
          queue: The queue that this task belongs to. An instance of _Queue.

        Returns:
          Http Response code from the task's execution, 0 if an exception
          occurred (False when no destination host could be determined).
        """
        method = task.RequestMethod_Name(task.method())
        header_dict, headers = self._HeadersFromTask(task, queue)
        # Tuple-unpacking asserts there is exactly one host value.
        (connection_host,) = header_dict.get("host", [self._default_host])
        if connection_host is None:
            logging.error(
                'Could not determine where to send the task "%s" '
                '(Url: "%s") in queue "%s". Treating as an error.',
                task.task_name(),
                task.url(),
                queue.queue_name,
            )
            return False
        else:
            header_dict["Host"] = connection_host
        dispatcher = self._request_data.get_dispatcher()
        try:
            response = dispatcher.add_request(
                method,
                task.url(),
                headers,
                task.body() if task.has_body() else "",
                "0.1.0.2",
            )
        except request_info.ServerDoesNotExistError:
            logging.exception("Failed to dispatch task")
            return 0
        # The status line looks like "200 OK"; keep only the numeric code.
        return int(response.status.split(" ", 1)[0])
class _BackgroundTaskScheduler(object):
    """The task scheduler class.

    This class is designed to be run in a background thread.

    Note: There must not be more than one instance of _BackgroundTaskScheduler
    per group.
    """

    def __init__(self, group, task_executor, retry_seconds, **kwargs):
        """Constructor.

        Args:
          group: The group that we will automatically execute tasks from. Must
            be an instance of _Group.
          task_executor: The class used to convert a task into a http request.
            Must be an instance of _TaskExecutor.
          retry_seconds: The number of seconds to delay a task by if its
            execution fails.
          _get_time: a callable that returns the current time in seconds since
            the epoch. This argument may only be passed in by keyword. If
            unset, use time.time.
        """
        self._group = group
        self._should_exit = False
        # INF means "nothing scheduled"; _Wait blocks until it is lowered.
        self._next_wakeup = INF
        self._event = threading.Event()
        self._wakeup_lock = threading.Lock()
        self.task_executor = task_executor
        self.default_retry_seconds = retry_seconds
        self._get_time = kwargs.pop("_get_time", time.time)
        if kwargs:
            raise TypeError("Unknown parameters: %s" % ", ".join(kwargs))

    def UpdateNextEventTime(self, next_event_time):
        """Notify the TaskExecutor of the closest event it needs to process.

        Args:
          next_event_time: The time of the event in seconds since the epoch.
        """
        with self._wakeup_lock:
            # Only an EARLIER event needs to interrupt the current wait.
            if next_event_time < self._next_wakeup:
                self._next_wakeup = next_event_time
                self._event.set()

    def Shutdown(self):
        """Request this TaskExecutor to exit."""
        self._should_exit = True
        # Wake the main loop so it observes _should_exit promptly.
        self._event.set()

    def _ProcessQueues(self):
        # Reset the wakeup target before scanning, so concurrent
        # UpdateNextEventTime calls during the scan are not lost.
        with self._wakeup_lock:
            self._next_wakeup = INF
        now = self._get_time()
        queue, task = self._group.GetNextPushTask()
        # Run every task whose eta has passed.
        while task and _UsecToSec(task.eta_usec()) <= now:
            if task.retry_count() == 0:
                task.set_first_try_usec(_SecToUsec(now))
            response_code = self.task_executor.ExecuteTask(task, queue)
            if response_code:
                task.mutable_runlog().set_response_code(response_code)
            else:
                logging.error(
                    'An error occured while sending the task "%s" '
                    '(Url: "%s") in queue "%s". Treating as a task error.',
                    task.task_name(),
                    task.url(),
                    queue.queue_name,
                )
            now = self._get_time()
            if 200 <= response_code < 300:
                # Success: the task is done and removed.
                queue.Delete(task.task_name())
            else:
                # Failure: retry with backoff, or drop when out of retries.
                retry = Retry(task, queue)
                age_usec = _SecToUsec(now) - task.first_try_usec()
                if retry.CanRetry(task.retry_count() + 1, age_usec):
                    retry_usec = retry.CalculateBackoffUsec(task.retry_count() + 1)
                    logging.warning(
                        "Task %s failed to execute. This task will retry in %.3f seconds",
                        task.task_name(),
                        _UsecToSec(retry_usec),
                    )
                    queue.PostponeTask(task, _SecToUsec(now) + retry_usec)
                else:
                    logging.warning(
                        "Task %s failed to execute. The task has no remaining retries. "
                        "Failing permanently after %d retries and %d seconds",
                        task.task_name(),
                        task.retry_count(),
                        _UsecToSec(age_usec),
                    )
                    queue.Delete(task.task_name())
            queue, task = self._group.GetNextPushTask()
        if task:
            # The next task is in the future; schedule the wakeup for it.
            with self._wakeup_lock:
                eta = _UsecToSec(task.eta_usec())
                if eta < self._next_wakeup:
                    self._next_wakeup = eta

    def _Wait(self):
        """Block until we need to process a task or we need to exit."""
        now = self._get_time()
        while not self._should_exit and self._next_wakeup > now:
            timeout = self._next_wakeup - now
            self._event.wait(timeout)
            self._event.clear()
            now = self._get_time()

    def MainLoop(self):
        """The main loop of the scheduler."""
        while not self._should_exit:
            self._ProcessQueues()
            self._Wait()
class TaskQueueServiceStub(apiproxy_stub.APIProxyStub):
"""Python only task queue service stub.
This stub executes tasks when enabled by using the dev_appserver's AddEvent
capability. When task running is disabled this stub will store tasks for
display on a console, where the user may manually execute the tasks.
"""
def __init__(
    self,
    service_name="taskqueue",
    root_path=None,
    auto_task_running=False,
    task_retry_seconds=30,
    _all_queues_valid=False,
    default_http_server=None,
    _testing_validate_state=False,
    request_data=None,
):
    """Constructor.

    Args:
      service_name: Service name expected for all calls.
      root_path: Root path to the directory of the application which may
        contain a queue.yaml file. If None, then it's assumed no queue.yaml
        file is available.
      auto_task_running: When True, the dev_appserver should automatically
        run tasks after they are enqueued.
      task_retry_seconds: How long to wait between task executions after a
        task fails.
      _all_queues_valid: When True, all queue names are accepted without
        validation.
      default_http_server: host/port used by the executor when a task has no
        Host header.
      _testing_validate_state: Should this stub and all of its _Groups (and
        thus and all of its _Queues) validate their state after each
        operation? This should only be used during testing of the
        taskqueue_stub.
      request_data: A request_info.RequestInfo instance used to look up state
        associated with the request that generated an API call.
    """
    super(TaskQueueServiceStub, self).__init__(
        service_name, max_request_size=MAX_REQUEST_SIZE, request_data=request_data
    )
    # Maps app_id -> _Group; the None key holds the current app's group.
    self._queues = {}
    self._all_queues_valid = _all_queues_valid
    self._root_path = root_path
    self._testing_validate_state = _testing_validate_state
    # Only the default (None) group gets the eta-update hook, since only it
    # drives the background scheduler below.
    self._queues[None] = _Group(
        self._ParseQueueYaml,
        app_id=None,
        _all_queues_valid=_all_queues_valid,
        _update_newest_eta=self._UpdateNextEventTime,
        _testing_validate_state=self._testing_validate_state,
    )
    self._auto_task_running = auto_task_running
    self._started = False
    self._task_scheduler = _BackgroundTaskScheduler(
        self._queues[None],
        _TaskExecutor(default_http_server, self.request_data),
        retry_seconds=task_retry_seconds,
    )
    # mtime of the last successfully parsed queue.yaml, for caching.
    self._yaml_last_modified = None
def StartBackgroundExecution(self):
    """Start automatic task execution (idempotent).

    Spawns a daemon thread running the scheduler's MainLoop, but only the
    first time this is called and only when auto task running was enabled
    at construction time.
    """
    if not self._started and self._auto_task_running:
        task_scheduler_thread = threading.Thread(
            target=self._task_scheduler.MainLoop
        )
        # Assign the daemon attribute directly; Thread.setDaemon() is
        # deprecated (and removed in recent Python versions).
        task_scheduler_thread.daemon = True
        task_scheduler_thread.start()
        self._started = True
def Shutdown(self):
    """Requests the task scheduler to shutdown.

    The background thread (if started) exits after its current iteration.
    """
    self._task_scheduler.Shutdown()
def _ParseQueueYaml(self):
    """Loads the queue.yaml file and parses it.

    The parse result is cached and reused while the file's mtime is
    unchanged.

    Returns:
      None if queue.yaml doesn't exist, otherwise a queueinfo.QueueEntry
      object populated from the queue.yaml.
    """
    # Test hook: an injected parser takes precedence over disk access.
    if hasattr(self, "queue_yaml_parser"):
        return self.queue_yaml_parser(self._root_path)
    if self._root_path is None:
        return None
    for queueyaml in ("queue.yaml", "queue.yml"):
        try:
            path = os.path.join(self._root_path, queueyaml)
            modified = os.stat(path).st_mtime
            # Unchanged on disk: serve the cached parse.
            if self._yaml_last_modified and self._yaml_last_modified == modified:
                return self._last_queue_info
            fh = open(path, "r")
        except (IOError, OSError):
            # Missing/unreadable candidate: try the next spelling.
            continue
        # 'with' guarantees the handle is closed even if parsing raises,
        # replacing the original explicit try/finally around close().
        # Parsing errors still propagate, as before.
        with fh:
            queue_info = queueinfo.LoadSingleQueue(fh)
        self._last_queue_info = queue_info
        self._yaml_last_modified = modified
        return queue_info
    return None
def _UpdateNextEventTime(self, callback_time):
    """Enqueue a task to be automatically scheduled.

    Note: If auto task running is disabled, this function is a no-op
    (the scheduler thread is simply never started).

    Args:
      callback_time: The earliest time this task may be run, in seconds
        since the epoch.
    """
    self._task_scheduler.UpdateNextEventTime(callback_time)
def _GetGroup(self, app_id=None):
    """Get the _Group instance for app_id, creating a new one if needed.

    Args:
      app_id: The app id in question. Note: This field is not validated.
    """
    group = self._queues.get(app_id)
    if group is None:
        # Lazily create and cache a group for apps we haven't seen yet.
        group = _Group(
            app_id=app_id,
            _all_queues_valid=self._all_queues_valid,
            _testing_validate_state=self._testing_validate_state,
        )
        self._queues[app_id] = group
    return group
def _Dynamic_Add(self, request, response):
    """Add a single task to a queue.

    This method is a wrapper around the BulkAdd RPC request.

    Must adhere to the '_Dynamic_' naming convention for stubbing to work.
    See taskqueue_service.proto for a full description of the RPC.

    Args:
      request: The taskqueue_service_pb.TaskQueueAddRequest. See
        taskqueue_service.proto.
      response: The taskqueue_service_pb.TaskQueueAddResponse. See
        taskqueue_service.proto.
    """
    # Wrap the single request in a bulk request of size one.
    bulk_request = taskqueue_service_pb.TaskQueueBulkAddRequest()
    bulk_response = taskqueue_service_pb.TaskQueueBulkAddResponse()
    bulk_request.add_add_request().CopyFrom(request)
    self._Dynamic_BulkAdd(bulk_request, bulk_response)
    assert bulk_response.taskresult_size() == 1
    result = bulk_response.taskresult(0).result()
    # Per-task bulk errors are surfaced as an ApplicationError here.
    if result != taskqueue_service_pb.TaskQueueServiceError.OK:
        raise apiproxy_errors.ApplicationError(result)
    elif bulk_response.taskresult(0).has_chosen_task_name():
        response.set_chosen_task_name(
            bulk_response.taskresult(0).chosen_task_name()
        )
def _Dynamic_BulkAdd(self, request, response):
    """Add many tasks to a queue using a single request.

    Must adhere to the '_Dynamic_' naming convention for stubbing to work.
    See taskqueue_service.proto for a full description of the RPC.

    Args:
      request: The taskqueue_service_pb.TaskQueueBulkAddRequest. See
        taskqueue_service.proto.
      response: The taskqueue_service_pb.TaskQueueBulkAddResponse. See
        taskqueue_service.proto.
    """
    assert request.add_request_size(), "taskqueue should prevent empty requests"
    # The app id of the first sub-request selects the target group.
    self._GetGroup(_GetAppId(request.add_request(0))).BulkAdd_Rpc(request, response)
def GetQueues(self):
    """Gets all the application's queues.

    Returns:
      A list of dictionaries, where each dictionary contains one queue's
      attributes. E.g.:
        [{'name': 'some-queue',
          'max_rate': '1/s',
          'bucket_size': 5,
          'oldest_task': '2009/02/02 05:37:42',
          'eta_delta': '0:00:06.342511 ago',
          'tasks_in_queue': 12}, ...]
      The list of queues always includes the default queue.
    """
    # Only the default (current-app) group is consulted.
    return self._GetGroup().GetQueuesAsDicts()
def GetTasks(self, queue_name):
    """Gets a queue's tasks.

    Args:
      queue_name: Queue's name to return tasks for.

    Returns:
      A list of dictionaries, where each dictionary contains one task's
      attributes. E.g.
        [{'name': 'task-123',
          'queue_name': 'default',
          'url': '/update',
          'method': 'GET',
          'eta': '2009/02/02 05:37:42',
          'eta_delta': '0:00:06.342511 ago',
          'body': '',
          'headers': [('user-header', 'some-value'),
                      ('X-AppEngine-QueueName', 'update-queue'),
                      ('X-AppEngine-TaskName', 'task-123'),
                      ('X-AppEngine-TaskRetryCount', '0'),
                      ('X-AppEngine-TaskETA', '1234567890.123456'),
                      ('X-AppEngine-Development-Payload', '1'),
                      ('Content-Length', 0),
                      ('Content-Type', 'application/octet-stream')]}, ...]

    Raises:
      ValueError: A task request contains an unknown HTTP method type.
      KeyError: An invalid queue name was specified.
    """
    return self._GetGroup().GetQueue(queue_name).GetTasksAsDicts()
def DeleteTask(self, queue_name, task_name):
    """Deletes a task from a queue, without leaving a tombstone.

    Args:
      queue_name: the name of the queue to delete the task from.
      task_name: the name of the task to delete.
    """
    group = self._GetGroup()
    if not group.HasQueue(queue_name):
        return
    target_queue = group.GetQueue(queue_name)
    target_queue.Delete(task_name)
    # Dropping the name from the archive is what prevents a tombstone.
    target_queue.task_name_archive.discard(task_name)
def FlushQueue(self, queue_name):
    """Removes all tasks from a queue, without leaving tombstones.

    Args:
      queue_name: the name of the queue to remove tasks from.
    """
    group = self._GetGroup()
    if group.HasQueue(queue_name):
        target_queue = group.GetQueue(queue_name)
        target_queue.PurgeQueue()
        # Clearing the archive removes all tombstones for this queue.
        target_queue.task_name_archive.clear()
def _Dynamic_UpdateQueue(self, request, unused_response):
    """Local implementation of the UpdateQueue RPC in TaskQueueService.

    Must adhere to the '_Dynamic_' naming convention for stubbing to work.
    See taskqueue_service.proto for a full description of the RPC.

    Args:
      request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest.
      unused_response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse.
        Not used.
    """
    target_group = self._GetGroup(_GetAppId(request))
    target_group.UpdateQueue_Rpc(request, unused_response)
def _Dynamic_FetchQueues(self, request, response):
    """Local implementation of the FetchQueues RPC in TaskQueueService.

    Must adhere to the '_Dynamic_' naming convention for stubbing to work.
    See taskqueue_service.proto for a full description of the RPC.

    Args:
      request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest.
      response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse.
    """
    target_group = self._GetGroup(_GetAppId(request))
    target_group.FetchQueues_Rpc(request, response)
def _Dynamic_FetchQueueStats(self, request, response):
    """Local 'random' implementation of TaskQueueService.FetchQueueStats.

    This implementation loads some stats from the task store, the rest with
    random numbers.

    Must adhere to the '_Dynamic_' naming convention for stubbing to work.
    See taskqueue_service.proto for a full description of the RPC.

    Args:
      request: A taskqueue_service_pb.TaskQueueFetchQueueStatsRequest.
      response: A taskqueue_service_pb.TaskQueueFetchQueueStatsResponse.
    """
    target_group = self._GetGroup(_GetAppId(request))
    target_group.FetchQueueStats_Rpc(request, response)
def _Dynamic_QueryTasks(self, request, response):
    """Local implementation of the TaskQueueService.QueryTasks RPC.

    Must adhere to the '_Dynamic_' naming convention for stubbing to work.
    See taskqueue_service.proto for a full description of the RPC.

    Args:
      request: A taskqueue_service_pb.TaskQueueQueryTasksRequest.
      response: A taskqueue_service_pb.TaskQueueQueryTasksResponse.
    """
    target_group = self._GetGroup(_GetAppId(request))
    target_group.QueryTasks_Rpc(request, response)
def _Dynamic_FetchTask(self, request, response):
    """Local implementation of the TaskQueueService.FetchTask RPC.

    Must adhere to the '_Dynamic_' naming convention for stubbing to work.
    See taskqueue_service.proto for a full description of the RPC.

    Args:
      request: A taskqueue_service_pb.TaskQueueFetchTaskRequest.
      response: A taskqueue_service_pb.TaskQueueFetchTaskResponse.
    """
    target_group = self._GetGroup(_GetAppId(request))
    target_group.FetchTask_Rpc(request, response)
def _Dynamic_Delete(self, request, response):
    """Local delete implementation of TaskQueueService.Delete.

    Deletes tasks from the task store. A 1/20 chance of a transient error.

    Must adhere to the '_Dynamic_' naming convention for stubbing to work.
    See taskqueue_service.proto for a full description of the RPC.

    Args:
      request: A taskqueue_service_pb.TaskQueueDeleteRequest.
      response: A taskqueue_service_pb.TaskQueueDeleteResponse.
    """
    target_group = self._GetGroup(_GetAppId(request))
    target_group.Delete_Rpc(request, response)
def _Dynamic_ForceRun(self, request, response):
    """Local force run implementation of TaskQueueService.ForceRun.

    Forces running of a task in a queue. This will fail randomly for testing
    if the app id is non-empty.

    Must adhere to the '_Dynamic_' naming convention for stubbing to work.
    See taskqueue_service.proto for a full description of the RPC.

    Args:
      request: A taskqueue_service_pb.TaskQueueForceRunRequest.
      response: A taskqueue_service_pb.TaskQueueForceRunResponse.
    """
    # Requests carrying an explicit app id (the admin/testing path) are not
    # executed at all; instead errors are injected at random so clients can
    # exercise their retry logic.
    if _GetAppId(request) is not None:
        # ~5% of calls report a transient error.
        if random.random() <= 0.05:
            response.set_result(
                taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR
            )
        # NOTE: this is a second, independent random draw, so the effective
        # probability of INTERNAL_ERROR is ~0.95 * 0.052 (about 5% overall).
        elif random.random() <= 0.052:
            response.set_result(
                taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR
            )
        else:
            response.set_result(taskqueue_service_pb.TaskQueueServiceError.OK)
    else:
        # Normal path: look up the queue and the named task, then run it.
        group = self._GetGroup(None)
        if not group.HasQueue(request.queue_name()):
            response.set_result(
                taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE
            )
            return
        queue = group.GetQueue(request.queue_name())
        # Lookup(1, name=...) returns at most one task with that name.
        task = queue.Lookup(1, name=request.task_name())
        if not task:
            response.set_result(
                taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK
            )
            return
        queue.RunTaskNow(task[0])
        # Wake the scheduler immediately so the forced task is picked up.
        self._UpdateNextEventTime(0)
        response.set_result(taskqueue_service_pb.TaskQueueServiceError.OK)
def _Dynamic_DeleteQueue(self, request, response):
    """Local delete implementation of TaskQueueService.DeleteQueue.

    Must adhere to the '_Dynamic_' naming convention for stubbing to work.
    See taskqueue_service.proto for a full description of the RPC.

    Args:
      request: A taskqueue_service_pb.TaskQueueDeleteQueueRequest.
      response: A taskqueue_service_pb.TaskQueueDeleteQueueResponse.
    """
    app_id = _GetAppId(request)
    # Queue deletion is only allowed for requests carrying an explicit app id.
    if app_id is None:
        raise apiproxy_errors.ApplicationError(
            taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED
        )
    group = self._GetGroup(app_id)
    group.DeleteQueue_Rpc(request, response)
def _Dynamic_PauseQueue(self, request, response):
    """Local pause implementation of TaskQueueService.PauseQueue.

    Must adhere to the '_Dynamic_' naming convention for stubbing to work.
    See taskqueue_service.proto for a full description of the RPC.

    Args:
      request: A taskqueue_service_pb.TaskQueuePauseQueueRequest.
      response: A taskqueue_service_pb.TaskQueuePauseQueueResponse.
    """
    app_id = _GetAppId(request)
    # Pausing a queue is only allowed for requests carrying an explicit app id.
    if app_id is None:
        raise apiproxy_errors.ApplicationError(
            taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED
        )
    group = self._GetGroup(app_id)
    group.PauseQueue_Rpc(request, response)
def _Dynamic_PurgeQueue(self, request, response):
    """Local purge implementation of TaskQueueService.PurgeQueue.

    Must adhere to the '_Dynamic_' naming convention for stubbing to work.
    See taskqueue_service.proto for a full description of the RPC.

    Args:
      request: A taskqueue_service_pb.TaskQueuePurgeQueueRequest.
      response: A taskqueue_service_pb.TaskQueuePurgeQueueResponse.
    """
    # Delegate to the group that owns the requesting app's queues.
    group = self._GetGroup(_GetAppId(request))
    group.PurgeQueue_Rpc(request, response)
def _Dynamic_DeleteGroup(self, request, response):
    """Local delete implementation of TaskQueueService.DeleteGroup.

    Must adhere to the '_Dynamic_' naming convention for stubbing to work.
    See taskqueue_service.proto for a full description of the RPC.

    Args:
      request: A taskqueue_service_pb.TaskQueueDeleteGroupRequest.
      response: A taskqueue_service_pb.TaskQueueDeleteGroupResponse.
    """
    app_id = _GetAppId(request)
    # Group deletion is only allowed for requests carrying an explicit app id.
    if app_id is None:
        raise apiproxy_errors.ApplicationError(
            taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED
        )
    if app_id not in self._queues:
        raise apiproxy_errors.ApplicationError(
            taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE
        )
    del self._queues[app_id]
def _Dynamic_UpdateStorageLimit(self, request, response):
    """Local implementation of TaskQueueService.UpdateStorageLimit.

    Must adhere to the '_Dynamic_' naming convention for stubbing to work.
    See taskqueue_service.proto for a full description of the RPC.

    Args:
      request: A taskqueue_service_pb.TaskQueueUpdateStorageLimitRequest.
      response: A taskqueue_service_pb.TaskQueueUpdateStorageLimitResponse.
    """
    # Only requests carrying an explicit app id may change storage limits.
    if _GetAppId(request) is None:
        raise apiproxy_errors.ApplicationError(
            taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED
        )
    new_limit = request.limit()
    # Reject limits outside [0 bytes, 1000 TiB].
    if not 0 <= new_limit <= 1000 * 1024**4:
        raise apiproxy_errors.ApplicationError(
            taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST
        )
    response.set_new_limit(new_limit)
def _Dynamic_QueryAndOwnTasks(self, request, response):
    """Local implementation of TaskQueueService.QueryAndOwnTasks.

    Must adhere to the '_Dynamic_' naming convention for stubbing to work.
    See taskqueue_service.proto for a full description of the RPC.

    Args:
      request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest.
      response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse.

    Raises:
      InvalidQueueModeError: If target queue is not a pull queue.
    """
    # Delegate to the default (current-app) queue group.
    group = self._GetGroup()
    group.QueryAndOwnTasks_Rpc(request, response)
def _Dynamic_ModifyTaskLease(self, request, response):
    """Local implementation of TaskQueueService.ModifyTaskLease.

    Args:
      request: A taskqueue_service_pb.TaskQueueModifyTaskLeaseRequest.
      response: A taskqueue_service_pb.TaskQueueModifyTaskLeaseResponse.

    Raises:
      InvalidQueueModeError: If target queue is not a pull queue.
    """
    # Delegate to the default (current-app) queue group.
    group = self._GetGroup()
    group.ModifyTaskLease_Rpc(request, response)
def get_filtered_tasks(self, url=None, name=None, queue_names=None):
    """Get the tasks in the task queue with filters.

    Args:
      url: A URL that all returned tasks should point at.
      name: The name of all returned tasks.
      queue_names: A list of queue names to retrieve tasks from. If left blank
        this will get default to all queues available.

    Returns:
      A list of taskqueue.Task objects.
    """
    known_queue_names = [queue["name"] for queue in self.GetQueues()]
    # Accept a single queue name as a convenience; None means "all queues".
    if isinstance(queue_names, str):
        queue_names = [queue_names]
    if queue_names is None:
        queue_names = known_queue_names

    def _matches(task):
        # A task must satisfy every filter the caller actually supplied.
        if url is not None and task["url"] != url:
            return False
        if name is not None and task["name"] != name:
            return False
        return True

    # Collect matching task dicts, skipping names that are not real queues.
    selected = [
        task
        for queue_name in queue_names
        if queue_name in known_queue_names
        for task in self.GetTasks(queue_name)
        if _matches(task)
    ]

    # Materialize each stored task dict as a taskqueue.Task object.
    tasks = []
    for task in selected:
        payload = base64.b64decode(task["body"])
        headers = dict(task["headers"])
        headers["Content-Length"] = str(len(payload))
        eta = datetime.datetime.strptime(task["eta"], "%Y/%m/%d %H:%M:%S")
        eta = eta.replace(tzinfo=taskqueue._UTC)
        tasks.append(
            taskqueue.Task(
                name=task["name"],
                method=task["method"],
                url=task["url"],
                headers=headers,
                payload=payload,
                eta=eta,
            )
        )
    return tasks
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implementation of scheduling for Groc format schedules.
A Groc schedule looks like '1st,2nd monday 9:00', or 'every 20 mins'. This
module takes a parsed schedule (produced by Antlr) and creates objects that
can produce times that match this schedule.
A parsed schedule is one of two types - an Interval or a Specific Time.
See the class docstrings for more.
Extensions to be considered:
allowing a comma separated list of times to run
"""
import calendar
import datetime
try:
import pytz
except ImportError:
pytz = None
import groc
HOURS = "hours"
MINUTES = "minutes"
try:
    from pytz import NonExistentTimeError
    from pytz import AmbiguousTimeError
except ImportError:
    # pytz is optional. Define stand-in exception types so the except
    # clauses below remain syntactically valid even without pytz; they will
    # simply never be raised in that configuration.
    class NonExistentTimeError(Exception):
        pass

    class AmbiguousTimeError(Exception):
        pass
def GrocTimeSpecification(schedule, timezone=None):
    """Factory function.

    Turns a schedule specification into a TimeSpecification.

    Arguments:
      schedule: the schedule specification, as a string
      timezone: the optional timezone as a string for this specification.
        Defaults to 'UTC' - valid entries are things like 'Australia/Victoria'
        or 'PST8PDT'.

    Returns:
      a TimeSpecification instance
    """
    parser = groc.CreateParser(schedule)
    parser.timespec()
    # A non-empty period string means an interval schedule ('every 20 mins');
    # otherwise the parser produced a specific-time schedule.
    if not parser.period_string:
        return SpecificTimeSpecification(
            parser.ordinal_set,
            parser.weekday_set,
            parser.month_set,
            parser.monthday_set,
            parser.time_string,
            timezone,
        )
    return IntervalTimeSpecification(
        parser.interval_mins,
        parser.period_string,
        parser.synchronized,
        parser.start_time_string,
        parser.end_time_string,
        timezone,
    )
class TimeSpecification(object):
    """Base class for time specifications."""

    def GetMatches(self, start, n):
        """Returns the next n times that match the schedule, starting at start.

        Each match is fed back in as the starting point for the next one, so
        the result is a strictly ordered chain of matches.

        Arguments:
          start: a datetime to start from. Matches will start from after this
            time.
          n: the number of matching times to return

        Returns:
          a list of n datetime objects
        """
        matches = []
        current = start
        for _ in range(n):
            current = self.GetMatch(current)
            matches.append(current)
        return matches

    def GetMatch(self, start):
        """Returns the next match after time start.

        Must be implemented in subclasses.

        Arguments:
          start: a datetime to start from. Matches will start from after this
            time. This may be in any pytz time zone, or it may be
            timezone-naive (interpreted as UTC).

        Returns:
          a datetime object in the timezone of the input 'start'
        """
        raise NotImplementedError
def _GetTimezone(timezone_string):
"""Converts a timezone string to a pytz timezone object.
Arguments:
timezone_string: a string representing a timezone, or None
Returns:
a pytz timezone object, or None if the input timezone_string is None
Raises:
ValueError: if timezone_string is not None and the pytz module could not be
loaded
"""
if timezone_string:
if pytz is None:
raise ValueError("need pytz in order to specify a timezone")
return pytz.timezone(timezone_string)
else:
return None
def _ToTimeZone(t, tzinfo):
"""Converts 't' to the time zone 'tzinfo'.
Arguments:
t: a datetime object. It may be in any pytz time zone, or it may be
timezone-naive (interpreted as UTC).
tzinfo: a pytz timezone object, or None (interpreted as UTC).
Returns:
a datetime object in the time zone 'tzinfo'
"""
if pytz is None:
return t.replace(tzinfo=tzinfo)
elif tzinfo:
if not t.tzinfo:
t = pytz.utc.localize(t)
return tzinfo.normalize(t.astimezone(tzinfo))
elif t.tzinfo:
return pytz.utc.normalize(t.astimezone(pytz.utc)).replace(tzinfo=None)
else:
return t
def _GetTime(time_string):
"""Converts a string to a datetime.time object.
Arguments:
time_string: a string representing a time ('hours:minutes')
Returns:
a datetime.time object
"""
hourstr, minutestr = time_string.split(":")
return datetime.time(int(hourstr), int(minutestr))
class IntervalTimeSpecification(TimeSpecification):
    """A time specification for a given interval.

    An Interval type spec runs at the given fixed interval. It has the following
    attributes:
    period - the type of interval, either 'hours' or 'minutes'
    interval - the number of units of type period.
    synchronized - whether to synchronize the times to be locked to a fixed
      period (midnight in the specified timezone).
    start_time, end_time - restrict matches to a given range of times every day.
      If these are None, there is no restriction. Otherwise, they are
      datetime.time objects.
    timezone - the time zone in which start_time and end_time should be
      interpreted, or None (defaults to UTC). This is a pytz timezone object.
    """

    def __init__(
        self,
        interval,
        period,
        synchronized=False,
        start_time_string="",
        end_time_string="",
        timezone=None,
    ):
        """Initializes the interval spec and validates argument combinations.

        Raises:
          groc.GrocException: if the interval is non-positive, or if
            'synchronized' is used with a period that does not divide evenly
            into 24 hours.
          ValueError: if start/end time strings are supplied inconsistently or
            together with 'synchronized'.
        """
        super(IntervalTimeSpecification, self).__init__()
        if interval < 1:
            raise groc.GrocException("interval must be greater than zero")
        self.interval = interval
        self.period = period
        self.synchronized = synchronized
        # Length of one interval in seconds.
        if self.period == HOURS:
            self.seconds = self.interval * 3600
        else:
            self.seconds = self.interval * 60
        self.timezone = _GetTimezone(timezone)
        if self.synchronized:
            if start_time_string:
                raise ValueError(
                    "start_time_string may not be specified if synchronized is true"
                )
            if end_time_string:
                raise ValueError(
                    "end_time_string may not be specified if synchronized is true"
                )
            if (self.seconds > 86400) or ((86400 % self.seconds) != 0):
                raise groc.GrocException(
                    "can only use synchronized for periods that"
                    " divide evenly into 24 hours"
                )
            # Synchronized schedules are anchored at midnight and span the
            # whole day, so matches land on fixed multiples of the interval.
            self.start_time = datetime.time(0, 0).replace(tzinfo=self.timezone)
            self.end_time = datetime.time(23, 59).replace(tzinfo=self.timezone)
        elif start_time_string:
            if not end_time_string:
                raise ValueError(
                    "end_time_string must be specified if start_time_string is"
                )
            self.start_time = _GetTime(start_time_string).replace(
                tzinfo=self.timezone
            )
            self.end_time = _GetTime(end_time_string).replace(tzinfo=self.timezone)
        else:
            if end_time_string:
                raise ValueError(
                    "start_time_string must be specified if end_time_string is"
                )
            # No daily window: matches are simply 'start + interval'.
            self.start_time = None
            self.end_time = None

    def GetMatch(self, start):
        """Returns the next match after 'start'.

        Arguments:
          start: a datetime to start from. Matches will start from after this
            time. This may be in any pytz time zone, or it may be
            timezone-naive (interpreted as UTC).

        Returns:
          a datetime object in the timezone of the input 'start'
        """
        if self.start_time is None:
            # Unrestricted interval: just step forward one interval.
            return start + datetime.timedelta(seconds=self.seconds)

        # Work in the schedule's own timezone.
        t = _ToTimeZone(start, self.timezone)

        # Find the most recent daily window start at or before 't', then the
        # first interval boundary strictly after 't' within that window.
        start_time = self._GetPreviousDateTime(t, self.start_time)
        t_delta = t - start_time
        # BUG FIX: a day has 86400 seconds (the original multiplied days by
        # 60 * 24), and the interval count must use floor division -- under
        # Python 3 the original '/' produced a fractional count, shifting
        # matches off the interval grid.
        t_delta_seconds = t_delta.days * 86400 + t_delta.seconds
        num_intervals = (t_delta_seconds + self.seconds) // self.seconds
        interval_time = start_time + datetime.timedelta(
            seconds=(num_intervals * self.seconds)
        )
        if self.timezone:
            interval_time = self.timezone.normalize(interval_time)

        # If the candidate falls outside today's window, the next match is
        # the start of the next daily window instead.
        next_start_time = self._GetNextDateTime(t, self.start_time)
        if (
            self._TimeIsInRange(t)
            and self._TimeIsInRange(interval_time)
            and interval_time < next_start_time
        ):
            result = interval_time
        else:
            result = next_start_time
        return _ToTimeZone(result, start.tzinfo)

    def _TimeIsInRange(self, t):
        """Returns true if 't' falls between start_time and end_time, inclusive.

        Arguments:
          t: a datetime object, in self.timezone

        Returns:
          a boolean
        """
        previous_start_time = self._GetPreviousDateTime(t, self.start_time)
        previous_end_time = self._GetPreviousDateTime(t, self.end_time)
        # If the last window start is more recent than the last window end,
        # 't' is inside an open window; otherwise it is in range only when it
        # sits exactly on the window's end.
        if previous_start_time > previous_end_time:
            return True
        else:
            return t == previous_end_time

    @staticmethod
    def _GetPreviousDateTime(t, target_time):
        """Returns the latest datetime <= 't' that has the time target_time.

        Arguments:
          t: a datetime.datetime object, in self.timezone
          target_time: a datetime.time object, in self.timezone

        Returns:
          a datetime.datetime object, in self.timezone
        """
        date = t.date()
        while True:
            result = IntervalTimeSpecification._CombineDateAndTime(date, target_time)
            if result <= t:
                return result
            date -= datetime.timedelta(days=1)

    @staticmethod
    def _GetNextDateTime(t, target_time):
        """Returns the earliest datetime > 't' that has the time target_time.

        Arguments:
          t: a datetime.datetime object, in self.timezone
          target_time: a time object, in self.timezone

        Returns:
          a datetime.datetime object, in self.timezone
        """
        date = t.date()
        while True:
            result = IntervalTimeSpecification._CombineDateAndTime(date, target_time)
            if result > t:
                return result
            date += datetime.timedelta(days=1)

    @staticmethod
    def _CombineDateAndTime(date, time):
        """Creates a datetime object from date and time objects.

        This is similar to the datetime.combine method, but its timezone
        calculations are designed to work with pytz.

        Arguments:
          date: a datetime.date object, in any timezone
          time: a datetime.time object, in any timezone

        Returns:
          a datetime.datetime object, in the timezone of the input 'time'
        """
        if time.tzinfo:
            naive_result = datetime.datetime(
                date.year, date.month, date.day, time.hour, time.minute, time.second
            )
            try:
                # is_dst=None makes pytz raise on ambiguous/nonexistent times
                # (DST transitions) so they can be resolved explicitly below.
                return time.tzinfo.localize(naive_result, is_dst=None)
            except AmbiguousTimeError:
                # Clock fell back: pick the earlier of the two candidates.
                return min(
                    time.tzinfo.localize(naive_result, is_dst=True),
                    time.tzinfo.localize(naive_result, is_dst=False),
                )
            except NonExistentTimeError:
                # Clock sprang forward: advance minute by minute until the
                # local time exists again.
                while True:
                    naive_result += datetime.timedelta(minutes=1)
                    try:
                        return time.tzinfo.localize(naive_result, is_dst=None)
                    except NonExistentTimeError:
                        pass
        else:
            return datetime.datetime.combine(date, time)
class SpecificTimeSpecification(TimeSpecification):
    """Specific time specification.

    A Specific interval is more complex, but defines a certain time to run and
    the days that it should run. It has the following attributes:
    time - the time of day to run, as 'HH:MM'
    ordinals - first, second, third &c, as a set of integers in 1..5
    months - the months that this should run, as a set of integers in 1..12
    weekdays - the days of the week that this should run, as a set of integers,
      0=Sunday, 6=Saturday
    timezone - the optional timezone as a string for this specification.
      Defaults to UTC - valid entries are things like Australia/Victoria
      or PST8PDT.

    A specific time schedule can be quite complex. A schedule could look like
    this:
    '1st,third sat,sun of jan,feb,mar 09:15'

    In this case, ordinals would be {1,3}, weekdays {0,6}, months {1,2,3} and
    time would be '09:15'.
    """

    def __init__(
        self,
        ordinals=None,
        weekdays=None,
        months=None,
        monthdays=None,
        timestr="00:00",
        timezone=None,
    ):
        """Initializes the spec; omitted filters default to 'every'.

        Raises:
          ValueError: if both weekdays and monthdays are given, or if any
            value falls outside its legal range.
        """
        super(SpecificTimeSpecification, self).__init__()
        # weekday-of-month and day-of-month addressing are mutually exclusive.
        if weekdays and monthdays:
            raise ValueError("cannot supply both monthdays and weekdays")
        if ordinals is None:
            # None means every ordinal (1st through 5th occurrence).
            self.ordinals = set(range(1, 6))
        else:
            self.ordinals = set(ordinals)
            if self.ordinals and (min(self.ordinals) < 1 or max(self.ordinals) > 5):
                raise ValueError(
                    "ordinals must be between 1 and 5 inclusive, " "got %r" % ordinals
                )
        if weekdays is None:
            # None means every day of the week.
            self.weekdays = set(range(7))
        else:
            self.weekdays = set(weekdays)
            if self.weekdays and (min(self.weekdays) < 0 or max(self.weekdays) > 6):
                raise ValueError(
                    "weekdays must be between "
                    "0 (sun) and 6 (sat) inclusive, "
                    "got %r" % weekdays
                )
        if months is None:
            # None means every month.
            self.months = set(range(1, 13))
        else:
            self.months = set(months)
            if self.months and (min(self.months) < 1 or max(self.months) > 12):
                raise ValueError(
                    "months must be between "
                    "1 (jan) and 12 (dec) inclusive, "
                    "got %r" % months
                )
        if not monthdays:
            self.monthdays = set()
        else:
            if min(monthdays) < 1:
                raise ValueError("day of month must be greater than 0")
            if max(monthdays) > 31:
                raise ValueError("day of month must be less than 32")
            if self.months:
                # Verify at least one selected month is long enough to contain
                # the smallest requested monthday. Year 4 is a leap year, so
                # monthrange(4, month) yields each month's maximum length
                # (February -> 29).
                for month in self.months:
                    _, ndays = calendar.monthrange(4, month)
                    if min(monthdays) <= ndays:
                        break
                else:
                    raise ValueError(
                        "invalid day of month, "
                        "got day %r of month %r" % (max(monthdays), month)
                    )
            self.monthdays = set(monthdays)
        self.time = _GetTime(timestr)
        self.timezone = _GetTimezone(timezone)

    def _MatchingDays(self, year, month):
        """Returns matching days for the given year and month.

        For the given year and month, return the days that match this instance's
        day specification, based on either (a) the ordinals and weekdays, or
        (b) the explicitly specified monthdays. If monthdays are specified,
        dates that fall outside the range of the month will not be returned.

        Arguments:
          year: the year as an integer
          month: the month as an integer, in range 1-12

        Returns:
          a list of matching days, as ints in range 1-31
        """
        start_day, last_day = calendar.monthrange(year, month)
        if self.monthdays:
            return sorted([day for day in self.monthdays if day <= last_day])
        out_days = []
        # calendar.monthrange reports the first weekday with Monday=0;
        # convert to this class's Sunday=0 convention.
        start_day = (start_day + 1) % 7
        for ordinal in self.ordinals:
            for weekday in self.weekdays:
                # Day of month of the first occurrence of 'weekday', then
                # shifted forward (ordinal-1) weeks.
                day = ((weekday - start_day) % 7) + 1
                day += 7 * (ordinal - 1)
                if day <= last_day:
                    out_days.append(day)
        return sorted(out_days)

    def _NextMonthGenerator(self, start, matches):
        """Creates a generator that produces results from the set 'matches'.

        Matches must be >= 'start'. If none match, the wrap counter is
        incremented, and the result set is reset to the full set. Yields a
        2-tuple of (match, wrapcount).

        Arguments:
          start: first set of matches will be >= this value (an int)
          matches: the set of potential matches (a sequence of ints)

        Yields:
          a two-tuple of (match, wrap counter). match is an int in range (1-12),
          wrapcount is a int indicating how many times we've wrapped around.
        """
        potential = matches = sorted(matches)
        # 'after' is the last month emitted; start - 1 admits 'start' itself.
        after = start - 1
        wrapcount = 0
        while True:
            # Drop months at or before the last emitted one; wrap to the full
            # set (advancing the year counter) when none remain.
            potential = [x for x in potential if x > after]
            if not potential:
                wrapcount += 1
                potential = matches
            after = potential[0]
            yield (after, wrapcount)

    def GetMatch(self, start):
        """Returns the next match after time start.

        Arguments:
          start: a datetime to start from. Matches will start from after this
            time. This may be in any pytz time zone, or it may be
            timezone-naive (interpreted as UTC).

        Returns:
          a datetime object in the timezone of the input 'start'
        """
        # Do the search in the schedule's own timezone, on naive datetimes.
        start_time = _ToTimeZone(start, self.timezone).replace(tzinfo=None)
        if self.months:
            # Iterates candidate months (with a year-wrap counter) forever.
            months = self._NextMonthGenerator(start_time.month, self.months)
        while True:
            month, yearwraps = next(months)
            candidate_month = start_time.replace(
                day=1, month=month, year=start_time.year + yearwraps
            )
            day_matches = self._MatchingDays(candidate_month.year, month)
            if (candidate_month.year, candidate_month.month) == (
                start_time.year,
                start_time.month,
            ):
                # Still in the starting month: discard days already past, and
                # discard today if today's scheduled time has already passed.
                day_matches = [x for x in day_matches if x >= start_time.day]
                while (
                    day_matches
                    and day_matches[0] == start_time.day
                    and start_time.time() >= self.time
                ):
                    day_matches.pop(0)
            while day_matches:
                out = candidate_month.replace(
                    day=day_matches[0],
                    hour=self.time.hour,
                    minute=self.time.minute,
                    second=0,
                    microsecond=0,
                )
                if self.timezone and pytz is not None:
                    try:
                        # is_dst=None raises on DST-ambiguous or nonexistent
                        # local times so they can be resolved explicitly.
                        out = self.timezone.localize(out, is_dst=None)
                    except AmbiguousTimeError:
                        out = self.timezone.localize(out)
                    except NonExistentTimeError:
                        # Clock sprang forward: slide forward an hour at a
                        # time until the local time exists (bounded to 24h).
                        for _ in range(24):
                            out += datetime.timedelta(minutes=60)
                            try:
                                out = self.timezone.localize(out)
                            except NonExistentTimeError:
                                continue
                            break
                return _ToTimeZone(out, start.tzinfo)
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module supports asynchronous I/O on multiple file descriptors."""
from google.appengine.api.remote_socket._remote_socket import select, error
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Repository for all builtin handlers information.
On initialization, this file generates a list of builtin handlers that have
associated app.yaml information. This file can then be called to read that
information and make it available.
"""
import logging
import os

# Directory containing the builtin-handler subdirectories (this package's dir).
DEFAULT_DIR = os.path.join(os.path.dirname(__file__))

# Module-level state, (re)initialized by set_builtins_dir().
_handler_dir = None
_available_builtins = None

# BUILTINS_NOT_AVAIABLE_IN_PYTHON27 = set(['datastore_admin', 'mapreduce'])

# Name of a runtime-specific include file (e.g. 'include-python27.yaml') and
# the fallback used when no runtime-specific file exists.
INCLUDE_FILENAME_TEMPLATE = "include-%s.yaml"
DEFAULT_INCLUDE_FILENAME = "include.yaml"


class InvalidBuiltinName(Exception):
    """Raised whenever a builtin handler name is specified that is not found."""
def reset_builtins_dir():
    """Public method for resetting builtins directory to default."""
    # Rebuilds the available-builtins cache by re-scanning DEFAULT_DIR.
    set_builtins_dir(DEFAULT_DIR)
def set_builtins_dir(path):
    """Sets the appropriate path for testing and reinitializes the module."""
    # Replace the module-level cache wholesale, then re-scan the new path.
    global _handler_dir, _available_builtins
    _handler_dir = path
    _available_builtins = []
    _initialize_builtins()
def _initialize_builtins():
    """Scan the immediate subdirectories of the builtins module.

    Encountered subdirectories with an app.yaml file are added to
    AVAILABLE_BUILTINS.
    """
    # An entry is a usable builtin iff its default include yaml exists.
    _available_builtins.extend(
        entry
        for entry in os.listdir(_handler_dir)
        if os.path.isfile(_get_yaml_path(entry, ""))
    )
def _get_yaml_path(builtin_name, runtime):
    """Return expected path to a builtin handler's yaml file without error check."""
    # Prefer a runtime-specific include file when one exists for this handler.
    if runtime:
        candidate = os.path.join(
            _handler_dir, builtin_name, INCLUDE_FILENAME_TEMPLATE % runtime
        )
        if os.path.exists(candidate):
            return candidate
    return os.path.join(_handler_dir, builtin_name, DEFAULT_INCLUDE_FILENAME)
def get_yaml_path(builtin_name, runtime=""):
    """Returns the full path to a yaml file by giving the builtin module's name.

    Args:
      builtin_name: single word name of builtin handler
      runtime: name of the runtime

    Raises:
      InvalidBuiltinName: if handler does not exist in expected directory

    Returns:
      the absolute path to a valid builtin handler include.yaml file
    """
    # Lazily initialize module state on first use.
    if _handler_dir is None:
        set_builtins_dir(DEFAULT_DIR)
    available_builtins = set(_available_builtins)
    # if runtime == 'python27':
    #     available_builtins = available_builtins - BUILTINS_NOT_AVAIABLE_IN_PYTHON27
    if builtin_name in available_builtins:
        return _get_yaml_path(builtin_name, runtime)
    raise InvalidBuiltinName(
        "%s is not the name of a valid builtin.\n"
        "Available handlers are: %s"
        % (builtin_name, ", ".join(sorted(available_builtins)))
    )
def get_yaml_basepath():
    """Returns the full path of the directory in which builtins are located."""
    # Lazily initialize module state on first use.
    if _handler_dir is None:
        set_builtins_dir(DEFAULT_DIR)
    return _handler_dir
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple, schema-based database abstraction layer for the datastore.
Modeled after Django's abstraction layer on top of SQL databases,
http://www.djangoproject.com/documentation/mode_api/. Ours is a little simpler
and a lot less code because the datastore is so much simpler than SQL
databases.
The programming model is to declare Python subclasses of the Model class,
declaring datastore properties as class members of that class. So if you want to
publish a story with title, body, and created date, you would do it like this:
class Story(db.Model):
title = db.StringProperty()
body = db.TextProperty()
created = db.DateTimeProperty(auto_now_add=True)
You can create a new Story in the datastore with this usage pattern:
story = Story(title='My title')
story.body = 'My body'
story.put()
You query for Story entities using built in query interfaces that map directly
to the syntax and semantics of the datastore:
stories = Story.all().filter('date >=', yesterday).order('-date')
for story in stories:
print story.title
The Property declarations enforce types by performing validation on assignment.
For example, the DateTimeProperty enforces that you assign valid datetime
objects, and if you supply the "required" option for a property, you will not
be able to assign None to that property.
We also support references between models, so if a story has comments, you
would represent it like this:
class Comment(db.Model):
story = db.ReferenceProperty(Story)
body = db.TextProperty()
When you get a story out of the datastore, the story reference is resolved
automatically the first time it is referenced, which makes it easy to use
model instances without performing additional queries by hand:
comment = Comment.get(key)
print comment.story.title
Likewise, you can access the set of comments that refer to each story through
this property through a reverse reference called comment_set, which is a Query
preconfigured to return all matching comments:
story = Story.get(key)
for comment in story.comment_set:
print comment.body
"""
import copy
import datetime
import logging
import re
import time
import urllib.parse
import warnings
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api import namespace_manager
from google.appengine.api import users
from google.appengine.datastore import datastore_rpc
from google.appengine.datastore import datastore_query
Error = datastore_errors.Error
BadValueError = datastore_errors.BadValueError
BadPropertyError = datastore_errors.BadPropertyError
BadRequestError = datastore_errors.BadRequestError
EntityNotFoundError = datastore_errors.EntityNotFoundError
BadArgumentError = datastore_errors.BadArgumentError
QueryNotFoundError = datastore_errors.QueryNotFoundError
TransactionNotFoundError = datastore_errors.TransactionNotFoundError
Rollback = datastore_errors.Rollback
TransactionFailedError = datastore_errors.TransactionFailedError
BadFilterError = datastore_errors.BadFilterError
BadQueryError = datastore_errors.BadQueryError
BadKeyError = datastore_errors.BadKeyError
InternalError = datastore_errors.InternalError
NeedIndexError = datastore_errors.NeedIndexError
ReferencePropertyResolveError = datastore_errors.ReferencePropertyResolveError
Timeout = datastore_errors.Timeout
CommittedButStillApplying = datastore_errors.CommittedButStillApplying
ValidationError = BadValueError
Key = datastore_types.Key
Category = datastore_types.Category
Link = datastore_types.Link
Email = datastore_types.Email
GeoPt = datastore_types.GeoPt
IM = datastore_types.IM
PhoneNumber = datastore_types.PhoneNumber
PostalAddress = datastore_types.PostalAddress
Rating = datastore_types.Rating
Text = datastore_types.Text
Blob = datastore_types.Blob
ByteString = datastore_types.ByteString
BlobKey = datastore_types.BlobKey
READ_CAPABILITY = datastore.READ_CAPABILITY
WRITE_CAPABILITY = datastore.WRITE_CAPABILITY
STRONG_CONSISTENCY = datastore.STRONG_CONSISTENCY
EVENTUAL_CONSISTENCY = datastore.EVENTUAL_CONSISTENCY
NESTED = datastore_rpc.TransactionOptions.NESTED
MANDATORY = datastore_rpc.TransactionOptions.MANDATORY
ALLOWED = datastore_rpc.TransactionOptions.ALLOWED
INDEPENDENT = datastore_rpc.TransactionOptions.INDEPENDENT
KEY_RANGE_EMPTY = "Empty"
"""Indicates the given key range is empty and the datastore's
automatic ID allocator will not assign keys in this range to new
entities.
"""
KEY_RANGE_CONTENTION = "Contention"
"""Indicates the given key range is empty but the datastore's
automatic ID allocator may assign new entities keys in this range.
However it is safe to manually assign keys in this range
if either of the following is true:
- No other request will insert entities with the same kind and parent
as the given key range until all entities with manually assigned
keys from this range have been written.
- Overwriting entities written by other requests with the same kind
and parent as the given key range is acceptable.
The datastore's automatic ID allocator will not assign a key to a new
entity that will overwrite an existing entity, so once the range is
populated there will no longer be any contention.
"""
KEY_RANGE_COLLISION = "Collision"
"""Indicates that entities with keys inside the given key range
already exist and writing to this range will overwrite those entities.
Additionally the implications of KEY_RANGE_COLLISION apply. If
overwriting entities that exist in this range is acceptable it is safe
to use the given range.
The datastore's automatic ID allocator will never assign a key to
a new entity that will overwrite an existing entity so entities
written by the user to this range will never be overwritten by
an entity with an automatically assigned key.
"""
_kind_map = {}
_SELF_REFERENCE = object()
_RESERVED_WORDS = set(["key_name"])
# Module exception hierarchy.  Error, BadValueError and BadKeyError are
# assumed to be defined/imported earlier in this file -- TODO confirm.
class NotSavedError(Error):
    """Raised when a saved-object action is performed on a non-saved object."""
class KindError(BadValueError):
    """Raised when an entity is used with incorrect Model."""
class PropertyError(Error):
    """Raised when non-existent property is referenced."""
class DuplicatePropertyError(Error):
    """Raised when a property is duplicated in a model definition."""
class ConfigurationError(Error):
    """Raised when a property or model is improperly configured."""
class ReservedWordError(Error):
    """Raised when a property is defined for a reserved word."""
class DerivedPropertyError(Error):
    """Raised when attempting to assign a value to a derived property."""
# Python types that may be stored in a (non-Expando) Model property.
# NOTE: the original Python 2 source listed basestring/str/unicode and
# int/long as separate entries; after the 2to3 conversion those all collapse
# to str and int, so each type is listed exactly once here (duplicate set
# entries were dead weight).
_ALLOWED_PROPERTY_TYPES = set(
    [
        str,
        bool,
        int,
        float,
        Key,
        datetime.datetime,
        datetime.date,
        datetime.time,
        Blob,
        datastore_types.EmbeddedEntity,
        ByteString,
        Text,
        users.User,
        Category,
        Link,
        Email,
        GeoPt,
        IM,
        PhoneNumber,
        PostalAddress,
        Rating,
        BlobKey,
    ]
)
# Expando models additionally accept list/tuple values and None.
_ALLOWED_EXPANDO_PROPERTY_TYPES = set(_ALLOWED_PROPERTY_TYPES)
_ALLOWED_EXPANDO_PROPERTY_TYPES.update((list, tuple, type(None)))
# Comparison operators recognized in query filter strings ("prop >", ...).
_OPERATORS = ["<", "<=", ">", ">=", "=", "==", "!=", "in"]
# Parses "<property> [<operator>]" filter strings.  Raw string literal fixes
# the invalid-escape-sequence DeprecationWarning ("\s") the old plain string
# raised under Python 3.
_FILTER_REGEX = re.compile(
    r"^\s*([^\s]+)(\s+(%s)\s*)?$" % "|".join(_OPERATORS), re.IGNORECASE | re.UNICODE
)
def class_for_kind(kind):
    """Return the model class responsible for implementing a kind.

    Args:
      kind: Entity kind string.

    Returns:
      Class implementation for kind.

    Raises:
      KindError when there is no implementation for kind.
    """
    if kind not in _kind_map:
        raise KindError("No implementation for kind '%s'" % kind)
    return _kind_map[kind]
def check_reserved_word(attr_name):
    """Raise an exception if attribute name is a reserved word.

    Args:
      attr_name: Name to check to see if it is a reserved word.

    Raises:
      ReservedWordError when attr_name is determined to be a reserved word.
    """
    # Dunder-style names ('__foo__') are reserved by the datastore itself.
    if datastore_types.RESERVED_PROPERTY_NAME.match(attr_name):
        raise ReservedWordError(
            "Cannot define property. All names both beginning and "
            "ending with '__' are reserved."
        )
    # Names used by the Model API (or explicitly blacklisted) would shadow
    # real attributes, so they are rejected as well.
    is_reserved = attr_name in _RESERVED_WORDS or attr_name in dir(Model)
    if is_reserved:
        raise ReservedWordError(
            "Cannot define property using reserved word '%(attr_name)s'. "
            "If you would like to use this name in the datastore consider "
            "using a different name like %(attr_name)s_ and adding "
            "name='%(attr_name)s' to the parameter list of the property "
            "definition." % locals()
        )
def query_descendants(model_instance):
    """Returns a query for all the descendants of a model instance.

    Args:
      model_instance: Model instance to find the descendants of.

    Returns:
      Query that will retrieve all entities that have the given model instance
      as an ancestor. Unlike normal ancestor queries, this does not include the
      ancestor itself.
    """
    descendants_query = Query().ancestor(model_instance)
    # Strictly-greater-than filter on __key__ excludes the ancestor itself.
    key_property = datastore_types.KEY_SPECIAL_PROPERTY
    descendants_query.filter(key_property + " >", model_instance.key())
    return descendants_query
def model_to_protobuf(model_instance, _entity_class=datastore.Entity):
    """Encodes a model instance as a protocol buffer.

    Args:
      model_instance: Model instance to encode.

    Returns:
      entity_pb.EntityProto representation of the model instance
    """
    entity = model_instance._populate_entity(_entity_class)
    return entity.ToPb()
def model_from_protobuf(pb, _entity_class=datastore.Entity):
    """Decodes a model instance from a protocol buffer.

    Args:
      pb: The protocol buffer representation of the model instance. Can be an
        entity_pb.EntityProto or str encoding of an entity_bp.EntityProto

    Returns:
      Model instance resulting from decoding the protocol buffer
    """
    entity = _entity_class.FromPb(pb, default_kind=Expando.kind())
    model_class = class_for_kind(entity.kind())
    return model_class.from_entity(entity)
def model_is_projection(model_instance):
    """Returns True if the given db.Model instance only contains a projection
    of the full entity.

    Args:
      model_instance: Model instance to inspect.

    Returns:
      bool: True only when the instance has a backing entity and that entity
      is a projection.  (The previous implementation leaked the internal
      entity object -- or None -- as the return value; callers now always
      receive a strict bool, with identical truthiness.)
    """
    return bool(model_instance._entity and model_instance._entity.is_projection())
def _initialize_properties(model_class, name, bases, dct):
    """Initialize Property attributes for Model-class.

    Collects Property descriptors inherited from all bases plus those defined
    directly in dct, storing them in model_class._properties and derived
    frozensets used at entity-population time.

    Args:
      model_class: Model class to initialize properties for.
      name: Name of the new class (unused here; kept for metaclass symmetry).
      bases: Base classes scanned for inherited _properties.
      dct: Class dictionary scanned for Property instances.

    Raises:
      DuplicatePropertyError if a property is defined twice in the new class,
      or inherited separately from two distinct base classes.
    """
    model_class._properties = {}
    # Maps property name -> the base class it was (first) inherited from.
    property_source = {}
    def get_attr_source(name, cls):
        # Walk the MRO to find the class that actually defines `name`;
        # implicitly returns None when no class in the MRO defines it.
        for src_cls in cls.mro():
            if name in src_cls.__dict__:
                return src_cls
    defined = set()
    for base in bases:
        if hasattr(base, "_properties"):
            property_keys = set(base._properties.keys())
            duplicate_property_keys = defined & property_keys
            for dupe_prop_name in duplicate_property_keys:
                # Resolve the true defining class on both sides; identical
                # sources mean diamond inheritance of one definition, which
                # is allowed.  The resolved source is cached back into
                # property_source.
                old_source = property_source[dupe_prop_name] = get_attr_source(
                    dupe_prop_name, property_source[dupe_prop_name]
                )
                new_source = get_attr_source(dupe_prop_name, base)
                if old_source != new_source:
                    raise DuplicatePropertyError(
                        "Duplicate property, %s, is inherited from both %s and %s."
                        % (dupe_prop_name, old_source.__name__, new_source.__name__)
                    )
            # Only record the names not already claimed by an earlier base.
            property_keys -= duplicate_property_keys
            if property_keys:
                defined |= property_keys
                property_source.update(dict.fromkeys(property_keys, base))
                model_class._properties.update(base._properties)
    # Properties defined directly on the new class; these may not collide
    # with inherited ones or with reserved words.
    for attr_name in list(dct.keys()):
        attr = dct[attr_name]
        if isinstance(attr, Property):
            check_reserved_word(attr_name)
            if attr_name in defined:
                raise DuplicatePropertyError("Duplicate property: %s" % attr_name)
            defined.add(attr_name)
            model_class._properties[attr_name] = attr
            # Tell the descriptor which class/attribute it now belongs to.
            attr.__property_config__(model_class, attr_name)
    # Note: these sets use the datastore storage name (prop.name), which may
    # differ from the Python attribute name.
    model_class._all_properties = frozenset(
        prop.name for name, prop in list(model_class._properties.items())
    )
    model_class._unindexed_properties = frozenset(
        prop.name
        for name, prop in list(model_class._properties.items())
        if not prop.indexed
    )
def _coerce_to_key(value):
    """Returns the value's key.

    Args:
      value: a Model or Key instance or string encoded key or None

    Returns:
      The corresponding key, or None if value is None.
    """
    if value is None:
        return None
    # NormalizeAndTypeCheck always yields a sequence; reject more than one.
    normalized, _ = datastore.NormalizeAndTypeCheck(value, (Model, Key, str))
    if len(normalized) > 1:
        raise datastore_errors.BadArgumentError("Expected only one model or key")
    candidate = normalized[0]
    if isinstance(candidate, Model):
        return candidate.key()
    if isinstance(candidate, str):
        return Key(candidate)
    return candidate
class PropertiedClass(type):
    """Meta-class for initializing Model classes properties.

    Used for initializing Properties defined in the context of a model.
    By using a meta-class much of the configuration of a Property
    descriptor becomes implicit. By using this meta-class, descriptors
    that are of class Model are notified about which class they
    belong to and what attribute they are associated with and can
    do appropriate initialization via __property_config__.
    Duplicate properties are not permitted.
    """
    def __init__(cls, name, bases, dct, map_kind=True):
        """Initializes a class that might have property definitions.

        This method is called when a class is created with the PropertiedClass
        meta-class.
        Loads all properties for this model and its base classes in to a
        dictionary for easy reflection via the 'properties' method.
        Configures each property defined in the new class.
        Duplicate properties, either defined in the new class or defined
        separately in two base classes are not permitted.
        Properties may not assigned to names which are in the list of
        _RESERVED_WORDS. It is still possible to store a property using a
        reserved word in the datastore by using the 'name' keyword argument
        to the Property constructor.

        Args:
          cls: Class being initialized.
          name: Name of new class.
          bases: Base classes of new class.
          dct: Dictionary of new definitions for class.
          map_kind: When True (default), register the class in the global
            _kind_map so class_for_kind()/model_from_protobuf() can find it.

        Raises:
          DuplicatePropertyError when a property is duplicated either in the
          new class or separately in two base classes.
          ReservedWordError when a property is given a name that is in the
          list of reserved words, attributes of Model and names of the form
          '__.*__'.
        """
        super(PropertiedClass, cls).__init__(name, bases, dct)
        _initialize_properties(cls, name, bases, dct)
        if map_kind:
            # Register the kind so entities can be mapped back to this class.
            _kind_map[cls.kind()] = cls
# Sentinel returned by Property.get_updated_value_for_datastore() when a
# property does not auto-update its value on put().
AUTO_UPDATE_UNCHANGED = object()
class Property(object):
    """A Property is an attribute of a Model.

    It defines the type of the attribute, which determines how it is stored
    in the datastore and how the property values are validated. Different
    property types support different options, which change validation rules,
    default values, etc. The simplest example of a property is a
    StringProperty:
      class Story(db.Model):
        title = db.StringProperty()
    """
    # Global counter used to preserve the order in which properties were
    # declared on a model class; each instance snapshots then increments it.
    creation_counter = 0
    def __init__(
        self,
        verbose_name=None,
        name=None,
        default=None,
        required=False,
        validator=None,
        choices=None,
        indexed=True,
    ):
        """Initializes this Property with the given options.

        Args:
          verbose_name: User friendly name of property.
          name: Storage name for property. By default, uses attribute name
            as it is assigned in the Model sub-class.
          default: Default value for property if none is assigned.
          required: Whether property is required.
          validator: User provided method used for validation.
          choices: User provided set of valid property values.
          indexed: Whether property is indexed.
        """
        self.verbose_name = verbose_name
        self.name = name
        self.default = default
        self.required = required
        self.validator = validator
        self.choices = choices
        self.indexed = indexed
        self.creation_counter = Property.creation_counter
        Property.creation_counter += 1
    def __property_config__(self, model_class, property_name):
        """Configure property, connecting it to its model.

        Configure the property so that it knows its property name and what
        class it belongs to.

        Args:
          model_class: Model class which Property will belong to.
          property_name: Name of property within Model instance to store
            property values in. By default this will be the property name
            preceded by an underscore, but may change for different
            subclasses.
        """
        self.model_class = model_class
        # Only fill in the storage name if the user did not override it.
        if self.name is None:
            self.name = property_name
    def __get__(self, model_instance, model_class):
        """Returns the value for this property on the given model instance.

        See http://docs.python.org/ref/descriptors.html for a description of
        the arguments to this class and what they mean."""
        # Accessed on the class itself: return the descriptor, not a value.
        if model_instance is None:
            return self
        try:
            return getattr(model_instance, self._attr_name())
        except AttributeError:
            # Value never assigned on this instance (e.g. projection query).
            return None
    def __set__(self, model_instance, value):
        """Sets the value for this property on the given model instance.

        See http://docs.python.org/ref/descriptors.html for a description of
        the arguments to this class and what they mean.
        """
        # Validation runs on every assignment, not just at put() time.
        value = self.validate(value)
        setattr(model_instance, self._attr_name(), value)
    def default_value(self):
        """Default value for unassigned values.

        Returns:
          Default value as provided by __init__(default).
        """
        return self.default
    def validate(self, value):
        """Assert that provided value is compatible with this property.

        Args:
          value: Value to validate against this Property.

        Returns:
          A valid value, either the input unchanged or adapted to the
          required type.

        Raises:
          BadValueError if the value is not appropriate for this
          property in any way.
        """
        if self.empty(value):
            if self.required:
                raise BadValueError("Property %s is required" % self.name)
        else:
            if self.choices:
                if value not in self.choices:
                    raise BadValueError(
                        "Property %s is %r; must be one of %r"
                        % (self.name, value, self.choices)
                    )
        # The user validator runs for both empty and non-empty values.
        if self.validator is not None:
            self.validator(value)
        return value
    def empty(self, value):
        """Determine if value is empty in the context of this property.

        For most kinds, this is equivalent to "not value", but for kinds like
        bool, the test is more subtle, so subclasses can override this method
        if necessary.

        Args:
          value: Value to validate against this Property.

        Returns:
          True if this value is considered empty in the context of this
          Property type, otherwise False.
        """
        return not value
    def get_value_for_datastore(self, model_instance):
        """Datastore representation of this property.

        Looks for this property in the given model instance, and returns the
        proper datastore representation of the value that can be stored in a
        datastore entity. Most critically, it will fetch the datastore key
        value for reference properties.
        Some properties (e.g. DateTimeProperty, UserProperty) optionally
        update their value on every put(). This call must return the current
        value for such properties (get_updated_value_for_datastore returns
        the new value).

        Args:
          model_instance: Instance to fetch datastore value from.

        Returns:
          Datastore representation of the model value in a form that is
          appropriate for storing in the datastore.
        """
        return self.__get__(model_instance, model_instance.__class__)
    def get_updated_value_for_datastore(self, model_instance):
        """Determine new value for auto-updated property.

        Some properties (e.g. DateTimeProperty, UserProperty) optionally
        update their value on every put(). This call must return the new
        desired value for such properties. For all other properties, this
        call must return AUTO_UPDATE_UNCHANGED.

        Args:
          model_instance: Instance to get new value for.

        Returns:
          Datastore representation of the new model value in a form that is
          appropriate for storing in the datastore, or AUTO_UPDATE_UNCHANGED.
        """
        return AUTO_UPDATE_UNCHANGED
    def make_value_from_datastore_index_value(self, index_value):
        # Index values are stored in a normalized form; restore the declared
        # data type first, then apply the normal datastore->model conversion.
        value = datastore_types.RestoreFromIndexValue(index_value, self.data_type)
        return self.make_value_from_datastore(value)
    def make_value_from_datastore(self, value):
        """Native representation of this property.

        Given a value retrieved from a datastore entity, return a value,
        possibly converted, to be stored on the model instance. Usually
        this returns the value unchanged, but a property class may
        override this when it uses a different datatype on the model
        instance than on the entity.
        This API is not quite symmetric with get_value_for_datastore(),
        because the model instance on which to store the converted value
        may not exist yet -- we may be collecting values to be passed to a
        model constructor.

        Args:
          value: value retrieved from the datastore entity.

        Returns:
          The value converted for use as a model instance attribute.
        """
        return value
    def _require_parameter(self, kwds, parameter, value):
        """Sets kwds[parameter] to value.

        If kwds[parameter] exists and is not value, raises
        ConfigurationError.

        Args:
          kwds: The parameter dict, which maps parameter names (strings) to
            values.
          parameter: The name of the parameter to set.
          value: The value to set it to.
        """
        if parameter in kwds and kwds[parameter] != value:
            raise ConfigurationError("%s must be %s." % (parameter, value))
        kwds[parameter] = value
    def _attr_name(self):
        """Attribute name we use for this property in model instances.

        DO NOT USE THIS METHOD.
        """
        return "_" + self.name
    # Python type stored by this property; subclasses override this.
    data_type = str
    def datastore_type(self):
        """Deprecated backwards-compatible accessor method for self.data_type."""
        return self.data_type
class Index(datastore._BaseIndex):
    """A datastore index."""
    # Re-expose the protected _BaseIndex accessors under public names.
    id = datastore._BaseIndex._Id
    kind = datastore._BaseIndex._Kind
    has_ancestor = datastore._BaseIndex._HasAncestor
    properties = datastore._BaseIndex._Properties
class Model(object, metaclass=PropertiedClass):
"""Model is the superclass of all object entities in the datastore.
The programming model is to declare Python subclasses of the Model class,
declaring datastore properties as class members of that class. So if you want
to publish a story with title, body, and created date, you would do it like
this:
class Story(db.Model):
title = db.StringProperty()
body = db.TextProperty()
created = db.DateTimeProperty(auto_now_add=True)
A model instance can have a single parent. Model instances without any
parent are root entities. It is possible to efficiently query for
instances by their shared parent. All descendents of a single root
instance also behave as a transaction group. This means that when you
work one member of the group within a transaction all descendents of that
root join the transaction. All operations within a transaction on this
group are ACID.
"""
def __new__(*args, **unused_kwds):
"""Allow subclasses to call __new__() with arguments.
Do NOT list 'cls' as the first argument, or in the case when
the 'unused_kwds' dictionary contains the key 'cls', the function
will complain about multiple argument values for 'cls'.
Raises:
TypeError if there are no positional arguments.
"""
if args:
cls = args[0]
else:
raise TypeError("object.__new__(): not enough arguments")
return super(Model, cls).__new__(cls)
def __init__(
self, parent=None, key_name=None, _app=None, _from_entity=False, **kwds
):
"""Creates a new instance of this model.
To create a new entity, you instantiate a model and then call put(),
which saves the entity to the datastore:
person = Person()
person.name = 'Bret'
person.put()
You can initialize properties in the model in the constructor with keyword
arguments:
person = Person(name='Bret')
We initialize all other properties to the default value (as defined by the
properties in the model definition) if they are not provided in the
constructor.
Args:
parent: Parent instance for this instance or None, indicating a top-
level instance.
key_name: Name for new model instance.
_from_entity: Intentionally undocumented.
kwds: Keyword arguments mapping to properties of model. Also:
key: Key instance for this instance, if provided makes parent and
key_name redundant (they do not need to be set but if they are
they must match the key).
"""
namespace = None
if isinstance(_app, tuple):
if len(_app) != 2:
raise BadArgumentError("_app must have 2 values if type is tuple.")
_app, namespace = _app
key = kwds.get("key", None)
if key is not None:
if isinstance(key, (tuple, list)):
key = Key.from_path(*key)
if isinstance(key, str):
key = Key(encoded=key)
if not isinstance(key, Key):
raise TypeError(
"Expected Key type; received %s (is %s)"
% (key, key.__class__.__name__)
)
if not key.has_id_or_name():
raise BadKeyError("Key must have an id or name")
if key.kind() != self.kind():
raise BadKeyError(
"Expected Key kind to be %s; received %s"
% (self.kind(), key.kind())
)
if _app is not None and key.app() != _app:
raise BadKeyError(
"Expected Key app to be %s; received %s" % (_app, key.app())
)
if namespace is not None and key.namespace() != namespace:
raise BadKeyError(
"Expected Key namespace to be %s; received %s"
% (namespace, key.namespace())
)
if key_name and key_name != key.name():
raise BadArgumentError(
"Cannot use key and key_name at the same time"
" with different values"
)
if parent and parent != key.parent():
raise BadArgumentError(
"Cannot use key and parent at the same time"
" with different values"
)
namespace = key.namespace()
self._key = key
self._key_name = None
self._parent = None
self._parent_key = None
else:
if key_name == "":
raise BadKeyError("Name cannot be empty.")
elif key_name is not None and not isinstance(key_name, str):
raise BadKeyError(
"Name must be string type, not %s" % key_name.__class__.__name__
)
if parent is not None:
if not isinstance(parent, (Model, Key)):
raise TypeError(
"Expected Model type; received %s (is %s)"
% (parent, parent.__class__.__name__)
)
if isinstance(parent, Model) and not parent.has_key():
raise BadValueError(
"%s instance must have a complete key before it can be used as a "
"parent." % parent.kind()
)
if isinstance(parent, Key):
self._parent_key = parent
self._parent = None
else:
self._parent_key = parent.key()
self._parent = parent
else:
self._parent_key = None
self._parent = None
self._key_name = key_name
self._key = None
if self._parent_key is not None:
if namespace is not None and self._parent_key.namespace() != namespace:
raise BadArgumentError(
"Expected parent namespace to be %r; received %r"
% (namespace, self._parent_key.namespace())
)
namespace = self._parent_key.namespace()
self._entity = None
if _app is not None and isinstance(_app, Key):
raise BadArgumentError(
"_app should be a string; received Key('%s'):\n"
" This may be the result of passing 'key' as "
"a positional parameter in SDK 1.2.6. Please "
"only pass 'key' as a keyword parameter." % _app
)
if namespace is None:
namespace = namespace_manager.get_namespace()
self._app = _app
self.__namespace = namespace
is_projection = False
if isinstance(_from_entity, datastore.Entity) and _from_entity.is_saved():
self._entity = _from_entity
is_projection = _from_entity.is_projection()
del self._key_name
del self._key
for prop in list(self.properties().values()):
if prop.name in kwds:
value = kwds[prop.name]
elif is_projection:
continue
else:
value = prop.default_value()
try:
prop.__set__(self, value)
except DerivedPropertyError:
if prop.name in kwds and not _from_entity:
raise
def key(self):
"""Unique key for this entity.
This property is only available if this entity is already stored in the
datastore or if it has a full key, so it is available if this entity was
fetched returned from a query, or after put() is called the first time
for new entities, or if a complete key was given when constructed.
Returns:
Datastore key of persisted entity.
Raises:
NotSavedError when entity is not persistent.
"""
if self.is_saved():
return self._entity.key()
elif self._key:
return self._key
elif self._key_name:
parent = self._parent_key or (self._parent and self._parent.key())
self._key = Key.from_path(
self.kind(),
self._key_name,
parent=parent,
_app=self._app,
namespace=self.__namespace,
)
return self._key
else:
raise NotSavedError()
def __set_property(self, entity, name, datastore_value):
if datastore_value == []:
entity.pop(name, None)
else:
entity[name] = datastore_value
def _to_entity(self, entity):
"""Copies information from this model to provided entity.
Args:
entity: Entity to save information on.
"""
for prop in list(self.properties().values()):
self.__set_property(entity, prop.name, prop.get_value_for_datastore(self))
set_unindexed_properties = getattr(entity, "set_unindexed_properties", None)
if set_unindexed_properties:
set_unindexed_properties(self._unindexed_properties)
def _populate_internal_entity(self, _entity_class=datastore.Entity):
"""Populates self._entity, saving its state to the datastore.
After this method is called, calling is_saved() will return True.
Returns:
Populated self._entity
"""
self._entity = self._populate_entity(_entity_class=_entity_class)
for prop in list(self.properties().values()):
new_value = prop.get_updated_value_for_datastore(self)
if new_value is not AUTO_UPDATE_UNCHANGED:
self.__set_property(self._entity, prop.name, new_value)
for attr in ("_key_name", "_key"):
try:
delattr(self, attr)
except AttributeError:
pass
return self._entity
def put(self, **kwargs):
"""Writes this model instance to the datastore.
If this instance is new, we add an entity to the datastore.
Otherwise, we update this instance, and the key will remain the
same.
Args:
config: datastore_rpc.Configuration to use for this request.
Returns:
The key of the instance (either the existing key or a new key).
Raises:
TransactionFailedError if the data could not be committed.
"""
self._populate_internal_entity()
return datastore.Put(self._entity, **kwargs)
save = put
def _populate_entity(self, _entity_class=datastore.Entity):
"""Internal helper -- Populate self._entity or create a new one
if that one does not exist. Does not change any state of the instance
other than the internal state of the entity.
This method is separate from _populate_internal_entity so that it is
possible to call to_xml without changing the state of an unsaved entity
to saved.
Returns:
self._entity or a new Entity which is not stored on the instance.
"""
if self.is_saved():
entity = self._entity
else:
kwds = {
"_app": self._app,
"namespace": self.__namespace,
"unindexed_properties": self._unindexed_properties,
}
if self._key is not None:
if self._key.id():
kwds["id"] = self._key.id()
else:
kwds["name"] = self._key.name()
if self._key.parent():
kwds["parent"] = self._key.parent()
else:
if self._key_name is not None:
kwds["name"] = self._key_name
if self._parent_key is not None:
kwds["parent"] = self._parent_key
elif self._parent is not None:
kwds["parent"] = self._parent._entity
entity = _entity_class(self.kind(), **kwds)
self._to_entity(entity)
return entity
def delete(self, **kwargs):
"""Deletes this entity from the datastore.
Args:
config: datastore_rpc.Configuration to use for this request.
Raises:
TransactionFailedError if the data could not be committed.
"""
datastore.Delete(self.key(), **kwargs)
self._key = self.key()
self._key_name = None
self._parent_key = None
self._entity = None
def is_saved(self):
"""Determine if entity is persisted in the datastore.
New instances of Model do not start out saved in the data. Objects which
are saved to or loaded from the Datastore will have a True saved state.
Returns:
True if object has been persisted to the datastore, otherwise False.
"""
return self._entity is not None
def has_key(self):
"""Determine if this model instance has a complete key.
When not using a fully self-assigned Key, ids are not assigned until the
data is saved to the Datastore, but instances with a key name always have
a full key.
Returns:
True if the object has been persisted to the datastore or has a key
or has a key_name, otherwise False.
"""
return self.is_saved() or self._key or self._key_name
def dynamic_properties(self):
"""Returns a list of all dynamic properties defined for instance."""
return []
def instance_properties(self):
"""Alias for dyanmic_properties."""
return self.dynamic_properties()
def parent(self):
"""Get the parent of the model instance.
Returns:
Parent of contained entity or parent provided in constructor, None if
instance has no parent.
"""
if self._parent is None:
parent_key = self.parent_key()
if parent_key is not None:
self._parent = get(parent_key)
return self._parent
def parent_key(self):
"""Get the parent's key.
This method is useful for avoiding a potential fetch from the datastore
but still get information about the instances parent.
Returns:
Parent key of entity, None if there is no parent.
"""
if self._parent_key is not None:
return self._parent_key
elif self._parent is not None:
return self._parent.key()
elif self._entity is not None:
return self._entity.parent()
elif self._key is not None:
return self._key.parent()
else:
return None
def to_xml(self, _entity_class=datastore.Entity):
"""Generate an XML representation of this model instance.
atom and gd:namespace properties are converted to XML according to their
respective schemas. For more information, see:
http://www.atomenabled.org/developers/syndication/
http://code.google.com/apis/gdata/common-elements.html
"""
entity = self._populate_entity(_entity_class)
return entity.ToXml()
@classmethod
def get(cls, keys, **kwargs):
"""Fetch instance from the datastore of a specific Model type using key.
We support Key objects and string keys (we convert them to Key objects
automatically).
Useful for ensuring that specific instance types are retrieved from the
datastore. It also helps that the source code clearly indicates what
kind of object is being retreived. Example:
story = Story.get(story_key)
Args:
keys: Key within datastore entity collection to find; or string key;
or list of Keys or string keys.
config: datastore_rpc.Configuration to use for this request.
Returns:
If a single key was given: a Model instance associated with key
for the provided class if it exists in the datastore, otherwise
None. If a list of keys was given: a list where list[i] is the
Model instance for keys[i], or None if no instance exists.
Raises:
KindError if any of the retreived objects are not instances of the
type associated with call to 'get'.
"""
results = get(keys, **kwargs)
if results is None:
return None
if isinstance(results, Model):
instances = [results]
else:
instances = results
for instance in instances:
if not (instance is None or isinstance(instance, cls)):
raise KindError(
"Kind %r is not a subclass of kind %r"
% (instance.kind(), cls.kind())
)
return results
@classmethod
def get_by_key_name(cls, key_names, parent=None, **kwargs):
"""Get instance of Model class by its key's name.
Args:
key_names: A single key-name or a list of key-names.
parent: Parent of instances to get. Can be a model or key.
config: datastore_rpc.Configuration to use for this request.
"""
try:
parent = _coerce_to_key(parent)
except BadKeyError as e:
raise BadArgumentError(str(e))
key_names, multiple = datastore.NormalizeAndTypeCheck(key_names, str)
keys = [
datastore.Key.from_path(cls.kind(), name, parent=parent)
for name in key_names
]
if multiple:
return get(keys, **kwargs)
else:
return get(keys[0], **kwargs)
@classmethod
def get_by_id(cls, ids, parent=None, **kwargs):
"""Get instance of Model class by id.
Args:
key_names: A single id or a list of ids.
parent: Parent of instances to get. Can be a model or key.
config: datastore_rpc.Configuration to use for this request.
"""
if isinstance(parent, Model):
parent = parent.key()
ids, multiple = datastore.NormalizeAndTypeCheck(ids, (int, int))
keys = [datastore.Key.from_path(cls.kind(), id, parent=parent) for id in ids]
if multiple:
return get(keys, **kwargs)
else:
return get(keys[0], **kwargs)
@classmethod
def get_or_insert(cls, key_name, **kwds):
"""Transactionally retrieve or create an instance of Model class.
This acts much like the Python dictionary setdefault() method, where we
first try to retrieve a Model instance with the given key name and parent.
If it's not present, then we create a new instance (using the *kwds
supplied) and insert that with the supplied key name.
Subsequent calls to this method with the same key_name and parent will
always yield the same entity (though not the same actual object instance),
regardless of the *kwds supplied. If the specified entity has somehow
been deleted separately, then the next call will create a new entity and
return it.
If the 'parent' keyword argument is supplied, it must be a Model instance.
It will be used as the parent of the new instance of this Model class if
one is created.
This method is especially useful for having just one unique entity for
a specific identifier. Insertion/retrieval is done transactionally, which
guarantees uniqueness.
Example usage:
class WikiTopic(db.Model):
creation_date = db.DatetimeProperty(auto_now_add=True)
body = db.TextProperty(required=True)
# The first time through we'll create the new topic.
wiki_word = 'CommonIdioms'
topic = WikiTopic.get_or_insert(wiki_word,
body='This topic is totally new!')
assert topic.key().name() == 'CommonIdioms'
assert topic.body == 'This topic is totally new!'
# The second time through will just retrieve the entity.
overwrite_topic = WikiTopic.get_or_insert(wiki_word,
body='A totally different message!')
assert topic.key().name() == 'CommonIdioms'
assert topic.body == 'This topic is totally new!'
Args:
key_name: Key name to retrieve or create.
**kwds: Keyword arguments to pass to the constructor of the model class
if an instance for the specified key name does not already exist. If
an instance with the supplied key_name and parent already exists, the
rest of these arguments will be discarded.
Returns:
Existing instance of Model class with the specified key_name and parent
or a new one that has just been created.
Raises:
TransactionFailedError if the specified Model instance could not be
retrieved or created transactionally (due to high contention, etc).
"""
def txn():
entity = cls.get_by_key_name(key_name, parent=kwds.get("parent"))
if entity is None:
entity = cls(key_name=key_name, **kwds)
entity.put()
return entity
return run_in_transaction(txn)
@classmethod
def all(cls, **kwds):
"""Returns a query over all instances of this model from the datastore.
Returns:
Query that will retrieve all instances from entity collection.
"""
return Query(cls, **kwds)
@classmethod
def gql(cls, query_string, *args, **kwds):
"""Returns a query using GQL query string.
See appengine/ext/gql for more information about GQL.
Args:
query_string: properly formatted GQL query string with the
'SELECT * FROM <entity>' part omitted
*args: rest of the positional arguments used to bind numeric references
in the query.
**kwds: dictionary-based arguments (for named parameters).
"""
return GqlQuery(
"SELECT * FROM %s %s" % (cls.kind(), query_string), *args, **kwds
)
@classmethod
def _load_entity_values(cls, entity):
    """Extract declared-property values from a datastore entity.

    Args:
      entity: datastore.Entity containing the stored values.

    Returns:
      Dict mapping property names to model-level values for every
      declared property that is present on the entity.
    """
    values = {}
    for prop in list(cls.properties().values()):
        if prop.name not in entity:
            continue
        try:
            raw = entity[prop.name]
        except KeyError:
            # Name was reported present but the lookup failed; fall back
            # to an empty list, matching the projection edge case.
            values[prop.name] = []
        else:
            if entity.is_projection():
                values[prop.name] = prop.make_value_from_datastore_index_value(raw)
            else:
                values[prop.name] = prop.make_value_from_datastore(raw)
    return values
@classmethod
def from_entity(cls, entity):
    """Build a model instance from a low-level datastore.Entity.

    Args:
      entity: Entity loaded directly from the datastore.

    Returns:
      An instance of cls populated from the entity's values.

    Raises:
      KindError: when the entity's kind does not match this model class.
    """
    if entity.kind() != cls.kind():
        raise KindError(
            "Class %s cannot handle kind '%s'" % (repr(cls), entity.kind())
        )
    values = cls._load_entity_values(entity)
    # Only carry the key over when the entity has been assigned one.
    if entity.key().has_id_or_name():
        values["key"] = entity.key()
    return cls(None, _from_entity=entity, **values)
@classmethod
def kind(cls):
    """Return the datastore kind used for this model.

    The class name is used directly; potential collisions between
    equally-named models in different modules are not handled.
    """
    return cls.__name__
@classmethod
def entity_type(cls):
    """Deprecated alias for kind(); slated for removal."""
    return cls.kind()
@classmethod
def properties(cls):
    """Return a fresh name -> Property mapping of this model's declared properties."""
    return dict(cls._properties)
@classmethod
def fields(cls):
    """Deprecated alias for properties(); slated for removal."""
    return cls.properties()
def create_rpc(deadline=None, callback=None, read_policy=STRONG_CONSISTENCY):
    """Create an RPC object for use in configuring datastore calls.

    NOTE: This function exists for backwards compatibility; prefer
    create_config(). The newer API uses 'on_completion', a function taking
    an argument, whereas create_rpc uses 'callback', a function without
    arguments.

    Args:
      deadline: float, deadline for calls in seconds.
      callback: callable triggered when this rpc completes; receives the
        returned rpc.
      read_policy: set to EVENTUAL_CONSISTENCY to enable eventually
        consistent reads.

    Returns:
      A datastore.DatastoreRPC instance.
    """
    return datastore.CreateRPC(
        deadline=deadline,
        callback=callback,
        read_policy=read_policy,
    )
def get_async(keys, **kwargs):
    """Asynchronously fetch the specified Model instance(s).

    Identical to db.get() except that it returns an asynchronous object;
    call get_result() on the return value to block and obtain the models.
    """
    keys, multiple = datastore.NormalizeAndTypeCheckKeys(keys)

    def extra_hook(entities):
        # A single missing entity yields None rather than a 1-element list.
        if not multiple and not entities:
            return None
        models = [
            None
            if entity is None
            else class_for_kind(entity.kind()).from_entity(entity)
            for entity in entities
        ]
        if multiple:
            return models
        assert len(models) == 1
        return models[0]

    return datastore.GetAsync(keys, extra_hook=extra_hook, **kwargs)
def get(keys, **kwargs):
    """Fetch the Model instance(s) stored under the given key(s).

    Key objects and string-encoded keys are both accepted (strings are
    converted to Key objects automatically).

    Args:
      keys: Key, string key, or a list of Keys/string keys to fetch.
      config: datastore_rpc.Configuration to use for this request, must be
        specified as a keyword argument.

    Returns:
      For a single key: the matching Model instance, or None if no entity
      exists. For a list of keys: a list where element i is the instance
      for keys[i], or None where no entity exists.
    """
    rpc = get_async(keys, **kwargs)
    return rpc.get_result()
def put_async(models, **kwargs):
    """Asynchronously store one or more Model instances.

    Identical to db.put() except that it returns an asynchronous object;
    call get_result() on the return value to block and obtain the keys.
    """
    models, multiple = datastore.NormalizeAndTypeCheck(models, Model)
    entities = [m._populate_internal_entity() for m in models]

    def extra_hook(keys):
        # Mirror the input shape: scalar in, scalar key out.
        if not multiple:
            assert len(keys) == 1
            return keys[0]
        return keys

    return datastore.PutAsync(entities, extra_hook=extra_hook, **kwargs)
def put(models, **kwargs):
    """Store one or more Model instances.

    Args:
      models: Model instance or list of Model instances.
      config: datastore_rpc.Configuration to use for this request, must be
        specified as a keyword argument.

    Returns:
      A Key when models is a single instance; a list of Keys, in the same
      order as models, when models is a list.

    Raises:
      TransactionFailedError: when the data could not be committed.
    """
    rpc = put_async(models, **kwargs)
    return rpc.get_result()
# Legacy alias: db.save() is the historical name for db.put().
save = put
def delete_async(models, **kwargs):
    """Asynchronously delete one or more Model instances.

    Identical to db.delete() except that it returns an asynchronous
    object; call get_result() on the return value to block on the call.
    """
    # Scalars (single key, key string, or model) are wrapped in a list;
    # anything else is treated as an iterable when possible.
    if not isinstance(models, (str, Model, Key)):
        try:
            models = iter(models)
        except TypeError:
            models = [models]
    else:
        models = [models]
    keys = [_coerce_to_key(item) for item in models]
    return datastore.DeleteAsync(keys, **kwargs)
def delete(models, **kwargs):
    """Delete one or more Model instances.

    Args:
      models: Model instance, key, key string, or iterable thereof.
      config: datastore_rpc.Configuration to use for this request, must be
        specified as a keyword argument.

    Raises:
      TransactionFailedError: when the data could not be committed.
    """
    rpc = delete_async(models, **kwargs)
    rpc.get_result()
def allocate_ids_async(model, size, **kwargs):
    """Asynchronously allocate a range of datastore IDs.

    Identical to allocate_ids() except that it returns an asynchronous
    object; call get_result() on the return value to block and obtain the
    allocated range.
    """
    template_key = _coerce_to_key(model)
    return datastore.AllocateIdsAsync(template_key, size=size, **kwargs)
def allocate_ids(model, size, **kwargs):
    """Allocate a range of IDs for the model key defined by model.

    The reserved IDs will never be auto-assigned to new entities. IDs can
    only be allocated for model keys belonging to your app. On error, a
    subclass of datastore_errors.Error is raised.

    Args:
      model: Model instance, Key or string serving as a template for the
        ID sequence to allocate from. Returned ids should only be used in
        entities with the same parent (if any) and kind as this key.
      size: Number of IDs to allocate.
      config: datastore_rpc.Configuration to use for this request.

    Returns:
      (start, end) of the allocated range, inclusive.
    """
    rpc = allocate_ids_async(model, size, **kwargs)
    return rpc.get_result()
def allocate_id_range(model, start, end, **kwargs):
    """Allocates a range of IDs with specific endpoints.

    Once these IDs have been allocated they may be provided manually to
    newly created entities.

    Since the datastore's automatic ID allocator will never assign
    a key to a new entity that will cause an existing entity to be
    overwritten, entities written to the given key range will never be
    overwritten. However, writing entities with manually assigned keys in
    this range may overwrite existing entities (or new entities written by
    a separate request) depending on the key range state returned.

    This method should only be used if you have an existing numeric id
    range that you want to reserve, e.g. bulk loading entities that
    already have IDs. If you don't care about which IDs you receive, use
    allocate_ids instead.

    Args:
      model: Model instance, Key or string to serve as a template
        specifying the ID sequence in which to allocate IDs. Allocated ids
        should only be used in entities with the same parent (if any) and
        kind as this key.
      start: first id of the range to allocate, inclusive.
      end: last id of the range to allocate, inclusive.
      config: datastore_rpc.Configuration to use for this request.

    Returns:
      One of (KEY_RANGE_EMPTY, KEY_RANGE_CONTENTION, KEY_RANGE_COLLISION).
      If not KEY_RANGE_EMPTY, this represents a potential issue with using
      the allocated key range.
    """
    key = _coerce_to_key(model)
    # Validate the endpoints are integers before doing any datastore work.
    datastore.NormalizeAndTypeCheck((start, end), (int, int))
    if start < 1 or end < 1:
        raise BadArgumentError("Start %d and end %d must both be > 0." % (start, end))
    if start > end:
        raise BadArgumentError(
            "Range end %d cannot be less than start %d." % (end, start)
        )
    # Reserve everything up to 'end'. safe_start is the first id the
    # allocator had not yet handed out before this call.
    safe_start, _ = datastore.AllocateIds(key, max=end, **kwargs)
    # If the allocator had already moved past 'start', ids inside our range
    # may have been given to concurrent requests (contention).
    race_condition = safe_start > start
    start_key = Key.from_path(
        key.kind(),
        start,
        parent=key.parent(),
        _app=key.app(),
        namespace=key.namespace(),
    )
    end_key = Key.from_path(
        key.kind(), end, parent=key.parent(), _app=key.app(), namespace=key.namespace()
    )
    # Probe for any existing entity inside [start, end]; one hit is enough
    # to report a collision.
    collision = (
        Query(keys_only=True, namespace=key.namespace(), _app=key.app())
        .filter("__key__ >=", start_key)
        .filter("__key__ <=", end_key)
        .fetch(1)
    )
    # Collision (existing entities) takes precedence over contention.
    if collision:
        return KEY_RANGE_COLLISION
    elif race_condition:
        return KEY_RANGE_CONTENTION
    else:
        return KEY_RANGE_EMPTY
def _index_converter(index):
    """Wrap a low-level datastore index object in a db.Index."""
    return Index(
        index.Id(),
        index.Kind(),
        index.HasAncestor(),
        index.Properties(),
    )
def get_indexes_async(**kwargs):
    """Asynchronously retrieve the application indexes and their states.

    Identical to get_indexes() except that it returns an asynchronous
    object; call get_result() on the return value to block and obtain the
    results.
    """

    def extra_hook(indexes):
        converted = []
        for index, state in indexes:
            converted.append((_index_converter(index), state))
        return converted

    return datastore.GetIndexesAsync(extra_hook=extra_hook, **kwargs)
def get_indexes(**kwargs):
    """Retrieve the application indexes and their states.

    Args:
      config: datastore_rpc.Configuration to use for this request, must be
        specified as a keyword argument.

    Returns:
      A list of (Index, Index.[BUILDING|SERVING|DELETING|ERROR]) tuples.
      An index can be in the following states:
        Index.BUILDING: being built, cannot serve queries yet
        Index.SERVING: ready to service queries
        Index.DELETING: being deleted
        Index.ERROR: encountered an error in the BUILDING state
    """
    rpc = get_indexes_async(**kwargs)
    return rpc.get_result()
class Expando(Model):
    """Dynamically expandable model.

    An Expando does not require (but can still benefit from) the definition
    of any properties before it can be used to store information in the
    datastore. Properties can be added to an expando object by simply
    performing an assignment. The assignment of properties is done on
    an instance by instance basis, so it is possible for one object of an
    expando type to have different properties from another or even the same
    properties with different types. It is still possible to define
    properties on an expando, allowing those properties to behave the same
    as on any other model.

    Example:
      import datetime

      class Song(db.Expando):
        title = db.StringProperty()

      crazy = Song(title='Crazy like a diamond',
                   author='Lucy Sky',
                   publish_date='yesterday',
                   rating=5.0)

      hoboken = Song(title='The man from Hoboken',
                     author=['Anthony', 'Lou'],
                     publish_date=datetime.datetime(1977, 5, 3))

      crazy.last_minute_note=db.Text('Get a train to the station.')

    Possible Uses:
      One use of an expando is to create an object without any specific
      structure and later, when your application matures and is in the
      right state, change it to a normal model object and define explicit
      properties.

    Additional exceptions for expando:
      Protected attributes (ones whose names begin with '_') cannot be used
      as dynamic properties. These are names that are reserved for
      protected transient (non-persisted) attributes.

    Order of lookup:
      When trying to set or access an attribute value, any other defined
      properties, such as methods and other values in __dict__ take
      precedence over values in the datastore.

      1 - Because it is not possible for the datastore to know what kind of
          property to store on an undefined expando value, setting a
          property to None is the same as deleting it from the expando.

      2 - Persistent variables on Expando must not begin with '_'. These
          variables are considered to be 'protected' in Python, and are
          used internally.

      3 - Expando's dynamic properties are not able to store empty lists.
          Attempting to assign an empty list to a dynamic property will
          raise ValueError. Static properties on Expando can still support
          empty lists but like normal Model properties are restricted from
          using None.
    """

    # Per-instance dict of dynamic property name -> value; None at class
    # level so instances can detect "not yet initialized".
    _dynamic_properties = None

    def __init__(self, parent=None, key_name=None, _app=None, **kwds):
        """Creates a new instance of this expando model.

        Args:
          parent: Parent instance for this instance or None, indicating a
            top-level instance.
          key_name: Name for new model instance.
          _app: Intentionally undocumented.
          **kwds: Keyword arguments mapping to properties of model.
        """
        super(Expando, self).__init__(parent, key_name, _app, **kwds)
        self._dynamic_properties = {}
        # Any constructor keyword that is not a declared property (or the
        # reserved 'key') becomes a dynamic property, unless a descriptor
        # with the same name exists on the class.
        for prop, value in kwds.items():
            if prop not in self._all_properties and prop != "key":
                if not (hasattr(getattr(type(self), prop, None), "__set__")):
                    setattr(self, prop, value)
                else:
                    check_reserved_word(prop)

    def __setattr__(self, key, value):
        """Dynamically set field values that are not defined.

        Tries to set the value on the object normally, but failing that
        sets the value on the contained entity.

        Args:
          key: Name of attribute.
          value: Value to set for attribute. Must be compatible with
            datastore.

        Raises:
          ValueError on attempt to assign empty list.
        """
        check_reserved_word(key)
        # Only names that are not protected ('_'-prefixed) and not shadowed
        # by a class descriptor are stored as dynamic properties.
        if key[:1] != "_" and not hasattr(getattr(type(self), key, None), "__set__"):
            if value == []:
                raise ValueError("Cannot store empty list to dynamic property %s" % key)
            if type(value) not in _ALLOWED_EXPANDO_PROPERTY_TYPES:
                raise TypeError(
                    "Expando cannot accept values of type '%s'." % type(value).__name__
                )
            # _dynamic_properties may still be None while __init__ runs.
            if self._dynamic_properties is None:
                self._dynamic_properties = {}
            self._dynamic_properties[key] = value
        else:
            super(Expando, self).__setattr__(key, value)

    def __getattribute__(self, key):
        """Get attribute from expando.

        Must be overridden to allow dynamic properties to obscure class
        attributes. Since all attributes are stored in
        self._dynamic_properties, the normal __getattribute__ does not
        attempt to access it until __setattr__ is called. By then, the
        static attribute being overwritten has already been located and
        returned from the call.

        This method short circuits the usual __getattribute__ call when
        finding a dynamic property and returns it to the user via
        __getattr__. __getattr__ is called to preserve backward
        compatibility with older Expando models that may have overridden
        the original __getattr__.

        NOTE: Access to properties defined by Python descriptors are not
        obscured because setting those attributes is done through the
        descriptor and does not place those attributes in
        self._dynamic_properties.
        """
        if not key.startswith("_"):
            # Look in the dynamic-property dict first so dynamic values
            # shadow same-named class attributes.
            dynamic_properties = self._dynamic_properties
            if dynamic_properties is not None and key in dynamic_properties:
                return self.__getattr__(key)
        return super(Expando, self).__getattribute__(key)

    def __getattr__(self, key):
        """If no explicit attribute defined, retrieve value from entity.

        Tries to get the value on the object normally, but failing that
        retrieves value from contained entity.

        Args:
          key: Name of attribute.

        Raises:
          AttributeError when there is no attribute for key on object or
          contained entity.
        """
        _dynamic_properties = self._dynamic_properties
        if _dynamic_properties is not None and key in _dynamic_properties:
            return _dynamic_properties[key]
        else:
            return getattr(super(Expando, self), key)

    def __delattr__(self, key):
        """Remove attribute from expando.

        Expando is not like normal entities in that undefined fields
        can be removed.

        Args:
          key: Dynamic property to be deleted.
        """
        if self._dynamic_properties and key in self._dynamic_properties:
            del self._dynamic_properties[key]
        else:
            object.__delattr__(self, key)

    def dynamic_properties(self):
        """Determine which properties are particular to instance of entity.

        Returns:
          List of names which correspond only to the dynamic properties.
        """
        if self._dynamic_properties is None:
            return []
        return list(self._dynamic_properties.keys())

    def _to_entity(self, entity):
        """Store to entity, deleting dynamic properties that no longer exist.

        When the expando is saved, it is possible that a given property no
        longer exists. In this case, the property will be removed from the
        saved instance.

        Args:
          entity: Entity which will receive dynamic properties.
        """
        super(Expando, self)._to_entity(entity)
        if self._dynamic_properties is None:
            self._dynamic_properties = {}
        for key, value in self._dynamic_properties.items():
            entity[key] = value
        # Drop entity values that are neither declared nor dynamic: they
        # were deleted from the expando since it was loaded.
        all_properties = set(self._dynamic_properties.keys())
        all_properties.update(self._all_properties)
        for key in list(entity.keys()):
            if key not in all_properties:
                del entity[key]

    @classmethod
    def _load_entity_values(cls, entity):
        """Load dynamic properties from entity.

        Expando needs to do a second pass to add the entity values which
        were ignored by Model because they didn't have a corresponding
        predefined property on the model.

        Args:
          entity: Entity which contains values to search dynamic
            properties for.
        """
        entity_values = super(Expando, cls)._load_entity_values(entity)
        for key, value in entity.items():
            if key not in entity_values:
                entity_values[str(key)] = value
        return entity_values
class _BaseQuery(object):
    """Base class for both Query and GqlQuery."""

    # Most recent low-level datastore query executed; consulted by
    # cursor() and index_list() after a run.
    _last_raw_query = None
    # Cached db.Index list derived from _last_raw_query.
    _last_index_list = None
    # Decoded start cursor, or None to start from the beginning.
    _cursor = None
    # Decoded end cursor, or None for no end bound.
    _end_cursor = None

    def __init__(self, model_class=None):
        """Constructor.

        Args:
          model_class: Model class from which result entities are
            constructed, or None for a kindless query.
        """
        self._model_class = model_class

    def is_keys_only(self):
        """Returns whether this query is keys only.

        Returns:
          True if this query returns keys, False if it returns entities.
        """
        raise NotImplementedError

    def projection(self):
        """Returns the tuple of properties in the projection or None.

        Projected results differ from normal results in multiple ways:
        - they only contain a portion of the original entity and cannot be
          put;
        - properties defined on the model, but not included in the
          projections will have a value of None, even if the property is
          required or has a default value;
        - multi-valued properties (such as a ListProperty) will only
          contain a single value;
        - dynamic properties not included in the projection will not
          appear on the model instance;
        - dynamic properties included in the projection are deserialized
          into their indexed type. Specifically one of str, bool, long,
          float, GeoPt, Key or User. If the original type is known, it can
          be restored using datastore_types.RestoreFromIndexValue.

        However, projection queries are significantly faster than normal
        queries.

        Projection queries on entities with multi-valued properties will
        return the same entity multiple times, once for each unique
        combination of values for properties included in the order, an
        inequality property, or the projected properties.

        Returns:
          The list of properties in the projection, or None if no
          projection is set on this query.
        """
        raise NotImplementedError

    def is_distinct(self):
        """Returns true if the projection query should be distinct.

        This is equivalent to the SQL syntax: SELECT DISTINCT. It is only
        available for projection queries; it is not valid to specify
        distinct without also specifying projection properties.

        Distinct projection queries on entities with multi-valued
        properties will return the same entity multiple times, once for
        each unique combination of properties included in the projection.

        Returns:
          True if this projection query is distinct.
        """
        raise NotImplementedError

    def _get_query(self):
        """Subclass must override (and not call their super method).

        Returns:
          A datastore.Query instance representing the query.
        """
        raise NotImplementedError

    def run(self, **kwargs):
        """Iterator for this query.

        If you know the number of results you need, use run(limit=...)
        instead, or use a GQL query with a LIMIT clause. It's more
        efficient. If you want all results use run(batch_size=<large
        number>).

        Args:
          kwargs: Any keyword arguments accepted by
            datastore_query.QueryOptions().

        Returns:
          Iterator for this query.
        """
        raw_query = self._get_query()
        iterator = raw_query.Run(**kwargs)
        # Remember the raw query so cursor()/index_list() can consult it.
        self._last_raw_query = raw_query
        # An explicit keys_only kwarg overrides the query-level setting.
        keys_only = kwargs.get("keys_only")
        if keys_only is None:
            keys_only = self.is_keys_only()
        if keys_only:
            return iterator
        else:
            return _QueryIterator(self._model_class, iter(iterator))

    def __iter__(self):
        """Iterator for this query.

        If you know the number of results you need, consider fetch()
        instead, or use a GQL query with a LIMIT clause. It's more
        efficient.
        """
        return self.run()

    def __getstate__(self):
        # Raw queries hold RPC state that cannot be pickled; drop the
        # reference when serializing.
        state = self.__dict__.copy()
        state["_last_raw_query"] = None
        return state

    def get(self, **kwargs):
        """Get first result from this query.

        Beware: get() ignores the LIMIT clause on GQL queries.

        Args:
          kwargs: Any keyword arguments accepted by
            datastore_query.QueryOptions().

        Returns:
          First result from running the query if there are any, else None.
        """
        results = self.run(limit=1, **kwargs)
        try:
            return next(results)
        except StopIteration:
            return None

    def count(self, limit=1000, **kwargs):
        """Number of entities this query fetches.

        Beware: count() ignores the LIMIT clause on GQL queries.

        Args:
          limit: A number. If there are more results than this, stop short
            and just return this number. Providing this argument makes the
            count operation more efficient.
          kwargs: Any keyword arguments accepted by
            datastore_query.QueryOptions().

        Returns:
          Number of entities this query fetches.
        """
        raw_query = self._get_query()
        result = raw_query.Count(limit=limit, **kwargs)
        self._last_raw_query = raw_query
        return result

    def fetch(self, limit, offset=0, **kwargs):
        """Return a list of items selected using SQL-like limit and offset.

        Always use run(limit=...) instead of fetch() when iterating over a
        query.

        Beware: offset must read and discard all skipped entities. Use
        cursor()/with_cursor() instead.

        Args:
          limit: Maximum number of results to return.
          offset: Optional number of results to skip first; default zero.
          kwargs: Any keyword arguments accepted by
            datastore_query.QueryOptions().

        Returns:
          A list of db.Model instances. There may be fewer than 'limit'
          results if there aren't enough results to satisfy the request.
        """
        # limit=None means "all results": use a large batch size so the
        # datastore returns them in as few round trips as possible.
        if limit is None:
            kwargs.setdefault("batch_size", datastore._MAX_INT_32)
        return list(self.run(limit=limit, offset=offset, **kwargs))

    def index_list(self):
        """Get the index list for an already executed query.

        Returns:
          A list of indexes used by the query.

        Raises:
          AssertionError: If the query has not been executed.
        """
        if self._last_raw_query is None:
            raise AssertionError("No index list because query has not been run.")
        # Convert lazily and cache; the raw index list does not change for
        # a given executed query.
        if self._last_index_list is None:
            raw_index_list = self._last_raw_query.GetIndexList()
            self._last_index_list = [
                _index_converter(raw_index) for raw_index in raw_index_list
            ]
        return self._last_index_list

    def cursor(self):
        """Get a serialized cursor for an already executed query.

        The returned cursor effectively lets a future invocation of a
        similar query begin fetching results immediately after the last
        returned result from this query invocation.

        Returns:
          A base64-encoded serialized cursor.

        Raises:
          AssertionError: If the query has not been executed.
        """
        if self._last_raw_query is None:
            raise AssertionError("No cursor available.")
        cursor = self._last_raw_query.GetCursor()
        return websafe_encode_cursor(cursor)

    def with_cursor(self, start_cursor=None, end_cursor=None):
        """Set the start and end of this query using serialized cursors.

        Conceptually cursors point to the position between the last result
        returned and the next result, so running a query with each of the
        following cursor combinations will return all results in four
        chunks with no duplicate results:

          query.with_cursor(end_cursor=cursor1)
          query.with_cursors(cursor1, cursor2)
          query.with_cursors(cursor2, cursor3)
          query.with_cursors(start_cursor=cursor3)

        For example if the cursors pointed to:
          cursor:    1   2   3
          result: a b c d e f g h

        The results returned by these queries would be [a, b], [c, d],
        [e, f], [g, h] respectively.

        Cursors are pinned to the position just after the previous result
        (last result, exclusive), so if results are inserted or deleted
        between the time the cursor was made and these queries are
        executed, the cursors stay pinned to these positions. For example:

          delete(b, f, g, h)
          put(a1, b1, c1, d1)
          cursor:     1(b)      2(d)   3(f)
          result: a a1    b1 c c1 d d1      e

        The results returned by these queries would now be: [a, a1],
        [b1, c, c1, d], [d1, e], [] respectively.

        Args:
          start_cursor: The cursor position at which to start or None
          end_cursor: The cursor position at which to end or None

        Returns:
          This Query instance, for chaining.

        Raises:
          BadValueError when cursor is not valid.
        """
        if start_cursor is None:
            self._cursor = None
        else:
            self._cursor = websafe_decode_cursor(start_cursor)
        if end_cursor is None:
            self._end_cursor = None
        else:
            self._end_cursor = websafe_decode_cursor(end_cursor)
        return self

    def __getitem__(self, arg):
        """Support for query[index] and query[start:stop].

        Beware: this ignores the LIMIT clause on GQL queries.

        Args:
          arg: Either a single integer, corresponding to the query[index]
            syntax, or a Python slice object, corresponding to the
            query[start:stop] or query[start:stop:step] syntax.

        Returns:
          A single Model instance when the argument is a single integer.
          A list of Model instances when the argument is a slice.
        """
        if isinstance(arg, slice):
            start, stop, step = arg.start, arg.stop, arg.step
            if start is None:
                start = 0
            if stop is None:
                raise ValueError("Open-ended slices are not supported")
            if step is None:
                step = 1
            if start < 0 or stop < 0 or step != 1:
                raise ValueError(
                    "Only slices with start>=0, stop>=0, step==1 are supported"
                )
            limit = stop - start
            if limit < 0:
                return []
            return self.fetch(limit, start)
        elif isinstance(arg, int):
            if arg < 0:
                raise ValueError("Only indices >= 0 are supported")
            # Fetch exactly one result at the requested offset.
            results = self.fetch(1, arg)
            if results:
                return results[0]
            else:
                raise IndexError("The query returned fewer than %d results" % (arg + 1))
        else:
            raise TypeError("Only integer indices and slices are supported")
class _QueryIterator(object):
"""Wraps the datastore iterator to return Model instances.
The datastore returns entities. We wrap the datastore iterator to
return Model instances instead.
"""
def __init__(self, model_class, datastore_iterator):
"""Iterator constructor
Args:
model_class: Model class from which entities are constructed.
datastore_iterator: Underlying datastore iterator.
"""
self.__model_class = model_class
self.__iterator = datastore_iterator
def __iter__(self):
"""Iterator on self.
Returns:
Self.
"""
return self
def __next__(self):
"""Get next Model instance in query results.
Returns:
Next model instance.
Raises:
StopIteration when there are no more results in query.
"""
if self.__model_class is not None:
return self.__model_class.from_entity(next(self.__iterator))
else:
while True:
entity = next(self.__iterator)
try:
model_class = class_for_kind(entity.kind())
except KindError:
if datastore_types.RESERVED_PROPERTY_NAME.match(entity.kind()):
continue
raise
else:
return model_class.from_entity(entity)
def _normalize_query_parameter(value):
    """Coerce a query parameter to a datastore-comparable type.

    Conversions performed:
    - Model instance -> its Key, so reference-property filters work.
    - datetime.date -> datetime.datetime (via _date_to_datetime), so date
      properties can be filtered with date objects.
    - datetime.time -> datetime.datetime (via _time_to_datetime), so time
      properties can be filtered with time objects.

    Args:
      value: The raw query parameter value.

    Returns:
      The converted value, or the input unchanged when no conversion
      applies.
    """
    if isinstance(value, Model):
        value = value.key()
    # datetime.datetime subclasses datetime.date; only convert plain dates.
    is_plain_date = isinstance(value, datetime.date) and not isinstance(
        value, datetime.datetime
    )
    if is_plain_date:
        value = _date_to_datetime(value)
    elif isinstance(value, datetime.time):
        value = _time_to_datetime(value)
    return value
class Query(_BaseQuery):
"""A Query instance queries over instances of Models.
You construct a query with a model class, like this:
class Story(db.Model):
title = db.StringProperty()
date = db.DateTimeProperty()
query = Query(Story)
You modify a query with filters and orders like this:
query.filter('title =', 'Foo')
query.order('-date')
query.ancestor(key_or_model_instance)
Every query can return an iterator, so you access the results of a query
by iterating over it:
for story in query:
print story.title
For convenience, all of the filtering and ordering methods return "self",
so the easiest way to use the query interface is to cascade all filters and
orders in the iterator line like this:
for story in Query(story).filter('title =', 'Foo').order('-date'):
print story.title
"""
_keys_only = False
_distinct = False
_projection = None
_namespace = None
_app = None
__ancestor = None
def __init__(
    self,
    model_class=None,
    keys_only=False,
    cursor=None,
    namespace=None,
    _app=None,
    distinct=False,
    projection=None,
):
    """Constructs a query over instances of the given Model.

    Args:
      model_class: Model class to build query for, or None for a
        kindless query.
      keys_only: Whether the query should return full entities or only
        keys.
      projection: A tuple of strings representing the property names to
        include in the projection this query should produce, or None.
        Setting a projection is similar to specifying
        'SELECT prop1, prop2, ...' in SQL. See _BaseQuery.projection for
        details on projection queries.
      distinct: A boolean, true if the projection should be distinct.
        See _BaseQuery.is_distinct for details on distinct queries.
      cursor: A compiled query from which to resume.
      namespace: The namespace to use for this query.
    """
    super(Query, self).__init__(model_class)
    # Only override the class-level defaults when a non-default value is
    # supplied, so unset options remain shared class attributes (and stay
    # out of the instance __dict__ used by __getstate__).
    if keys_only:
        self._keys_only = True
    if projection:
        self._projection = projection
    if namespace is not None:
        self._namespace = namespace
    if _app is not None:
        self._app = _app
    if distinct:
        self._distinct = True
    # One filter dict per subquery; disjunctions append more sets.
    self.__query_sets = [{}]
    self.__orderings = []
    self.with_cursor(cursor)
def is_keys_only(self):
    """Return True when this query fetches only keys, not full entities."""
    return self._keys_only
def projection(self):
    """Return the tuple of projected property names, or None if unset."""
    return self._projection
def is_distinct(self):
    """Return True when this projection query was marked distinct."""
    return self._distinct
def _get_query(
    self, _query_class=datastore.Query, _multi_query_class=datastore.MultiQuery
):
    """Build the low-level datastore query/queries for this Query.

    Args:
      _query_class: Class to instantiate for each single subquery;
        intended for testing/overriding.
      _multi_query_class: Class used to combine multiple subqueries
        (needed for 'IN' and '!=' filters); must be overridden together
        with _query_class.

    Returns:
      A single _query_class instance, or a _multi_query_class combining
      one instance per filter set.
    """
    queries = []
    for query_set in self.__query_sets:
        if self._model_class is not None:
            kind = self._model_class.kind()
        else:
            # Kindless query: only __key__ filters are permitted (checked
            # in filter()).
            kind = None
        query = _query_class(
            kind,
            query_set,
            keys_only=self._keys_only,
            projection=self._projection,
            distinct=self._distinct,
            compile=True,
            cursor=self._cursor,
            end_cursor=self._end_cursor,
            namespace=self._namespace,
            _app=self._app,
        )
        query.Order(*self.__orderings)
        if self.__ancestor is not None:
            query.Ancestor(self.__ancestor)
        queries.append(query)
    # Overriding only one of the two query classes is unsupported: a
    # custom _query_class without a matching multi-query class can only
    # work while the query needs a single subquery.
    if (
        _query_class != datastore.Query
        and _multi_query_class == datastore.MultiQuery
    ):
        warnings.warn(
            "Custom _query_class specified without corresponding custom"
            " _query_multi_class. Things will break if you use queries with"
            ' the "IN" or "!=" operators.',
            RuntimeWarning,
        )
        if len(queries) > 1:
            raise datastore_errors.BadArgumentError(
                "Query requires multiple subqueries to satisfy. If _query_class"
                " is overridden, _multi_query_class must also be overridden."
            )
    elif (
        _query_class == datastore.Query
        and _multi_query_class != datastore.MultiQuery
    ):
        raise BadArgumentError(
            "_query_class must also be overridden if"
            " _multi_query_class is overridden."
        )
    if len(queries) == 1:
        return queries[0]
    else:
        return _multi_query_class(queries, self.__orderings)
    def __filter_disjunction(self, operations, values):
        """Add a disjunction of several filters and several values to the query.

        This is implemented by duplicating queries and combining the
        results later: the cross product of existing query sets, operations,
        and values replaces self.__query_sets.

        Args:
            operations: a string or list of strings. Each string contains a
                property name and an operator to filter by. The operators
                themselves must not require multiple queries to evaluate
                (currently, this means that 'in' and '!=' are invalid).
            values: a value or list of filter values, normalized by
                _normalize_query_parameter.

        Raises:
            BadQueryError: if an operation ends with 'in' or '!='.
        """
        # Normalize both arguments to lists so the nested loops below work.
        if not isinstance(operations, (list, tuple)):
            operations = [operations]
        if not isinstance(values, (list, tuple)):
            values = [values]
        new_query_sets = []
        for operation in operations:
            # 'in' / '!=' would themselves expand into disjunctions; nesting
            # them here is not supported.
            if operation.lower().endswith("in") or operation.endswith("!="):
                raise BadQueryError('Cannot use "in" or "!=" in a disjunction.')
            for query_set in self.__query_sets:
                for value in values:
                    # deepcopy: each branch must evolve independently from here on.
                    new_query_set = copy.deepcopy(query_set)
                    datastore._AddOrAppend(new_query_set, operation, value)
                    new_query_sets.append(new_query_set)
        self.__query_sets = new_query_sets
    def filter(self, property_operator, value):
        """Add filter to query.

        Args:
            property_operator: string with the property and operator to filter by
                (e.g. "age >"); when no operator is given, equality is assumed.
            value: the filter value.

        Returns:
            Self to support method chaining.

        Raises:
            PropertyError if invalid property is provided.
            BadQueryError / BadValueError for unsupported operator/value combos.
        """
        # Split "prop op" into its parts; a bare property name means equality.
        match = _FILTER_REGEX.match(property_operator)
        prop = match.group(1)
        if match.group(3) is not None:
            operator = match.group(3)
        else:
            operator = "=="
        if self._model_class is None:
            # Kindless queries may only filter on the special key property.
            if prop != datastore_types.KEY_SPECIAL_PROPERTY:
                raise BadQueryError(
                    "Only %s filters are allowed on kindless queries."
                    % datastore_types.KEY_SPECIAL_PROPERTY
                )
        elif prop in self._model_class._unindexed_properties:
            raise PropertyError("Property '%s' is not indexed" % prop)
        if operator.lower() == "in":
            # IN expands into one equality subquery per candidate value.
            if self._keys_only:
                raise BadQueryError("Keys only queries do not support IN filters.")
            elif not isinstance(value, (list, tuple)):
                raise BadValueError('Argument to the "in" operator must be a list')
            values = [_normalize_query_parameter(v) for v in value]
            self.__filter_disjunction(prop + " =", values)
        else:
            if isinstance(value, (list, tuple)):
                raise BadValueError("Filtering on lists is not supported")
            if operator == "!=":
                # != is implemented as the disjunction (prop < v) OR (prop > v).
                if self._keys_only:
                    raise BadQueryError("Keys only queries do not support != filters.")
                self.__filter_disjunction(
                    [prop + " <", prop + " >"], _normalize_query_parameter(value)
                )
            else:
                # Plain operator: apply to every existing disjunctive branch.
                value = _normalize_query_parameter(value)
                for query_set in self.__query_sets:
                    datastore._AddOrAppend(query_set, property_operator, value)
        return self
    def order(self, property):
        """Set order of query result.

        To use descending order, prepend '-' (minus) to the property
        name, e.g., '-date' rather than 'date'.

        Args:
            property: Property to sort on.

        Returns:
            Self to support method chaining.

        Raises:
            PropertyError if invalid property is provided.
            BadQueryError for unsupported orderings on kindless queries.
        """
        # A leading '-' selects descending order and is stripped from the name.
        if property.startswith("-"):
            property = property[1:]
            order = datastore.Query.DESCENDING
        else:
            order = datastore.Query.ASCENDING
        if self._model_class is None:
            # Kindless queries may only be ordered by key, ascending.
            if (
                property != datastore_types.KEY_SPECIAL_PROPERTY
                or order != datastore.Query.ASCENDING
            ):
                raise BadQueryError(
                    "Only %s ascending orders are supported on kindless queries"
                    % datastore_types.KEY_SPECIAL_PROPERTY
                )
        else:
            # Expando models accept arbitrary dynamic properties, so the
            # known-property check only applies to fixed-schema models.
            if not issubclass(self._model_class, Expando):
                if (
                    property not in self._model_class._all_properties
                    and property not in datastore_types._SPECIAL_PROPERTIES
                ):
                    raise PropertyError("Invalid property name '%s'" % property)
            if property in self._model_class._unindexed_properties:
                raise PropertyError("Property '%s' is not indexed" % property)
        self.__orderings.append((property, order))
        return self
def ancestor(self, ancestor):
"""Sets an ancestor for this query.
This restricts the query to only return results that descend from
a given model instance. In other words, all of the results will
have the ancestor as their parent, or parent's parent, etc. The
ancestor itself is also a possible result!
Args:
ancestor: Model or Key (that has already been saved)
Returns:
Self to support method chaining.
Raises:
TypeError if the argument isn't a Key or Model; NotSavedError
if it is, but isn't saved yet.
"""
if isinstance(ancestor, datastore.Key):
if ancestor.has_id_or_name():
self.__ancestor = ancestor
else:
raise NotSavedError()
elif isinstance(ancestor, Model):
if ancestor.has_key():
self.__ancestor = ancestor.key()
else:
raise NotSavedError()
else:
raise TypeError("ancestor should be Key or Model")
return self
class GqlQuery(_BaseQuery):
    """A Query class that uses GQL query syntax instead of .filter() etc."""

    def __init__(self, query_string, *args, **kwds):
        """Constructor.

        Args:
            query_string: Properly formatted GQL query string.
            *args: Positional arguments used to bind numeric references in the query.
            **kwds: Dictionary-based arguments for named references.

        Raises:
            PropertyError if the query filters or sorts on a property that's not
            indexed.
            BadArgumentError if _app is a tuple that does not have exactly
            two elements.
        """
        # Imported lazily to avoid a module-level import cycle with gql.
        from google.appengine.ext import gql

        # _app may be either an app id, or an (app id, namespace) tuple.
        app = kwds.pop("_app", None)
        namespace = None
        if isinstance(app, tuple):
            if len(app) != 2:
                raise BadArgumentError("_app must have 2 values if type is tuple.")
            app, namespace = app
        self._proto_query = gql.GQL(query_string, _app=app, namespace=namespace)
        # A kindless GQL query ("SELECT * WHERE ...") has no model class.
        if self._proto_query._kind is not None:
            model_class = class_for_kind(self._proto_query._kind)
        else:
            model_class = None
        super(GqlQuery, self).__init__(model_class)
        if model_class is not None:
            # Reject filters/orderings on properties the datastore cannot query.
            for property, unused in (
                list(self._proto_query.filters().keys()) + self._proto_query.orderings()
            ):
                if property in model_class._unindexed_properties:
                    raise PropertyError("Property '%s' is not indexed" % property)
        self.bind(*args, **kwds)

    def is_keys_only(self):
        """Return True if the parsed GQL query fetches keys only."""
        return self._proto_query._keys_only

    def projection(self):
        """Return the projection declared in the GQL string, if any."""
        return self._proto_query.projection()

    def is_distinct(self):
        """Return True if the GQL string requested DISTINCT results."""
        return self._proto_query.is_distinct()

    def bind(self, *args, **kwds):
        """Bind arguments (positional or keyword) to the query.

        Note that you can also pass arguments directly to the query
        constructor. Each time you call bind() the previous set of
        arguments is replaced with the new set. This is useful because
        the hard work in in parsing the query; so if you expect to be
        using the same query with different sets of arguments, you should
        hold on to the GqlQuery() object and call bind() on it each time.

        Args:
            *args: Positional arguments used to bind numeric references in the query.
            **kwds: Dictionary-based arguments for named references.
        """
        self._args = []
        for arg in args:
            self._args.append(_normalize_query_parameter(arg))
        self._kwds = {}
        for name, arg in kwds.items():
            self._kwds[name] = _normalize_query_parameter(arg)

    def run(self, **kwargs):
        """Iterator for this query that handles the LIMIT clause property.

        If the GQL query string contains a LIMIT clause, this function fetches
        all results before returning an iterator. Otherwise results are retrieved
        in batches by the iterator.

        Args:
            kwargs: Any keyword arguments accepted by datastore_query.QueryOptions().

        Returns:
            Iterator for this query.
        """
        # Explicit kwargs win over the values parsed from the GQL string.
        if self._proto_query.limit() > 0:
            kwargs.setdefault("limit", self._proto_query.limit())
        kwargs.setdefault("offset", self._proto_query.offset())
        return _BaseQuery.run(self, **kwargs)

    def _get_query(self):
        # Delegate query construction to the parsed proto-query, binding the
        # most recent argument set and any cursors.
        return self._proto_query.Bind(
            self._args, self._kwds, self._cursor, self._end_cursor
        )
class UnindexedProperty(Property):
    """A property that isn't indexed by either built-in or composite indices.

    TextProperty and BlobProperty derive from this class.
    """

    def __init__(self, *args, **kwds):
        """Construct property. See the Property class for details.

        Raises:
            ConfigurationError if indexed=True.
        """
        # Callers may not request indexing on this property type.
        self._require_parameter(kwds, "indexed", False)
        # NOTE(review): the base class is then constructed with indexed=True
        # despite the check above. This matches the upstream App Engine SDK,
        # where it is deliberate internal bookkeeping — confirm before changing.
        kwds["indexed"] = True
        super(UnindexedProperty, self).__init__(*args, **kwds)

    def validate(self, value):
        """Validate property.

        Returns:
            A valid value.

        Raises:
            BadValueError if property is not an instance of data_type.
        """
        # First try to coerce non-conforming values via the data_type
        # constructor (e.g. str -> Text).
        if value is not None and not isinstance(value, self.data_type):
            try:
                value = self.data_type(value)
            except TypeError as err:
                raise BadValueError(
                    "Property %s must be convertible "
                    "to a %s instance (%s)" % (self.name, self.data_type.__name__, err)
                )
        value = super(UnindexedProperty, self).validate(value)
        # Re-check: base-class validation may substitute a different value.
        if value is not None and not isinstance(value, self.data_type):
            raise BadValueError(
                "Property %s must be a %s instance"
                % (self.name, self.data_type.__name__)
            )
        return value
class TextProperty(UnindexedProperty):
    """A string that can be longer than 500 bytes."""

    # Unindexed; values are coerced/validated by UnindexedProperty.validate().
    data_type = Text
class StringProperty(Property):
    """A textual property, which can be multi- or single-line."""

    def __init__(self, verbose_name=None, multiline=False, **kwds):
        """Construct string property.

        Args:
            verbose_name: Verbose name is always first parameter.
            multiline: If True, carriage returns are permitted in the value.
        """
        super(StringProperty, self).__init__(verbose_name, **kwds)
        self.multiline = multiline

    def validate(self, value):
        """Validate string property.

        Returns:
            A valid value.

        Raises:
            BadValueError if property is not multi-line but value is,
            if the value is not a string, or if it exceeds MAX_LENGTH.
        """
        value = super(StringProperty, self).validate(value)
        if value is not None and not isinstance(value, str):
            raise BadValueError(
                "Property %s must be a str or unicode instance, not a %s"
                % (self.name, type(value).__name__)
            )
        # Newlines are only legal when the property was declared multiline.
        if not self.multiline and value and value.find("\n") != -1:
            raise BadValueError("Property %s is not multi-line" % self.name)
        if value is not None and len(value) > self.MAX_LENGTH:
            raise BadValueError(
                "Property %s is %d characters long; it must be %d or less."
                % (self.name, len(value), self.MAX_LENGTH)
            )
        return value

    # Indexed string values are limited to 500 characters by the datastore.
    MAX_LENGTH = 500
    data_type = str
class _CoercingProperty(Property):
    """A Property subclass that extends validate() to coerce to self.data_type."""

    def validate(self, value):
        """Coerce values (except None) to self.data_type.

        Args:
            value: The value to be validated and coerced.

        Returns:
            The coerced and validated value. It is guaranteed that this is
            either None or an instance of self.data_type; otherwise an exception
            is raised.

        Raises:
            BadValueError if the value could not be validated or coerced.
        """
        checked = super(_CoercingProperty, self).validate(value)
        # None passes through untouched; anything already of the right type
        # is returned as-is, everything else goes through the constructor.
        if checked is None or isinstance(checked, self.data_type):
            return checked
        return self.data_type(checked)
class CategoryProperty(_CoercingProperty):
    """A property whose values are Category instances."""

    # Raw values are coerced to Category by _CoercingProperty.validate().
    data_type = Category
class LinkProperty(_CoercingProperty):
    """A property whose values are Link instances."""

    def validate(self, value):
        """Coerce to Link and require a full URL (scheme and host).

        Raises:
            BadValueError if the value lacks a scheme or network location.
        """
        value = super(LinkProperty, self).validate(value)
        if value is not None:
            # A "full" URL must carry both a scheme (http/https/...) and a host.
            scheme, netloc, path, query, fragment = urllib.parse.urlsplit(value)
            if not scheme or not netloc:
                raise BadValueError(
                    "Property %s must be a full URL ('%s')" % (self.name, value)
                )
        return value

    data_type = Link
# Backwards-compatible alias: URLProperty is the historical name for LinkProperty.
URLProperty = LinkProperty
class EmailProperty(_CoercingProperty):
    """A property whose values are Email instances."""

    # Raw values are coerced to Email by _CoercingProperty.validate().
    data_type = Email
class GeoPtProperty(_CoercingProperty):
    """A property whose values are GeoPt instances."""

    # Raw values are coerced to GeoPt by _CoercingProperty.validate().
    data_type = GeoPt
class IMProperty(_CoercingProperty):
    """A property whose values are IM instances."""

    # Raw values are coerced to IM by _CoercingProperty.validate().
    data_type = IM
class PhoneNumberProperty(_CoercingProperty):
    """A property whose values are PhoneNumber instances."""

    # Raw values are coerced to PhoneNumber by _CoercingProperty.validate().
    data_type = PhoneNumber
class PostalAddressProperty(_CoercingProperty):
    """A property whose values are PostalAddress instances."""

    # Raw values are coerced to PostalAddress by _CoercingProperty.validate().
    data_type = PostalAddress
class BlobProperty(UnindexedProperty):
    """A byte string that can be longer than 500 bytes."""

    # Unindexed; values are coerced/validated by UnindexedProperty.validate().
    data_type = Blob
class ByteStringProperty(Property):
    """A short (<=500 bytes) byte string.

    This type should be used for short binary values that need to be indexed. If
    you do not require indexing (regardless of length), use BlobProperty instead.
    """

    def validate(self, value):
        """Validate ByteString property.

        Returns:
            A valid value.

        Raises:
            BadValueError if property is not instance of 'ByteString',
            cannot be converted to one, or exceeds MAX_LENGTH bytes.
        """
        # Attempt coercion first so convertible raw values are accepted.
        if value is not None and not isinstance(value, ByteString):
            try:
                value = ByteString(value)
            except TypeError as err:
                raise BadValueError(
                    "Property %s must be convertible "
                    "to a ByteString instance (%s)" % (self.name, err)
                )
        value = super(ByteStringProperty, self).validate(value)
        if value is not None and not isinstance(value, ByteString):
            raise BadValueError("Property %s must be a ByteString instance" % self.name)
        if value is not None and len(value) > self.MAX_LENGTH:
            raise BadValueError(
                "Property %s is %d bytes long; it must be %d or less."
                % (self.name, len(value), self.MAX_LENGTH)
            )
        return value

    # Indexed byte strings are limited to 500 bytes by the datastore.
    MAX_LENGTH = 500
    data_type = ByteString
class DateTimeProperty(Property):
    """The base class of all of our date/time properties.

    We handle common operations, like converting between time tuples and
    datetime instances.
    """

    def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False, **kwds):
        """Construct a DateTimeProperty

        Args:
            verbose_name: Verbose name is always first parameter.
            auto_now: Date/time property is updated with the current time every time
                it is saved to the datastore. Useful for properties that want to track
                the modification time of an instance.
            auto_now_add: Date/time is set to the when its instance is created.
                Useful for properties that record the creation time of an entity.
        """
        super(DateTimeProperty, self).__init__(verbose_name, **kwds)
        self.auto_now = auto_now
        self.auto_now_add = auto_now_add

    def validate(self, value):
        """Validate datetime.

        Returns:
            A valid value.

        Raises:
            BadValueError if property is not instance of 'datetime'.
        """
        value = super(DateTimeProperty, self).validate(value)
        if value and not isinstance(value, self.data_type):
            raise BadValueError(
                "Property %s must be a %s, but was %r"
                % (self.name, self.data_type.__name__, value)
            )
        return value

    def default_value(self):
        """Default value for datetime.

        Returns:
            value of now() as appropriate to the date-time instance if auto_now
            or auto_now_add is set, else user configured default value implementation.
        """
        if self.auto_now or self.auto_now_add:
            return self.now()
        return Property.default_value(self)

    def get_updated_value_for_datastore(self, model_instance):
        """Get new value for property to send to datastore.

        Returns:
            now() as appropriate to the date-time instance in the odd case where
            auto_now is set to True, else AUTO_UPDATE_UNCHANGED.
        """
        if self.auto_now:
            return self.now()
        return AUTO_UPDATE_UNCHANGED

    data_type = datetime.datetime

    @staticmethod
    def now():
        """Get now as a full datetime value.

        Subclasses (DateProperty, TimeProperty) override this to return only
        the relevant portion of the current timestamp.

        Returns:
            'now' as a whole timestamp, including both time and date.
        """
        return datetime.datetime.now()
def _date_to_datetime(value):
"""Convert a date to a datetime for datastore storage.
Args:
value: A datetime.date object.
Returns:
A datetime object with time set to 0:00.
"""
assert isinstance(value, datetime.date)
return datetime.datetime(value.year, value.month, value.day)
def _time_to_datetime(value):
"""Convert a time to a datetime for datastore storage.
Args:
value: A datetime.time object.
Returns:
A datetime object with date set to 1970-01-01.
"""
assert isinstance(value, datetime.time)
return datetime.datetime(
1970, 1, 1, value.hour, value.minute, value.second, value.microsecond
)
class DateProperty(DateTimeProperty):
    """A date property, which stores a date without a time."""

    @staticmethod
    def now():
        """Get now as a date datetime value.

        Returns:
            'date' part of 'now' only.
        """
        return datetime.datetime.now().date()

    def validate(self, value):
        """Validate date.

        Returns:
            A valid value.

        Raises:
            BadValueError if property is not instance of 'date',
            or if it is an instance of 'datetime' (which is a subclass
            of 'date', but for all practical purposes a different type).
        """
        value = super(DateProperty, self).validate(value)
        # datetime passes the base isinstance check (it subclasses date),
        # so it must be rejected explicitly here.
        if isinstance(value, datetime.datetime):
            raise BadValueError(
                "Property %s must be a %s, not a datetime"
                % (self.name, self.data_type.__name__)
            )
        return value

    def get_updated_value_for_datastore(self, model_instance):
        """Get new value for property to send to datastore.

        Returns:
            now() as appropriate to the date instance in the odd case where
            auto_now is set to True, else AUTO_UPDATE_UNCHANGED.
        """
        if self.auto_now:
            return _date_to_datetime(self.now())
        return AUTO_UPDATE_UNCHANGED

    def get_value_for_datastore(self, model_instance):
        """Get value from property to send to datastore.

        We retrieve a datetime.date from the model instance and return a
        datetime.datetime instance with the time set to zero.

        See base class method documentation for details.
        """
        value = super(DateProperty, self).get_value_for_datastore(model_instance)
        if value is not None:
            assert isinstance(value, datetime.date)
            value = _date_to_datetime(value)
        return value

    def make_value_from_datastore(self, value):
        """Native representation of this property.

        We receive a datetime.datetime retrieved from the entity and return
        a datetime.date instance representing its date portion.

        See base class method documentation for details.
        """
        if value is not None:
            assert isinstance(value, datetime.datetime)
            value = value.date()
        return value

    data_type = datetime.date
class TimeProperty(DateTimeProperty):
    """A time property, which stores a time without a date."""

    @staticmethod
    def now():
        """Get now as a time datetime value.

        Returns:
            'time' part of 'now' only.
        """
        return datetime.datetime.now().time()

    def empty(self, value):
        """Is time property empty.

        "0:0" (midnight) is not an empty value.

        Returns:
            True if value is None, else False.
        """
        return value is None

    def get_updated_value_for_datastore(self, model_instance):
        """Get new value for property to send to datastore.

        Returns:
            now() as appropriate to the time instance in the odd case where
            auto_now is set to True, else AUTO_UPDATE_UNCHANGED.
        """
        if self.auto_now:
            return _time_to_datetime(self.now())
        return AUTO_UPDATE_UNCHANGED

    def get_value_for_datastore(self, model_instance):
        """Get value from property to send to datastore.

        We retrieve a datetime.time from the model instance and return a
        datetime.datetime instance with the date set to 1/1/1970.

        See base class method documentation for details.
        """
        value = super(TimeProperty, self).get_value_for_datastore(model_instance)
        if value is not None:
            assert isinstance(value, datetime.time), repr(value)
            value = _time_to_datetime(value)
        return value

    def make_value_from_datastore(self, value):
        """Native representation of this property.

        We receive a datetime.datetime retrieved from the entity and return
        a datetime.date instance representing its time portion.

        See base class method documentation for details.
        """
        if value is not None:
            assert isinstance(value, datetime.datetime)
            value = value.time()
        return value

    data_type = datetime.time
class IntegerProperty(Property):
    """An integer property."""

    def validate(self, value):
        """Validate integer property.

        Returns:
            A valid value.

        Raises:
            BadValueError if value is not an integer or long instance,
            or does not fit in a signed 64-bit range.
        """
        value = super(IntegerProperty, self).validate(value)
        if value is None:
            return value
        # bool is a subclass of int, so it must be excluded explicitly.
        if not isinstance(value, int) or isinstance(value, bool):
            raise BadValueError(
                "Property %s must be an int or long, not a %s"
                % (self.name, type(value).__name__)
            )
        # The datastore stores integers as signed 64-bit values.
        if value < -0x8000000000000000 or value > 0x7FFFFFFFFFFFFFFF:
            raise BadValueError("Property %s must fit in 64 bits" % self.name)
        return value

    data_type = int

    def empty(self, value):
        """Is integer property empty.

        0 is not an empty value.

        Returns:
            True if value is None, else False.
        """
        return value is None
class RatingProperty(_CoercingProperty, IntegerProperty):
    """A property whose values are Rating instances."""

    # MRO applies _CoercingProperty coercion on top of integer validation.
    data_type = Rating
class FloatProperty(Property):
    """A float property."""

    data_type = float

    def validate(self, value):
        """Validate float.

        Returns:
            A valid value.

        Raises:
            BadValueError if property is not instance of 'float'.
        """
        checked = super(FloatProperty, self).validate(value)
        # Only genuine floats are accepted; ints are not silently widened.
        if checked is not None and not isinstance(checked, float):
            raise BadValueError("Property %s must be a float" % self.name)
        return checked

    def empty(self, value):
        """Is float property empty.

        0.0 is not an empty value; only None counts as empty.

        Returns:
            True if value is None, else False.
        """
        return value is None
class BooleanProperty(Property):
    """A boolean property."""

    data_type = bool

    def validate(self, value):
        """Validate boolean.

        Returns:
            A valid value.

        Raises:
            BadValueError if property is not instance of 'bool'.
        """
        checked = super(BooleanProperty, self).validate(value)
        if checked is not None and not isinstance(checked, bool):
            raise BadValueError("Property %s must be a bool" % self.name)
        return checked

    def empty(self, value):
        """Is boolean property empty.

        False is not an empty value; only None counts as empty.

        Returns:
            True if value is None, else False.
        """
        return value is None
class UserProperty(Property):
    """A user property."""

    def __init__(
        self,
        verbose_name=None,
        name=None,
        required=False,
        validator=None,
        choices=None,
        auto_current_user=False,
        auto_current_user_add=False,
        indexed=True,
    ):
        """Initializes this Property with the given options.

        Note: this does *not* support the 'default' keyword argument.
        Use auto_current_user_add=True instead.

        Args:
            verbose_name: User friendly name of property.
            name: Storage name for property. By default, uses attribute name
                as it is assigned in the Model sub-class.
            required: Whether property is required.
            validator: User provided method used for validation.
            choices: User provided set of valid property values.
            auto_current_user: If true, the value is set to the current user
                each time the entity is written to the datastore.
            auto_current_user_add: If true, the value is set to the current user
                the first time the entity is written to the datastore.
            indexed: Whether property is indexed.
        """
        super(UserProperty, self).__init__(
            verbose_name,
            name,
            required=required,
            validator=validator,
            choices=choices,
            indexed=indexed,
        )
        self.auto_current_user = auto_current_user
        self.auto_current_user_add = auto_current_user_add

    def validate(self, value):
        """Validate user.

        Returns:
            A valid value.

        Raises:
            BadValueError if property is not instance of 'User'.
        """
        value = super(UserProperty, self).validate(value)
        if value is not None and not isinstance(value, users.User):
            raise BadValueError("Property %s must be a User" % self.name)
        return value

    def default_value(self):
        """Default value for user.

        Returns:
            Value of users.get_current_user() if auto_current_user or
            auto_current_user_add is set; else None. (But *not* the default
            implementation, since we don't support the 'default' keyword
            argument.)
        """
        if self.auto_current_user or self.auto_current_user_add:
            return users.get_current_user()
        return None

    def get_updated_value_for_datastore(self, model_instance):
        """Get new value for property to send to datastore.

        Returns:
            Value of users.get_current_user() if auto_current_user is set;
            else AUTO_UPDATE_UNCHANGED.
        """
        if self.auto_current_user:
            return users.get_current_user()
        return AUTO_UPDATE_UNCHANGED

    data_type = users.User
class ListProperty(Property):
    """A property that stores a list of things.

    This is a parameterized property; the parameter must be a valid
    non-list data type, and all items must conform to this type.
    """

    def __init__(self, item_type, verbose_name=None, default=None, **kwds):
        """Construct ListProperty.

        Args:
            item_type: Type for the list items; must be one of the allowed property
                types.
            verbose_name: Optional verbose name.
            default: Optional default value; if omitted, an empty list is used.
            **kwds: Optional additional keyword arguments, passed to base class.

        Note that the only permissible value for 'required' is True.
        """
        # NOTE(review): this branch is a no-op left over from the Python 3
        # conversion (originally it widened `str` to `basestring`) — confirm
        # before removing.
        if item_type is str:
            item_type = str
        if not isinstance(item_type, type):
            raise TypeError("Item type should be a type object")
        if item_type not in _ALLOWED_PROPERTY_TYPES:
            raise ValueError("Item type %s is not acceptable" % item_type.__name__)
        # Lists of unindexable types inherit the same indexed=True bookkeeping
        # used by UnindexedProperty.
        if issubclass(item_type, (Blob, Text)):
            self._require_parameter(kwds, "indexed", False)
            kwds["indexed"] = True
        # A list property is always "required": a missing list is just [].
        self._require_parameter(kwds, "required", True)
        if default is None:
            default = []
        self.item_type = item_type
        super(ListProperty, self).__init__(verbose_name, default=default, **kwds)

    def validate(self, value):
        """Validate list.

        Returns:
            A valid value.

        Raises:
            BadValueError if property is not a list whose items are instances of
            the item_type given to the constructor.
        """
        value = super(ListProperty, self).validate(value)
        if value is not None:
            if not isinstance(value, list):
                raise BadValueError("Property %s must be a list" % self.name)
            value = self.validate_list_contents(value)
        return value

    def _load(self, model_instance, value):
        # Single values loaded from the datastore are wrapped in a list.
        if not isinstance(value, list):
            value = [value]
        return super(ListProperty, self)._load(model_instance, value)

    def validate_list_contents(self, value):
        """Validates that all items in the list are of the correct type.

        Returns:
            The validated list.

        Raises:
            BadValueError if the list has items are not instances of the
            item_type given to the constructor.
        """
        # NOTE(review): `(int, int)` is a Python 3 conversion artifact of the
        # original `(int, long)` pair; behavior is unchanged but the duplicate
        # is redundant — confirm before simplifying.
        if self.item_type in (int, int):
            item_type = (int, int)
        else:
            item_type = self.item_type
        for item in value:
            if not isinstance(item, item_type):
                if item_type == (int, int):
                    raise BadValueError(
                        "Items in the %s list must all be integers." % self.name
                    )
                else:
                    raise BadValueError(
                        "Items in the %s list must all be %s instances"
                        % (self.name, self.item_type.__name__)
                    )
        return value

    def empty(self, value):
        """Is list property empty.

        [] is not an empty value.

        Returns:
            True if value is None, else false.
        """
        return value is None

    data_type = list

    def default_value(self):
        """Default value for list.

        Because the property supplied to 'default' is a static value,
        that value must be shallow copied to prevent all fields with
        default values from sharing the same instance.

        Returns:
            Copy of the default value.
        """
        return list(super(ListProperty, self).default_value())

    def get_value_for_datastore(self, model_instance):
        """Get value from property to send to datastore.

        Returns:
            validated list appropriate to save in the datastore.
        """
        value = super(ListProperty, self).get_value_for_datastore(model_instance)
        if not value:
            return value
        value = self.validate_list_contents(value)
        if self.validator:
            self.validator(value)
        # Dates and times are widened to datetimes for storage.
        if self.item_type == datetime.date:
            value = list(map(_date_to_datetime, value))
        elif self.item_type == datetime.time:
            value = list(map(_time_to_datetime, value))
        return value

    def make_value_from_datastore(self, value):
        """Native representation of this property.

        If this list is a list of datetime.date or datetime.time, we convert
        the list of datetime.datetime retrieved from the entity into
        datetime.date or datetime.time.

        See base class method documentation for details.
        """
        if self.item_type == datetime.date:
            for v in value:
                assert isinstance(v, datetime.datetime)
            value = [x.date() for x in value]
        elif self.item_type == datetime.time:
            for v in value:
                assert isinstance(v, datetime.datetime)
            value = [x.time() for x in value]
        return value

    def make_value_from_datastore_index_value(self, index_value):
        # An index value represents a single element; wrap it so the normal
        # list conversion above applies.
        value = [datastore_types.RestoreFromIndexValue(index_value, self.item_type)]
        return self.make_value_from_datastore(value)
class StringListProperty(ListProperty):
    """A property that stores a list of strings.

    A shorthand for the most common type of ListProperty.
    """

    def __init__(self, verbose_name=None, default=None, **kwds):
        """Construct StringListProperty.

        Args:
            verbose_name: Optional verbose name.
            default: Optional default value; if omitted, an empty list is used.
            **kwds: Optional additional keyword arguments, passed to ListProperty().
        """
        # Fix the item type to str; everything else is delegated upward.
        super().__init__(str, verbose_name=verbose_name, default=default, **kwds)
class ReferenceProperty(Property):
"""A property that represents a many-to-one reference to another model.
For example, a reference property in model A that refers to model B forms
a many-to-one relationship from A to B: every instance of A refers to a
single B instance, and every B instance can have many A instances refer
to it.
"""
def __init__(
self, reference_class=None, verbose_name=None, collection_name=None, **attrs
):
"""Construct ReferenceProperty.
Args:
reference_class: Which model class this property references.
verbose_name: User friendly name of property.
collection_name: If provided, alternate name of collection on
reference_class to store back references. Use this to allow
a Model to have multiple fields which refer to the same class.
"""
super(ReferenceProperty, self).__init__(verbose_name, **attrs)
self.collection_name = collection_name
if reference_class is None:
reference_class = Model
if not (
(isinstance(reference_class, type) and issubclass(reference_class, Model))
or reference_class is _SELF_REFERENCE
):
raise KindError("reference_class must be Model or _SELF_REFERENCE")
self.reference_class = self.data_type = reference_class
def make_value_from_datastore_index_value(self, index_value):
value = datastore_types.RestoreFromIndexValue(index_value, Key)
return self.make_value_from_datastore(value)
def __property_config__(self, model_class, property_name):
"""Loads all of the references that point to this model.
We need to do this to create the ReverseReferenceProperty properties for
this model and create the <reference>_set attributes on the referenced
model, e.g.:
class Story(db.Model):
title = db.StringProperty()
class Comment(db.Model):
story = db.ReferenceProperty(Story)
story = Story.get(id)
print [c for c in story.comment_set]
In this example, the comment_set property was created based on the reference
from Comment to Story (which is inherently one to many).
Args:
model_class: Model class which will have its reference properties
initialized.
property_name: Name of property being configured.
Raises:
DuplicatePropertyError if referenced class already has the provided
collection name as a property.
"""
super(ReferenceProperty, self).__property_config__(model_class, property_name)
if self.reference_class is _SELF_REFERENCE:
self.reference_class = self.data_type = model_class
if self.collection_name is None:
self.collection_name = "%s_set" % (model_class.__name__.lower())
existing_prop = getattr(self.reference_class, self.collection_name, None)
if existing_prop is not None:
if not (
isinstance(existing_prop, _ReverseReferenceProperty)
and existing_prop._prop_name == property_name
and existing_prop._model.__name__ == model_class.__name__
and existing_prop._model.__module__ == model_class.__module__
):
raise DuplicatePropertyError(
"Class %s already has property %s "
% (self.reference_class.__name__, self.collection_name)
)
setattr(
self.reference_class,
self.collection_name,
_ReverseReferenceProperty(model_class, property_name),
)
def __get__(self, model_instance, model_class):
"""Get reference object.
This method will fetch unresolved entities from the datastore if
they are not already loaded.
Returns:
ReferenceProperty to Model object if property is set, else None.
Raises:
ReferencePropertyResolveError: if the referenced model does not exist.
"""
if model_instance is None:
return self
if hasattr(model_instance, self.__id_attr_name()):
reference_id = getattr(model_instance, self.__id_attr_name())
else:
reference_id = None
if reference_id is not None:
resolved = getattr(model_instance, self.__resolved_attr_name())
if resolved is not None:
return resolved
else:
instance = get(reference_id)
if instance is None:
raise ReferencePropertyResolveError(
"ReferenceProperty failed to be resolved: %s"
% reference_id.to_path()
)
setattr(model_instance, self.__resolved_attr_name(), instance)
return instance
else:
return None
def __set__(self, model_instance, value):
"""Set reference."""
value = self.validate(value)
if value is not None:
if isinstance(value, datastore.Key):
setattr(model_instance, self.__id_attr_name(), value)
setattr(model_instance, self.__resolved_attr_name(), None)
else:
setattr(model_instance, self.__id_attr_name(), value.key())
setattr(model_instance, self.__resolved_attr_name(), value)
else:
setattr(model_instance, self.__id_attr_name(), None)
setattr(model_instance, self.__resolved_attr_name(), None)
def get_value_for_datastore(self, model_instance):
"""Get key of reference rather than reference itself."""
return getattr(model_instance, self.__id_attr_name())
def validate(self, value):
    """Validate reference.

    A raw datastore Key is accepted as-is without further checks. A model
    instance must already be saved (have a complete key) and be an instance
    of the configured reference_class. NOTE: the order of checks matters —
    the has_key() test runs before the superclass validation so a clearer
    error is raised for unsaved instances.

    Returns:
        A valid value (a datastore Key or a model instance).

    Raises:
        BadValueError: if the instance is not saved (no complete key).
        KindError: if the value is not an instance of reference_class.
    """
    if isinstance(value, datastore.Key):
        return value
    if value is not None and not value.has_key():
        raise BadValueError(
            "%s instance must have a complete key before it can be stored as a "
            "reference" % self.reference_class.kind()
        )
    # Superclass handles required/None/choices checks.
    value = super(ReferenceProperty, self).validate(value)
    if value is not None and not isinstance(value, self.reference_class):
        raise KindError(
            "Property %s must be an instance of %s"
            % (self.name, self.reference_class.kind())
        )
    return value
def __id_attr_name(self):
    """Get attribute of referenced id.

    Returns:
        Attribute name under which the referenced entity's key is stored
        on the referring model instance.
    """
    return self._attr_name()
def __resolved_attr_name(self):
    """Get attribute of resolved attribute.

    The resolved attribute is where the actual loaded reference instance is
    cached on the referring model instance (see __get__/__set__).

    Returns:
        Attribute name of where to store the resolved reference model
        instance; derived from the id attribute name with a "_RESOLVED"
        prefix so the two can never collide.
    """
    return "_RESOLVED" + self._attr_name()
# Short alias kept for backwards compatibility with existing user code.
Reference = ReferenceProperty
def SelfReferenceProperty(verbose_name=None, collection_name=None, **attrs):
    """Create a self reference.

    Function for declaring a self referencing property on a model, i.e. a
    ReferenceProperty whose reference_class is the declaring model itself.

    Example:
        class HtmlNode(db.Model):
            parent = db.SelfReferenceProperty('Parent', 'children')

    Args:
        verbose_name: User friendly name of property.
        collection_name: Name of collection on model.
        **attrs: Further keyword arguments forwarded to ReferenceProperty.

    Raises:
        ConfigurationError: if reference_class is provided as a parameter —
            the target class is implicitly the declaring model.
    """
    if "reference_class" in attrs:
        raise ConfigurationError("Do not provide reference_class to self-reference.")
    # _SELF_REFERENCE is a sentinel resolved to the owning model class later.
    return ReferenceProperty(_SELF_REFERENCE, verbose_name, collection_name, **attrs)


# Short alias kept for backwards compatibility with existing user code.
SelfReference = SelfReferenceProperty
class _ReverseReferenceProperty(Property):
    """The inverse of the Reference property above.

    When a Reference property on model A points at model B, an instance of
    this descriptor is installed automatically on B (named e.g. ``a_set``)
    so that all A instances referring to a given B can be fetched as a
    one-to-many query.
    """

    def __init__(self, model, prop):
        """Constructor for reverse reference.

        Does not take the standard values of other property types.

        Args:
            model: Model class that this property is a collection of.
            prop: Name of the foreign Reference property on `model` that
                points back to this property's entity.
        """
        self.__model = model
        self.__property = prop

    @property
    def _model(self):
        """Read-only access to the collection's model class."""
        return self.__model

    @property
    def _prop_name(self):
        """Read-only access to the back-pointing property name."""
        return self.__property

    def __get__(self, model_instance, model_class):
        """Return a Query over all referring instances (or self on class)."""
        if model_instance is None:
            # Accessed on the class: expose the descriptor itself.
            return self
        return Query(self.__model).filter(
            self.__property + " =", model_instance.key()
        )

    def __set__(self, model_instance, value):
        """Reverse collections cannot be assigned to."""
        raise BadValueError("Virtual property is read-only")
class ComputedProperty(Property):
    """Property used for creating properties derived from other values.

    Certain attributes should never be set by users but automatically
    calculated at run-time from other values of the same entity. These
    values are implemented as persistent properties because they provide
    useful search keys.

    A computed property behaves the same as normal properties except that
    you may not set values on them. Attempting to do so raises
    db.DerivedPropertyError which db.Model knows to ignore during entity
    loading time. Whenever getattr is used for the property
    the value is recalculated. This happens when the model calls
    get_value_for_datastore on the property.

    Example:
        import string

        class Person(Model):
            name = StringProperty(required=True)

            @db.ComputedProperty
            def lower_case_name(self):
                return self.name.lower()

        # Find all people regardless of case used in name.
        Person.gql('WHERE lower_case_name=:1', name_to_search_for.lower())
    """

    def __init__(self, value_function, indexed=True):
        """Constructor.

        Args:
            value_function: Callable f(model_instance) -> value used to derive
                persistent property value for storage in datastore.
            indexed: Whether or not the attribute should be indexed.
        """
        super(ComputedProperty, self).__init__(indexed=indexed)
        self.__value_function = value_function

    def __set__(self, *args):
        """Disallow setting this value.

        Raises:
            DerivedPropertyError when developer attempts to set attribute
            manually. Model knows to ignore this exception when getting from
            datastore.
        """
        raise DerivedPropertyError("Computed property %s cannot be set." % self.name)

    def __get__(self, model_instance, model_class):
        """Derive property value.

        Args:
            model_instance: Instance to derive property for in bound method
                case, else None.
            model_class: Model class associated with this property descriptor.

        Returns:
            The descriptor itself for class-level access, else the result of
            calling self.__value_function as provided by the property
            constructor.
        """
        if model_instance is None:
            return self
        return self.__value_function(model_instance)
def to_dict(model_instance, dictionary=None):
    """Convert a model instance's property values into a dictionary.

    Args:
        model_instance: Model instance for which to make the dictionary.
        dictionary: dict instance (or compatible) that receives the values.
            It is not cleared first — behavior is similar to dict.update.
            When None, a fresh dict is created and returned.

    Returns:
        The populated dictionary: the one passed in, or a newly created one.
    """
    target = {} if dictionary is None else dictionary
    model_instance._to_entity(target)
    return target
# Convenience aliases exposing datastore-level transaction helpers at the
# db module level.
run_in_transaction = datastore.RunInTransaction
run_in_transaction_custom_retries = datastore.RunInTransactionCustomRetries
run_in_transaction_options = datastore.RunInTransactionOptions

# CamelCase spellings kept alongside the snake_case names for compatibility.
RunInTransaction = run_in_transaction
RunInTransactionCustomRetries = run_in_transaction_custom_retries

# Cursor <-> web-safe string conversion helpers.
websafe_encode_cursor = datastore_query.Cursor.to_websafe_string
websafe_decode_cursor = datastore_query.Cursor.from_websafe_string

# Transaction state/decorator and configuration factory re-exports.
is_in_transaction = datastore.IsInTransaction
transactional = datastore.Transactional
non_transactional = datastore.NonTransactional
create_config = datastore.CreateConfig
create_transaction_options = datastore.CreateTransactionOptions
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Status page handler for mapreduce framework."""
import os
import time
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_errors
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
from google.appengine.ext import db
from google.appengine.ext.mapreduce import base_handler
from google.appengine.ext.mapreduce import errors
from google.appengine.ext.mapreduce import model
# File names probed, in order, when searching for the mapreduce config file.
MR_YAML_NAMES = ["mapreduce.yaml", "mapreduce.yml"]
class BadStatusParameterError(Exception):
    """Raised when a parameter passed to a status handler was invalid."""
class UserParam(validation.Validated):
    """A user-supplied parameter to a mapreduce job."""

    # Validation rules: `name` must be a dotted identifier; `default` and
    # `value` are free-form strings and may be omitted.
    ATTRIBUTES = {
        "name": r"[a-zA-Z0-9_\.]+",
        "default": validation.Optional(r".*"),
        "value": validation.Optional(r".*"),
    }
class MapperInfo(validation.Validated):
    """Configuration parameters for the mapper part of the job."""

    # `handler` and `input_reader` are required dotted specs; the writer,
    # params list and params validator are all optional.
    ATTRIBUTES = {
        "handler": r".+",
        "input_reader": r".+",
        "output_writer": validation.Optional(r".+"),
        "params": validation.Optional(validation.Repeated(UserParam)),
        "params_validator": validation.Optional(r".+"),
    }
class MapreduceInfo(validation.Validated):
    """Mapreduce description in mapreduce.yaml."""

    # Each entry has a display name, a nested mapper section, and optional
    # job-level params plus a validator spec.
    ATTRIBUTES = {
        "name": r".+",
        "mapper": MapperInfo,
        "params": validation.Optional(validation.Repeated(UserParam)),
        "params_validator": validation.Optional(r".+"),
    }
class MapReduceYaml(validation.Validated):
    """Root class for mapreduce.yaml.

    File format:

    mapreduce:
    - name: <mapreduce_name>
      mapper:
        - input_reader: google.appengine.ext.mapreduce.DatastoreInputReader
        - handler: path_to_my.MapperFunction
        - params:
          - name: foo
            default: bar
          - name: blah
            default: stuff
        - params_validator: path_to_my.ValidatorFunction

    Where
      mapreduce_name: The name of the mapreduce. Used for UI purposes.
      mapper_handler_spec: Full <module_name>.<function_name/class_name> of
        mapper handler. See MapreduceSpec class documentation for full handler
        specification.
      input_reader: Full <module_name>.<function_name/class_name> of the
        InputReader sub-class to use for the mapper job.
      params: A list of optional parameter names and optional default values
        that may be supplied or overridden by the user running the job.
      params_validator: Full <module_name>.<function_name/class_name> of a
        callable to validate the mapper_params after they are input by the
        user running the job.
    """

    ATTRIBUTES = {"mapreduce": validation.Optional(validation.Repeated(MapreduceInfo))}

    @staticmethod
    def _flatten_params(params):
        """Collapse a list of UserParam objects into a name -> value dict.

        The declared default is used when truthy, otherwise the supplied
        value (mirrors the original `param.default or param.value` rule).
        """
        return {param.name: param.default or param.value for param in params}

    @staticmethod
    def to_dict(mapreduce_yaml):
        """Converts a MapReduceYaml file into a JSON-encodable list of dicts.

        For use in user-visible UI and internal methods for interfacing with
        user code (like param validation).

        Args:
            mapreduce_yaml: The Python representation of the mapreduce.yaml
                document.

        Returns:
            A list of configuration dictionaries, one per mapreduce entry.
        """
        all_configs = []
        for config in mapreduce_yaml.mapreduce:
            out = {
                "name": config.name,
                "mapper_input_reader": config.mapper.input_reader,
                "mapper_handler": config.mapper.handler,
            }
            # Optional sections are only emitted when present in the YAML.
            if config.mapper.params_validator:
                out["mapper_params_validator"] = config.mapper.params_validator
            if config.mapper.params:
                out["mapper_params"] = MapReduceYaml._flatten_params(
                    config.mapper.params
                )
            if config.params:
                out["params"] = MapReduceYaml._flatten_params(config.params)
            if config.mapper.output_writer:
                out["mapper_output_writer"] = config.mapper.output_writer
            all_configs.append(out)
        return all_configs
def find_mapreduce_yaml(status_file=__file__):
    """Traverse directory trees to find a mapreduce.yaml file.

    Searches upward from the location of status.py first, then from the
    current working directory.

    Args:
        status_file: location of status.py, overridable for testing purposes.

    Returns:
        The path of the mapreduce.yaml file, or None if not found.
    """
    checked = set()
    # The shared `checked` set stops the second walk from re-probing
    # directories the first walk already rejected.
    return (
        _find_mapreduce_yaml(os.path.dirname(status_file), checked)
        or _find_mapreduce_yaml(os.getcwd(), checked)
    )
def _find_mapreduce_yaml(start, checked):
    """Walk upward from `start` until mapreduce.yaml is found.

    Stops when a directory already present in `checked` is reached; since
    the filesystem root is its own dirname, this both guarantees loop
    termination and prevents re-examining directories across calls.

    Args:
        start: the path to start in and work upward from.
        checked: the set of already examined directories.

    Returns:
        The path of the mapreduce.yaml file, or None if not found.
    """
    # `current` instead of `dir`: avoids shadowing the dir() builtin.
    current = start
    while current not in checked:
        checked.add(current)
        for candidate_name in MR_YAML_NAMES:
            yaml_path = os.path.join(current, candidate_name)
            if os.path.exists(yaml_path):
                return yaml_path
        current = os.path.dirname(current)
    return None
def parse_mapreduce_yaml(contents):
    """Parses mapreduce.yaml file contents.

    Args:
        contents: mapreduce.yaml file contents.

    Returns:
        MapReduceYaml object with all the data from the original file.

    Raises:
        errors.BadYamlError: when contents is not a valid mapreduce.yaml
            file, is empty, or declares duplicate job names.
        errors.MultipleDocumentsInMrYaml: when more than one YAML document
            is present.
    """
    try:
        builder = yaml_object.ObjectBuilder(MapReduceYaml)
        handler = yaml_builder.BuilderHandler(builder)
        listener = yaml_listener.EventListener(handler)
        listener.Parse(contents)
        mr_info = handler.GetResults()
    except (ValueError, yaml_errors.EventError) as e:
        raise errors.BadYamlError(e)

    if not mr_info:
        raise errors.BadYamlError("No configs found in mapreduce.yaml")
    if len(mr_info) > 1:
        raise errors.MultipleDocumentsInMrYaml("Found %d YAML documents" % len(mr_info))

    jobs = mr_info[0]
    # Duplicate names collapse in the set, revealing the overlap.
    job_names = set(j.name for j in jobs.mapreduce)
    if len(jobs.mapreduce) != len(job_names):
        raise errors.BadYamlError("Overlapping mapreduce names; names must be unique")
    return jobs
def get_mapreduce_yaml(parse=parse_mapreduce_yaml):
    """Locates mapreduce.yaml, then loads and parses its contents.

    Args:
        parse: parsing function, overridable for testing.

    Returns:
        MapReduceYaml object.

    Raises:
        errors.MissingYamlError: when no mapreduce.yaml file can be located.
        errors.BadYamlError: when the located file is not valid.
    """
    mr_yaml_path = find_mapreduce_yaml()
    if not mr_yaml_path:
        raise errors.MissingYamlError()
    with open(mr_yaml_path) as mr_yaml_file:
        return parse(mr_yaml_file.read())
class ResourceHandler(base_handler.BaseHandler):
    """Handler for static resources."""

    # Maps the URL-visible resource name to (file under static/, content type).
    # Acts as a whitelist: anything else is a 404.
    _RESOURCE_MAP = {
        "status": ("overview.html", "text/html"),
        "detail": ("detail.html", "text/html"),
        "base.css": ("base.css", "text/css"),
        "jquery.js": ("jquery-1.6.1.min.js", "text/javascript"),
        "jquery-json.js": ("jquery.json-2.2.min.js", "text/javascript"),
        "status.js": ("status.js", "text/javascript"),
    }

    def get(self, relative):
        """Serve a whitelisted static file, or a 404 for unknown names.

        Args:
            relative: resource name captured from the request URL.
        """
        if relative not in self._RESOURCE_MAP:
            self.response.set_status(404)
            self.response.out.write("Resource not found.")
            return

        real_path, content_type = self._RESOURCE_MAP[relative]
        path = os.path.join(os.path.dirname(__file__), "static", real_path)
        # Cache-Control directives are comma-separated (RFC 2616 sec 14.9);
        # the previous "public; max-age=300" caused max-age to be ignored.
        self.response.headers["Cache-Control"] = "public, max-age=300"
        self.response.headers["Content-Type"] = content_type
        # Close the file deterministically instead of leaking the handle
        # until garbage collection.
        with open(path) as resource_file:
            self.response.out.write(resource_file.read())
class ListConfigsHandler(base_handler.GetJsonHandler):
    """Lists mapreduce configs as JSON for users to start jobs."""

    def handle(self):
        # Expose every job configured in mapreduce.yaml, flattened to
        # JSON-encodable dicts for the UI.
        self.json_response["configs"] = MapReduceYaml.to_dict(get_mapreduce_yaml())
class ListJobsHandler(base_handler.GetJsonHandler):
    """Lists running and completed mapreduce jobs for an overview as JSON."""

    def handle(self):
        """Populate json_response with one page of jobs plus a paging cursor."""
        cursor = self.request.get("cursor")
        count = int(self.request.get("count", "50"))

        query = model.MapreduceState.all()
        if cursor:
            query.filter("__key__ >=", db.Key(cursor))
        query.order("__key__")

        # Fetch one extra entity: its presence signals another page exists,
        # and its key becomes the cursor for that page.
        jobs_list = query.fetch(count + 1)
        if len(jobs_list) == (count + 1):
            self.json_response["cursor"] = str(jobs_list[-1].key())
            jobs_list = jobs_list[:-1]

        self.json_response["jobs"] = [self._describe_job(job) for job in jobs_list]

    @staticmethod
    def _describe_job(job):
        """Flatten one MapreduceState entity into a JSON-encodable dict."""
        summary = {
            "name": job.mapreduce_spec.name,
            "mapreduce_id": job.mapreduce_spec.mapreduce_id,
            "active": job.active,
            "start_timestamp_ms": int(
                time.mktime(job.start_time.utctimetuple()) * 1000
            ),
            "updated_timestamp_ms": int(
                time.mktime(job.last_poll_time.utctimetuple()) * 1000
            ),
            "chart_url": job.sparkline_url,
            "chart_width": job.chart_width,
            "active_shards": job.active_shards,
            "shards": job.mapreduce_spec.mapper.shard_count,
        }
        if job.result_status:
            summary["result_status"] = job.result_status
        return summary
class GetJobDetailHandler(base_handler.GetJsonHandler):
    """Retrieves the details of a mapreduce job as JSON."""

    def handle(self):
        """Populate json_response with job-level and per-shard details.

        Raises:
            BadStatusParameterError: if no mapreduce_id was supplied.
            KeyError: if no job exists for the supplied id.
        """
        mapreduce_id = self.request.get("mapreduce_id")
        if not mapreduce_id:
            raise BadStatusParameterError("'mapreduce_id' was invalid")

        job = model.MapreduceState.get_by_key_name(mapreduce_id)
        if job is None:
            raise KeyError("Could not find job with ID %r" % mapreduce_id)

        # Merge spec, aggregate counters and run metadata into the response.
        self.json_response.update(job.mapreduce_spec.to_json())
        self.json_response.update(job.counters_map.to_json())
        self.json_response.update(
            {
                "active": job.active,
                "start_timestamp_ms": int(
                    time.mktime(job.start_time.utctimetuple()) * 1000
                ),
                "updated_timestamp_ms": int(
                    time.mktime(job.last_poll_time.utctimetuple()) * 1000
                ),
                "chart_url": job.chart_url,
                "chart_width": job.chart_width,
            }
        )
        self.json_response["result_status"] = job.result_status

        shard_states = sorted(
            model.ShardState.find_by_mapreduce_state(job),
            key=lambda state: state.shard_number,
        )
        self.json_response["shards"] = [
            self._describe_shard(shard) for shard in shard_states
        ]

    @staticmethod
    def _describe_shard(shard):
        """Flatten one ShardState entity into a JSON-encodable dict."""
        summary = {
            "active": shard.active,
            "result_status": shard.result_status,
            "shard_number": shard.shard_number,
            "shard_id": shard.shard_id,
            "updated_timestamp_ms": int(
                time.mktime(shard.update_time.utctimetuple()) * 1000
            ),
            "shard_description": shard.shard_description,
            "last_work_item": shard.last_work_item,
        }
        summary.update(shard.counters_map.to_json())
        return summary
<|endoftext|> |
<|endoftext|>#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An extremely simple WSGI web application framework.
This module is an alias for the webapp2 module i.e. the following are
equivalent:
1. from google.appengine.ext import webapp
2. import webapp2 as webapp
It exports three primary classes: Request, Response, and RequestHandler. You
implement a web application by subclassing RequestHandler. As WSGI requests come
in, they are passed to instances of your RequestHandlers. The RequestHandler
class provides access to the easy-to-use Request and Response objects so you can
interpret the request and write the response with no knowledge of the esoteric
WSGI semantics. Here is a simple example:
from google.appengine.ext import webapp
import wsgiref.simple_server
class MainPage(webapp.RequestHandler):
def get(self):
self.response.out.write(
'<html><body><form action="/hello" method="post">'
'Name: <input name="name" type="text" size="20"> '
'<input type="submit" value="Say Hello"></form></body></html>')
class HelloPage(webapp.RequestHandler):
def post(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write('Hello, %s' % self.request.get('name'))
application = webapp.WSGIApplication([
('/', MainPage),
('/hello', HelloPage)
], debug=True)
The WSGIApplication class maps URI regular expressions to your RequestHandler
classes. It is a WSGI-compatible application object, so you can use it in
conjunction with wsgiref to make your web application into, e.g., a CGI
script or a simple HTTP server, as in the example above.
The framework does not support streaming output. All output from a response
is stored in memory before it is written.
"""
import logging
import os
from google.appengine.api import lib_config
def __django_version_setup():
    """Selects a particular Django version to load.

    Honours an explicitly configured webapp_django_version when present;
    otherwise inspects the version already installed via use_library and
    warns when the application is silently relying on the default.
    """
    django_version = _config_handle.django_version

    if django_version is not None:
        # An explicit version was configured in appengine_config.py.
        from google.appengine.dist import use_library

        use_library("django", str(django_version))
    else:
        from google.appengine.dist import _library

        version, explicit = _library.installed.get("django", ("0.96", False))
        if not explicit:
            # logging.warn is a deprecated alias of logging.warning.
            logging.warning(
                "You are using the default Django version (%s). "
                "The default Django version will change in an "
                "App Engine release in the near future. "
                "Please call use_library() to explicitly select a "
                "Django version. "
                "For more information see %s",
                version,
                "https://developers.google.com/appengine/docs/python/tools/"
                "libraries#Django",
            )
        try:
            import django

            if not hasattr(django, "VERSION"):
                # NOTE(review): presumably aliases the bundled 0.96 tree when
                # no versioned django is installed — TODO confirm.
                from django import v0_96
        except ImportError:
            pass
def _django_setup():
    """Imports and configures Django.

    This can be overridden by defining a function named
    webapp_django_setup() in the app's appengine_config.py file (see
    lib_config docs). Such a function should import and configure
    Django.

    In the Python 2.5 runtime, you can also just configure the Django version
    to be used by setting webapp_django_version in that file.

    Finally, calling use_library('django', <version>) in that file
    should also work:

        # Example taken from
        # https://developers.google.com/appengine/docs/python/tools/libraries#Django
        import os
        os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
        from google.appengine.dist import use_library
        use_library('django', '1.2')

    In the Python 2.7 runtime, the Django version is specified in your
    app.yaml file and use_library is not supported.

    If your application also imports Django directly it should ensure
    that the same code is executed before your app imports Django
    (directly or indirectly). Perhaps the simplest way to ensure that
    is to include the following in your main.py (and in each alternate
    main script):

        from google.appengine.ext.webapp import template
        import django

    This will ensure that whatever Django setup code you have included
    in appengine_config.py is executed, as a side effect of importing
    the webapp.template module.
    """
    if os.environ.get("APPENGINE_RUNTIME") != "python27":
        # Version selection only applies to the legacy (pre-2.7) runtimes.
        __django_version_setup()

    import django

    import django.conf

    try:
        # Deliberate: always fall through to the except branch so that the
        # default settings below are configured (see the TODO that follows).
        raise ImportError
        # TODO: Right now the below line raises a
        # django.core.exceptions.ImproperlyConfigured exception. Need to investigate
        # why and address accordingly.
        # getattr(django.conf.settings, 'FAKE_ATTR', None)
    except (ImportError, EnvironmentError) as e:
        # Only surface the warning when the user actually pointed
        # DJANGO_SETTINGS_MODULE at something.
        if os.getenv(django.conf.ENVIRONMENT_VARIABLE):
            logging.warning(e)
        try:
            django.conf.settings.configure(
                DEBUG=False,
                TEMPLATE_DEBUG=False,
                TEMPLATE_LOADERS=(
                    "django.template.loaders.filesystem.load_template_source",
                ),
            )
        except (EnvironmentError, RuntimeError):
            # Settings may already be configured elsewhere; best-effort only.
            pass
if os.environ.get("APPENGINE_RUNTIME") == "python27":
    # Python 2.7 runtime: webapp is an alias for webapp2 and the Django
    # version comes from app.yaml, so no django_setup hook is registered.
    _config_handle = lib_config.register(
        "webapp",
        {
            "add_wsgi_middleware": lambda app: app,
        },
    )
    from webapp2 import *
else:
    # Legacy runtime: use the pure-Python webapp implementation and let
    # appengine_config.py choose/configure a Django version.
    _config_handle = lib_config.register(
        "webapp",
        {
            "django_setup": _django_setup,
            "django_version": None,
            "add_wsgi_middleware": lambda app: app,
        },
    )
    from google.appengine.ext.webapp._webapp25 import *
    from google.appengine.ext.webapp._webapp25 import __doc__
<|endoftext|> |