repo_name (stringlengths 7-65) | path (stringlengths 5-189) | copies (stringclasses, 681 values) | size (stringlengths 4-7) | content (stringlengths 697-1.02M) | license (stringclasses, 14 values) |
---|---|---|---|---|---|
mozilla/normandy | normandy/recipes/tests/api/v3/test_shield_identicon.py | 1 | 1396 | import pytest
from normandy.recipes.api.v3.shield_identicon import Genome
@pytest.fixture
def genome():
seed = 123
return Genome(seed)
class TestGenome(object):
"""
Tests the Genome module by setting the seed to a known value and making sure that the
    random choices remain consistent, i.e. they do not change over time.
"""
def test_weighted_choice(self, genome):
weighted_options = [
{"weight": 1, "value": "apple"},
{"weight": 2, "value": "orange"},
{"weight": 4, "value": "strawberry"},
]
weighted_choice_values = [
genome.weighted_choice(weighted_options),
genome.weighted_choice(weighted_options),
genome.weighted_choice(weighted_options),
]
assert weighted_choice_values == [
{"weight": 1, "value": "apple"},
{"weight": 2, "value": "orange"},
{"weight": 1, "value": "apple"},
]
def test_emoji(self, genome):
emoji_values = [genome.emoji(), genome.emoji(), genome.emoji()]
        assert emoji_values == ["๐", "๐ฏ", "๐"]
def test_color(self, genome):
color_values = [
genome.color().rgb_color,
genome.color().rgb_color,
genome.color().rgb_color,
]
assert color_values == [(7, 54, 66), (255, 207, 0), (88, 110, 117)]
| mpl-2.0 |
mozilla/normandy | normandy/recipes/tests/api/v3/test_api.py | 1 | 84448 | from datetime import timedelta, datetime
from django.conf import settings
from django.db import connection
from django.test.utils import CaptureQueriesContext
import pytest
from rest_framework import serializers
from rest_framework.reverse import reverse
from pathlib import Path
from normandy.base.api.permissions import AdminEnabledOrReadOnly
from normandy.base.tests import UserFactory, Whatever
from normandy.base.utils import canonical_json_dumps
from normandy.recipes.models import ApprovalRequest, Recipe, RecipeRevision
from normandy.recipes import filters as filter_objects
from normandy.recipes.tests import (
ActionFactory,
ApprovalRequestFactory,
ChannelFactory,
CountryFactory,
LocaleFactory,
RecipeFactory,
RecipeRevisionFactory,
fake_sign,
)
@pytest.mark.django_db
class TestActionAPI(object):
def test_it_works(self, api_client):
res = api_client.get("/api/v3/action/")
assert res.status_code == 200
assert res.data == {"count": 0, "next": None, "previous": None, "results": []}
def test_it_serves_actions(self, api_client):
action = ActionFactory(
name="foo", implementation="foobar", arguments_schema={"type": "object"}
)
res = api_client.get("/api/v3/action/")
action_url = reverse(
"recipes:v1:action-implementation",
kwargs={"name": action.name, "impl_hash": action.implementation_hash},
)
assert res.status_code == 200
assert res.data == {
"count": 1,
"next": None,
"previous": None,
"results": [
{
"id": action.id,
"name": "foo",
"implementation_url": Whatever.endswith(action_url),
"arguments_schema": {"type": "object"},
}
],
}
def test_it_serves_actions_without_implementation(self, api_client):
action = ActionFactory(
name="foo-remote", implementation=None, arguments_schema={"type": "object"}
)
res = api_client.get("/api/v3/action/")
assert res.status_code == 200
assert res.data["results"] == [
{
"id": action.id,
"name": "foo-remote",
"implementation_url": None,
"arguments_schema": {"type": "object"},
}
]
def test_list_view_includes_cache_headers(self, api_client):
res = api_client.get("/api/v3/action/")
assert res.status_code == 200
# It isn't important to assert a particular value for max-age
assert "max-age=" in res["Cache-Control"]
assert "public" in res["Cache-Control"]
def test_detail_view_includes_cache_headers(self, api_client):
action = ActionFactory()
res = api_client.get("/api/v3/action/{id}/".format(id=action.id))
assert res.status_code == 200
# It isn't important to assert a particular value for max-age
assert "max-age=" in res["Cache-Control"]
assert "public" in res["Cache-Control"]
def test_list_sets_no_cookies(self, api_client):
res = api_client.get("/api/v3/action/")
assert res.status_code == 200
assert "Cookies" not in res
def test_detail_sets_no_cookies(self, api_client):
action = ActionFactory()
res = api_client.get("/api/v3/action/{id}/".format(id=action.id))
assert res.status_code == 200
assert res.client.cookies == {}
@pytest.mark.django_db
class TestRecipeAPI(object):
@pytest.mark.django_db
class TestListing(object):
def test_it_works(self, api_client):
res = api_client.get("/api/v3/recipe/")
assert res.status_code == 200
assert res.data["results"] == []
def test_it_serves_recipes(self, api_client):
recipe = RecipeFactory()
res = api_client.get("/api/v3/recipe/")
assert res.status_code == 200
assert res.data["results"][0]["latest_revision"]["name"] == recipe.latest_revision.name
def test_available_if_admin_enabled(self, api_client, settings):
settings.ADMIN_ENABLED = True
res = api_client.get("/api/v3/recipe/")
assert res.status_code == 200
assert res.data["results"] == []
def test_readonly_if_admin_disabled(self, api_client, settings):
settings.ADMIN_ENABLED = False
res = api_client.get("/api/v3/recipe/")
assert res.status_code == 200
recipe = RecipeFactory(name="unchanged")
res = api_client.patch("/api/v3/recipe/%s/" % recipe.id, {"name": "changed"})
assert res.status_code == 403
assert res.data["detail"] == AdminEnabledOrReadOnly.message
def test_list_view_includes_cache_headers(self, api_client):
res = api_client.get("/api/v3/recipe/")
assert res.status_code == 200
# It isn't important to assert a particular value for max_age
assert "max-age=" in res["Cache-Control"]
assert "public" in res["Cache-Control"]
def test_list_sets_no_cookies(self, api_client):
res = api_client.get("/api/v3/recipe/")
assert res.status_code == 200
assert "Cookies" not in res
def test_list_can_filter_baseline_recipes(
self, rs_settings, api_client, mocked_remotesettings
):
recipe1 = RecipeFactory(
extra_capabilities=[], approver=UserFactory(), enabler=UserFactory()
)
rs_settings.BASELINE_CAPABILITIES |= recipe1.latest_revision.capabilities
assert recipe1.latest_revision.uses_only_baseline_capabilities()
recipe2 = RecipeFactory(
extra_capabilities=["test-capability"],
approver=UserFactory(),
enabler=UserFactory(),
)
assert not recipe2.latest_revision.uses_only_baseline_capabilities()
# Only approved recipes are considered as part of uses_only_baseline_capabilities
recipe3 = RecipeFactory(extra_capabilities=[])
rs_settings.BASELINE_CAPABILITIES |= recipe3.latest_revision.capabilities
assert recipe3.latest_revision.uses_only_baseline_capabilities()
res = api_client.get("/api/v3/recipe/")
assert res.status_code == 200
assert res.data["count"] == 3
res = api_client.get("/api/v3/recipe/?uses_only_baseline_capabilities=true")
assert res.status_code == 200
assert res.data["count"] == 1
assert res.data["results"][0]["id"] == recipe1.id
def test_list_can_filter_by_filter_object_fields(self, api_client):
locale = LocaleFactory()
recipe1 = RecipeFactory(
filter_object=[
filter_objects.BucketSampleFilter.create(
start=100,
count=200,
total=10_000,
input=["normandy.userId", '"global-v4'],
),
filter_objects.LocaleFilter.create(locales=[locale.code]),
]
)
recipe2 = RecipeFactory(
filter_object=[
filter_objects.PresetFilter.create(name="pocket-1"),
filter_objects.LocaleFilter.create(locales=[locale.code]),
]
)
# All recipes are visible
res = api_client.get("/api/v3/recipe/")
assert res.status_code == 200
assert {r["id"] for r in res.data["results"]} == {recipe1.id, recipe2.id}
# Filters can find simple-strings
res = api_client.get("/api/v3/recipe/?filter_object=name:pocket")
assert res.status_code == 200
assert {r["id"] for r in res.data["results"]} == {recipe2.id}
# Filters can find partial values in lists
res = api_client.get("/api/v3/recipe/?filter_object=input:global-v4")
assert res.status_code == 200
assert {r["id"] for r in res.data["results"]} == {recipe1.id}
# Filters can find multiple results
res = api_client.get(f"/api/v3/recipe/?filter_object=locales:{locale.code}")
assert res.status_code == 200
assert {r["id"] for r in res.data["results"]} == {recipe1.id, recipe2.id}
# Filters can find nothing
res = api_client.get("/api/v3/recipe/?filter_object=doesnt:exist")
assert res.status_code == 200
assert res.data["count"] == 0
def test_list_invalid_filter_by_filter_object(self, api_client):
# The filter is supposed to be of the form
# `key1:val1,key2:val2,...`. What if we don't follow that format?
res = api_client.get("/api/v3/recipe/?filter_object=nocolon")
assert res.status_code == 400
assert res.data == {
"filter_object": "Filters must be of the format `key1:val1,key2:val2,..."
}
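        # Editor's sketch (not part of the original suite): the test above
        # relies on the `key1:val1,key2:val2,...` query format. A minimal,
        # client-side illustration of that shape (this is not the server's
        # parser, just a demonstration of the expected format):
        def test_filter_object_query_format_example(self):
            raw = "input:global-v4,locales:en-US"
            parsed = dict(pair.split(":", 1) for pair in raw.split(","))
            assert parsed == {"input": "global-v4", "locales": "en-US"}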
def test_list_can_filter_by_filter_object_type(self, api_client):
locale = LocaleFactory()
recipe1 = RecipeFactory(
filter_object=[
filter_objects.BucketSampleFilter.create(
start=100,
count=200,
total=10_000,
input=["normandy.userId", '"global-v4"'],
),
filter_objects.LocaleFilter.create(locales=[locale.code]),
]
)
recipe2 = RecipeFactory(
filter_object=[
filter_objects.PresetFilter.create(name="pocket-1"),
filter_objects.LocaleFilter.create(locales=[locale.code]),
]
)
# All recipes are visible
res = api_client.get("/api/v3/recipe/")
assert res.status_code == 200
assert {r["id"] for r in res.data["results"]} == {recipe1.id, recipe2.id}
# Filters can find types that exist on both
res = api_client.get("/api/v3/recipe/?filter_object=type:locale")
assert res.status_code == 200
assert {r["id"] for r in res.data["results"]} == {recipe1.id, recipe2.id}
# Filters can find a type that exists on just one
res = api_client.get("/api/v3/recipe/?filter_object=type:bucket")
assert res.status_code == 200
assert {r["id"] for r in res.data["results"]} == {recipe1.id}
# Filters can find nothing
res = api_client.get("/api/v3/recipe/?filter_object=type:unused")
assert res.status_code == 200
assert res.data["count"] == 0
@pytest.mark.django_db
class TestCreation(object):
def test_it_can_create_recipes(self, api_client):
action = ActionFactory()
# Enabled recipe
res = api_client.post(
"/api/v3/recipe/",
{
"name": "Test Recipe",
"action_id": action.id,
"arguments": {},
"extra_filter_expression": "whatever",
"enabled": True,
},
)
assert res.status_code == 201, res.json()
recipes = Recipe.objects.all()
assert recipes.count() == 1
def test_it_can_create_recipes_actions_without_implementation(self, api_client):
action = ActionFactory(implementation=None)
assert action.implementation is None
# Enabled recipe
res = api_client.post(
"/api/v3/recipe/",
{
"name": "Test Recipe",
"action_id": action.id,
"arguments": {},
"extra_filter_expression": "whatever",
"enabled": True,
},
)
assert res.status_code == 201
(recipe,) = Recipe.objects.all()
assert recipe.latest_revision.action.implementation is None
def test_it_can_create_disabled_recipes(self, api_client):
action = ActionFactory()
# Disabled recipe
res = api_client.post(
"/api/v3/recipe/",
{
"name": "Test Recipe",
"action_id": action.id,
"arguments": {},
"extra_filter_expression": "whatever",
"enabled": False,
},
)
assert res.status_code == 201
recipes = Recipe.objects.all()
assert recipes.count() == 1
def test_creation_when_action_does_not_exist(self, api_client):
res = api_client.post(
"/api/v3/recipe/", {"name": "Test Recipe", "action_id": 1234, "arguments": {}}
)
assert res.status_code == 400
assert res.json()["action_id"] == [
serializers.PrimaryKeyRelatedField.default_error_messages["does_not_exist"].format(
pk_value=1234
)
]
recipes = Recipe.objects.all()
assert recipes.count() == 0
def test_creation_when_action_id_is_missing(self, api_client):
res = api_client.post("/api/v3/recipe/", {"name": "Test Recipe", "arguments": {}})
assert res.status_code == 400
assert res.json()["action_id"] == [
serializers.PrimaryKeyRelatedField.default_error_messages["required"]
]
recipes = Recipe.objects.all()
assert recipes.count() == 0
def test_creation_when_action_id_is_invalid(self, api_client):
res = api_client.post(
"/api/v3/recipe/",
{"name": "Test Recipe", "action_id": "a string", "arguments": {}},
)
assert res.status_code == 400
assert res.json()["action_id"] == [
serializers.PrimaryKeyRelatedField.default_error_messages["incorrect_type"].format(
data_type="str"
)
]
recipes = Recipe.objects.all()
assert recipes.count() == 0
def test_creation_when_arguments_are_invalid(self, api_client):
action = ActionFactory(
name="foobarbaz",
arguments_schema={
"type": "object",
"properties": {"message": {"type": "string"}},
"required": ["message"],
},
)
res = api_client.post(
"/api/v3/recipe/",
{
"name": "Test Recipe",
"enabled": True,
"extra_filter_expression": "true",
"action_id": action.id,
"arguments": {},
},
)
assert res.status_code == 400
assert res.json()["arguments"]["message"] == "This field is required."
recipes = Recipe.objects.all()
assert recipes.count() == 0
def test_creation_when_arguments_is_missing(self, api_client):
action = ActionFactory(
name="foobarbaz",
arguments_schema={
"type": "object",
"properties": {"message": {"type": "string"}},
"required": ["message"],
},
)
res = api_client.post(
"/api/v3/recipe/",
{
"name": "Test Recipe",
"enabled": True,
"extra_filter_expression": "true",
"action_id": action.id,
},
)
assert res.status_code == 400
assert res.json()["arguments"] == [
serializers.PrimaryKeyRelatedField.default_error_messages["required"]
]
recipes = Recipe.objects.all()
assert recipes.count() == 0
def test_creation_when_arguments_is_a_string(self, api_client):
action = ActionFactory(
name="foobarbaz",
arguments_schema={
"type": "object",
"properties": {"message": {"type": "string"}},
"required": ["message"],
},
)
data = {
"name": "Test Recipe",
"enabled": True,
"extra_filter_expression": "true",
"action_id": action.id,
"arguments": '{"message": "the message"}',
}
res = api_client.post("/api/v3/recipe/", data)
assert res.status_code == 400
assert res.data == {"arguments": ["Must be an object."]}
recipes = Recipe.objects.all()
assert recipes.count() == 0
def test_creation_when_action_id_is_a_string_and_arguments_are_invalid(self, api_client):
action = ActionFactory(
name="foobarbaz",
arguments_schema={
"type": "object",
"properties": {"message": {"type": "string"}},
"required": ["message"],
},
)
data = {
"name": "Test Recipe",
"enabled": True,
"extra_filter_expression": "true",
"action_id": f"{action.id}",
"arguments": {},
}
res = api_client.post("/api/v3/recipe/", data)
assert res.status_code == 400
assert res.data == {"arguments": {"message": "This field is required."}}
recipes = Recipe.objects.all()
assert recipes.count() == 0
def test_creation_when_identicon_seed_is_invalid(self, api_client):
action = ActionFactory()
res = api_client.post(
"/api/v3/recipe/",
{
"name": "Test Recipe",
"action_id": action.id,
"arguments": {},
"extra_filter_expression": "whatever",
"enabled": True,
"identicon_seed": "invalid_identicon_seed",
},
)
assert res.status_code == 400
def test_at_least_one_filter_is_required(self, api_client):
action = ActionFactory()
res = api_client.post(
"/api/v3/recipe/",
{"name": "Test Recipe", "action_id": action.id, "arguments": {}, "enabled": True},
)
assert res.status_code == 400, res.json()
assert res.json() == {
"non_field_errors": ["one of extra_filter_expression or filter_object is required"]
}
def test_with_experimenter_slug(self, api_client):
action = ActionFactory()
res = api_client.post(
"/api/v3/recipe/",
{
"name": "Test Recipe",
"action_id": action.id,
"arguments": {},
"extra_filter_expression": "whatever",
"enabled": True,
"experimenter_slug": "some-experimenter-slug",
},
)
assert res.status_code == 201, res.json()
recipe = Recipe.objects.get()
assert recipe.latest_revision.experimenter_slug == "some-experimenter-slug"
def test_without_experimenter_slug(self, api_client):
action = ActionFactory()
res = api_client.post(
"/api/v3/recipe/",
{
"name": "Test Recipe",
"action_id": action.id,
"arguments": {},
"extra_filter_expression": "whatever",
"enabled": True,
},
)
assert res.status_code == 201, res.json()
recipe = Recipe.objects.get()
assert recipe.latest_revision.experimenter_slug is None
def test_creating_recipes_stores_the_user(self, api_client):
action = ActionFactory()
api_client.post(
"/api/v3/recipe/",
{
"name": "Test Recipe",
"action_id": action.id,
"arguments": {},
"extra_filter_expression": "whatever",
},
)
recipe = Recipe.objects.get()
assert recipe.latest_revision.user is not None
def test_it_can_create_recipes_with_only_filter_object(self, api_client):
action = ActionFactory()
channel = ChannelFactory()
res = api_client.post(
"/api/v3/recipe/",
{
"name": "Test Recipe",
"action_id": action.id,
"arguments": {},
"extra_filter_expression": " ",
"filter_object": [{"type": "channel", "channels": [channel.slug]}],
"enabled": True,
},
)
assert res.status_code == 201, res.json()
assert Recipe.objects.count() == 1
recipe = Recipe.objects.get()
assert recipe.latest_revision.extra_filter_expression == ""
assert (
recipe.latest_revision.filter_expression
== f'normandy.channel in ["{channel.slug}"]'
)
def test_it_can_create_extra_filter_expression_omitted(self, api_client):
action = ActionFactory()
channel = ChannelFactory()
# First try to create a recipe with 0 filter objects.
res = api_client.post(
"/api/v3/recipe/",
{
"name": "Test Recipe",
"action_id": action.id,
"arguments": {},
"filter_object": [],
"enabled": True,
},
)
assert res.status_code == 400
assert res.json()["non_field_errors"] == [
"one of extra_filter_expression or filter_object is required"
]
# Setting at least some filter_object but omitting the extra_filter_expression.
res = api_client.post(
"/api/v3/recipe/",
{
"name": "Test Recipe",
"action_id": action.id,
"arguments": {},
"filter_object": [{"type": "channel", "channels": [channel.slug]}],
"enabled": True,
},
)
assert res.status_code == 201, res.json()
assert Recipe.objects.count() == 1
recipe = Recipe.objects.get()
assert recipe.latest_revision.extra_filter_expression == ""
assert (
recipe.latest_revision.filter_expression
== f'normandy.channel in ["{channel.slug}"]'
)
def test_it_accepts_capabilities(self, api_client):
action = ActionFactory()
res = api_client.post(
"/api/v3/recipe/",
{
"action_id": action.id,
"extra_capabilities": ["test.one", "test.two"],
"arguments": {},
"name": "test recipe",
"extra_filter_expression": "true",
},
)
assert res.status_code == 201, res.json()
assert Recipe.objects.count() == 1
recipe = Recipe.objects.get()
# Passed extra capabilities:
assert recipe.latest_revision.extra_capabilities == ["test.one", "test.two"]
# Extra capabilities get included in capabilities
assert {"test.one", "test.two"} <= set(recipe.latest_revision.capabilities)
@pytest.mark.django_db
class TestUpdates(object):
def test_it_can_edit_recipes(self, api_client):
recipe = RecipeFactory(
name="unchanged", extra_filter_expression="true", filter_object_json=None
)
old_revision_id = recipe.latest_revision.id
res = api_client.patch(
"/api/v3/recipe/%s/" % recipe.id,
{"name": "changed", "extra_filter_expression": "false"},
)
assert res.status_code == 200
recipe = Recipe.objects.all()[0]
assert recipe.latest_revision.name == "changed"
assert recipe.latest_revision.filter_expression == "false"
assert recipe.latest_revision.id != old_revision_id
def test_it_can_change_action_for_recipes(self, api_client):
recipe = RecipeFactory()
action = ActionFactory()
res = api_client.patch("/api/v3/recipe/%s/" % recipe.id, {"action_id": action.id})
assert res.status_code == 200
recipe = Recipe.objects.get(pk=recipe.id)
assert recipe.latest_revision.action == action
def test_it_can_change_arguments_for_recipes(self, api_client):
recipe = RecipeFactory(arguments_json="{}")
action = ActionFactory(
name="foobarbaz",
arguments_schema={
"type": "object",
"properties": {"message": {"type": "string"}, "checkbox": {"type": "boolean"}},
"required": ["message", "checkbox"],
},
)
arguments = {"message": "test message", "checkbox": False}
res = api_client.patch(
"/api/v3/recipe/%s/" % recipe.id, {"action_id": action.id, "arguments": arguments}
)
assert res.status_code == 200, res.json()
recipe.refresh_from_db()
assert recipe.latest_revision.arguments == arguments
res = api_client.get("/api/v3/recipe/%s/" % recipe.id)
assert res.status_code == 200, res.json()
assert res.json()["latest_revision"]["arguments"] == arguments
arguments = {"message": "second message", "checkbox": True}
res = api_client.patch(
"/api/v3/recipe/%s/" % recipe.id, {"action_id": action.id, "arguments": arguments}
)
assert res.status_code == 200, res.json()
recipe.refresh_from_db()
assert recipe.latest_revision.arguments == arguments
res = api_client.get("/api/v3/recipe/%s/" % recipe.id)
assert res.status_code == 200, res.json()
assert res.json()["latest_revision"]["arguments"] == arguments
def test_it_can_delete_recipes(self, api_client):
recipe = RecipeFactory()
res = api_client.delete("/api/v3/recipe/%s/" % recipe.id)
assert res.status_code == 204
recipes = Recipe.objects.all()
assert recipes.count() == 0
def test_update_recipe_action(self, api_client):
r = RecipeFactory()
a = ActionFactory(name="test")
res = api_client.patch(f"/api/v3/recipe/{r.pk}/", {"action_id": a.id})
assert res.status_code == 200
r.refresh_from_db()
assert r.latest_revision.action == a
def test_update_recipe_comment(self, api_client):
r = RecipeFactory(comment="foo")
res = api_client.patch(f"/api/v3/recipe/{r.pk}/", {"comment": "bar"})
assert res.status_code == 200
r.refresh_from_db()
assert r.latest_revision.comment == "bar"
def test_update_recipe_experimenter_slug(self, api_client):
r = RecipeFactory()
res = api_client.patch(f"/api/v3/recipe/{r.pk}/", {"experimenter_slug": "a-new-slug"})
assert res.status_code == 200
r.refresh_from_db()
assert r.latest_revision.experimenter_slug == "a-new-slug"
def test_updating_recipes_stores_the_user(self, api_client):
recipe = RecipeFactory()
api_client.patch(f"/api/v3/recipe/{recipe.pk}/", {"name": "Test Recipe"})
recipe.refresh_from_db()
assert recipe.latest_revision.user is not None
def test_it_can_update_recipes_with_only_filter_object(self, api_client):
recipe = RecipeFactory(name="unchanged", extra_filter_expression="true")
channel = ChannelFactory()
res = api_client.patch(
"/api/v3/recipe/%s/" % recipe.id,
{
"name": "changed",
"extra_filter_expression": "",
"filter_object": [{"type": "channel", "channels": [channel.slug]}],
},
)
assert res.status_code == 200, res.json()
recipe.refresh_from_db()
assert recipe.latest_revision.extra_filter_expression == ""
assert recipe.latest_revision.filter_object
assert (
recipe.latest_revision.filter_expression
== f'normandy.channel in ["{channel.slug}"]'
)
# And you can omit it too
res = api_client.patch(
"/api/v3/recipe/%s/" % recipe.id,
{
"name": "changed",
"filter_object": [{"type": "channel", "channels": [channel.slug]}],
},
)
assert res.status_code == 200, res.json()
recipe.refresh_from_db()
assert recipe.latest_revision.extra_filter_expression == ""
# Let's paranoid-check that you can't unset the filter_object too.
res = api_client.patch(
"/api/v3/recipe/%s/" % recipe.id, {"name": "changed", "filter_object": []}
)
assert res.status_code == 400
assert res.json()["non_field_errors"] == [
"if extra_filter_expression is blank, at least one filter_object is required"
]
def test_it_can_update_capabilities(self, api_client):
recipe = RecipeFactory(extra_capabilities=["always", "original"])
res = api_client.patch(
f"/api/v3/recipe/{recipe.id}/", {"extra_capabilities": ["always", "changed"]}
)
assert res.status_code == 200
recipe = Recipe.objects.get()
assert {"always", "changed"} <= set(recipe.latest_revision.capabilities)
assert "original" not in recipe.latest_revision.capabilities
@pytest.mark.django_db
class TestFilterObjects(object):
def make_recipe(self, api_client, **kwargs):
data = {
"name": "Test Recipe",
"action_id": ActionFactory().id,
"arguments": {},
"enabled": True,
"extra_filter_expression": "true",
"filter_object": [],
}
data.update(kwargs)
return api_client.post("/api/v3/recipe/", data)
def test_bad_filter_objects(self, api_client):
res = self.make_recipe(api_client, filter_object={}) # not a list
assert res.status_code == 400
assert res.json() == {
"filter_object": ['Expected a list of items but got type "dict".']
}
res = self.make_recipe(
api_client, filter_object=["1 + 1 == 2"]
) # not a list of objects
assert res.status_code == 400
assert res.json() == {
"filter_object": {
"0": {"non field errors": ["filter_object members must be objects."]}
}
}
res = self.make_recipe(
api_client, filter_object=[{"channels": ["release"]}]
) # type is required
assert res.status_code == 400
assert res.json() == {"filter_object": {"0": {"type": ["This field is required."]}}}
def test_validate_filter_objects_channels(self, api_client):
res = self.make_recipe(
api_client, filter_object=[{"type": "channel", "channels": ["nightwolf"]}]
)
assert res.status_code == 400
assert res.json() == {
"filter_object": {"0": {"channels": ["Unrecognized channel slug 'nightwolf'"]}}
}
ChannelFactory(slug="nightwolf")
res = self.make_recipe(
api_client, filter_object=[{"type": "channel", "channels": ["nightwolf"]}]
)
assert res.status_code == 201
def test_validate_filter_objects_locales(self, api_client):
res = self.make_recipe(
api_client, filter_object=[{"type": "locale", "locales": ["sv"]}]
)
assert res.status_code == 400
assert res.json() == {
"filter_object": {"0": {"locales": ["Unrecognized locale code 'sv'"]}}
}
LocaleFactory(code="sv")
res = self.make_recipe(
api_client, filter_object=[{"type": "locale", "locales": ["sv"]}]
)
assert res.status_code == 201
def test_validate_filter_objects_countries(self, api_client):
res = self.make_recipe(
api_client, filter_object=[{"type": "country", "countries": ["SS"]}]
)
assert res.status_code == 400
assert res.json() == {
"filter_object": {"0": {"countries": ["Unrecognized country code 'SS'"]}}
}
CountryFactory(code="SS", name="South Sudan")
res = self.make_recipe(
api_client, filter_object=[{"type": "country", "countries": ["SS"]}]
)
assert res.status_code == 201
def test_channel_works(self, api_client):
channel1 = ChannelFactory(slug="beta")
channel2 = ChannelFactory(slug="release")
res = self.make_recipe(
api_client,
filter_object=[{"type": "channel", "channels": [channel1.slug, channel2.slug]}],
)
assert res.status_code == 201, res.json()
recipe_data = res.json()
Recipe.objects.get(id=recipe_data["id"])
assert recipe_data["latest_revision"]["filter_expression"] == (
f'(normandy.channel in ["{channel1.slug}","{channel2.slug}"]) && (true)'
)
def test_channel_correct_fields(self, api_client):
res = self.make_recipe(api_client, filter_object=[{"type": "channel"}])
assert res.status_code == 400
assert res.json() == {
"filter_object": {"0": {"channels": ["This field is required."]}}
}
def test_locale_works(self, api_client):
locale1 = LocaleFactory()
locale2 = LocaleFactory(code="de")
res = self.make_recipe(
api_client,
filter_object=[{"type": "locale", "locales": [locale1.code, locale2.code]}],
)
assert res.status_code == 201, res.json()
recipe_data = res.json()
Recipe.objects.get(id=recipe_data["id"])
assert recipe_data["latest_revision"]["filter_expression"] == (
f'(normandy.locale in ["{locale1.code}","{locale2.code}"]) && (true)'
)
def test_locale_correct_fields(self, api_client):
res = self.make_recipe(api_client, filter_object=[{"type": "locale"}])
assert res.status_code == 400
assert res.json() == {"filter_object": {"0": {"locales": ["This field is required."]}}}
def test_country_works(self, api_client):
country1 = CountryFactory()
country2 = CountryFactory(code="DE")
res = self.make_recipe(
api_client,
filter_object=[{"type": "country", "countries": [country1.code, country2.code]}],
)
assert res.status_code == 201, res.json()
recipe_data = res.json()
Recipe.objects.get(id=recipe_data["id"])
assert recipe_data["latest_revision"]["filter_expression"] == (
f'(normandy.country in ["{country1.code}","{country2.code}"]) && (true)'
)
def test_country_correct_fields(self, api_client):
res = self.make_recipe(api_client, filter_object=[{"type": "country"}])
assert res.status_code == 400
assert res.json() == {
"filter_object": {"0": {"countries": ["This field is required."]}}
}
def test_bucket_sample_works(self, api_client):
res = self.make_recipe(
api_client,
filter_object=[
{
"type": "bucketSample",
"start": 1,
"count": 2,
"total": 3,
"input": ["normandy.userId", "normandy.recipeId"],
}
],
)
assert res.status_code == 201, res.json()
recipe_data = res.json()
Recipe.objects.get(id=recipe_data["id"])
assert recipe_data["latest_revision"]["filter_expression"] == (
"([normandy.userId,normandy.recipeId]|bucketSample(1,2,3)) && (true)"
)
def test_bucket_sample_correct_fields(self, api_client):
res = self.make_recipe(api_client, filter_object=[{"type": "bucketSample"}])
assert res.status_code == 400
assert res.json() == {
"filter_object": {
"0": {
"start": ["This field is required."],
"count": ["This field is required."],
"total": ["This field is required."],
"input": ["This field is required."],
}
}
}
res = self.make_recipe(
api_client,
filter_object=[{"type": "bucketSample", "start": "a", "count": -1, "total": -2}],
)
assert res.status_code == 400
assert res.json() == {
"filter_object": {
"0": {
"start": ["A valid number is required."],
"count": ["Ensure this value is greater than or equal to 0."],
"total": ["Ensure this value is greater than or equal to 0."],
"input": ["This field is required."],
}
}
}
def test_stable_sample_works(self, api_client):
res = self.make_recipe(
api_client,
filter_object=[
{
"type": "stableSample",
"rate": 0.5,
"input": ["normandy.userId", "normandy.recipeId"],
}
],
)
assert res.status_code == 201, res.json()
recipe_data = res.json()
Recipe.objects.get(id=recipe_data["id"])
assert recipe_data["latest_revision"]["filter_expression"] == (
"([normandy.userId,normandy.recipeId]|stableSample(0.5)) && (true)"
)
def test_stable_sample_correct_fields(self, api_client):
res = self.make_recipe(api_client, filter_object=[{"type": "stableSample"}])
assert res.status_code == 400
assert res.json() == {
"filter_object": {
"0": {
"rate": ["This field is required."],
"input": ["This field is required."],
}
}
}
res = self.make_recipe(
api_client, filter_object=[{"type": "stableSample", "rate": 10}]
)
assert res.status_code == 400
assert res.json() == {
"filter_object": {
"0": {
"rate": ["Ensure this value is less than or equal to 1."],
"input": ["This field is required."],
}
}
}
def test_version_works(self, api_client):
res = self.make_recipe(
api_client, filter_object=[{"type": "version", "versions": [57, 58, 62]}]
)
assert res.status_code == 201, res.json()
recipe_data = res.json()
Recipe.objects.get(id=recipe_data["id"])
assert recipe_data["latest_revision"]["filter_expression"] == (
'((env.version|versionCompare("57.!")>=0)&&'
'(env.version|versionCompare("57.*")<0)||'
'(env.version|versionCompare("58.!")>=0)&&'
'(env.version|versionCompare("58.*")<0)||'
'(env.version|versionCompare("62.!")>=0)&&'
'(env.version|versionCompare("62.*")<0)) && (true)'
)
def test_version_correct_fields(self, api_client):
res = self.make_recipe(api_client, filter_object=[{"type": "version"}])
assert res.status_code == 400
assert res.json() == {
"filter_object": {"0": {"versions": ["This field is required."]}}
}
def test_invalid_filter(self, api_client):
res = self.make_recipe(api_client, filter_object=[{"type": "invalid"}])
assert res.status_code == 400
assert res.json() == {
"filter_object": {"0": {"type": ['Unknown filter object type "invalid".']}}
}
@pytest.mark.django_db
class TestDetail(object):
def test_history(self, api_client):
recipe = RecipeFactory(name="version 1")
recipe.revise(name="version 2")
recipe.revise(name="version 3")
res = api_client.get("/api/v3/recipe/%s/history/" % recipe.id)
assert res.data[0]["name"] == "version 3"
assert res.data[1]["name"] == "version 2"
assert res.data[2]["name"] == "version 1"
def test_it_can_enable_recipes(self, api_client):
recipe = RecipeFactory(approver=UserFactory())
res = api_client.post("/api/v3/recipe/%s/enable/" % recipe.id)
assert res.status_code == 200
assert res.data["approved_revision"]["enabled"] is True
recipe = Recipe.objects.all()[0]
assert recipe.approved_revision.enabled
def test_cannot_enable_unapproved_recipes(self, api_client):
recipe = RecipeFactory()
res = api_client.post("/api/v3/recipe/%s/enable/" % recipe.id)
assert res.status_code == 409
assert res.data["error"] == "Cannot enable a recipe that is not approved."
def test_cannot_enable_enabled_recipes(self, api_client):
recipe = RecipeFactory(approver=UserFactory(), enabler=UserFactory())
res = api_client.post("/api/v3/recipe/%s/enable/" % recipe.id)
assert res.status_code == 409
assert res.data["error"] == "This revision is already enabled."
def test_it_can_disable_enabled_recipes(self, api_client):
recipe = RecipeFactory(approver=UserFactory(), enabler=UserFactory())
assert recipe.approved_revision.enabled
res = api_client.post("/api/v3/recipe/%s/disable/" % recipe.id)
assert res.status_code == 200
assert res.data["approved_revision"]["enabled"] is False
recipe = Recipe.objects.all()[0]
assert not recipe.approved_revision.enabled
# Can't disable it a second time.
res = api_client.post("/api/v3/recipe/%s/disable/" % recipe.id)
assert res.status_code == 409
assert res.json()["error"] == "This revision is already disabled."
def test_detail_view_includes_cache_headers(self, api_client):
recipe = RecipeFactory()
res = api_client.get(f"/api/v3/recipe/{recipe.id}/")
assert res.status_code == 200
# It isn't important to assert a particular value for max-age
assert "max-age=" in res["Cache-Control"]
assert "public" in res["Cache-Control"]
def test_detail_sets_no_cookies(self, api_client):
recipe = RecipeFactory()
res = api_client.get("/api/v3/recipe/{id}/".format(id=recipe.id))
assert res.status_code == 200
assert res.client.cookies == {}
@pytest.mark.django_db
class TestFiltering(object):
def test_filtering_by_enabled_lowercase(self, api_client):
r1 = RecipeFactory(approver=UserFactory(), enabler=UserFactory())
RecipeFactory()
res = api_client.get("/api/v3/recipe/?enabled=true")
assert res.status_code == 200
assert [r["id"] for r in res.data["results"]] == [r1.id]
def test_filtering_by_enabled_fuzz(self, api_client):
"""
Test that we don't return 500 responses when we get unexpected boolean filters.
This was a real case that showed up in our error logging.
"""
url = (
"/api/v3/recipe/?enabled=javascript%3a%2f*"
"<%2fscript><svg%2fonload%3d'%2b%2f'%2f%2b"
)
res = api_client.get(url)
assert res.status_code == 200
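        # Editor's sketch (not part of the original suite): the fuzz test above
        # only cares that arbitrary `enabled` values never cause a server
        # error. The same expectation, hedged to "no 5xx", for a few more
        # arbitrary non-boolean strings:
        def test_filtering_by_enabled_other_garbage_values(self, api_client):
            for value in ("maybe", "yes!", "42"):
                res = api_client.get(f"/api/v3/recipe/?enabled={value}")
                assert res.status_code < 500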
def test_list_filter_status(self, api_client):
r1 = RecipeFactory()
r2 = RecipeFactory(approver=UserFactory(), enabler=UserFactory())
res = api_client.get("/api/v3/recipe/?status=enabled")
assert res.status_code == 200
results = res.data["results"]
assert len(results) == 1
assert results[0]["id"] == r2.id
res = api_client.get("/api/v3/recipe/?status=disabled")
assert res.status_code == 200
results = res.data["results"]
assert len(results) == 1
assert results[0]["id"] == r1.id
def test_list_filter_text(self, api_client):
r1 = RecipeFactory(name="first", extra_filter_expression="1 + 1 == 2")
r2 = RecipeFactory(name="second", extra_filter_expression="one + one == two")
res = api_client.get("/api/v3/recipe/?text=first")
assert res.status_code == 200
results = res.data["results"]
assert len(results) == 1
assert results[0]["id"] == r1.id
res = api_client.get("/api/v3/recipe/?text=one")
assert res.status_code == 200
results = res.data["results"]
assert len(results) == 1
assert results[0]["id"] == r2.id
res = api_client.get("/api/v3/recipe/?text=t")
assert res.status_code == 200
results = res.data["results"]
assert len(results) == 2
for recipe in results:
assert recipe["id"] in [r1.id, r2.id]
def test_list_filter_text_null_bytes(self, api_client):
res = api_client.get("/api/v3/recipe/?text=\x00")
assert res.status_code == 400
assert res.json()["detail"] == "Null bytes in text"
def test_search_works_with_arguments(self, api_client):
r1 = RecipeFactory(arguments={"one": 1})
r2 = RecipeFactory(arguments={"two": 2})
res = api_client.get("/api/v3/recipe/?text=one")
assert res.status_code == 200
assert [r["id"] for r in res.data["results"]] == [r1.id]
res = api_client.get("/api/v3/recipe/?text=2")
assert res.status_code == 200
assert [r["id"] for r in res.data["results"]] == [r2.id]
def test_search_out_of_order(self, api_client):
r1 = RecipeFactory(name="apple banana")
r2 = RecipeFactory(name="cherry daikon")
res = api_client.get("/api/v3/recipe/?text=banana apple")
assert res.status_code == 200
assert [r["id"] for r in res.data["results"]] == [r1.id]
res = api_client.get("/api/v3/recipe/?text=daikon cherry")
assert res.status_code == 200
assert [r["id"] for r in res.data["results"]] == [r2.id]
def test_search_all_words_required(self, api_client):
r1 = RecipeFactory(name="apple banana")
RecipeFactory(name="apple")
res = api_client.get("/api/v3/recipe/?text=apple banana")
assert res.status_code == 200
assert [r["id"] for r in res.data["results"]] == [r1.id]
def test_list_filter_action_legacy(self, api_client):
a1 = ActionFactory()
a2 = ActionFactory()
r1 = RecipeFactory(action=a1)
r2 = RecipeFactory(action=a2)
assert a1.id != a2.id
res = api_client.get(f"/api/v3/recipe/?latest_revision__action={a1.id}")
assert res.status_code == 200
assert [r["id"] for r in res.data["results"]] == [r1.id]
res = api_client.get(f"/api/v3/recipe/?latest_revision__action={a2.id}")
assert res.status_code == 200
assert [r["id"] for r in res.data["results"]] == [r2.id]
assert a1.id != -1 and a2.id != -1
res = api_client.get("/api/v3/recipe/?latest_revision__action=-1")
assert res.status_code == 400
assert res.data["latest_revision__action"][0].code == "invalid_choice"
def test_list_filter_action(self, api_client):
a1 = ActionFactory()
a2 = ActionFactory()
r1 = RecipeFactory(action=a1)
r2 = RecipeFactory(action=a2)
assert a1.name != a2.name
res = api_client.get(f"/api/v3/recipe/?action={a1.name}")
assert res.status_code == 200
assert [r["id"] for r in res.data["results"]] == [r1.id]
res = api_client.get(f"/api/v3/recipe/?action={a2.name}")
assert res.status_code == 200
assert [r["id"] for r in res.data["results"]] == [r2.id]
assert a1.name != "nonexistant" and a2.name != "nonexistant"
res = api_client.get("/api/v3/recipe/?action=nonexistant")
assert res.status_code == 200
assert res.data["count"] == 0
def test_filter_by_experimenter_slug(self, api_client):
RecipeFactory()
match1 = RecipeFactory(experimenter_slug="a-slug")
RecipeFactory(experimenter_slug="something-else")
RecipeFactory(experimenter_slug="some-other-slug")
res = api_client.get("/api/v3/recipe/?experimenter_slug=a-slug")
assert res.status_code == 200
assert res.data["count"] == 1
assert set(r["id"] for r in res.data["results"]) == set([match1.id])
def test_order_last_updated(self, api_client):
r1 = RecipeFactory()
r2 = RecipeFactory()
now = r1.latest_revision.updated
yesterday = now - timedelta(days=1)
r1.latest_revision.updated = yesterday
r2.latest_revision.updated = now
# Call the super class's save method so that
# `latest_revision.updated` doesn't get rewritten
super(RecipeRevision, r1.latest_revision).save()
super(RecipeRevision, r2.latest_revision).save()
res = api_client.get("/api/v3/recipe/?ordering=last_updated")
assert res.status_code == 200
assert [r["id"] for r in res.data["results"]] == [r1.id, r2.id]
res = api_client.get("/api/v3/recipe/?ordering=-last_updated")
assert res.status_code == 200
assert [r["id"] for r in res.data["results"]] == [r2.id, r1.id]
def test_order_name(self, api_client):
r1 = RecipeFactory(name="a")
r2 = RecipeFactory(name="b")
res = api_client.get("/api/v3/recipe/?ordering=name")
assert res.status_code == 200
assert [r["id"] for r in res.data["results"]] == [r1.id, r2.id]
res = api_client.get("/api/v3/recipe/?ordering=-name")
assert res.status_code == 200
assert [r["id"] for r in res.data["results"]] == [r2.id, r1.id]
def test_order_by_action_name(self, api_client):
r1 = RecipeFactory(name="a")
r1.latest_revision.action.name = "Bee"
r1.latest_revision.action.save()
r2 = RecipeFactory(name="b")
r2.latest_revision.action.name = "Cee"
r2.latest_revision.action.save()
r3 = RecipeFactory(name="c")
r3.latest_revision.action.name = "Ahh"
r3.latest_revision.action.save()
res = api_client.get("/api/v3/recipe/?ordering=action")
assert res.status_code == 200
# Expected order is ['Ahh', 'Bee', 'Cee']
assert [r["id"] for r in res.data["results"]] == [r3.id, r1.id, r2.id]
res = api_client.get("/api/v3/recipe/?ordering=-action")
assert res.status_code == 200
# Expected order is ['Cee', 'Bee', 'Ahh']
assert [r["id"] for r in res.data["results"]] == [r2.id, r1.id, r3.id]
def test_order_bogus(self, api_client):
"""Test that filtering by an unknown key doesn't change the sort order"""
RecipeFactory(name="a")
RecipeFactory(name="b")
res = api_client.get("/api/v3/recipe/?ordering=bogus")
assert res.status_code == 200
first_ordering = [r["id"] for r in res.data["results"]]
res = api_client.get("/api/v3/recipe/?ordering=-bogus")
assert res.status_code == 200
assert [r["id"] for r in res.data["results"]] == first_ordering
@pytest.mark.django_db
class TestRecipeRevisionAPI(object):
def test_it_works(self, api_client):
res = api_client.get("/api/v3/recipe_revision/")
assert res.status_code == 200
assert res.data == {"count": 0, "next": None, "previous": None, "results": []}
def test_it_serves_revisions(self, api_client):
recipe = RecipeFactory()
res = api_client.get("/api/v3/recipe_revision/%s/" % recipe.latest_revision.id)
assert res.status_code == 200
assert res.data["id"] == recipe.latest_revision.id
def test_request_approval(self, api_client):
recipe = RecipeFactory()
res = api_client.post(
"/api/v3/recipe_revision/{}/request_approval/".format(recipe.latest_revision.id)
)
assert res.status_code == 201
assert res.data["id"] == recipe.latest_revision.approval_request.id
def test_cannot_open_second_approval_request(self, api_client):
recipe = RecipeFactory()
ApprovalRequestFactory(revision=recipe.latest_revision)
res = api_client.post(
"/api/v3/recipe_revision/{}/request_approval/".format(recipe.latest_revision.id)
)
assert res.status_code == 400
def test_it_has_an_identicon_seed(self, api_client):
recipe = RecipeFactory(enabler=UserFactory(), approver=UserFactory())
res = api_client.get(f"/api/v3/recipe_revision/{recipe.latest_revision.id}/")
assert res.data["identicon_seed"] == recipe.latest_revision.identicon_seed
def test_metadata(self, api_client):
revision = RecipeFactory(metadata={"test1": "a"}).latest_revision
assert revision.metadata == {"test1": "a"}
url = f"/api/v3/recipe_revision/{revision.id}/"
metadata_url = url + "metadata/"
res = api_client.get(url)
assert res.status_code == 200
assert res.data["metadata"] == {"test1": "a"}
res = api_client.patch(metadata_url, {"test1": "b"})
assert res.status_code == 200
revision.refresh_from_db()
assert revision.metadata == {"test1": "b"}
res = api_client.patch(metadata_url, {"test2": "c"})
assert res.status_code == 200
revision.refresh_from_db()
assert revision.metadata == {"test1": "b", "test2": "c"}
res = api_client.patch(metadata_url, {"test1": None})
assert res.status_code == 200
revision.refresh_from_db()
assert revision.metadata == {"test2": "c"}
res = api_client.get(metadata_url)
assert res.status_code == 200
assert res.data == {"test2": "c"}
def test_filter_by_creation_date(self, api_client):
r1 = RecipeFactory(created=datetime(2019, 1, 1)).latest_revision
r2 = RecipeFactory(created=datetime(2020, 1, 1)).latest_revision
r3 = RecipeFactory(created=datetime(2021, 1, 1)).latest_revision
res = api_client.get("/api/v3/recipe_revision/")
assert res.status_code == 200, res.data
assert set(rev["id"] for rev in res.data["results"]) == set(
[r1.id, r2.id, r3.id]
), "no filters returns everything"
assert set(rev["date_created"][:4] for rev in res.data["results"]) == set(
["2019", "2020", "2021"]
), "correct dates are in the API"
res = api_client.get("/api/v3/recipe_revision/?created_end=2020-06-01")
assert res.status_code == 200, res.data
assert set(rev["id"] for rev in res.data["results"]) == set(
[r1.id, r2.id]
), "before filter works"
res = api_client.get("/api/v3/recipe_revision/?created_start=2019-06-01")
assert res.status_code == 200, res.data
assert set(rev["id"] for rev in res.data["results"]) == set(
[r2.id, r3.id]
), "after filter works"
res = api_client.get(
"/api/v3/recipe_revision/?created_start=2019-06-01&created_end=2020-06-01"
)
assert res.status_code == 200, res.data
assert set(rev["id"] for rev in res.data["results"]) == set(
[r2.id]
), "before and after can be combined"
res = api_client.get("/api/v3/recipe_revision/?created_end=1965-01-01")
assert res.status_code == 200, res.data
assert len(res.data["results"]) == 0, "before can find nothing"
res = api_client.get("/api/v3/recipe_revision/?created_start=2055-01-01")
assert res.status_code == 200, res.data
assert len(res.data["results"]) == 0, "after can find nothing"
def test_filter_by_creation_date_is_inclusive(self, api_client):
revisions = [RecipeFactory(created=datetime(2021, 1, d)) for d in range(1, 32)]
assert len(revisions) == 31
res = api_client.get(
"/api/v3/recipe_revision/?created_start=2021-01-10&created_end=2021-01-20"
)
assert res.status_code == 200, res.data
assert set(rev["date_created"][:10] for rev in res.data["results"]) == set(
f"2021-01-{d}" for d in range(10, 21)
)
@pytest.mark.django_db
class TestApprovalRequestAPI(object):
def test_it_works(self, api_client):
res = api_client.get("/api/v3/approval_request/")
assert res.status_code == 200
assert res.data == {"count": 0, "next": None, "previous": None, "results": []}
def test_approve(self, api_client):
r = RecipeFactory()
a = ApprovalRequestFactory(revision=r.latest_revision)
res = api_client.post(
"/api/v3/approval_request/{}/approve/".format(a.id), {"comment": "r+"}
)
assert res.status_code == 200
r.refresh_from_db()
assert r.is_approved
assert r.approved_revision.approval_request.comment == "r+"
def test_approve_no_comment(self, api_client):
r = RecipeFactory()
a = ApprovalRequestFactory(revision=r.latest_revision)
res = api_client.post("/api/v3/approval_request/{}/approve/".format(a.id))
assert res.status_code == 400
assert res.data["comment"] == "This field is required."
def test_approve_not_actionable(self, api_client):
r = RecipeFactory()
a = ApprovalRequestFactory(revision=r.latest_revision)
a.approve(UserFactory(), "r+")
res = api_client.post(
"/api/v3/approval_request/{}/approve/".format(a.id), {"comment": "r+"}
)
assert res.status_code == 400
assert res.data["error"] == "This approval request has already been approved or rejected."
def test_reject(self, api_client):
r = RecipeFactory()
a = ApprovalRequestFactory(revision=r.latest_revision)
res = api_client.post(
"/api/v3/approval_request/{}/reject/".format(a.id), {"comment": "r-"}
)
assert res.status_code == 200
r.latest_revision.approval_request.refresh_from_db()
assert r.latest_revision.approval_status == r.latest_revision.REJECTED
assert r.latest_revision.approval_request.comment == "r-"
def test_reject_no_comment(self, api_client):
r = RecipeFactory()
a = ApprovalRequestFactory(revision=r.latest_revision)
res = api_client.post("/api/v3/approval_request/{}/reject/".format(a.id))
assert res.status_code == 400
assert res.data["comment"] == "This field is required."
def test_reject_not_actionable(self, api_client):
r = RecipeFactory()
a = ApprovalRequestFactory(revision=r.latest_revision)
a.approve(UserFactory(), "r+")
res = api_client.post(
"/api/v3/approval_request/{}/reject/".format(a.id), {"comment": "-r"}
)
assert res.status_code == 400
assert res.data["error"] == "This approval request has already been approved or rejected."
def test_close(self, api_client):
r = RecipeFactory()
a = ApprovalRequestFactory(revision=r.latest_revision)
res = api_client.post("/api/v3/approval_request/{}/close/".format(a.id))
assert res.status_code == 204
with pytest.raises(ApprovalRequest.DoesNotExist):
ApprovalRequest.objects.get(pk=a.pk)
def test_list(self, api_client):
approval_requests = ApprovalRequestFactory.create_batch(5)
res = api_client.get("/api/v3/approval_request/")
assert res.status_code == 200
assert set(a.id for a in approval_requests) == set(a["id"] for a in res.data["results"])
def test_filter_by_approval(self, api_client):
pending = ApprovalRequestFactory(approved=None)
approved = ApprovalRequestFactory(approved=True)
rejected = ApprovalRequestFactory(approved=False)
# First check that all of them show up
res = api_client.get("/api/v3/approval_request/")
assert res.status_code == 200
assert {approved.id, pending.id, rejected.id} == set(a["id"] for a in res.data["results"])
# Now check that filtering works as expected
patterns = [
(["true", "1", "approved"], approved.id),
(["false", "0", "rejected"], rejected.id),
(["null", "pending"], pending.id),
]
for (aliases, expected_id) in patterns:
for alias in aliases:
res = api_client.get(f"/api/v3/approval_request/?approved={alias}")
assert res.status_code == 200, f"Expected {alias} to give a 200 status"
assert {expected_id} == set(
a["id"] for a in res.data["results"]
), f"Expected alias {alias!r} to return the right approval request"
@pytest.mark.django_db
class TestApprovalFlow(object):
def verify_signatures(self, api_client, expected_count=None):
# v1 usage here is correct, since v3 doesn't yet provide signatures
res = api_client.get("/api/v1/recipe/signed/")
assert res.status_code == 200
signed_data = res.json()
if expected_count is not None:
assert len(signed_data) == expected_count
for recipe_and_signature in signed_data:
recipe = recipe_and_signature["recipe"]
expected_signature = recipe_and_signature["signature"]["signature"]
data = canonical_json_dumps(recipe).encode()
actual_signature = fake_sign([data])[0]["signature"]
assert actual_signature == expected_signature
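    # Editor's sketch (not part of the original suite): verify_signatures above
    # depends on canonical_json_dumps producing a stable, byte-identical
    # serialization. Assuming it canonicalizes key order, two equal dicts with
    # different insertion order should serialize to the same string:
    def test_canonical_json_dumps_is_key_order_insensitive(self):
        assert canonical_json_dumps({"b": 1, "a": 2}) == canonical_json_dumps({"a": 2, "b": 1})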
def test_full_approval_flow(self, settings, api_client, mocked_autograph):
# The `mocked_autograph` fixture is provided so that recipes can be signed
settings.PEER_APPROVAL_ENFORCED = True
action = ActionFactory()
user1 = UserFactory(is_superuser=True)
user2 = UserFactory(is_superuser=True)
api_client.force_authenticate(user1)
settings.BASELINE_CAPABILITIES |= action.capabilities
# Create a recipe
res = api_client.post(
"/api/v3/recipe/",
{
"action_id": action.id,
"arguments": {},
"name": "test recipe",
"extra_filter_expression": "counter == 0",
"enabled": "false",
},
)
assert res.status_code == 201, res.data
recipe_data_0 = res.json()
# It is visible in the api but not approved
res = api_client.get(f"/api/v3/recipe/{recipe_data_0['id']}/")
assert res.status_code == 200
assert res.json()["latest_revision"] is not None
assert res.json()["approved_revision"] is None
# Request approval for it
res = api_client.post(
"/api/v3/recipe_revision/{}/request_approval/".format(
recipe_data_0["latest_revision"]["id"]
)
)
approval_data = res.json()
assert res.status_code == 201
# The requester isn't allowed to approve a recipe
res = api_client.post(
"/api/v3/approval_request/{}/approve/".format(approval_data["id"]), {"comment": "r+"}
)
assert res.status_code == 403 # Forbidden
# Approve and enable the recipe
api_client.force_authenticate(user2)
res = api_client.post(
"/api/v3/approval_request/{}/approve/".format(approval_data["id"]), {"comment": "r+"}
)
assert res.status_code == 200
res = api_client.post("/api/v3/recipe/{}/enable/".format(recipe_data_0["id"]))
assert res.status_code == 200
# It is now visible in the API as approved and signed
res = api_client.get("/api/v3/recipe/{}/".format(recipe_data_0["id"]))
assert res.status_code == 200
recipe_data_1 = res.json()
assert recipe_data_1["approved_revision"] is not None
assert (
Recipe.objects.get(id=recipe_data_1["id"]).latest_revision.capabilities
<= settings.BASELINE_CAPABILITIES
)
self.verify_signatures(api_client, expected_count=1)
# Make another change
api_client.force_authenticate(user1)
res = api_client.patch(
"/api/v3/recipe/{}/".format(recipe_data_1["id"]),
{"extra_filter_expression": "counter == 1"},
)
assert res.status_code == 200
# The change should only be seen in the latest revision, not the approved
res = api_client.get("/api/v3/recipe/{}/".format(recipe_data_1["id"]))
assert res.status_code == 200
recipe_data_2 = res.json()
assert recipe_data_2["approved_revision"]["extra_filter_expression"] == "counter == 0"
assert recipe_data_2["latest_revision"]["extra_filter_expression"] == "counter == 1"
self.verify_signatures(api_client, expected_count=1)
# Request approval for the change
res = api_client.post(
"/api/v3/recipe_revision/{}/request_approval/".format(
recipe_data_2["latest_revision"]["id"]
)
)
approval_data = res.json()
recipe_data_2["latest_revision"]["approval_request"] = approval_data
assert res.status_code == 201
# The change should not be visible yet, since it isn't approved
res = api_client.get("/api/v3/recipe/{}/".format(recipe_data_1["id"]))
assert res.status_code == 200
assert res.json()["approved_revision"] == recipe_data_2["approved_revision"]
assert res.json()["latest_revision"] == recipe_data_2["latest_revision"]
self.verify_signatures(api_client, expected_count=1)
# Can't reject your own approval
res = api_client.post(
"/api/v3/approval_request/{}/reject/".format(approval_data["id"]), {"comment": "r-"}
)
assert res.status_code == 403
assert res.json()["error"] == "You cannot reject your own approval request."
# Reject the change
api_client.force_authenticate(user2)
res = api_client.post(
"/api/v3/approval_request/{}/reject/".format(approval_data["id"]), {"comment": "r-"}
)
approval_data = res.json()
recipe_data_2["approval_request"] = approval_data
recipe_data_2["latest_revision"]["approval_request"] = approval_data
assert res.status_code == 200
# The change should not be visible yet, since it isn't approved
res = api_client.get("/api/v3/recipe/{}/".format(recipe_data_1["id"]))
assert res.status_code == 200
assert res.json()["approved_revision"] == recipe_data_2["approved_revision"]
assert res.json()["latest_revision"] == recipe_data_2["latest_revision"]
self.verify_signatures(api_client, expected_count=1)
# Make a third version of the recipe
api_client.force_authenticate(user1)
res = api_client.patch(
"/api/v3/recipe/{}/".format(recipe_data_1["id"]),
{"extra_filter_expression": "counter == 2"},
)
recipe_data_3 = res.json()
assert res.status_code == 200
# Request approval
res = api_client.post(
"/api/v3/recipe_revision/{}/request_approval/".format(
recipe_data_3["latest_revision"]["id"]
)
)
approval_data = res.json()
assert res.status_code == 201
# Approve the change
api_client.force_authenticate(user2)
res = api_client.post(
"/api/v3/approval_request/{}/approve/".format(approval_data["id"]), {"comment": "r+"}
)
assert res.status_code == 200
# The change should be visible now, since it is approved
res = api_client.get("/api/v3/recipe/{}/".format(recipe_data_1["id"]))
assert res.status_code == 200
recipe_data_4 = res.json()
assert recipe_data_4["approved_revision"]["extra_filter_expression"] == "counter == 2"
self.verify_signatures(api_client, expected_count=1)
def test_cancel_approval(self, api_client, mocked_autograph, settings):
action = ActionFactory()
settings.BASELINE_CAPABILITIES |= action.capabilities
user1 = UserFactory(is_superuser=True)
user2 = UserFactory(is_superuser=True)
api_client.force_authenticate(user1)
# Create a recipe
res = api_client.post(
"/api/v3/recipe/",
{
"action_id": action.id,
"arguments": {},
"name": "test recipe",
"extra_filter_expression": "counter == 0",
"enabled": "false",
},
)
assert res.status_code == 201
recipe_id = res.json()["id"]
revision_id = res.json()["latest_revision"]["id"]
assert (
Recipe.objects.get(id=recipe_id).latest_revision.capabilities
<= settings.BASELINE_CAPABILITIES
)
# Request approval
res = api_client.post(f"/api/v3/recipe_revision/{revision_id}/request_approval/")
assert res.status_code == 201
approval_request_id = res.json()["id"]
# Approve the recipe
api_client.force_authenticate(user2)
res = api_client.post(
f"/api/v3/approval_request/{approval_request_id}/approve/", {"comment": "r+"}
)
assert res.status_code == 200
# The API shouldn't have any signed recipe yet
self.verify_signatures(api_client, expected_count=0)
# Enable the recipe
res = api_client.post(f"/api/v3/recipe/{recipe_id}/enable/")
assert res.status_code == 200
# The API should have correct signatures now
self.verify_signatures(api_client, expected_count=1)
# Make another change
api_client.force_authenticate(user1)
res = api_client.patch(
f"/api/v3/recipe/{recipe_id}/", {"extra_filter_expression": "counter == 1"}
)
assert res.status_code == 200
revision_id = res.json()["latest_revision"]["id"]
# Request approval for the second change
res = api_client.post(f"/api/v3/recipe_revision/{revision_id}/request_approval/")
approval_request_id = res.json()["id"]
assert res.status_code == 201
# Cancel the approval request
res = api_client.post(f"/api/v3/approval_request/{approval_request_id}/close/")
assert res.status_code == 204
# The API should still have correct signatures
self.verify_signatures(api_client, expected_count=1)
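# Summary of the approval lifecycle exercised by the tests above (a sketch of
# the endpoints as they are used here, not an exhaustive API reference):
#   POST /api/v3/recipe_revision/{id}/request_approval/          -> opens an approval request
#   POST /api/v3/approval_request/{id}/approve/  {"comment": "r+"}
#   POST /api/v3/approval_request/{id}/reject/   {"comment": "r-"}
#   POST /api/v3/approval_request/{id}/close/                    -> cancels the request
#   POST /api/v3/recipe/{id}/enable/                             -> enables an approved recipe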
@pytest.mark.django_db
@pytest.mark.parametrize(
"endpoint,Factory",
[
("/api/v3/action/", ActionFactory),
("/api/v3/recipe/", RecipeFactory),
("/api/v3/recipe_revision/", RecipeRevisionFactory),
("/api/v3/approval_request/", ApprovalRequestFactory),
],
)
def test_apis_makes_a_reasonable_number_of_db_queries(endpoint, Factory, client, settings):
# Naive versions of this view could easily make several queries
# per item, which is very slow. Make sure that isn't the case.
Factory.create_batch(100)
queries = CaptureQueriesContext(connection)
with queries:
res = client.get(endpoint)
assert res.status_code == 200
# Pagination naturally makes one query per item in the page. Anything
# under `page_size * 2` isn't doing any additional queries per recipe.
page_size = settings.REST_FRAMEWORK["PAGE_SIZE"]
assert len(queries) < page_size * 2, queries
class TestIdenticonAPI(object):
def test_it_works(self, client):
res = client.get("/api/v3/identicon/v1:foobar.svg")
assert res.status_code == 200
def test_it_returns_the_same_output(self, client):
res1 = client.get("/api/v3/identicon/v1:foobar.svg")
res2 = client.get("/api/v3/identicon/v1:foobar.svg")
assert res1.content == res2.content
def test_it_returns_known_output(self, client):
res = client.get("/api/v3/identicon/v1:foobar.svg")
reference_svg = Path(settings.BASE_DIR).joinpath(
"normandy", "recipes", "tests", "api", "v3", "foobar.svg"
)
with open(reference_svg, "rb") as svg_file:
assert svg_file.read() == res.content
def test_includes_cache_headers(self, client):
res = client.get("/api/v3/identicon/v1:foobar.svg")
assert f"max-age={settings.IMMUTABLE_CACHE_TIME}" in res["Cache-Control"]
assert "public" in res["Cache-Control"]
assert "immutable" in res["Cache-Control"]
def test_unrecognized_generation(self, client):
res = client.get("/api/v3/identicon/v9:foobar.svg")
assert res.status_code == 400
assert res.json()["error"] == "Invalid identicon generation, only v1 is supported."
@pytest.mark.django_db
class TestFilterObjects(object):
def make_recipe(self, api_client, **kwargs):
data = {
"name": "Test Recipe",
"action_id": ActionFactory().id,
"arguments": {},
"enabled": True,
}
data.update(kwargs)
return api_client.post("/api/v3/recipe/", data)
def test_bad_filter_objects(self, api_client):
res = self.make_recipe(api_client, filter_object={}) # not a list
assert res.status_code == 400
assert res.json() == {"filter_object": ['Expected a list of items but got type "dict".']}
res = self.make_recipe(api_client, filter_object=["1 + 1 == 2"]) # not a list of objects
assert res.status_code == 400
assert res.json() == {
"filter_object": {
"0": {"non field errors": ["filter_object members must be objects."]}
}
}
res = self.make_recipe(api_client, filter_object=[{"channels": ["release"]}])
assert res.status_code == 400
assert res.json() == {"filter_object": {"0": {"type": ["This field is required."]}}}
def test_channel_works(self, api_client):
channel1 = ChannelFactory(slug="beta")
channel2 = ChannelFactory(slug="release")
res = self.make_recipe(
api_client,
filter_object=[{"type": "channel", "channels": [channel1.slug, channel2.slug]}],
)
assert res.status_code == 201, res.json()
assert res.json()["latest_revision"]["filter_expression"] == (
f'normandy.channel in ["{channel1.slug}","{channel2.slug}"]'
)
def test_channel_correct_fields(self, api_client):
res = self.make_recipe(api_client, filter_object=[{"type": "channel"}])
assert res.status_code == 400
assert res.json() == {"filter_object": {"0": {"channels": ["This field is required."]}}}
def test_locale_works(self, api_client):
locale1 = LocaleFactory()
locale2 = LocaleFactory(code="de")
res = self.make_recipe(
api_client, filter_object=[{"type": "locale", "locales": [locale1.code, locale2.code]}]
)
assert res.status_code == 201, res.json()
assert res.json()["latest_revision"]["filter_expression"] == (
f'normandy.locale in ["{locale1.code}","{locale2.code}"]'
)
def test_locale_correct_fields(self, api_client):
res = self.make_recipe(api_client, filter_object=[{"type": "locale"}])
assert res.status_code == 400
assert res.json() == {"filter_object": {"0": {"locales": ["This field is required."]}}}
def test_country_works(self, api_client):
country1 = CountryFactory()
country2 = CountryFactory(code="DE")
res = self.make_recipe(
api_client,
filter_object=[{"type": "country", "countries": [country1.code, country2.code]}],
)
assert res.status_code == 201, res.json()
assert res.json()["latest_revision"]["filter_expression"] == (
f'normandy.country in ["{country1.code}","{country2.code}"]'
)
def test_country_correct_fields(self, api_client):
res = self.make_recipe(api_client, filter_object=[{"type": "country"}])
assert res.status_code == 400
assert res.json() == {"filter_object": {"0": {"countries": ["This field is required."]}}}
def test_bucket_sample_works(self, api_client):
res = self.make_recipe(
api_client,
filter_object=[
{
"type": "bucketSample",
"start": 1,
"count": 2,
"total": 3,
"input": ["normandy.userId", "normandy.recipeId"],
}
],
)
assert res.status_code == 201, res.json()
assert res.json()["latest_revision"]["filter_expression"] == (
"[normandy.userId,normandy.recipeId]|bucketSample(1,2,3)"
)
def test_bucket_sample_correct_fields(self, api_client):
res = self.make_recipe(api_client, filter_object=[{"type": "bucketSample"}])
assert res.status_code == 400
assert res.json() == {
"filter_object": {
"0": {
"start": ["This field is required."],
"count": ["This field is required."],
"total": ["This field is required."],
"input": ["This field is required."],
}
}
}
res = self.make_recipe(
api_client,
filter_object=[{"type": "bucketSample", "start": "a", "count": -1, "total": -2}],
)
assert res.status_code == 400
assert res.json() == {
"filter_object": {
"0": {
"start": ["A valid number is required."],
"count": ["Ensure this value is greater than or equal to 0."],
"total": ["Ensure this value is greater than or equal to 0."],
"input": ["This field is required."],
}
}
}
def test_stable_sample_works(self, api_client):
res = self.make_recipe(
api_client,
filter_object=[
{
"type": "stableSample",
"rate": 0.5,
"input": ["normandy.userId", "normandy.recipeId"],
}
],
)
assert res.status_code == 201, res.json()
assert res.json()["latest_revision"]["filter_expression"] == (
"[normandy.userId,normandy.recipeId]|stableSample(0.5)"
)
def test_stable_sample_correct_fields(self, api_client):
res = self.make_recipe(api_client, filter_object=[{"type": "stableSample"}])
assert res.status_code == 400
assert res.json() == {
"filter_object": {
"0": {"rate": ["This field is required."], "input": ["This field is required."]}
}
}
res = self.make_recipe(api_client, filter_object=[{"type": "stableSample", "rate": 10}])
assert res.status_code == 400
assert res.json() == {
"filter_object": {
"0": {
"rate": ["Ensure this value is less than or equal to 1."],
"input": ["This field is required."],
}
}
}
def test_version_works(self, api_client):
res = self.make_recipe(
api_client, filter_object=[{"type": "version", "versions": [57, 58, 59, 60]}]
)
assert res.status_code == 201, res.json()
assert res.json()["latest_revision"]["filter_expression"] == (
'(env.version|versionCompare("57.!")>=0)&&' '(env.version|versionCompare("60.*")<0)'
)
def test_version_correct_fields(self, api_client):
res = self.make_recipe(api_client, filter_object=[{"type": "version"}])
assert res.status_code == 400
assert res.json() == {"filter_object": {"0": {"versions": ["This field is required."]}}}
def test_invalid_filter(self, api_client):
res = self.make_recipe(api_client, filter_object=[{"type": "invalid"}])
assert res.status_code == 400
assert res.json() == {
"filter_object": {"0": {"type": ['Unknown filter object type "invalid".']}}
}
@pytest.mark.django_db
class TestFilters(object):
def test_it_works(self, api_client):
country = CountryFactory()
locale = LocaleFactory()
channel = ChannelFactory()
res = api_client.get("/api/v3/filters/")
assert res.status_code == 200, res.json()
assert res.json() == {
"countries": [{"key": country.code, "value": country.name}],
"locales": [{"key": locale.code, "value": locale.name}],
"channels": [{"key": channel.slug, "value": channel.name}],
"status": [
{"key": "enabled", "value": "Enabled"},
{"key": "disabled", "value": "Disabled"},
],
}
| mpl-2.0 |
mozilla/normandy | normandy/recipes/tests/test_checks.py | 1 | 4881 | from datetime import timedelta
from django.core.exceptions import ImproperlyConfigured
from django.db.utils import ProgrammingError
import pytest
import requests.exceptions
from normandy.recipes import checks, signing
from normandy.recipes.tests import ActionFactory, RecipeFactory, SignatureFactory, UserFactory
@pytest.mark.django_db
class TestSignaturesUseGoodCertificates(object):
def test_it_works(self):
assert checks.signatures_use_good_certificates(None) == []
def test_it_fails_if_a_signature_does_not_verify(self, mocker, settings):
settings.CERTIFICATES_EXPIRE_EARLY_DAYS = None
recipe = RecipeFactory(approver=UserFactory(), signed=True)
mock_verify_x5u = mocker.patch("normandy.recipes.checks.signing.verify_x5u")
mock_verify_x5u.side_effect = signing.BadCertificate("testing exception")
errors = checks.signatures_use_good_certificates(None)
mock_verify_x5u.assert_called_once_with(recipe.signature.x5u, None)
assert len(errors) == 1
assert errors[0].id == checks.ERROR_BAD_SIGNING_CERTIFICATE
assert recipe.approved_revision.name in errors[0].msg
def test_it_ignores_signatures_without_x5u(self):
recipe = RecipeFactory(approver=UserFactory(), signed=True)
recipe.signature.x5u = None
recipe.signature.save()
actions = ActionFactory(signed=True)
actions.signature.x5u = None
actions.signature.save()
assert checks.signatures_use_good_certificates(None) == []
def test_it_ignores_signatures_not_in_use(self, mocker, settings):
settings.CERTIFICATES_EXPIRE_EARLY_DAYS = None
recipe = RecipeFactory(approver=UserFactory(), signed=True)
SignatureFactory(x5u="https://example.com/bad_x5u") # unused signature
mock_verify_x5u = mocker.patch("normandy.recipes.checks.signing.verify_x5u")
def side_effect(x5u, *args):
if "bad" in x5u:
raise signing.BadCertificate("testing exception")
return True
mock_verify_x5u.side_effect = side_effect
errors = checks.signatures_use_good_certificates(None)
mock_verify_x5u.assert_called_once_with(recipe.signature.x5u, None)
assert errors == []
def test_it_passes_expire_early_setting(self, mocker, settings):
settings.CERTIFICATES_EXPIRE_EARLY_DAYS = 7
recipe = RecipeFactory(approver=UserFactory(), signed=True)
mock_verify_x5u = mocker.patch("normandy.recipes.checks.signing.verify_x5u")
errors = checks.signatures_use_good_certificates(None)
mock_verify_x5u.assert_called_once_with(recipe.signature.x5u, timedelta(7))
assert errors == []
def test_it_reports_x5u_network_errors(self, mocker):
RecipeFactory(approver=UserFactory(), signed=True)
mock_verify_x5u = mocker.patch("normandy.recipes.checks.signing.verify_x5u")
mock_verify_x5u.side_effect = requests.exceptions.ConnectionError
errors = checks.signatures_use_good_certificates(None)
mock_verify_x5u.assert_called_once()
assert len(errors) == 1
assert errors[0].id == checks.ERROR_COULD_NOT_VERIFY_CERTIFICATE
@pytest.mark.django_db
class TestRecipeSignatureAreCorrect:
def test_it_warns_if_a_field_isnt_available(self, mocker):
"""This is to allow for un-applied to migrations to not break running migrations."""
RecipeFactory(approver=UserFactory(), signed=True)
mock_canonical_json = mocker.patch("normandy.recipes.models.Recipe.canonical_json")
mock_canonical_json.side_effect = ProgrammingError("error for testing")
errors = checks.recipe_signatures_are_correct(None)
assert len(errors) == 1
assert errors[0].id == checks.WARNING_COULD_NOT_CHECK_SIGNATURES
@pytest.mark.django_db
class TestActionSignatureAreCorrect:
def test_it_warns_if_a_field_isnt_available(self, mocker):
"""This is to allow for un-applied to migrations to not break running migrations."""
ActionFactory(signed=True)
mock_canonical_json = mocker.patch("normandy.recipes.models.Action.canonical_json")
mock_canonical_json.side_effect = ProgrammingError("error for testing")
errors = checks.action_signatures_are_correct(None)
assert len(errors) == 1
assert errors[0].id == checks.WARNING_COULD_NOT_CHECK_SIGNATURES
class TestRemoteSettingsConfigIsCorrect:
def test_it_warns_if_remote_settings_config_is_incorrect(self, mocker):
mock_check_config = mocker.patch("normandy.recipes.exports.RemoteSettings.check_config")
mock_check_config.side_effect = ImproperlyConfigured("error for testing")
errors = checks.remotesettings_config_is_correct(None)
assert len(errors) == 1
assert errors[0].id == checks.ERROR_REMOTE_SETTINGS_INCORRECT_CONFIG
| mpl-2.0 |
mozilla/normandy | normandy/recipes/api/filters.py | 1 | 4813 | import django_filters
from rest_framework import serializers
from normandy.recipes.models import Recipe
class EnabledStateFilter(django_filters.Filter):
"""A special case filter for filtering recipes by their enabled state"""
def filter(self, qs, value):
if value is not None:
lc_value = value.lower()
if lc_value in ["true", "1"]:
return qs.only_enabled()
elif lc_value in ["false", "0"]:
return qs.only_disabled()
return qs
class ApprovalStateFilter(django_filters.Filter):
"""A special case filter for filtering approval requests by their approval state"""
def filter(self, qs, value):
if value is None:
return qs
lc_value = value.lower()
if lc_value in ["true", "1", "approved"]:
return qs.filter(approved=True)
elif lc_value in ["false", "0", "rejected"]:
return qs.filter(approved=False)
elif lc_value in ["null", "pending"]:
return qs.filter(approved=None)
class BaselineCapabilitiesFilter(django_filters.Filter):
"""Filters recipe by whether they use only baseline capabilities, defaulting to only baseline."""
def __init__(self, *args, default_only_baseline=False, **kwargs):
super().__init__(*args, **kwargs)
self.default_only_baseline = default_only_baseline
def filter(self, qs, value):
baseline_only = self.default_only_baseline
if value is not None:
lc_value = value.lower()
baseline_only = lc_value in ["true", "1"]
if baseline_only:
recipes = list(qs)
if not all(isinstance(recipe, Recipe) for recipe in recipes):
raise TypeError("BaselineCapabilitiesFilter can only be used to filter recipes")
match_ids = []
for recipe in recipes:
if (
recipe.approved_revision
and recipe.approved_revision.uses_only_baseline_capabilities()
):
match_ids.append(recipe.id)
return Recipe.objects.filter(id__in=match_ids)
return qs
class CharSplitFilter(django_filters.CharFilter):
"""Custom CharFilter class that splits the value (if it's set) by `,` into a list
and uses the `__in` operator."""
def filter(self, qs, value):
if value:
qs = qs.filter(**{"{}__in".format(self.field_name): value.split(",")})
return qs
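# Illustrative usage (a sketch, not part of the original module): declared as
# `channels = CharSplitFilter("latest_revision__channels__slug")`, a query
# string such as `?channels=beta,release` becomes roughly
# `qs.filter(latest_revision__channels__slug__in=["beta", "release"])`.
# The field and parameter names are borrowed from RecipeFilters elsewhere in
# this codebase; any other field name would behave the same way.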
class FilterObjectFieldFilter(django_filters.Filter):
"""
Find recipes that have a filter object with the given field
Format for the filter's value is `key1:value1,key2:value2`. This would
include recipes that have a filter object that has a field `key1` that
contains the value `value1`, and that have a filter object with a field
`key2` that contains `value2`. The two filter objects do not have to be
the same, but may be.
"""
def filter(self, qs, value):
if value is None:
return qs
needles = []
for segment in value.split(","):
if ":" not in segment:
raise serializers.ValidationError(
{"filter_object": "Filters must be of the format `key1:val1,key2:val2,..."}
)
key, val = segment.split(":", 1)
needles.append((key, val))
# Let the database do a first pass filter
for k, v in needles:
qs = qs.filter(latest_revision__filter_object_json__contains=k)
qs = qs.filter(latest_revision__filter_object_json__contains=v)
recipes = list(qs)
if not all(isinstance(recipe, Recipe) for recipe in recipes):
raise TypeError("FilterObjectFieldFilter can only be used to filter recipes")
# For every recipe that contains the right substrings, look through
# their filter objects for an actual match
match_ids = []
for recipe in recipes:
recipe_matches = True
            # The recipe needs to have all the keys and values in the needles
for k, v in needles:
for filter_object in recipe.latest_revision.filter_object:
# Don't consider invalid filter objects
if not filter_object.is_valid():
continue
if k in filter_object.data and v in str(filter_object.data[k]):
# Found a match
break
else:
# Did not break, so no match was not found
recipe_matches = False
break
if recipe_matches:
match_ids.append(recipe.id)
return Recipe.objects.filter(id__in=match_ids)
| mpl-2.0 |
mozilla/normandy | normandy/recipes/migrations/0008_auto_20180510_2252.py | 1 | 1967 | # Generated by Django 2.0.5 on 2018-05-10 22:52
# flake8: noqa
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("recipes", "0007_convert_simple_filters_to_filter_objects"),
]
operations = [
migrations.CreateModel(
name="EnabledState",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("created", models.DateTimeField(default=django.utils.timezone.now)),
("enabled", models.BooleanField(default=False)),
(
"creator",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="enabled_states",
to=settings.AUTH_USER_MODEL,
),
),
(
"revision",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="enabled_states",
to="recipes.RecipeRevision",
),
),
],
options={"ordering": ("-created",)},
),
migrations.AddField(
model_name="reciperevision",
name="enabled_state",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="current_for_revision",
to="recipes.EnabledState",
),
),
]
| mpl-2.0 |
mozilla/normandy | normandy/recipes/exports.py | 1 | 8717 | import logging
import kinto_http
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from normandy.base.utils import ScopedSettings
APPROVE_CHANGES_FLAG = {"status": "to-sign"}
ROLLBACK_CHANGES_FLAG = {"status": "to-rollback"}
logger = logging.getLogger(__name__)
rs_settings = ScopedSettings("REMOTE_SETTINGS_")
def recipe_as_record(recipe):
"""
Transform a recipe to a dict with the minimum amount of fields needed for clients
to verify and execute recipes.
:param recipe: a recipe ready to be exported.
:returns: a dict to be posted on Remote Settings.
"""
from normandy.recipes.api.v1.serializers import (
MinimalRecipeSerializer,
SignatureSerializer,
) # avoid circular imports
record = {
"id": str(recipe.id),
"recipe": MinimalRecipeSerializer(recipe).data,
"signature": SignatureSerializer(recipe.signature).data,
}
return record
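# Shape of the result, for illustration only (the exact payloads depend on
# MinimalRecipeSerializer and SignatureSerializer):
#   recipe_as_record(recipe) == {
#       "id": "123",           # recipe primary key, as a string
#       "recipe": {...},       # minimal recipe payload that clients execute
#       "signature": {...},    # signature data that clients verify against
#   }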
class RemoteSettings:
"""
Interacts with a RemoteSettings service.
Recipes get published as records in one or both of the dedicated
collections on Remote Settings. When disabled, those records are removed.
Since Normandy already has the required approval/signoff features, the integration
    bypasses that of Remote Settings (leveraging a specific server configuration for this
particular collection).
There are two collections used. One is the "baseline" collection, which
is used only for recipes that fit within the baseline capabilities, and
are therefore compatible with a broad range of clients. The second is the
"capabilities" collection, in which all recipes are published. Clients
that read from the capabilities collection are expected to process
capabilities and only execute compatible recipes.
.. notes::
Remote Settings signoff workflow relies on several buckets (see kinto-signer API).
The ``main-workspace`` is only readable and writable by authorized accounts.
The ``main`` bucket is read-only, but publicly readable. The Remote Settings
clients pull data from there.
    Since the review step is disabled for Normandy, publishing data is done in the following steps:
1. Create, update or delete records in the ``main-workspace`` bucket
2. Approve the changes by flipping the ``status`` field to ``to-sign``
in the collection metadata
3. The server will sign and publish the new data to the ``main`` bucket.
"""
def __init__(self):
# Kinto is the underlying implementation of Remote Settings. The client
# is basically a tiny abstraction on top of the requests library.
self.client = (
kinto_http.Client(
server_url=rs_settings.URL,
auth=(rs_settings.USERNAME, rs_settings.PASSWORD),
retry=rs_settings.RETRY_REQUESTS,
)
if rs_settings.URL
else None
)
def check_config(self):
"""
Verify that integration with Remote Settings is configured properly.
"""
if self.client is None:
return # no check if disabled.
required_keys = [
"CAPABILITIES_COLLECTION_ID",
"WORKSPACE_BUCKET_ID",
"PUBLISH_BUCKET_ID",
"USERNAME",
"PASSWORD",
]
for key in required_keys:
if not getattr(settings, f"REMOTE_SETTINGS_{key}"):
msg = f"set settings.REMOTE_SETTINGS_{key} to use Remote Settings integration"
raise ImproperlyConfigured(msg)
# Test authentication.
server_info = self.client.server_info()
is_authenticated = (
"user" in server_info and rs_settings.USERNAME in server_info["user"]["id"]
)
if not is_authenticated:
raise ImproperlyConfigured("Invalid Remote Settings credentials")
# Test that collection is writable.
bucket = rs_settings.WORKSPACE_BUCKET_ID
collection = rs_settings.CAPABILITIES_COLLECTION_ID
metadata = self.client.get_collection(id=collection, bucket=bucket)
if server_info["user"]["id"] not in metadata["permissions"].get("write", []):
raise ImproperlyConfigured(
f"Remote Settings collection {collection} is not writable in bucket {bucket}."
)
# Test that collection has the proper review settings.
capabilities = server_info["capabilities"]
if "signer" in capabilities:
signer_config = capabilities["signer"]
normandy_resource = [
r
for r in signer_config["resources"]
if r["source"]["bucket"] == bucket and r["source"]["collection"] == collection
]
review_disabled = len(normandy_resource) == 1 and not normandy_resource[0].get(
"to_review_enabled", signer_config["to_review_enabled"]
)
if not review_disabled:
raise ImproperlyConfigured(
f"Review was not disabled on Remote Settings collection {collection}."
)
def published_recipes(self):
"""
Return the current list of remote records.
"""
if self.client is None:
raise ImproperlyConfigured("Remote Settings is not enabled.")
capabilities_records = self.client.get_records(
bucket=rs_settings.PUBLISH_BUCKET_ID, collection=rs_settings.CAPABILITIES_COLLECTION_ID
)
return capabilities_records
def publish(self, recipe, approve_changes=True):
"""
Publish the specified `recipe` on the remote server by upserting a record.
"""
if self.client is None:
return # no-op if disabled.
# 1. Put the record.
record = recipe_as_record(recipe)
self.client.update_record(
data=record,
bucket=rs_settings.WORKSPACE_BUCKET_ID,
collection=rs_settings.CAPABILITIES_COLLECTION_ID,
)
# 2. Approve the changes immediately (multi-signoff is disabled).
log_action = "Batch published"
if approve_changes:
self.approve_changes()
log_action = "Published"
logger.info(
f"{log_action} record '{recipe.id}' for recipe {recipe.approved_revision.name!r}"
)
def unpublish(self, recipe, approve_changes=True):
"""
        Unpublish the specified `recipe` by deleting its associated record on the remote server.
"""
if self.client is None:
return # no-op if disabled.
# 1. Delete the record
either_existed = False
try:
self.client.delete_record(
id=str(recipe.id),
bucket=rs_settings.WORKSPACE_BUCKET_ID,
collection=rs_settings.CAPABILITIES_COLLECTION_ID,
)
either_existed = True
except kinto_http.KintoException as e:
if e.response.status_code == 404:
logger.warning(
f"The recipe '{recipe.id}' was not published in the capabilities collection. Skip."
)
else:
raise
# 2. Approve the changes immediately (multi-signoff is disabled).
log_action = "Batch deleted"
if either_existed and approve_changes:
self.approve_changes()
log_action = "Deleted"
logger.info(
f"{log_action} record '{recipe.id}' of recipe {recipe.approved_revision.name!r}"
)
def approve_changes(self):
"""
Approve the changes made in the workspace collection.
.. note::
This only works because multi-signoff is disabled for the Normandy recipes
in configuration (see :ref:`remote-settings-install`)
"""
if self.client is None:
return # no-op if disabled.
try:
self.client.patch_collection(
id=rs_settings.CAPABILITIES_COLLECTION_ID,
data=APPROVE_CHANGES_FLAG,
bucket=rs_settings.WORKSPACE_BUCKET_ID,
)
logger.info("Changes were approved.")
except kinto_http.exceptions.KintoException:
# Approval failed unexpectedly.
# The changes in the `main-workspace` bucket must be reverted.
self.client.patch_collection(
id=rs_settings.CAPABILITIES_COLLECTION_ID,
data=ROLLBACK_CHANGES_FLAG,
bucket=rs_settings.WORKSPACE_BUCKET_ID,
)
raise
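# Typical driving code (a hedged sketch; the real call sites live elsewhere in
# this project and are not shown in this file):
#
#   exporter = RemoteSettings()
#   exporter.check_config()    # raises ImproperlyConfigured when misconfigured
#   exporter.publish(recipe)   # upsert the record, then approve the changes
#   exporter.unpublish(recipe) # delete the record, then approve the changes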
| mpl-2.0 |
mozilla/normandy | normandy/recipes/tests/test_filters.py | 1 | 25613 | from datetime import datetime
import factory.fuzzy
import pytest
import re
from collections import defaultdict
from rest_framework import serializers
from normandy.base.jexl import get_normandy_jexl
from normandy.recipes import filters
from normandy.recipes.tests import (
ChannelFactory,
CountryFactory,
LocaleFactory,
RecipeRevisionFactory,
WindowsVersionFactory,
)
@pytest.mark.django_db
class FilterTestsBase:
"""Common tests for all filter object types"""
should_be_baseline = True
def create_basic_filter(self):
"""To be overwritten by subclasses to create test filters"""
raise NotImplementedError
def create_revision(self, *args, **kwargs):
return RecipeRevisionFactory(*args, **kwargs)
def test_it_can_be_constructed(self):
self.create_basic_filter()
def test_has_capabilities(self):
filter = self.create_basic_filter()
# Would throw if not defined
assert isinstance(filter.get_capabilities(), set)
def test_jexl_works(self):
filter = self.create_basic_filter()
rev = self.create_revision()
# Would throw if not defined
expr = filter.to_jexl(rev)
assert isinstance(expr, str)
jexl = get_normandy_jexl()
errors = jexl.validate(expr)
assert list(errors) == []
def test_uses_only_baseline_capabilities(self, settings):
if self.should_be_baseline == "skip":
return
filter = self.create_basic_filter()
capabilities = filter.get_capabilities()
if self.should_be_baseline:
assert capabilities <= settings.BASELINE_CAPABILITIES
else:
assert capabilities - settings.BASELINE_CAPABILITIES
def test_it_is_in_the_by_type_list(self):
filter_instance = self.create_basic_filter()
filter_class = filter_instance.__class__
assert filter_class in filters.by_type.values()
def test_its_type_is_camelcase(self):
filter_instance = self.create_basic_filter()
assert re.match("[a-zA-Z]+", filter_instance.type)
assert "_" not in filter_instance.type
class TestProfileCreationDateFilter(FilterTestsBase):
def create_basic_filter(self, direction="olderThan", date="2020-02-01"):
return filters.ProfileCreateDateFilter.create(direction=direction, date=date)
def test_generates_jexl_older_than(self):
filter = self.create_basic_filter(direction="olderThan", date="2020-07-30")
assert (
filter.to_jexl(self.create_revision())
== "(normandy.telemetry.main.environment.profile.creationDate<=18473)"
)
def test_generates_jexl_newer_than(self):
filter = self.create_basic_filter(direction="newerThan", date="2020-02-01")
assert set(filter.to_jexl(self.create_revision()).split("||")) == {
"(!normandy.telemetry.main)",
"(normandy.telemetry.main.environment.profile.creationDate>18293)",
}
def test_issue_2242(self):
"""Make sure that dates are parsed correctly"""
epoch = datetime.utcfromtimestamp(0)
datetime_factory = factory.fuzzy.FuzzyNaiveDateTime(epoch)
dt = datetime_factory.fuzz()
# Profile Creation Date is measured in days since epoch.
daystamp = (dt - epoch).days
filter = self.create_basic_filter(date=f"{dt.year}-{dt.month}-{dt.day}")
assert str(daystamp) in filter.to_jexl(self.create_revision())
def test_throws_error_on_bad_direction(self):
filter = self.create_basic_filter(direction="newer", date="2020-02-01")
with pytest.raises(serializers.ValidationError):
filter.to_jexl(self.create_revision())
def test_throws_error_on_bad_date(self):
with pytest.raises(AssertionError):
self.create_basic_filter(direction="newerThan", date="Jan 7, 2020")
class TestVersionFilter(FilterTestsBase):
should_be_baseline = False
def create_basic_filter(self, versions=None):
if versions is None:
versions = [72, 74]
return filters.VersionFilter.create(versions=versions)
def test_generates_jexl(self):
filter = self.create_basic_filter(versions=[72, 74])
assert set(filter.to_jexl(self.create_revision()).split("||")) == {
'(env.version|versionCompare("72.!")>=0)&&(env.version|versionCompare("72.*")<0)',
'(env.version|versionCompare("74.!")>=0)&&(env.version|versionCompare("74.*")<0)',
}
class TestVersionRangeFilter(FilterTestsBase):
should_be_baseline = False
def create_basic_filter(self, min_version="72.0b2", max_version="72.0b8"):
return filters.VersionRangeFilter.create(min_version=min_version, max_version=max_version)
def test_generates_jexl(self):
filter = self.create_basic_filter(min_version="72.0b2", max_version="75.0a1")
assert set(filter.to_jexl(self.create_revision()).split("&&")) == {
'(env.version|versionCompare("72.0b2")>=0)',
'(env.version|versionCompare("75.0a1")<0)',
}
class TestDateRangeFilter(FilterTestsBase):
def create_basic_filter(
self, not_before="2020-02-01T00:00:00Z", not_after="2020-03-01T00:00:00Z"
):
return filters.DateRangeFilter.create(not_before=not_before, not_after=not_after)
def test_generates_jexl(self):
filter = self.create_basic_filter()
assert set(filter.to_jexl(self.create_revision()).split("&&")) == {
'(normandy.request_time>="2020-02-01T00:00:00Z"|date)',
'(normandy.request_time<"2020-03-01T00:00:00Z"|date)',
}
class TestWindowsBuildNumberFilter(FilterTestsBase):
def create_basic_filter(self, value=12345, comparison="equal"):
return filters.WindowsBuildNumberFilter.create(value=value, comparison=comparison)
@pytest.mark.parametrize(
"comparison,symbol",
[
("equal", "=="),
("greater_than", ">"),
("greater_than_equal", ">="),
("less_than", "<"),
("less_than_equal", "<="),
],
)
def test_generates_jexl_number_ops(self, comparison, symbol):
filter = self.create_basic_filter(comparison=comparison)
assert (
filter.to_jexl(self.create_revision())
== f"(normandy.os.isWindows && normandy.os.windowsBuildNumber {symbol} 12345)"
)
def test_generates_jexl_error_on_bad_comparison(self):
filter = self.create_basic_filter(comparison="typo")
with pytest.raises(serializers.ValidationError):
filter.to_jexl(self.create_revision())
class TestWindowsVersionFilter(FilterTestsBase):
def create_basic_filter(self, versions_list=[6.1]):
WindowsVersionFactory(nt_version=6.1)
return filters.WindowsVersionFilter.create(versions_list=versions_list)
def test_generates_jexl(self):
filter = self.create_basic_filter()
assert (
filter.to_jexl(self.create_revision())
== "(normandy.os.isWindows && normandy.os.windowsVersion in [6.1])"
)
def test_generates_jexl_error_on_bad_version(self):
with pytest.raises(AssertionError):
filters.WindowsVersionFilter.create(versions_list=[8.9])
class TestChannelFilter(FilterTestsBase):
def create_basic_filter(self, channels=None):
if channels:
channel_objs = [ChannelFactory(slug=slug) for slug in channels]
else:
channel_objs = [ChannelFactory()]
return filters.ChannelFilter.create(channels=[c.slug for c in channel_objs])
def test_generates_jexl(self):
filter = self.create_basic_filter(channels=["release", "beta"])
assert filter.to_jexl(self.create_revision()) == 'normandy.channel in ["release","beta"]'
class TestLocaleFilter(FilterTestsBase):
def create_basic_filter(self, locales=None):
if locales:
locale_objs = [LocaleFactory(code=code) for code in locales]
else:
locale_objs = [LocaleFactory()]
return filters.LocaleFilter.create(locales=[locale.code for locale in locale_objs])
def test_generates_jexl(self):
filter = self.create_basic_filter(locales=["en-US", "en-CA"])
assert filter.to_jexl(self.create_revision()) == 'normandy.locale in ["en-US","en-CA"]'
class TestCountryFilter(FilterTestsBase):
def create_basic_filter(self, countries=None):
if countries:
country_objs = [CountryFactory(code=code) for code in countries]
else:
country_objs = [CountryFactory()]
return filters.CountryFilter.create(countries=[c.code for c in country_objs])
def test_generates_jexl(self):
filter = self.create_basic_filter(countries=["SV", "MX"])
assert filter.to_jexl(self.create_revision()) == 'normandy.country in ["SV","MX"]'
class TestPlatformFilter(FilterTestsBase):
def create_basic_filter(self, platforms=["all_mac", "all_windows"]):
return filters.PlatformFilter.create(platforms=platforms)
def test_generates_jexl_list_of_two(self):
filter = self.create_basic_filter()
assert set(filter.to_jexl(self.create_revision()).split("||")) == {
"normandy.os.isMac",
"normandy.os.isWindows",
}
def test_generates_jexl_list_of_one(self):
filter = self.create_basic_filter(platforms=["all_linux"])
assert set(filter.to_jexl(self.create_revision()).split("||")) == {"normandy.os.isLinux"}
def test_throws_error_on_bad_platform(self):
filter = self.create_basic_filter(platforms=["all_linu"])
with pytest.raises(serializers.ValidationError):
filter.to_jexl(self.create_revision())
class TestNegateFilter(FilterTestsBase):
def create_basic_filter(self):
data_for_filter = {"type": "channel", "channels": ["release", "beta"]}
return filters.NegateFilter.create(filter_to_negate=data_for_filter)
def test_generates_jexl(self):
negate_filter = self.create_basic_filter()
assert (
negate_filter.to_jexl(self.create_revision())
== '!(normandy.channel in ["release","beta"])'
)
class TestAndFilter(FilterTestsBase):
def create_basic_filter(self, subfilters=None):
if subfilters is None:
subfilters = [
{"type": "channel", "channels": ["release", "beta"]},
{"type": "locale", "locales": ["en-US", "de"]},
]
return filters.AndFilter.create(subfilters=subfilters)
def test_generates_jexl_zero_subfilters(self):
with pytest.raises(AssertionError) as excinfo:
self.create_basic_filter(subfilters=[])
assert "has at least 1 element" in str(excinfo.value)
def test_generates_jexl_one_subfilter(self):
negate_filter = self.create_basic_filter(
subfilters=[{"type": "channel", "channels": ["release"]}]
)
assert negate_filter.to_jexl(self.create_revision()) == '(normandy.channel in ["release"])'
def test_generates_jexl_two_subfilters(self):
negate_filter = self.create_basic_filter(
subfilters=[
{"type": "channel", "channels": ["release"]},
{"type": "locale", "locales": ["en-US"]},
]
)
assert (
negate_filter.to_jexl(self.create_revision())
== '(normandy.channel in ["release"]&&normandy.locale in ["en-US"])'
)
class TestOrFilter(FilterTestsBase):
def create_basic_filter(self, subfilters=None):
if subfilters is None:
subfilters = [
{"type": "channel", "channels": ["release", "beta"]},
{"type": "locale", "locales": ["en-US", "de"]},
]
return filters.OrFilter.create(subfilters=subfilters)
def test_generates_jexl_zero_subfilters(self):
with pytest.raises(AssertionError) as excinfo:
self.create_basic_filter(subfilters=[])
assert "has at least 1 element" in str(excinfo.value)
def test_generates_jexl_one_subfilter(self):
negate_filter = self.create_basic_filter(
subfilters=[{"type": "channel", "channels": ["release"]}]
)
assert negate_filter.to_jexl(self.create_revision()) == '(normandy.channel in ["release"])'
def test_generates_jexl_two_subfilters(self):
negate_filter = self.create_basic_filter(
subfilters=[
{"type": "channel", "channels": ["release"]},
{"type": "locale", "locales": ["en-US"]},
]
)
assert (
negate_filter.to_jexl(self.create_revision())
== '(normandy.channel in ["release"]||normandy.locale in ["en-US"])'
)
class TestAddonInstalledFilter(FilterTestsBase):
def create_basic_filter(self, addons=["@abcdef", "ghijk@lmnop"], any_or_all="any"):
return filters.AddonInstalledFilter.create(addons=addons, any_or_all=any_or_all)
def test_generates_jexl_installed_any(self):
filter = self.create_basic_filter()
assert set(filter.to_jexl(self.create_revision()).split("||")) == {
'normandy.addons["@abcdef"]',
'normandy.addons["ghijk@lmnop"]',
}
def test_generates_jexl_installed_all(self):
filter = self.create_basic_filter(any_or_all="all")
assert set(filter.to_jexl(self.create_revision()).split("&&")) == {
'normandy.addons["@abcdef"]',
'normandy.addons["ghijk@lmnop"]',
}
def test_throws_error_on_bad_any_or_all(self):
filter = self.create_basic_filter(any_or_all="error")
with pytest.raises(serializers.ValidationError):
filter.to_jexl(self.create_revision())
class TestAddonActiveFilter(FilterTestsBase):
def create_basic_filter(self, addons=["@abcdef", "ghijk@lmnop"], any_or_all="any"):
return filters.AddonActiveFilter.create(addons=addons, any_or_all=any_or_all)
def test_generates_jexl_active_any(self):
filter = self.create_basic_filter()
assert set(filter.to_jexl(self.create_revision()).split("||")) == {
'normandy.addons["@abcdef"].isActive',
'normandy.addons["ghijk@lmnop"].isActive',
}
def test_generates_jexl_active_all(self):
filter = self.create_basic_filter(any_or_all="all")
assert set(filter.to_jexl(self.create_revision()).split("&&")) == {
'normandy.addons["@abcdef"].isActive',
'normandy.addons["ghijk@lmnop"].isActive',
}
def test_throws_error_on_bad_any_or_all(self):
filter = self.create_basic_filter(any_or_all="error")
with pytest.raises(serializers.ValidationError):
filter.to_jexl(self.create_revision())
class TestPrefCompareFilter(FilterTestsBase):
def create_basic_filter(
self, pref="browser.urlbar.maxRichResults", value=10, comparison="equal"
):
return filters.PrefCompareFilter.create(pref=pref, value=value, comparison=comparison)
def test_generates_jexl(self):
filter = self.create_basic_filter()
assert (
filter.to_jexl(self.create_revision())
== "'browser.urlbar.maxRichResults'|preferenceValue == 10"
)
@pytest.mark.parametrize(
"comparison,symbol",
[
("greater_than", ">"),
("greater_than_equal", ">="),
("less_than", "<"),
("less_than_equal", "<="),
],
)
def test_generates_jexl_number_ops(self, comparison, symbol):
filter = self.create_basic_filter(comparison=comparison)
assert (
filter.to_jexl(self.create_revision())
== f"'browser.urlbar.maxRichResults'|preferenceValue {symbol} 10"
)
def test_generates_jexl_boolean(self):
filter = self.create_basic_filter(value=False)
assert (
filter.to_jexl(self.create_revision())
== "'browser.urlbar.maxRichResults'|preferenceValue == false"
)
def test_generates_jexl_string_in(self):
filter = self.create_basic_filter(value="default", comparison="contains")
assert (
filter.to_jexl(self.create_revision())
== "\"default\" in 'browser.urlbar.maxRichResults'|preferenceValue"
)
def test_generates_jexl_error(self):
filter = self.create_basic_filter(comparison="invalid")
with pytest.raises(serializers.ValidationError):
filter.to_jexl(self.create_revision())
class TestPrefExistsFilter(FilterTestsBase):
def create_basic_filter(self, pref="browser.urlbar.maxRichResults", value=True):
return filters.PrefExistsFilter.create(pref=pref, value=value)
def test_generates_jexl_pref_exists_true(self):
filter = self.create_basic_filter()
assert (
filter.to_jexl(self.create_revision())
== "'browser.urlbar.maxRichResults'|preferenceExists"
)
def test_generates_jexl_pref_exists_false(self):
filter = self.create_basic_filter(value=False)
assert (
filter.to_jexl(self.create_revision())
== "!('browser.urlbar.maxRichResults'|preferenceExists)"
)
class TestPrefUserSetFilter(FilterTestsBase):
def create_basic_filter(self, pref="browser.urlbar.maxRichResults", value=True):
return filters.PrefUserSetFilter.create(pref=pref, value=value)
def test_generates_jexl_is_user_set_true(self):
filter = self.create_basic_filter()
assert (
filter.to_jexl(self.create_revision())
== "'browser.urlbar.maxRichResults'|preferenceIsUserSet"
)
def test_generates_jexl_is_user_set_false(self):
filter = self.create_basic_filter(value=False)
assert (
filter.to_jexl(self.create_revision())
== "!('browser.urlbar.maxRichResults'|preferenceIsUserSet)"
)
class TestBucketSampleFilter(FilterTestsBase):
def create_basic_filter(self, input=None, start=123, count=10, total=1_000):
if input is None:
input = ["normandy.clientId"]
return filters.BucketSampleFilter.create(
input=input, start=start, count=count, total=total
)
def test_generates_jexl(self):
filter = self.create_basic_filter(input=["A"], start=10, count=20, total=1_000)
assert filter.to_jexl(self.create_revision()) == "[A]|bucketSample(10,20,1000)"
def test_supports_floats(self):
filter = self.create_basic_filter(input=["A"], start=10, count=0.5, total=1_000)
assert filter.to_jexl(self.create_revision()) == "[A]|bucketSample(10,0.5,1000)"
class TestStableSampleFilter(FilterTestsBase):
def create_basic_filter(self, input=None, rate=0.01):
if input is None:
input = ["normandy.clientId"]
return filters.StableSampleFilter.create(input=input, rate=rate)
def test_generates_jexl(self):
filter = self.create_basic_filter(input=["A"], rate=0.1)
assert filter.to_jexl(self.create_revision()) == "[A]|stableSample(0.1)"
class TestNamespaceSampleFilter(FilterTestsBase):
def create_basic_filter(self, namespace="global-v42", start=123, count=10):
return filters.NamespaceSampleFilter.create(namespace=namespace, start=start, count=count)
def test_generates_jexl(self):
filter = self.create_basic_filter(namespace="fancy-rollout", start=10, count=20)
assert (
filter.to_jexl(self.create_revision())
== '["fancy-rollout",normandy.userId]|bucketSample(10,20,10000)'
)
def test_supports_floats(self):
filter = self.create_basic_filter(namespace="risky-experiment", start=123, count=0.5)
assert (
filter.to_jexl(self.create_revision())
== '["risky-experiment",normandy.userId]|bucketSample(123,0.5,10000)'
)
class TestJexlFilter(FilterTestsBase):
should_be_baseline = "skip"
def create_basic_filter(self, expression="true", capabilities=None, comment="a comment"):
if capabilities is None:
capabilities = ["capabilities-v1"]
return filters.JexlFilter.create(
expression=expression, capabilities=capabilities, comment=comment
)
def test_generates_jexl(self):
filter = self.create_basic_filter(expression="2 + 2")
assert filter.to_jexl(self.create_revision()) == "(2 + 2)"
def test_it_rejects_invalid_jexl(self):
filter = self.create_basic_filter(expression="this is an invalid expression")
with pytest.raises(serializers.ValidationError):
filter.to_jexl(self.create_revision())
def test_it_has_capabilities(self):
filter = self.create_basic_filter(capabilities=["a.b", "c.d"])
assert filter.get_capabilities() == {"a.b", "c.d"}
def test_empty_capabilities_is_ok(self):
filter = self.create_basic_filter(capabilities=[])
assert filter.get_capabilities() == set()
filter.to_jexl(None) # should not throw
def test_throws_error_on_non_iterable_capabilities(self):
with pytest.raises(AssertionError) as excinfo:
self.create_basic_filter(capabilities=5)
assert excinfo.value.args[0]["capabilities"][0].code == "not_a_list"
def test_throws_error_on_non_list_capabilities(self):
with pytest.raises(AssertionError) as excinfo:
self.create_basic_filter(capabilities="a mistake")
assert excinfo.value.args[0]["capabilities"][0].code == "not_a_list"
class TestPresetFilter(FilterTestsBase):
def create_basic_filter(self, name="pocket-1"):
return filters.PresetFilter.create(name=name)
def test_all_choices_have_generators(self):
f = filters.PresetFilter()
choices = f.preset_choices
for choice in choices:
identifier = choice.replace("-", "_")
generator_name = f"_get_subfilters_{identifier}"
getattr(f, generator_name)()
def test_pocket_1(self):
filter_object = self.create_basic_filter(name="pocket-1")
# The preset is an and filter
assert filter_object._get_operator() == "&&"
# Pull out the first level subfilters
subfilters = defaultdict(lambda: [])
for filter in filter_object._get_subfilters():
subfilters[type(filter)].append(filter)
# There should be one or filter
or_filters = subfilters.pop(filters.OrFilter)
assert len(or_filters) == 1
or_subfilters = or_filters[0]._get_subfilters()
# It should be made up of negative PrefUserSet filters
for f in or_subfilters:
assert isinstance(f, filters.PrefUserSetFilter)
assert f.initial_data["value"] is False
        # And it should use the expected prefs
assert set(f.initial_data["pref"] for f in or_subfilters) == set(
["browser.newtabpage.enabled", "browser.startup.homepage"]
)
# There should be a bunch more negative PrefUserSet filters at the top level
pref_subfilters = subfilters.pop(filters.PrefUserSetFilter)
for f in pref_subfilters:
assert f.initial_data["value"] is False
# and they should be the expected prefs
assert set(f.initial_data["pref"] for f in pref_subfilters) == set(
[
"browser.newtabpage.activity-stream.showSearch",
"browser.newtabpage.activity-stream.feeds.topsites",
"browser.newtabpage.activity-stream.feeds.section.topstories",
"browser.newtabpage.activity-stream.feeds.section.highlights",
]
)
# There should be no other filters
assert subfilters == {}, "no unexpected filters"
class TestQaOnlyFilter(FilterTestsBase):
def create_basic_filter(self):
return filters.QaOnlyFilter.create()
def create_revision(self, *args, **kwargs):
kwargs.setdefault("action__name", "multi-preference-experiment")
return super().create_revision(*args, **kwargs)
def test_it_works_for_multi_preference_experiment(self):
rev = self.create_revision(action__name="multi-preference-experiment")
filter = self.create_basic_filter()
slug = rev.arguments["slug"]
assert (
filter.to_jexl(rev)
== f"\"{slug}\" in 'app.normandy.testing-for-recipes'|preferenceValue"
)
def test_it_works_for_branched_addon_study(self):
rev = self.create_revision(action__name="branched-addon-study")
filter = self.create_basic_filter()
slug = rev.arguments["slug"]
assert (
filter.to_jexl(rev)
== f"\"{slug}\" in 'app.normandy.testing-for-recipes'|preferenceValue"
)
def test_it_works_for_preference_rollout(self):
rev = self.create_revision(action__name="preference-rollout")
filter = self.create_basic_filter()
slug = rev.arguments["slug"]
assert (
filter.to_jexl(rev)
== f"\"{slug}\" in 'app.normandy.testing-for-recipes'|preferenceValue"
)
def test_it_works_for_heartbeat(self):
rev = self.create_revision(action__name="show-heartbeat")
filter = self.create_basic_filter()
slug = rev.arguments["surveyId"]
assert (
filter.to_jexl(rev)
== f"\"{slug}\" in 'app.normandy.testing-for-recipes'|preferenceValue"
)
| mpl-2.0 |
mozilla/normandy | normandy/recipes/migrations/0009_auto_20180510_2328.py | 1 | 1037 | # Generated by Django 2.0.5 on 2018-05-10 23:28
from django.db import migrations
def enabled_to_enabled_state(apps, schema_editor):
Recipe = apps.get_model("recipes", "Recipe")
EnabledState = apps.get_model("recipes", "EnabledState")
for recipe in Recipe.objects.filter(enabled=True):
if recipe.approved_revision:
es = EnabledState.objects.create(revision=recipe.approved_revision, enabled=True)
es.current_for_revision.add(recipe.approved_revision)
def enabled_state_to_enabled(apps, schema_editor):
Recipe = apps.get_model("recipes", "Recipe")
for recipe in Recipe.objects.exclude(approved_revision=None):
enabled_state = recipe.approved_revision.enabled_state
if enabled_state and enabled_state.enabled:
recipe.enabled = True
recipe.save()
class Migration(migrations.Migration):
dependencies = [("recipes", "0008_auto_20180510_2252")]
operations = [migrations.RunPython(enabled_to_enabled_state, enabled_state_to_enabled)]
| mpl-2.0 |
mozilla/normandy | normandy/studies/tests/__init__.py | 1 | 4038 | import factory
import json
import tempfile
import zipfile
from factory.django import DjangoModelFactory
from faker import Faker
from normandy.base.tests import FuzzyUnicode
from normandy.studies.models import Extension
INSTALL_RDF_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
<RDF xmlns="http://w3.org/1999/02/22-rdf-syntax-ns#" xmlns:em="http://mozilla.org/2004/em-rdf#">
<Description about="urn:mozilla:install-manifest">
<em:type>2</em:type>
<em:bootstrap>true</em:bootstrap>
<em:unpack>false</em:unpack>
<em:multiprocessCompatible>true</em:multiprocessCompatible>
{}
<em:targetApplication>
<Description>
<em:id>{{ec8030f7-c20a-464f-9b0e-13a3a9e97384}}</em:id>
<em:minVersion>52.0</em:minVersion>
<em:maxVersion>*</em:maxVersion>
</Description>
</em:targetApplication>
</Description>
</RDF>
"""
class XPIFileFactory(object):
def __init__(self, signed=True):
# Generate a unique random path for the new XPI file
f, self._path = tempfile.mkstemp(suffix=".xpi")
# Create a blank zip file on disk
zf = zipfile.ZipFile(self.path, mode="w")
zf.close()
if signed:
self.add_file("META-INF/manifest.mf", b"")
self.add_file("META-INF/mozilla.rsa", b"")
self.add_file("META-INF/mozilla.sf", b"")
@property
def path(self):
return self._path
def add_file(self, filename, data):
with zipfile.ZipFile(self.path, mode="a") as zf:
with zf.open(filename, mode="w") as f:
f.write(data)
def open(self, mode="rb"):
        return open(self.path, mode=mode)
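# Usage sketch (assumed, not taken from the original file): build a minimal
# signed XPI on disk and read it back.
#
#   xpi = XPIFileFactory()                  # signed=True adds META-INF stubs
#   xpi.add_file("manifest.json", b"{}")
#   data = xpi.open().read()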
class WebExtensionFileFactory(XPIFileFactory):
def __init__(self, signed=True, from_file=None, gecko_id=None, overwrite_data=None):
super().__init__(signed=signed)
if not gecko_id:
gecko_id = f"{Faker().md5()}@normandy.mozilla.org"
if from_file:
self._manifest = json.load(from_file)
else:
self._manifest = {
"manifest_version": 2,
"name": "normandy test addon",
"version": "0.1",
"description": "This is an add-on for us in Normandy's tests",
"applications": {"gecko": {"id": gecko_id}},
}
if overwrite_data:
self._manifest.update(overwrite_data)
self.save_manifest()
@property
def manifest(self):
return self._manifest
def save_manifest(self):
self.add_file("manifest.json", json.dumps(self.manifest).encode())
def update_manifest(self, data):
self._manifest.update(data)
self.save_manifest()
def replace_manifest(self, data):
self._manifest = data
self.save_manifest()
class LegacyAddonFileFactory(XPIFileFactory):
def __init__(self, signed=True, from_file=None, addon_id=None, overwrite_data=None):
super().__init__(signed=signed)
if not addon_id:
name = Faker().md5()
addon_id = f"{name}@normandy.mozilla.org"
if from_file:
with open(from_file, "rb") as f:
self.add_file("install.rdf", f.read())
else:
data = {
"id": addon_id,
"version": "0.1",
"name": "Signed Bootstrap Mozilla Extension Example",
"description": "Example of a bootstrapped addon",
}
if overwrite_data:
data.update(overwrite_data)
self.generate_install_rdf(data)
def generate_install_rdf(self, data):
insert = ""
for k in data:
insert += "<em:{}>{}</em:{}>\n".format(k, data[k], k)
self.add_file("install.rdf", INSTALL_RDF_TEMPLATE.format(insert).encode())
class ExtensionFactory(DjangoModelFactory):
name = FuzzyUnicode()
xpi = factory.django.FileField(from_func=lambda: WebExtensionFileFactory().open())
class Meta:
model = Extension
| mpl-2.0 |
mozilla/normandy | normandy/recipes/management/commands/initial_data.py | 1 | 1928 | from django.core.management.base import BaseCommand
from django_countries import countries
from normandy.recipes.models import Channel, Country, WindowsVersion
class Command(BaseCommand):
"""
Adds some helpful initial data to the site's database. If matching
data already exists, it should _not_ be overwritten, making this
safe to run multiple times.
This exists instead of data migrations so that test runs do not load
this data into the test database.
If this file grows too big, we should consider finding a library or
coming up with a more robust way of adding this data.
"""
help = "Adds initial data to database"
def handle(self, *args, **options):
self.add_release_channels()
self.add_countries()
self.add_windows_versions()
def add_release_channels(self):
self.stdout.write("Adding Release Channels...", ending="")
channels = {
"release": "Release",
"beta": "Beta",
"aurora": "Developer Edition",
"nightly": "Nightly",
}
for slug, name in channels.items():
Channel.objects.update_or_create(slug=slug, defaults={"name": name})
self.stdout.write("Done")
def add_countries(self):
self.stdout.write("Adding Countries...", ending="")
for code, name in countries:
Country.objects.update_or_create(code=code, defaults={"name": name})
self.stdout.write("Done")
def add_windows_versions(self):
self.stdout.write("Adding Windows Versions...", ending="")
versions = [
(6.1, "Windows 7"),
(6.2, "Windows 8"),
(6.3, "Windows 8.1"),
(10.0, "Windows 10"),
]
for nt_version, name in versions:
WindowsVersion.objects.update_or_create(nt_version=nt_version, defaults={"name": name})
self.stdout.write("Done")
| mpl-2.0 |
mozilla/normandy | normandy/base/tests/test_storage.py | 1 | 1594 | from itertools import chain
import pytest
from django.core.files.base import ContentFile
from normandy.base.storage import PermissiveFilenameStorageMixin
class TestPermissiveFilenameStorageMixin(object):
@pytest.fixture
def storage(self):
return PermissiveFilenameStorageMixin()
class TestGetValidName(object):
def test_it_works(self, storage):
assert storage.get_valid_name("simple-name") == "simple-name"
def test_it_removes_whitespace(self, storage):
assert storage.get_valid_name(" hello world ") == "hello_world"
def test_it_removes_some_special_chars(self, storage):
assert (
storage.get_valid_name("""special \\^`<>{}[]#%"'~|[]*? characters""")
== "special_characters"
)
def test_it_removes_non_printable_ascii_characters(self, storage):
for c in chain(range(32), range(127, 256)):
assert storage.get_valid_name(chr(c)) == ""
def test_it_allows_an_addon_filename(self, storage):
addon_filename = "shield-recipe-client@mozilla.org-82.1.g32b36827-signed.xpi"
assert storage.get_valid_name(addon_filename) == addon_filename
class TestRestrictedOverwriteFilenameStorageMixin(object):
def test_get_available_name(self, storage):
assert storage.get_available_name("tmp/f00") == "tmp/f00"
def test_file_exists(self, storage):
storage.save("tmp/foo", ContentFile(b""))
with pytest.raises(FileExistsError):
storage.get_available_name("tmp/foo")
| mpl-2.0 |
mozilla/normandy | contract-tests/v1_api/test_performance.py | 1 | 3083 | from urllib.parse import urljoin
import html5lib
import pytest
"""These are paths hit by self repair that need to be very fast"""
HOT_PATHS = [
"/en-US/repair",
"/en-US/repair/",
"/api/v1/recipe/?enabled=1",
"/api/v1/recipe/signed/?enabled=1",
"/api/v1/action/",
]
@pytest.mark.parametrize("path", HOT_PATHS)
class TestHotPaths(object):
"""
Test for performance-enhancing properties of the site.
This file does not test performance by measuring runtimes and throughput.
Instead it tests for markers of features that would speed up or slow down the
site, such as cache headers.
"""
def test_no_redirects(self, conf, requests_session, path):
r = requests_session.get(conf.getoption("server") + path)
r.raise_for_status()
assert 200 <= r.status_code < 300
def test_no_vary_cookie(self, conf, requests_session, path, only_readonly):
r = requests_session.get(conf.getoption("server") + path)
r.raise_for_status()
assert "cookie" not in r.headers.get("vary", "").lower()
def test_cache_headers(self, conf, requests_session, path, only_readonly):
if path.startswith("/api/"):
pytest.xfail("caching temporarily hidden on api by nginx")
r = requests_session.get(conf.getoption("server") + path)
r.raise_for_status()
cache_control = r.headers.get("cache-control")
assert cache_control is not None
# parse cache-control header.
parts = [part.strip() for part in cache_control.split(",")]
max_age = [part for part in parts if part.startswith("max-age=")][0]
max_age_seconds = int(max_age.split("=")[1])
assert "public" in parts
assert max_age_seconds > 0
def test_static_cache_headers(conf, requests_session):
"""Test that all scripts included from self-repair have long lived cache headers"""
req = requests_session.get(conf.getoption("server") + "/en-US/repair")
req.raise_for_status()
document = html5lib.parse(req.content, treebuilder="dom")
scripts = document.getElementsByTagName("script")
for script in scripts:
src = script.getAttribute("src")
url = urljoin(conf.getoption("server"), src)
script_req = requests_session.get(url)
script_req.raise_for_status()
cache_control = parse_cache_control(script_req.headers["cache-control"])
assert cache_control["public"], f"Cache-control: public for {url}"
ONE_YEAR = 31_536_000
assert cache_control["max-age"] >= ONE_YEAR, f"Cache-control: max-age > 1 year for {url}"
assert cache_control["immutable"], f"Cache-control: immutable for {url}"
def parse_cache_control(header):
parsed = {}
parts = header.split(",")
for part in parts:
part = part.strip()
if "=" in part:
key, val = part.split("=", 1)
try:
val = int(val)
except ValueError:
pass
parsed[key] = val
else:
parsed[part] = True
return parsed
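# Worked example (illustrative):
#   parse_cache_control("public, max-age=31536000, immutable")
#   == {"public": True, "max-age": 31536000, "immutable": True}
# Values that look like integers are coerced; bare directives map to True.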
| mpl-2.0 |
mozilla/normandy | normandy/recipes/api/v1/views.py | 1 | 6564 | from django.conf import settings
from django.db import transaction
from django.db.models import Q
from django.views.decorators.cache import never_cache
import django_filters
from rest_framework import generics, permissions, views, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import NotFound, ParseError
from rest_framework.response import Response
from normandy.base.api.mixins import CachingViewsetMixin
from normandy.base.api.permissions import AdminEnabledOrReadOnly
from normandy.base.api.renderers import JavaScriptRenderer
from normandy.base.decorators import api_cache_control
from normandy.recipes.models import Action, ApprovalRequest, Client, Recipe, RecipeRevision
from normandy.recipes.api.filters import (
BaselineCapabilitiesFilter,
CharSplitFilter,
EnabledStateFilter,
)
from normandy.recipes.api.v1.serializers import (
ActionSerializer,
ApprovalRequestSerializer,
ClientSerializer,
RecipeRevisionSerializer,
RecipeSerializer,
SignedActionSerializer,
SignedRecipeSerializer,
)
class ActionViewSet(CachingViewsetMixin, viewsets.ReadOnlyModelViewSet):
"""Viewset for viewing recipe actions."""
queryset = Action.objects.all()
serializer_class = ActionSerializer
pagination_class = None
lookup_field = "name"
lookup_value_regex = r"[_\-\w]+"
@action(detail=False, methods=["GET"])
@api_cache_control()
def signed(self, request, pk=None):
actions = self.filter_queryset(self.get_queryset()).exclude(signature=None)
serializer = SignedActionSerializer(actions, many=True)
return Response(serializer.data)
class ActionImplementationView(generics.RetrieveAPIView):
"""
Retrieves the implementation code for an action. Raises a 404 if the
given hash doesn't match the hash we've stored.
"""
queryset = Action.objects.all()
lookup_field = "name"
permission_classes = []
renderer_classes = [JavaScriptRenderer]
pagination_class = None
@api_cache_control(max_age=settings.IMMUTABLE_CACHE_TIME)
def retrieve(self, request, name, impl_hash):
action = self.get_object()
if impl_hash != action.implementation_hash:
raise NotFound("Hash does not match current stored action.")
return Response(action.implementation)
class RecipeFilters(django_filters.FilterSet):
enabled = EnabledStateFilter()
action = django_filters.CharFilter(field_name="latest_revision__action__name")
channels = CharSplitFilter("latest_revision__channels__slug")
locales = CharSplitFilter("latest_revision__locales__code")
countries = CharSplitFilter("latest_revision__countries__code")
only_baseline_capabilities = BaselineCapabilitiesFilter(default_only_baseline=False)
class Meta:
model = Recipe
fields = ["action", "enabled", "latest_revision__action"]
class SignedRecipeFilters(RecipeFilters):
only_baseline_capabilities = BaselineCapabilitiesFilter(default_only_baseline=True)
class RecipeViewSet(CachingViewsetMixin, viewsets.ReadOnlyModelViewSet):
"""Viewset for viewing and uploading recipes."""
queryset = (
Recipe.objects.all()
# Foreign keys
.select_related("latest_revision")
.select_related("latest_revision__action")
.select_related("latest_revision__approval_request")
# Many-to-many
.prefetch_related("latest_revision__channels")
.prefetch_related("latest_revision__countries")
.prefetch_related("latest_revision__locales")
)
serializer_class = RecipeSerializer
filterset_class = RecipeFilters
permission_classes = [permissions.DjangoModelPermissionsOrAnonReadOnly, AdminEnabledOrReadOnly]
pagination_class = None
def get_queryset(self):
queryset = self.queryset
if self.request.GET.get("status") == "enabled":
queryset = queryset.only_enabled()
elif self.request.GET.get("status") == "disabled":
queryset = queryset.only_disabled()
if "text" in self.request.GET:
text = self.request.GET.get("text")
if "\x00" in text:
raise ParseError("Null bytes in text")
queryset = queryset.filter(
Q(latest_revision__name__contains=text)
| Q(latest_revision__extra_filter_expression__contains=text)
)
return queryset
@transaction.atomic
def create(self, request, *args, **kwargs):
return super().create(request, *args, **kwargs)
@transaction.atomic
def update(self, request, *args, **kwargs):
return super().update(request, *args, **kwargs)
@action(detail=False, methods=["GET"], filterset_class=SignedRecipeFilters)
@api_cache_control()
def signed(self, request, pk=None):
recipes = self.filter_queryset(self.get_queryset()).exclude(signature=None)
serializer = SignedRecipeSerializer(recipes, many=True)
return Response(serializer.data)
@action(detail=True, methods=["GET"])
@api_cache_control()
def history(self, request, pk=None):
recipe = self.get_object()
serializer = RecipeRevisionSerializer(
recipe.revisions.all().order_by("-id"), many=True, context={"request": request}
)
return Response(serializer.data)
class RecipeRevisionViewSet(viewsets.ReadOnlyModelViewSet):
queryset = (
RecipeRevision.objects.all()
.select_related("action")
.select_related("approval_request")
.select_related("recipe")
# Many-to-many
.prefetch_related("channels")
.prefetch_related("countries")
.prefetch_related("locales")
)
serializer_class = RecipeRevisionSerializer
permission_classes = [AdminEnabledOrReadOnly, permissions.DjangoModelPermissionsOrAnonReadOnly]
pagination_class = None
class ApprovalRequestViewSet(viewsets.ReadOnlyModelViewSet):
queryset = ApprovalRequest.objects.all()
serializer_class = ApprovalRequestSerializer
permission_classes = [AdminEnabledOrReadOnly, permissions.DjangoModelPermissionsOrAnonReadOnly]
pagination_class = None
class ClassifyClient(views.APIView):
authentication_classes = []
permission_classes = []
serializer_class = ClientSerializer
@never_cache
def get(self, request, format=None):
client = Client(request)
serializer = self.serializer_class(client, context={"request": request})
return Response(serializer.data)
| mpl-2.0 |
mozilla/normandy | normandy/recipes/migrations/0004_auto_20180502_2340.py | 1 | 5164 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-02 23:40
# flake8: noqa
from __future__ import unicode_literals
import hashlib
from django.db import migrations
def create_tmp_from_revision(apps, revision, parent=None):
ApprovalRequest = apps.get_model("recipes", "ApprovalRequest")
TmpRecipeRevision = apps.get_model("recipes", "TmpRecipeRevision")
tmp = TmpRecipeRevision(
created=revision.created,
updated=revision.updated,
comment=revision.comment,
name=revision.name,
arguments_json=revision.arguments_json,
extra_filter_expression=revision.extra_filter_expression,
identicon_seed=revision.identicon_seed,
action=revision.action,
parent=parent,
recipe=revision.recipe,
user=revision.user,
)
tmp.save()
if revision.approved_for_recipe.count():
tmp.approved_for_recipe.add(revision.approved_for_recipe.get())
if revision.latest_for_recipe.count():
tmp.latest_for_recipe.add(revision.latest_for_recipe.get())
try:
approval_request = revision.approval_request
approval_request.tmp_revision = tmp
approval_request.save()
except ApprovalRequest.DoesNotExist:
pass
for channel in revision.channels.all():
tmp.channels.add(channel)
for country in revision.countries.all():
tmp.countries.add(country)
for locale in revision.locales.all():
tmp.locales.add(locale)
return tmp
def copy_revisions_to_tmp(apps, schema_editor):
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
for revision in RecipeRevision.objects.filter(parent=None):
current_rev = revision
parent_tmp = create_tmp_from_revision(apps, current_rev)
try:
while current_rev.child:
parent_tmp = create_tmp_from_revision(apps, current_rev.child, parent=parent_tmp)
current_rev = current_rev.child
except RecipeRevision.DoesNotExist:
pass
def get_filter_expression(revision):
parts = []
if revision.locales.count():
locales = ", ".join(["'{}'".format(l.code) for l in revision.locales.all()])
parts.append("normandy.locale in [{}]".format(locales))
if revision.countries.count():
countries = ", ".join(["'{}'".format(c.code) for c in revision.countries.all()])
parts.append("normandy.country in [{}]".format(countries))
if revision.channels.count():
channels = ", ".join(["'{}'".format(c.slug) for c in revision.channels.all()])
parts.append("normandy.channel in [{}]".format(channels))
if revision.extra_filter_expression:
parts.append(revision.extra_filter_expression)
expression = ") && (".join(parts)
return "({})".format(expression) if len(parts) > 1 else expression
def hash(revision):
data = "{}{}{}{}{}{}".format(
revision.recipe.id,
revision.created,
revision.name,
revision.action.id,
revision.arguments_json,
get_filter_expression(revision),
)
return hashlib.sha256(data.encode()).hexdigest()
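# The hex digest above becomes the initial primary key of the restored
# RecipeRevision rows (see create_revision_from_tmp below).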
def create_revision_from_tmp(apps, tmp, parent=None):
ApprovalRequest = apps.get_model("recipes", "ApprovalRequest")
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
rev = RecipeRevision(
created=tmp.created,
updated=tmp.updated,
comment=tmp.comment,
name=tmp.name,
arguments_json=tmp.arguments_json,
extra_filter_expression=tmp.extra_filter_expression,
identicon_seed=tmp.identicon_seed,
action=tmp.action,
parent=parent,
recipe=tmp.recipe,
user=tmp.user,
)
initial_id = hash(tmp)
rev.id = initial_id
rev.save()
if tmp.approved_for_recipe.count():
rev.approved_for_recipe.add(tmp.approved_for_recipe.get())
if tmp.latest_for_recipe.count():
rev.latest_for_recipe.add(tmp.latest_for_recipe.get())
try:
approval_request = tmp.approval_request
approval_request.revision = rev
approval_request.save()
except ApprovalRequest.DoesNotExist:
pass
for channel in tmp.channels.all():
rev.channels.add(channel)
for country in tmp.countries.all():
rev.countries.add(country)
for locale in tmp.locales.all():
rev.locales.add(locale)
return rev
def copy_tmp_to_revisions(apps, schema_editor):
TmpRecipeRevision = apps.get_model("recipes", "TmpRecipeRevision")
for tmp in TmpRecipeRevision.objects.filter(parent=None):
current_tmp = tmp
parent_rev = create_revision_from_tmp(apps, current_tmp)
try:
while current_tmp.child:
parent_rev = create_revision_from_tmp(apps, current_tmp.child, parent=parent_rev)
current_tmp = current_tmp.child
except TmpRecipeRevision.DoesNotExist:
pass
class Migration(migrations.Migration):
dependencies = [("recipes", "0003_tmpreciperevision")]
operations = [migrations.RunPython(copy_revisions_to_tmp, copy_tmp_to_revisions)]
| mpl-2.0 |
mozilla/normandy | contract-tests/v3_api/test_approval_request_reject.py | 1 | 1850 | from support.assertions import assert_valid_schema
from support.helpers import new_recipe
from urllib.parse import urljoin
def test_approval_request_reject(conf, requests_session, headers):
# Get an action we can work with
action_response = requests_session.get(
urljoin(conf.getoption("server"), "/api/v3/action/"), headers=headers
)
data = action_response.json()
action_id = data["results"][0]["id"]
# Create a recipe associated with that action
recipe_details = new_recipe(requests_session, action_id, conf.getoption("server"), headers)
    # Create an approval request
response = requests_session.post(
urljoin(
conf.getoption("server"),
"/api/v3/recipe_revision/{}/request_approval/".format(
recipe_details["latest_revision_id"]
),
),
headers=headers,
)
data = response.json()
approval_id = data["id"]
assert response.status_code != 404
assert_valid_schema(response.json())
# Reject the approval
response = requests_session.post(
urljoin(
conf.getoption("server"), "/api/v3/approval_request/{}/reject/".format(approval_id)
),
data={"comment": "r-"},
headers=headers,
)
assert response.status_code == 200
assert_valid_schema(response.json())
    # Look at the recipe and make sure the approval status has been set to False and our comment shows up
response = requests_session.get(
urljoin(conf.getoption("server"), "/api/v3/recipe/{}/".format(recipe_details["id"]))
)
assert response.status_code != 404
assert_valid_schema(response.json())
approval_request = response.json()["latest_revision"]["approval_request"]
assert approval_request["approved"] is False
assert approval_request["comment"] == "r-"
| mpl-2.0 |
mozilla/normandy | normandy/recipes/api/v3/serializers.py | 1 | 11345 | from rest_framework import serializers
from factory.fuzzy import FuzzyText
from normandy.base.api.v3.serializers import UserSerializer
from normandy.base.jexl import get_normandy_jexl
from normandy.recipes import filters
from normandy.recipes.api.fields import (
ActionImplementationHyperlinkField,
FilterObjectField,
)
from normandy.recipes.models import (
Action,
ApprovalRequest,
EnabledState,
Recipe,
RecipeRevision,
Signature,
)
from normandy.recipes.validators import JSONSchemaValidator
class CustomizableSerializerMixin:
"""Serializer Mixin that allows callers to exclude fields on instance of this serializer."""
def __init__(self, *args, **kwargs):
exclude_fields = kwargs.pop("exclude_fields", [])
super().__init__(*args, **kwargs)
if exclude_fields:
for field in exclude_fields:
self.fields.pop(field)
class ActionSerializer(serializers.ModelSerializer):
arguments_schema = serializers.JSONField()
implementation_url = ActionImplementationHyperlinkField()
class Meta:
model = Action
fields = ["arguments_schema", "name", "id", "implementation_url"]
class ApprovalRequestSerializer(serializers.ModelSerializer):
approver = UserSerializer()
created = serializers.DateTimeField(read_only=True)
creator = UserSerializer()
revision = serializers.SerializerMethodField(read_only=True)
class Meta:
model = ApprovalRequest
fields = ["approved", "approver", "comment", "created", "creator", "id", "revision"]
def get_revision(self, instance):
serializer = RecipeRevisionLinkSerializer(instance.revision)
return serializer.data
class EnabledStateSerializer(CustomizableSerializerMixin, serializers.ModelSerializer):
creator = UserSerializer()
class Meta:
model = EnabledState
fields = ["id", "revision_id", "created", "creator", "enabled", "carryover_from"]
class RecipeRevisionSerializer(serializers.ModelSerializer):
action = serializers.SerializerMethodField(read_only=True)
approval_request = ApprovalRequestSerializer(read_only=True)
capabilities = serializers.ListField(read_only=True)
comment = serializers.CharField(required=False)
creator = UserSerializer(source="user", read_only=True)
date_created = serializers.DateTimeField(source="created", read_only=True)
enabled_states = EnabledStateSerializer(many=True, exclude_fields=["revision_id"])
filter_object = serializers.ListField(child=FilterObjectField())
recipe = serializers.SerializerMethodField(read_only=True)
class Meta:
model = RecipeRevision
fields = [
"action",
"approval_request",
"arguments",
"experimenter_slug",
"capabilities",
"comment",
"creator",
"date_created",
"enabled_states",
"enabled",
"extra_capabilities",
"extra_filter_expression",
"filter_expression",
"filter_object",
"id",
"identicon_seed",
"metadata",
"name",
"recipe",
"updated",
]
def get_recipe(self, instance):
serializer = RecipeLinkSerializer(instance.recipe)
return serializer.data
def get_action(self, instance):
serializer = ActionSerializer(
instance.action, read_only=True, context={"request": self.context.get("request")}
)
return serializer.data
class SignatureSerializer(serializers.ModelSerializer):
timestamp = serializers.DateTimeField(read_only=True)
signature = serializers.ReadOnlyField()
x5u = serializers.ReadOnlyField()
public_key = serializers.ReadOnlyField()
class Meta:
model = Signature
fields = ["timestamp", "signature", "x5u", "public_key"]
class RecipeSerializer(CustomizableSerializerMixin, serializers.ModelSerializer):
# read-only fields
approved_revision = RecipeRevisionSerializer(read_only=True)
latest_revision = RecipeRevisionSerializer(read_only=True)
signature = SignatureSerializer(read_only=True)
uses_only_baseline_capabilities = serializers.BooleanField(
source="latest_revision.uses_only_baseline_capabilities", read_only=True
)
# write-only fields
action_id = serializers.PrimaryKeyRelatedField(
source="action", queryset=Action.objects.all(), write_only=True
)
arguments = serializers.JSONField(write_only=True)
extra_filter_expression = serializers.CharField(
required=False, allow_blank=True, write_only=True
)
filter_object = serializers.ListField(
child=FilterObjectField(), required=False, write_only=True
)
name = serializers.CharField(write_only=True)
identicon_seed = serializers.CharField(required=False, write_only=True)
comment = serializers.CharField(required=False, write_only=True)
experimenter_slug = serializers.CharField(
required=False, write_only=True, allow_null=True, allow_blank=True
)
extra_capabilities = serializers.ListField(required=False, write_only=True)
class Meta:
model = Recipe
fields = [
# read-only
"approved_revision",
"id",
"latest_revision",
"signature",
"uses_only_baseline_capabilities",
# write-only
"action_id",
"arguments",
"extra_filter_expression",
"filter_object",
"name",
"identicon_seed",
"comment",
"experimenter_slug",
"extra_capabilities",
]
def get_action(self, instance):
serializer = ActionSerializer(
instance.latest_revision.action,
read_only=True,
context={"request": self.context.get("request")},
)
return serializer.data
def update(self, instance, validated_data):
request = self.context.get("request")
if request and request.user:
validated_data["user"] = request.user
instance.revise(**validated_data)
return instance
def create(self, validated_data):
request = self.context.get("request")
if request and request.user:
validated_data["user"] = request.user
if "identicon_seed" not in validated_data:
validated_data["identicon_seed"] = f"v1:{FuzzyText().fuzz()}"
recipe = Recipe.objects.create()
return self.update(recipe, validated_data)
def validate_extra_filter_expression(self, value):
if value:
jexl = get_normandy_jexl()
errors = list(jexl.validate(value))
if errors:
raise serializers.ValidationError(errors)
return value
def validate(self, data):
data = super().validate(data)
action = data.get("action")
if action is None:
action = self.instance.latest_revision.action
arguments = data.get("arguments")
if arguments is not None:
# Ensure the value is a dict
if not isinstance(arguments, dict):
raise serializers.ValidationError({"arguments": "Must be an object."})
# Get the schema associated with the selected action
schema = action.arguments_schema
schemaValidator = JSONSchemaValidator(schema)
errorResponse = {}
errors = sorted(schemaValidator.iter_errors(arguments), key=lambda e: e.path)
# Loop through ValidationErrors returned by JSONSchema
# Each error contains a message and a path attribute
# message: string human-readable error explanation
# path: list containing path to offending element
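            # For example (illustrative), an error at arguments["surveys"][0]["weight"]
            # is nested as {"surveys": {0: {"weight": "<message>"}}} in errorResponse.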
for error in errors:
currentLevel = errorResponse
# Loop through the path of the current error
# e.g. ['surveys'][0]['weight']
for index, path in enumerate(error.path):
# If this key already exists in our error response, step into it
if path in currentLevel:
currentLevel = currentLevel[path]
continue
else:
# If we haven't reached the end of the path, add this path
# as a key in our error response object and step into it
if index < len(error.path) - 1:
currentLevel[path] = {}
currentLevel = currentLevel[path]
continue
# If we've reached the final path, set the error message
else:
currentLevel[path] = error.message
if errorResponse:
raise serializers.ValidationError({"arguments": errorResponse})
if self.instance is None:
if data.get("extra_filter_expression", "").strip() == "":
if not data.get("filter_object"):
raise serializers.ValidationError(
"one of extra_filter_expression or filter_object is required"
)
else:
if "extra_filter_expression" in data or "filter_object" in data:
# If either is attempted to be updated, at least one of them must be truthy.
if not data.get("extra_filter_expression", "").strip() and not data.get(
"filter_object"
):
raise serializers.ValidationError(
"if extra_filter_expression is blank, "
"at least one filter_object is required"
)
return data
def validate_filter_object(self, value):
if not isinstance(value, list):
raise serializers.ValidationError(
{"non field errors": ["filter_object must be a list."]}
)
errors = {}
for i, obj in enumerate(value):
if not isinstance(obj, dict):
errors[i] = {"non field errors": ["filter_object members must be objects."]}
continue
if "type" not in obj:
errors[i] = {"type": ["This field is required."]}
break
Filter = filters.by_type.get(obj["type"])
if Filter is not None:
filter = Filter(data=obj)
if not filter.is_valid():
errors[i] = filter.errors
else:
errors[i] = {"type": [f'Unknown filter object type "{obj["type"]}".']}
if errors:
raise serializers.ValidationError(errors)
return value
class RecipeLinkSerializer(RecipeSerializer):
class Meta(RecipeSerializer.Meta):
fields = ["approved_revision_id", "id", "latest_revision_id"]
class RecipeRevisionLinkSerializer(RecipeRevisionSerializer):
recipe_id = serializers.SerializerMethodField(read_only=True)
class Meta(RecipeSerializer.Meta):
fields = ["id", "recipe_id"]
def get_recipe_id(self, instance):
return instance.recipe.id
| mpl-2.0 |
mozilla/normandy | contract-tests/v3_api/test_group_delete.py | 1 | 1231 | import uuid
from support.assertions import assert_valid_schema
from urllib.parse import urljoin
def test_group_delete(conf, requests_session, headers):
# Create a new group
data = {"name": str(uuid.uuid4())}
response = requests_session.post(
urljoin(conf.getoption("server"), "/api/v3/group/"), headers=headers, data=data
)
assert response.status_code == 201
assert_valid_schema(response.json())
group_data = response.json()
group_id = group_data["id"]
# Verify group was stored and contains expected data
response = requests_session.get(
urljoin(conf.getoption("server"), "/api/v3/group/{}/".format(group_id)), headers=headers
)
group_data = response.json()
assert response.status_code == 200
assert_valid_schema(response.json())
# Delete the group
response = requests_session.delete(
urljoin(conf.getoption("server"), "/api/v3/group/{}/".format(group_id)), headers=headers
)
assert response.status_code == 204
# Verify that it no longer exists
response = requests_session.get(
urljoin(conf.getoption("server"), "/api/v3/group/{}/".format(group_id)), headers=headers
)
assert response.status_code == 404
| mpl-2.0 |
developmentseed/landsat-util | docs/conf.py | 9 | 9890 | # -*- coding: utf-8 -*-
#
# Landsat-util documentation build configuration file, created by
# sphinx-quickstart on Thu May 28 17:52:10 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['numpy', 'rasterio', 'scipy', 'scikit-image', 'homura', 'boto',
'termcolor', 'requests', 'python-dateutil']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)
print(project_root)
import landsat
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'landsat-util'
copyright = u'2015, Development Seed'
author = u'Development Seed'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = landsat.__version__
# The full version, including alpha/beta/rc tags.
release = landsat.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Landsat-utildoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Landsat-util.tex', u'Landsat-util Documentation',
u'Development Seed', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'landsat-util', u'Landsat-util Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Landsat-util', u'Landsat-util Documentation',
author, 'Landsat-util', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| cc0-1.0 |
rmmh/skybot | plugins/lastfm.py | 3 | 2391 | """
The Last.fm API key is retrieved from the bot config file.
"""
from util import hook, http
api_url = "http://ws.audioscrobbler.com/2.0/?format=json"
@hook.api_key("lastfm")
@hook.command(autohelp=False)
def lastfm(inp, chan="", nick="", reply=None, api_key=None, db=None):
".lastfm <username> [dontsave] | @<nick> -- gets current or last played " "track from lastfm"
db.execute(
"create table if not exists "
"lastfm(chan, nick, user, primary key(chan, nick))"
)
if inp[0:1] == "@":
nick = inp[1:].strip()
user = None
dontsave = True
else:
user = inp
dontsave = user.endswith(" dontsave")
if dontsave:
user = user[:-9].strip().lower()
if not user:
user = db.execute(
"select user from lastfm where chan=? and nick=lower(?)", (chan, nick)
).fetchone()
if not user:
return lastfm.__doc__
user = user[0]
response = http.get_json(
api_url, method="user.getrecenttracks", api_key=api_key, user=user, limit=1
)
if "error" in response:
return "error: %s" % response["message"]
if (
not "track" in response["recenttracks"]
or len(response["recenttracks"]["track"]) == 0
):
return "no recent tracks for user \x02%s\x0F found" % user
tracks = response["recenttracks"]["track"]
if type(tracks) == list:
# if the user is listening to something, the tracks entry is a list
# the first item is the current track
track = tracks[0]
status = "current track"
elif type(tracks) == dict:
# otherwise, they aren't listening to anything right now, and
# the tracks entry is a dict representing the most recent track
track = tracks
status = "last track"
else:
return "error parsing track listing"
title = track["name"]
album = track["album"]["#text"]
artist = track["artist"]["#text"]
ret = "\x02%s\x0F's %s - \x02%s\x0f" % (user, status, title)
if artist:
ret += " by \x02%s\x0f" % artist
if album:
ret += " on \x02%s\x0f" % album
reply(ret)
if inp and not dontsave:
db.execute(
"insert or replace into lastfm(chan, nick, user) " "values (?, ?, ?)",
(chan, nick.lower(), inp),
)
db.commit()
| unlicense |
rmmh/skybot | plugins/google.py | 2 | 1308 | from __future__ import unicode_literals
import random
from util import hook, http
def api_get(query, key, is_image=None, num=1):
url = (
"https://www.googleapis.com/customsearch/v1?cx=007629729846476161907:ud5nlxktgcw"
"&fields=items(title,link,snippet)&safe=off&nfpr=1"
+ ("&searchType=image" if is_image else "")
)
return http.get_json(url, key=key, q=query, num=num)
@hook.api_key("google")
@hook.command("can i get a picture of")
@hook.command("can you grab me a picture of")
@hook.command("give me a print out of")
@hook.command
def gis(inp, api_key=None):
""".gis <term> -- finds an image using google images (safesearch off)"""
parsed = api_get(inp, api_key, is_image=True, num=10)
if "items" not in parsed:
return "no images found"
return random.choice(parsed["items"])["link"]
@hook.api_key("google")
@hook.command("g")
@hook.command
def google(inp, api_key=None):
""".g/.google <query> -- returns first google search result"""
parsed = api_get(inp, api_key)
if "items" not in parsed:
return "no results found"
out = '{link} -- \x02{title}\x02: "{snippet}"'.format(**parsed["items"][0])
out = " ".join(out.split())
if len(out) > 300:
out = out[: out.rfind(" ")] + '..."'
return out
| unlicense |
rmmh/skybot | plugins/util/timesince.py | 3 | 4139 | # Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
def timesince(d, now=None):
"""
Takes two datetime objects and returns the time between d and now
as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
then "0 minutes" is returned.
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to two adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
chunks = (
(60 * 60 * 24 * 365, ("year", "years")),
(60 * 60 * 24 * 30, ("month", "months")),
(60 * 60 * 24 * 7, ("week", "weeks")),
(60 * 60 * 24, ("day", "days")),
(60 * 60, ("hour", "hours")),
(60, ("minute", "minutes")),
)
# Convert int or float (unix epoch) to datetime.datetime for comparison
if isinstance(d, int) or isinstance(d, float):
d = datetime.datetime.fromtimestamp(d)
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
d = datetime.datetime(d.year, d.month, d.day)
if now and not isinstance(now, datetime.datetime):
now = datetime.datetime(now.year, now.month, now.day)
if not now:
now = datetime.datetime.now()
# ignore microsecond part of 'd' since we removed it from 'now'
delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return "0 " + "minutes"
for i, (seconds, name) in enumerate(chunks):
count = since // seconds
if count != 0:
break
if count == 1:
s = "%(number)d %(type)s" % {"number": count, "type": name[0]}
else:
s = "%(number)d %(type)s" % {"number": count, "type": name[1]}
if i + 1 < len(chunks):
# Now get the second item
seconds2, name2 = chunks[i + 1]
count2 = (since - (seconds * count)) // seconds2
if count2 != 0:
if count2 == 1:
s += ", %d %s" % (count2, name2[0])
else:
s += ", %d %s" % (count2, name2[1])
return s
def timeuntil(d, now=None):
"""
Like timesince, but returns a string measuring the time until
the given time.
"""
if not now:
now = datetime.datetime.now()
return timesince(now, d)
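# A minimal usage sketch of the helpers above; the datetimes are illustrative.
if __name__ == "__main__":
    start = datetime.datetime(2020, 1, 1)
    later = datetime.datetime(2020, 1, 15, 3, 0)
    print(timesince(start, later))  # "2 weeks"
    print(timeuntil(later, start))  # "2 weeks"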
| unlicense |
rmmh/skybot | plugins/mtg.py | 3 | 2470 | from __future__ import print_function
from builtins import range
from util import hook, http
import random
def card_search(name):
matching_cards = http.get_json(
"https://api.magicthegathering.io/v1/cards", name=name
)
for card in matching_cards["cards"]:
if card["name"].lower() == name.lower():
return card
return random.choice(matching_cards["cards"])
@hook.command
def mtg(inp, say=None):
""".mtg <name> - Searches for Magic the Gathering card given <name>"""
try:
card = card_search(inp)
except IndexError:
return "Card not found."
symbols = {
"{0}": "0",
"{1}": "1",
"{2}": "2",
"{3}": "3",
"{4}": "4",
"{5}": "5",
"{6}": "6",
"{7}": "7",
"{8}": "8",
"{9}": "9",
"{10}": "10",
"{11}": "11",
"{12}": "12",
"{13}": "13",
"{14}": "14",
"{15}": "15",
"{16}": "16",
"{17}": "17",
"{18}": "18",
"{19}": "19",
"{20}": "20",
"{T}": "\u27F3",
"{S}": "\u2744",
"{Q}": "\u21BA",
"{C}": "\u27E1",
"{W}": "W",
"{U}": "U",
"{B}": "B",
"{R}": "R",
"{G}": "G",
"{W/P}": "\u03D5",
"{U/P}": "\u03D5",
"{B/P}": "\u03D5",
"{R/P}": "\u03D5",
"{G/P}": "\u03D5",
"{X}": "X",
"\n": " ",
}
results = {
"name": card["name"],
"type": card["type"],
"cost": card.get("manaCost", ""),
"text": card.get("text", ""),
"power": card.get("power"),
"toughness": card.get("toughness"),
"loyalty": card.get("loyalty"),
"multiverseid": card.get("multiverseid"),
}
for fragment, rep in symbols.items():
results["text"] = results["text"].replace(fragment, rep)
results["cost"] = results["cost"].replace(fragment, rep)
template = ["{name} -"]
template.append("{type}")
template.append("- {cost} |")
if results["loyalty"]:
template.append("{loyalty} Loyalty |")
if results["power"]:
template.append("{power}/{toughness} |")
template.append(
"{text} | http://gatherer.wizards.com/Pages/Card/Details.aspx?multiverseid={multiverseid}"
)
return " ".join(template).format(**results)
if __name__ == "__main__":
print(card_search("Black Lotus"))
print(mtg("Black Lotus"))
| unlicense |
pytube/pytube | tests/test_captions.py | 1 | 5759 | import os
import pytest
from unittest import mock
from unittest.mock import MagicMock, mock_open, patch
from pytube import Caption, CaptionQuery, captions
def test_float_to_srt_time_format():
caption1 = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en", "vssId": ".en"}
)
assert caption1.float_to_srt_time_format(3.89) == "00:00:03,890"
def test_caption_query_sequence():
caption1 = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en", "vssId": ".en"}
)
caption2 = Caption(
{"url": "url2", "name": {"simpleText": "name2"}, "languageCode": "fr", "vssId": ".fr"}
)
caption_query = CaptionQuery(captions=[caption1, caption2])
assert len(caption_query) == 2
assert caption_query["en"] == caption1
assert caption_query["fr"] == caption2
with pytest.raises(KeyError):
assert caption_query["nada"] is not None
def test_caption_query_get_by_language_code_when_exists():
caption1 = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en", "vssId": ".en"}
)
caption2 = Caption(
{"url": "url2", "name": {"simpleText": "name2"}, "languageCode": "fr", "vssId": ".fr"}
)
caption_query = CaptionQuery(captions=[caption1, caption2])
assert caption_query["en"] == caption1
def test_caption_query_get_by_language_code_when_not_exists():
caption1 = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en", "vssId": ".en"}
)
caption2 = Caption(
{"url": "url2", "name": {"simpleText": "name2"}, "languageCode": "fr", "vssId": ".fr"}
)
caption_query = CaptionQuery(captions=[caption1, caption2])
with pytest.raises(KeyError):
assert caption_query["hello"] is not None
# assert not_found is not None # should never reach here
@mock.patch("pytube.captions.Caption.generate_srt_captions")
def test_download(srt):
open_mock = mock_open()
with patch("builtins.open", open_mock):
srt.return_value = ""
caption = Caption(
{
"url": "url1",
"name": {"simpleText": "name1"},
"languageCode": "en",
"vssId": ".en"
}
)
caption.download("title")
assert (
open_mock.call_args_list[0][0][0].split(os.path.sep)[-1] == "title (en).srt"
)
@mock.patch("pytube.captions.Caption.generate_srt_captions")
def test_download_with_prefix(srt):
open_mock = mock_open()
with patch("builtins.open", open_mock):
srt.return_value = ""
caption = Caption(
{
"url": "url1",
"name": {"simpleText": "name1"},
"languageCode": "en",
"vssId": ".en"
}
)
caption.download("title", filename_prefix="1 ")
assert (
open_mock.call_args_list[0][0][0].split(os.path.sep)[-1]
== "1 title (en).srt"
)
@mock.patch("pytube.captions.Caption.generate_srt_captions")
def test_download_with_output_path(srt):
open_mock = mock_open()
captions.target_directory = MagicMock(return_value="/target")
with patch("builtins.open", open_mock):
srt.return_value = ""
caption = Caption(
{
"url": "url1",
"name": {"simpleText": "name1"},
"languageCode": "en",
"vssId": ".en"
}
)
file_path = caption.download("title", output_path="blah")
assert file_path == os.path.join("/target","title (en).srt")
captions.target_directory.assert_called_with("blah")
@mock.patch("pytube.captions.Caption.xml_captions")
def test_download_xml_and_trim_extension(xml):
open_mock = mock_open()
with patch("builtins.open", open_mock):
xml.return_value = ""
caption = Caption(
{
"url": "url1",
"name": {"simpleText": "name1"},
"languageCode": "en",
"vssId": ".en"
}
)
caption.download("title.xml", srt=False)
assert (
open_mock.call_args_list[0][0][0].split(os.path.sep)[-1] == "title (en).xml"
)
def test_repr():
caption = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en", "vssId": ".en"}
)
assert str(caption) == '<Caption lang="name1" code="en">'
caption_query = CaptionQuery(captions=[caption])
assert repr(caption_query) == '{\'en\': <Caption lang="name1" code="en">}'
@mock.patch("pytube.request.get")
def test_xml_captions(request_get):
request_get.return_value = "test"
caption = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en", "vssId": ".en"}
)
assert caption.xml_captions == "test"
@mock.patch("pytube.captions.request")
def test_generate_srt_captions(request):
request.get.return_value = (
'<?xml version="1.0" encoding="utf-8" ?><transcript><text start="6.5" dur="1.7">['
        'Herb, Software Engineer]\n本影片含隱藏式字幕。</text><text start="8.3" dur="2.7">'
        "如要啓用字幕,請按一下這裡的圖示。</text></transcript>"
)
caption = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en", "vssId": ".en"}
)
assert caption.generate_srt_captions() == (
"1\n"
"00:00:06,500 --> 00:00:08,200\n"
"[Herb, Software Engineer] ๆฌๅฝฑ็ๅ
ๅซ้ฑ่ๅผๅญๅนใ\n"
"\n"
"2\n"
"00:00:08,300 --> 00:00:11,000\n"
"ๅฆ่ฆๅๅๅญๅน๏ผ่ซๆไธไธ้่ฃก็ๅ็คบใ"
)
| unlicense |
pytube/pytube | pytube/cipher.py | 1 | 22529 | """
This module contains all logic necessary to decipher the signature.
YouTube's strategy to restrict downloading videos is to send a ciphered version
of the signature to the client, along with the decryption algorithm obfuscated
in JavaScript. For the clients to play the videos, JavaScript must take the
ciphered version, cycle it through a series of "transform functions," and then
sign the media URL with the output.
This module is responsible for (1) finding and extracting those "transform
functions", (2) mapping them to Python equivalents, and (3) taking the ciphered
signature and decoding it.
"""
import logging
import re
from itertools import chain
from typing import Any, Callable, Dict, List, Optional, Tuple
from pytube.exceptions import ExtractError, RegexMatchError
from pytube.helpers import cache, regex_search
from pytube.parser import find_object_from_startpoint, throttling_array_split
logger = logging.getLogger(__name__)
class Cipher:
def __init__(self, js: str):
self.transform_plan: List[str] = get_transform_plan(js)
var_regex = re.compile(r"^\w+\W")
var_match = var_regex.search(self.transform_plan[0])
if not var_match:
raise RegexMatchError(
caller="__init__", pattern=var_regex.pattern
)
var = var_match.group(0)[:-1]
self.transform_map = get_transform_map(js, var)
self.js_func_patterns = [
r"\w+\.(\w+)\(\w,(\d+)\)",
r"\w+\[(\"\w+\")\]\(\w,(\d+)\)"
]
self.throttling_plan = get_throttling_plan(js)
self.throttling_array = get_throttling_function_array(js)
self.calculated_n = None
def calculate_n(self, initial_n: list):
"""Converts n to the correct value to prevent throttling."""
if self.calculated_n:
return self.calculated_n
# First, update all instances of 'b' with the list(initial_n)
for i in range(len(self.throttling_array)):
if self.throttling_array[i] == 'b':
self.throttling_array[i] = initial_n
for step in self.throttling_plan:
curr_func = self.throttling_array[int(step[0])]
if not callable(curr_func):
logger.debug(f'{curr_func} is not callable.')
logger.debug(f'Throttling array:\n{self.throttling_array}\n')
raise ExtractError(f'{curr_func} is not callable.')
first_arg = self.throttling_array[int(step[1])]
if len(step) == 2:
curr_func(first_arg)
elif len(step) == 3:
second_arg = self.throttling_array[int(step[2])]
curr_func(first_arg, second_arg)
self.calculated_n = ''.join(initial_n)
return self.calculated_n
def get_signature(self, ciphered_signature: str) -> str:
"""Decipher the signature.
Taking the ciphered signature, applies the transform functions.
:param str ciphered_signature:
The ciphered signature sent in the ``player_config``.
:rtype: str
:returns:
Decrypted signature required to download the media content.
"""
signature = list(ciphered_signature)
for js_func in self.transform_plan:
name, argument = self.parse_function(js_func) # type: ignore
signature = self.transform_map[name](signature, argument)
logger.debug(
"applied transform function\n"
"output: %s\n"
"js_function: %s\n"
"argument: %d\n"
"function: %s",
"".join(signature),
name,
argument,
self.transform_map[name],
)
return "".join(signature)
@cache
def parse_function(self, js_func: str) -> Tuple[str, int]:
"""Parse the Javascript transform function.
Break a JavaScript transform function down into a two element ``tuple``
containing the function name and some integer-based argument.
:param str js_func:
The JavaScript version of the transform function.
:rtype: tuple
:returns:
two element tuple containing the function name and an argument.
**Example**:
parse_function('DE.AJ(a,15)')
('AJ', 15)
"""
logger.debug("parsing transform function")
for pattern in self.js_func_patterns:
regex = re.compile(pattern)
parse_match = regex.search(js_func)
if parse_match:
fn_name, fn_arg = parse_match.groups()
return fn_name, int(fn_arg)
raise RegexMatchError(
caller="parse_function", pattern="js_func_patterns"
)
def get_initial_function_name(js: str) -> str:
"""Extract the name of the function responsible for computing the signature.
:param str js:
The contents of the base.js asset file.
:rtype: str
:returns:
Function name from regex match
"""
function_patterns = [
r"\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', # noqa: E501
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', # noqa: E501
r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r"\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(",
r"yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
]
logger.debug("finding initial function name")
for pattern in function_patterns:
regex = re.compile(pattern)
function_match = regex.search(js)
if function_match:
logger.debug("finished regex search, matched: %s", pattern)
return function_match.group(1)
raise RegexMatchError(
caller="get_initial_function_name", pattern="multiple"
)
def get_transform_plan(js: str) -> List[str]:
"""Extract the "transform plan".
The "transform plan" is the functions that the ciphered signature is
cycled through to obtain the actual signature.
:param str js:
The contents of the base.js asset file.
**Example**:
['DE.AJ(a,15)',
'DE.VR(a,3)',
'DE.AJ(a,51)',
'DE.VR(a,3)',
'DE.kT(a,51)',
'DE.kT(a,8)',
'DE.VR(a,3)',
'DE.kT(a,21)']
"""
name = re.escape(get_initial_function_name(js))
pattern = r"%s=function\(\w\){[a-z=\.\(\"\)]*;(.*);(?:.+)}" % name
logger.debug("getting transform plan")
return regex_search(pattern, js, group=1).split(";")
def get_transform_object(js: str, var: str) -> List[str]:
"""Extract the "transform object".
The "transform object" contains the function definitions referenced in the
"transform plan". The ``var`` argument is the obfuscated variable name
which contains these functions, for example, given the function call
``DE.AJ(a,15)`` returned by the transform plan, "DE" would be the var.
:param str js:
The contents of the base.js asset file.
:param str var:
The obfuscated variable name that stores an object with all functions
that descrambles the signature.
**Example**:
>>> get_transform_object(js, 'DE')
['AJ:function(a){a.reverse()}',
'VR:function(a,b){a.splice(0,b)}',
'kT:function(a,b){var c=a[0];a[0]=a[b%a.length];a[b]=c}']
"""
pattern = r"var %s={(.*?)};" % re.escape(var)
logger.debug("getting transform object")
regex = re.compile(pattern, flags=re.DOTALL)
transform_match = regex.search(js)
if not transform_match:
raise RegexMatchError(caller="get_transform_object", pattern=pattern)
return transform_match.group(1).replace("\n", " ").split(", ")
def get_transform_map(js: str, var: str) -> Dict:
"""Build a transform function lookup.
Build a lookup table of obfuscated JavaScript function names to the
Python equivalents.
:param str js:
The contents of the base.js asset file.
:param str var:
The obfuscated variable name that stores an object with all functions
that descrambles the signature.
"""
transform_object = get_transform_object(js, var)
mapper = {}
for obj in transform_object:
# AJ:function(a){a.reverse()} => AJ, function(a){a.reverse()}
name, function = obj.split(":", 1)
fn = map_functions(function)
mapper[name] = fn
return mapper
def get_throttling_function_name(js: str) -> str:
"""Extract the name of the function that computes the throttling parameter.
:param str js:
The contents of the base.js asset file.
:rtype: str
:returns:
The name of the function used to compute the throttling parameter.
"""
function_patterns = [
# https://github.com/ytdl-org/youtube-dl/issues/29326#issuecomment-865985377
# https://github.com/yt-dlp/yt-dlp/commit/48416bc4a8f1d5ff07d5977659cb8ece7640dcd8
# var Bpa = [iha];
# ...
# a.C && (b = a.get("n")) && (b = Bpa[0](b), a.set("n", b),
# Bpa.length || iha("")) }};
# In the above case, `iha` is the relevant function name
r'a\.[a-zA-Z]\s*&&\s*\([a-z]\s*=\s*a\.get\("n"\)\)\s*&&\s*'
r'\([a-z]\s*=\s*([a-zA-Z0-9$]+)(\[\d+\])?\([a-z]\)',
]
logger.debug('Finding throttling function name')
for pattern in function_patterns:
regex = re.compile(pattern)
function_match = regex.search(js)
if function_match:
logger.debug("finished regex search, matched: %s", pattern)
if len(function_match.groups()) == 1:
return function_match.group(1)
idx = function_match.group(2)
if idx:
idx = idx.strip("[]")
array = re.search(
r'var {nfunc}\s*=\s*(\[.+?\]);'.format(
nfunc=re.escape(function_match.group(1))),
js
)
if array:
array = array.group(1).strip("[]").split(",")
array = [x.strip() for x in array]
return array[int(idx)]
raise RegexMatchError(
caller="get_throttling_function_name", pattern="multiple"
)
def get_throttling_function_code(js: str) -> str:
"""Extract the raw code for the throttling function.
:param str js:
The contents of the base.js asset file.
:rtype: str
:returns:
        The raw code of the function used to compute the throttling parameter.
"""
# Begin by extracting the correct function name
name = re.escape(get_throttling_function_name(js))
# Identify where the function is defined
pattern_start = r"%s=function\(\w\)" % name
regex = re.compile(pattern_start)
match = regex.search(js)
# Extract the code within curly braces for the function itself, and merge any split lines
code_lines_list = find_object_from_startpoint(js, match.span()[1]).split('\n')
joined_lines = "".join(code_lines_list)
# Prepend function definition (e.g. `Dea=function(a)`)
return match.group(0) + joined_lines
def get_throttling_function_array(js: str) -> List[Any]:
"""Extract the "c" array.
:param str js:
The contents of the base.js asset file.
:returns:
The array of various integers, arrays, and functions.
"""
raw_code = get_throttling_function_code(js)
array_start = r",c=\["
array_regex = re.compile(array_start)
match = array_regex.search(raw_code)
array_raw = find_object_from_startpoint(raw_code, match.span()[1] - 1)
str_array = throttling_array_split(array_raw)
converted_array = []
for el in str_array:
try:
converted_array.append(int(el))
continue
except ValueError:
# Not an integer value.
pass
if el == 'null':
converted_array.append(None)
continue
if el.startswith('"') and el.endswith('"'):
# Convert e.g. '"abcdef"' to string without quotation marks, 'abcdef'
converted_array.append(el[1:-1])
continue
if el.startswith('function'):
mapper = (
(r"{for\(\w=\(\w%\w\.length\+\w\.length\)%\w\.length;\w--;\)\w\.unshift\(\w.pop\(\)\)}", throttling_unshift), # noqa:E501
(r"{\w\.reverse\(\)}", throttling_reverse),
(r"{\w\.push\(\w\)}", throttling_push),
(r";var\s\w=\w\[0\];\w\[0\]=\w\[\w\];\w\[\w\]=\w}", throttling_swap),
(r"case\s\d+", throttling_cipher_function),
(r"\w\.splice\(0,1,\w\.splice\(\w,1,\w\[0\]\)\[0\]\)", throttling_nested_splice), # noqa:E501
(r";\w\.splice\(\w,1\)}", js_splice),
(r"\w\.splice\(-\w\)\.reverse\(\)\.forEach\(function\(\w\){\w\.unshift\(\w\)}\)", throttling_prepend), # noqa:E501
(r"for\(var \w=\w\.length;\w;\)\w\.push\(\w\.splice\(--\w,1\)\[0\]\)}", throttling_reverse), # noqa:E501
)
found = False
for pattern, fn in mapper:
if re.search(pattern, el):
converted_array.append(fn)
found = True
if found:
continue
converted_array.append(el)
# Replace null elements with array itself
for i in range(len(converted_array)):
if converted_array[i] is None:
converted_array[i] = converted_array
return converted_array
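# Illustrative note (not part of the original module): the returned list is a mix of
# ints, strings, Python callables (the throttling_* helpers and js_splice), and
# self-references where the JavaScript array contained ``null``. A hypothetical
# result could look like:
#   [333525541, "Eu9EtLDY", throttling_unshift, <the array itself>, js_splice, ...]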
def get_throttling_plan(js: str):
"""Extract the "throttling plan".
The "throttling plan" is a list of tuples used for calling functions
in the c array. The first element of the tuple is the index of the
function to call, and any remaining elements of the tuple are arguments
to pass to that function.
:param str js:
The contents of the base.js asset file.
:returns:
        The list of transform steps: tuples of string indices used to call functions in the "c" array.
"""
raw_code = get_throttling_function_code(js)
transform_start = r"try{"
plan_regex = re.compile(transform_start)
match = plan_regex.search(raw_code)
transform_plan_raw = find_object_from_startpoint(raw_code, match.span()[1] - 1)
# Steps are either c[x](c[y]) or c[x](c[y],c[z])
step_start = r"c\[(\d+)\]\(c\[(\d+)\](,c(\[(\d+)\]))?\)"
step_regex = re.compile(step_start)
matches = step_regex.findall(transform_plan_raw)
transform_steps = []
for match in matches:
if match[4] != '':
transform_steps.append((match[0],match[1],match[4]))
else:
transform_steps.append((match[0],match[1]))
return transform_steps
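# Illustrative note (not part of the original module): the step indices are raw regex
# capture groups, so they are strings. A hypothetical plan such as
#   [('40', '14'), ('16', '39', '7')]
# means: call c[40](c[14]) first, then c[16](c[39], c[7]), against the "c" array
# extracted by get_throttling_function_array.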
def reverse(arr: List, _: Optional[Any]):
"""Reverse elements in a list.
This function is equivalent to:
.. code-block:: javascript
function(a, b) { a.reverse() }
    This method takes an unused ``b`` variable because the transform functions
    universally send two arguments.
    **Example**:
    >>> reverse([1, 2, 3, 4], None)
[4, 3, 2, 1]
"""
return arr[::-1]
def splice(arr: List, b: int):
"""Add/remove items to/from a list.
This function is equivalent to:
.. code-block:: javascript
function(a, b) { a.splice(0, b) }
**Example**:
>>> splice([1, 2, 3, 4], 2)
    [3, 4]
"""
return arr[b:]
def swap(arr: List, b: int):
"""Swap positions at b modulus the list length.
This function is equivalent to:
.. code-block:: javascript
function(a, b) { var c=a[0];a[0]=a[b%a.length];a[b]=c }
**Example**:
>>> swap([1, 2, 3, 4], 2)
[3, 2, 1, 4]
"""
r = b % len(arr)
return list(chain([arr[r]], arr[1:r], [arr[0]], arr[r + 1 :]))
def throttling_reverse(arr: list):
"""Reverses the input list.
Needs to do an in-place reversal so that the passed list gets changed.
To accomplish this, we create a reversed copy, and then change each
    individual element.
"""
reverse_copy = arr.copy()[::-1]
for i in range(len(reverse_copy)):
arr[i] = reverse_copy[i]
def throttling_push(d: list, e: Any):
"""Pushes an element onto a list."""
d.append(e)
def throttling_mod_func(d: list, e: int):
"""Perform the modular function from the throttling array functions.
In the javascript, the modular operation is as follows:
e = (e % d.length + d.length) % d.length
We simply translate this to python here.
"""
return (e % len(d) + len(d)) % len(d)
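# Worked example (added for illustration): in JavaScript ``-2 % 5`` is -2, so the
# extra "+ d.length" and second modulus force a non-negative index. The Python
# translation yields the same values, e.g.:
#   throttling_mod_func([0, 1, 2, 3, 4], -2)  ->  3
#   throttling_mod_func([0, 1, 2, 3, 4], 7)   ->  2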
def throttling_unshift(d: list, e: int):
"""Rotates the elements of the list to the right.
In the javascript, the operation is as follows:
for(e=(e%d.length+d.length)%d.length;e--;)d.unshift(d.pop())
"""
e = throttling_mod_func(d, e)
new_arr = d[-e:] + d[:-e]
d.clear()
for el in new_arr:
d.append(el)
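# Worked example (added for illustration): rotating right by 2 moves the last two
# elements to the front, in place:
#   d = [1, 2, 3, 4]; throttling_unshift(d, 2); d  ->  [3, 4, 1, 2]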
def throttling_cipher_function(d: list, e: str):
"""This ciphers d with e to generate a new list.
In the javascript, the operation is as follows:
var h = [A-Za-z0-9-_], f = 96; // simplified from switch-case loop
d.forEach(
function(l,m,n){
this.push(
n[m]=h[
(h.indexOf(l)-h.indexOf(this[m])+m-32+f--)%h.length
]
)
},
e.split("")
)
"""
h = list('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_')
f = 96
# by naming it "this" we can more closely reflect the js
this = list(e)
# This is so we don't run into weirdness with enumerate while
# we change the input list
copied_list = d.copy()
for m, l in enumerate(copied_list):
bracket_val = (h.index(l) - h.index(this[m]) + m - 32 + f) % len(h)
this.append(
h[bracket_val]
)
d[m] = h[bracket_val]
f -= 1
def throttling_nested_splice(d: list, e: int):
"""Nested splice function in throttling js.
In the javascript, the operation is as follows:
function(d,e){
e=(e%d.length+d.length)%d.length;
d.splice(
0,
1,
d.splice(
e,
1,
d[0]
)[0]
)
}
While testing, all this seemed to do is swap element 0 and e,
but the actual process is preserved in case there was an edge
case that was not considered.
"""
e = throttling_mod_func(d, e)
inner_splice = js_splice(
d,
e,
1,
d[0]
)
js_splice(
d,
0,
1,
inner_splice[0]
)
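# Worked example (added for illustration): despite the nested splices, the net
# effect is a swap of elements 0 and e, in place:
#   d = [1, 2, 3, 4]; throttling_nested_splice(d, 2); d  ->  [3, 2, 1, 4]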
def throttling_prepend(d: list, e: int):
"""
In the javascript, the operation is as follows:
function(d,e){
e=(e%d.length+d.length)%d.length;
d.splice(-e).reverse().forEach(
function(f){
d.unshift(f)
}
)
}
Effectively, this moves the last e elements of d to the beginning.
"""
start_len = len(d)
# First, calculate e
e = throttling_mod_func(d, e)
# Then do the prepending
new_arr = d[-e:] + d[:-e]
# And update the input list
d.clear()
for el in new_arr:
d.append(el)
end_len = len(d)
assert start_len == end_len
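# Worked example (added for illustration): the last e elements move to the front,
# in place, and the length is unchanged:
#   d = [1, 2, 3, 4, 5]; throttling_prepend(d, 2); d  ->  [4, 5, 1, 2, 3]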
def throttling_swap(d: list, e: int):
"""Swap positions of the 0'th and e'th elements in-place."""
e = throttling_mod_func(d, e)
f = d[0]
d[0] = d[e]
d[e] = f
def js_splice(arr: list, start: int, delete_count=None, *items):
"""Implementation of javascript's splice function.
:param list arr:
Array to splice
:param int start:
Index at which to start changing the array
:param int delete_count:
Number of elements to delete from the array
:param *items:
Items to add to the array
Reference: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/splice # noqa:E501
"""
# Special conditions for start value
try:
if start > len(arr):
start = len(arr)
# If start is negative, count backwards from end
if start < 0:
            start = max(len(arr) + start, 0)
except TypeError:
# Non-integer start values are treated as 0 in js
start = 0
# Special condition when delete_count is greater than remaining elements
if not delete_count or delete_count >= len(arr) - start:
delete_count = len(arr) - start # noqa: N806
deleted_elements = arr[start:start + delete_count]
# Splice appropriately.
new_arr = arr[:start] + list(items) + arr[start + delete_count:]
# Replace contents of input array
arr.clear()
for el in new_arr:
arr.append(el)
return deleted_elements
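# Worked example (added for illustration): as with Array.prototype.splice, the
# deleted slice is returned and the input list is mutated:
#   arr = [1, 2, 3, 4, 5]
#   js_splice(arr, 1, 2, "a")  ->  [2, 3]   (and arr is now [1, "a", 4, 5])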
def map_functions(js_func: str) -> Callable:
"""For a given JavaScript transform function, return the Python equivalent.
:param str js_func:
The JavaScript version of the transform function.
"""
mapper = (
# function(a){a.reverse()}
(r"{\w\.reverse\(\)}", reverse),
# function(a,b){a.splice(0,b)}
(r"{\w\.splice\(0,\w\)}", splice),
# function(a,b){var c=a[0];a[0]=a[b%a.length];a[b]=c}
(r"{var\s\w=\w\[0\];\w\[0\]=\w\[\w\%\w.length\];\w\[\w\]=\w}", swap),
# function(a,b){var c=a[0];a[0]=a[b%a.length];a[b%a.length]=c}
(
r"{var\s\w=\w\[0\];\w\[0\]=\w\[\w\%\w.length\];\w\[\w\%\w.length\]=\w}",
swap,
),
)
for pattern, fn in mapper:
if re.search(pattern, js_func):
return fn
raise RegexMatchError(caller="map_functions", pattern="multiple")
| unlicense |
pytube/pytube | tests/contrib/test_channel.py | 1 | 3122 | from unittest import mock
from pytube import Channel
@mock.patch('pytube.request.get')
def test_init_with_url(request_get, channel_videos_html):
request_get.return_value = channel_videos_html
c = Channel('https://www.youtube.com/c/ProgrammingKnowledge/videos')
assert c.channel_url == 'https://www.youtube.com/c/ProgrammingKnowledge'
assert c.videos_url == f'{c.channel_url}/videos'
assert c.playlists_url == f'{c.channel_url}/playlists'
assert c.community_url == f'{c.channel_url}/community'
assert c.featured_channels_url == f'{c.channel_url}/channels'
assert c.about_url == f'{c.channel_url}/about'
@mock.patch('pytube.request.get')
def test_channel_uri(request_get, channel_videos_html):
request_get.return_value = channel_videos_html
c = Channel('https://www.youtube.com/c/ProgrammingKnowledge/videos')
assert c.channel_uri == '/c/ProgrammingKnowledge'
c = Channel('https://www.youtube.com/channel/UCs6nmQViDpUw0nuIx9c_WvA/videos')
assert c.channel_uri == '/channel/UCs6nmQViDpUw0nuIx9c_WvA'
@mock.patch('pytube.request.get')
def test_channel_name(request_get, channel_videos_html):
request_get.return_value = channel_videos_html
c = Channel('https://www.youtube.com/c/ProgrammingKnowledge/videos')
assert c.channel_name == 'ProgrammingKnowledge'
@mock.patch('pytube.request.get')
def test_channel_id(request_get, channel_videos_html):
request_get.return_value = channel_videos_html
c = Channel('https://www.youtube.com/c/ProgrammingKnowledge/videos')
assert c.channel_id == 'UCs6nmQViDpUw0nuIx9c_WvA'
@mock.patch('pytube.request.get')
def test_channel_vanity_url(request_get, channel_videos_html):
request_get.return_value = channel_videos_html
c = Channel('https://www.youtube.com/c/ProgrammingKnowledge/videos')
assert c.vanity_url == 'http://www.youtube.com/c/ProgrammingKnowledge'
@mock.patch('pytube.request.get')
def test_channel_video_list(request_get, channel_videos_html):
request_get.return_value = channel_videos_html
c = Channel('https://www.youtube.com/c/ProgrammingKnowledge/videos')
first_ten = [
'https://www.youtube.com/watch?v=t_xLpJo_35k',
'https://www.youtube.com/watch?v=ccbh5YhxouQ',
'https://www.youtube.com/watch?v=wDnFjDjxW_0',
'https://www.youtube.com/watch?v=F3W_p_4XftA',
'https://www.youtube.com/watch?v=_fxm0xGGEi4',
'https://www.youtube.com/watch?v=cRbKZzcuIsg',
'https://www.youtube.com/watch?v=sdDu3dfIuow',
'https://www.youtube.com/watch?v=10KIbp-gJCE',
'https://www.youtube.com/watch?v=wZIT-cRtd6s',
'https://www.youtube.com/watch?v=KucCvEbTj0w',
]
assert c.video_urls[:10] == first_ten
@mock.patch('pytube.request.get')
def test_videos_html(request_get, channel_videos_html):
request_get.return_value = channel_videos_html
c = Channel('https://www.youtube.com/c/ProgrammingKnowledge')
assert c.html == channel_videos_html
# Because the Channel object subclasses the Playlist object, most of the tests
# are already taken care of by the Playlist test suite.
| unlicense |
pytube/pytube | pytube/request.py | 1 | 8512 | """Implements a simple wrapper around urlopen."""
import http.client
import json
import logging
import re
import socket
from functools import lru_cache
from urllib import parse
from urllib.error import URLError
from urllib.request import Request, urlopen
from pytube.exceptions import RegexMatchError, MaxRetriesExceeded
from pytube.helpers import regex_search
logger = logging.getLogger(__name__)
default_range_size = 9437184 # 9MB
def _execute_request(
url,
method=None,
headers=None,
data=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT
):
base_headers = {"User-Agent": "Mozilla/5.0", "accept-language": "en-US,en"}
if headers:
base_headers.update(headers)
if data:
# encode data for request
if not isinstance(data, bytes):
data = bytes(json.dumps(data), encoding="utf-8")
if url.lower().startswith("http"):
request = Request(url, headers=base_headers, method=method, data=data)
else:
raise ValueError("Invalid URL")
return urlopen(request, timeout=timeout) # nosec
def get(url, extra_headers=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""Send an http GET request.
:param str url:
The URL to perform the GET request for.
:param dict extra_headers:
Extra headers to add to the request
:rtype: str
:returns:
UTF-8 encoded string of response
"""
if extra_headers is None:
extra_headers = {}
response = _execute_request(url, headers=extra_headers, timeout=timeout)
return response.read().decode("utf-8")
def post(url, extra_headers=None, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""Send an http POST request.
:param str url:
The URL to perform the POST request for.
:param dict extra_headers:
Extra headers to add to the request
:param dict data:
The data to send on the POST request
:rtype: str
:returns:
UTF-8 encoded string of response
"""
# could technically be implemented in get,
# but to avoid confusion implemented like this
if extra_headers is None:
extra_headers = {}
if data is None:
data = {}
# required because the youtube servers are strict on content type
# raises HTTPError [400]: Bad Request otherwise
extra_headers.update({"Content-Type": "application/json"})
response = _execute_request(
url,
headers=extra_headers,
data=data,
timeout=timeout
)
return response.read().decode("utf-8")
def seq_stream(
url,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
max_retries=0
):
"""Read the response in sequence.
:param str url: The URL to perform the GET request for.
:rtype: Iterable[bytes]
"""
# YouTube expects a request sequence number as part of the parameters.
split_url = parse.urlsplit(url)
base_url = '%s://%s/%s?' % (split_url.scheme, split_url.netloc, split_url.path)
querys = dict(parse.parse_qsl(split_url.query))
# The 0th sequential request provides the file headers, which tell us
# information about how the file is segmented.
querys['sq'] = 0
url = base_url + parse.urlencode(querys)
segment_data = b''
for chunk in stream(url, timeout=timeout, max_retries=max_retries):
yield chunk
segment_data += chunk
# We can then parse the header to find the number of segments
stream_info = segment_data.split(b'\r\n')
segment_count_pattern = re.compile(b'Segment-Count: (\\d+)')
for line in stream_info:
match = segment_count_pattern.search(line)
if match:
segment_count = int(match.group(1).decode('utf-8'))
# We request these segments sequentially to build the file.
seq_num = 1
while seq_num <= segment_count:
# Create sequential request URL
querys['sq'] = seq_num
url = base_url + parse.urlencode(querys)
yield from stream(url, timeout=timeout, max_retries=max_retries)
seq_num += 1
return # pylint: disable=R1711
def stream(
url,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
max_retries=0
):
"""Read the response in chunks.
:param str url: The URL to perform the GET request for.
:rtype: Iterable[bytes]
"""
file_size: int = default_range_size # fake filesize to start
downloaded = 0
while downloaded < file_size:
stop_pos = min(downloaded + default_range_size, file_size) - 1
range_header = f"bytes={downloaded}-{stop_pos}"
tries = 0
# Attempt to make the request multiple times as necessary.
while True:
# If the max retries is exceeded, raise an exception
if tries >= 1 + max_retries:
raise MaxRetriesExceeded()
# Try to execute the request, ignoring socket timeouts
try:
response = _execute_request(
url,
method="GET",
headers={"Range": range_header},
timeout=timeout
)
except URLError as e:
# We only want to skip over timeout errors, and
# raise any other URLError exceptions
if isinstance(e.reason, socket.timeout):
pass
else:
raise
except http.client.IncompleteRead:
# Allow retries on IncompleteRead errors for unreliable connections
pass
else:
# On a successful request, break from loop
break
tries += 1
if file_size == default_range_size:
try:
content_range = response.info()["Content-Range"]
file_size = int(content_range.split("/")[1])
except (KeyError, IndexError, ValueError) as e:
logger.error(e)
while True:
chunk = response.read()
if not chunk:
break
downloaded += len(chunk)
yield chunk
return # pylint: disable=R1711
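# Illustrative usage (the URL is a placeholder, not a real stream URL):
#   with open("video.mp4", "wb") as fh:
#       for chunk in stream("https://example.com/videoplayback?itag=22"):
#           fh.write(chunk)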
@lru_cache()
def filesize(url):
"""Fetch size in bytes of file at given URL
:param str url: The URL to get the size of
:returns: int: size in bytes of remote file
"""
return int(head(url)["content-length"])
@lru_cache()
def seq_filesize(url):
"""Fetch size in bytes of file at given URL from sequential requests
:param str url: The URL to get the size of
:returns: int: size in bytes of remote file
"""
total_filesize = 0
# YouTube expects a request sequence number as part of the parameters.
split_url = parse.urlsplit(url)
base_url = '%s://%s/%s?' % (split_url.scheme, split_url.netloc, split_url.path)
querys = dict(parse.parse_qsl(split_url.query))
# The 0th sequential request provides the file headers, which tell us
# information about how the file is segmented.
querys['sq'] = 0
url = base_url + parse.urlencode(querys)
response = _execute_request(
url, method="GET"
)
response_value = response.read()
# The file header must be added to the total filesize
total_filesize += len(response_value)
# We can then parse the header to find the number of segments
segment_count = 0
stream_info = response_value.split(b'\r\n')
segment_regex = b'Segment-Count: (\\d+)'
for line in stream_info:
# One of the lines should contain the segment count, but we don't know
# which, so we need to iterate through the lines to find it
try:
segment_count = int(regex_search(segment_regex, line, 1))
except RegexMatchError:
pass
if segment_count == 0:
raise RegexMatchError('seq_filesize', segment_regex)
# We make HEAD requests to the segments sequentially to find the total filesize.
seq_num = 1
while seq_num <= segment_count:
# Create sequential request URL
querys['sq'] = seq_num
url = base_url + parse.urlencode(querys)
total_filesize += int(head(url)['content-length'])
seq_num += 1
return total_filesize
def head(url):
"""Fetch headers returned http GET request.
:param str url:
The URL to perform the GET request for.
:rtype: dict
:returns:
dictionary of lowercase headers
"""
response_headers = _execute_request(url, method="HEAD").info()
return {k.lower(): v for k, v in response_headers.items()}
| unlicense |
mozilla-iam/cis | python-modules/cis_identity_vault/cis_identity_vault/vault.py | 1 | 15533 | """Create, destroy, and configure the appropriate vault for the environment."""
import boto3
import time
from botocore.exceptions import ClientError
from botocore.stub import Stubber
from cis_identity_vault import autoscale
from cis_identity_vault.common import get_config
from cis_identity_vault.models import rds
from logging import getLogger
from sqlalchemy import create_engine
from sqlalchemy.exc import OperationalError
from sqlalchemy_utils import create_database
logger = getLogger(__name__)
class IdentityVault(object):
def __init__(self):
self.boto_session = None
self.dynamodb_client = None
self.config = get_config()
def connect(self):
self._session()
if self.dynamodb_client is None:
if self._get_cis_environment() == "local":
dynalite_port = self.config("dynalite_port", namespace="cis", default="4567")
dynalite_host = self.config("dynalite_host", namespace="cis", default="localhost")
self.dynamodb_client = self.boto_session.client(
"dynamodb", endpoint_url="http://{}:{}".format(dynalite_host, dynalite_port)
)
else:
self.dynamodb_client = self.boto_session.client("dynamodb")
return self.dynamodb_client
def _session(self):
if self.boto_session is None:
region = self.config("region_name", namespace="cis", default="us-west-2")
if self._get_cis_environment() == "local":
self.boto_session = Stubber(boto3.session.Session(region_name=region)).client
else:
self.boto_session = boto3.session.Session(region_name=region)
return self.boto_session
def _get_cis_environment(self):
return self.config("environment", namespace="cis", default="local")
def _generate_table_name(self):
return "{}-identity-vault".format(self._get_cis_environment())
def enable_stream(self):
self.connect()
result = self.dynamodb_client.update_table(
TableName=self._generate_table_name(),
StreamSpecification={"StreamEnabled": True, "StreamViewType": "NEW_AND_OLD_IMAGES"},
)
return result
def enable_autoscaler(self):
scaler_config = autoscale.ScalableTable(self._generate_table_name())
scaler_config.connect()
return scaler_config.enable_autoscaler()
def tag_vault(self):
self.connect()
arn = self.find()
tags = [
{"Key": "cis_environment", "Value": self._get_cis_environment()},
{"Key": "application", "Value": "identity-vault"},
]
try:
return self.dynamodb_client.tag_resource(ResourceArn=arn, Tags=tags)
except ClientError:
logger.error("The table does not support tagging.")
except Exception as e:
logger.error("The table did not tag for an unknown reason: {}".format(e))
def find(self):
self.connect()
try:
if self._get_cis_environment() == "local":
# Assume that the local identity vault is always called local-identity-vault
return self.dynamodb_client.describe_table(TableName="local-identity-vault")["Table"]["TableArn"]
else:
# Assume that we are in AWS and list tables, describe tables, and check tags.
tables = self.dynamodb_client.list_tables(Limit=100)
for table in tables.get("TableNames"):
table_arn = self.dynamodb_client.describe_table(TableName=table)["Table"]["TableArn"]
if table == self._generate_table_name():
return table_arn
except ClientError as exception:
if exception.response["Error"]["Code"] == "ResourceNotFoundException":
return None
else:
raise
def create(self):
if self._get_cis_environment() not in ["production", "development", "testing"]:
result = self.dynamodb_client.create_table(
TableName=self._generate_table_name(),
KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
AttributeDefinitions=[
# auth0 user_id
{"AttributeName": "id", "AttributeType": "S"},
                    # user_uuid formerly dinopark id (uuid is a reserved keyword in dynamo, hence user_uuid)
{"AttributeName": "user_uuid", "AttributeType": "S"},
# sequence number for the last integration
{"AttributeName": "sequence_number", "AttributeType": "S"},
# value of the primary_email attribute
{"AttributeName": "primary_email", "AttributeType": "S"},
# value of the primary_username attribute
{"AttributeName": "primary_username", "AttributeType": "S"},
],
ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
GlobalSecondaryIndexes=[
{
"IndexName": "{}-sequence_number".format(self._generate_table_name()),
"KeySchema": [{"AttributeName": "sequence_number", "KeyType": "HASH"}],
"Projection": {"ProjectionType": "ALL"},
"ProvisionedThroughput": {"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
},
{
"IndexName": "{}-primary_username".format(self._generate_table_name()),
"KeySchema": [
{"AttributeName": "primary_username", "KeyType": "HASH"},
{"AttributeName": "id", "KeyType": "RANGE"},
],
"Projection": {"ProjectionType": "ALL"},
"ProvisionedThroughput": {"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
},
{
"IndexName": "{}-primary_email".format(self._generate_table_name()),
"KeySchema": [
{"AttributeName": "primary_email", "KeyType": "HASH"},
{"AttributeName": "id", "KeyType": "RANGE"},
],
"Projection": {"ProjectionType": "ALL"},
"ProvisionedThroughput": {"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
},
{
"IndexName": "{}-user_uuid".format(self._generate_table_name()),
"KeySchema": [
{"AttributeName": "user_uuid", "KeyType": "HASH"},
{"AttributeName": "id", "KeyType": "RANGE"},
],
"Projection": {"ProjectionType": "ALL"},
# Removed due to moving to pay per query.
"ProvisionedThroughput": {"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
},
],
)
else:
result = self.dynamodb_client.create_table(
TableName=self._generate_table_name(),
KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
AttributeDefinitions=[
# auth0 user_id
{"AttributeName": "id", "AttributeType": "S"},
                    # user_uuid formerly dinopark id (uuid is a reserved keyword in dynamo, hence user_uuid)
{"AttributeName": "user_uuid", "AttributeType": "S"},
# sequence number for the last integration
{"AttributeName": "sequence_number", "AttributeType": "S"},
# value of the primary_email attribute
{"AttributeName": "primary_email", "AttributeType": "S"},
# value of the primary_username attribute
{"AttributeName": "primary_username", "AttributeType": "S"},
],
BillingMode="PAY_PER_REQUEST",
GlobalSecondaryIndexes=[
{
"IndexName": "{}-sequence_number".format(self._generate_table_name()),
"KeySchema": [{"AttributeName": "sequence_number", "KeyType": "HASH"}],
"Projection": {"ProjectionType": "ALL"},
},
{
"IndexName": "{}-primary_username".format(self._generate_table_name()),
"KeySchema": [
{"AttributeName": "primary_username", "KeyType": "HASH"},
{"AttributeName": "id", "KeyType": "RANGE"},
],
"Projection": {"ProjectionType": "ALL"},
},
{
"IndexName": "{}-primary_email".format(self._generate_table_name()),
"KeySchema": [
{"AttributeName": "primary_email", "KeyType": "HASH"},
{"AttributeName": "id", "KeyType": "RANGE"},
],
"Projection": {"ProjectionType": "ALL"},
},
{
"IndexName": "{}-user_uuid".format(self._generate_table_name()),
"KeySchema": [
{"AttributeName": "user_uuid", "KeyType": "HASH"},
{"AttributeName": "id", "KeyType": "RANGE"},
],
"Projection": {"ProjectionType": "ALL"},
},
],
)
waiter = self.dynamodb_client.get_waiter("table_exists")
if self._get_cis_environment() in ["production", "development", "testing"]:
waiter.wait(TableName=self._generate_table_name(), WaiterConfig={"Delay": 20, "MaxAttempts": 20})
self.tag_vault()
self.setup_stream()
else:
waiter.wait(TableName=self._generate_table_name(), WaiterConfig={"Delay": 1, "MaxAttempts": 5})
return result
def destroy(self):
result = self.dynamodb_client.delete_table(TableName=self._generate_table_name())
return result
def __get_table_resource(self):
region = self.config("region_name", namespace="cis", default="us-west-2")
if self._get_cis_environment() == "local":
self.boto_session = Stubber(boto3.session.Session(region_name=region)).client
dynalite_port = self.config("dynalite_port", namespace="cis", default="4567")
dynalite_host = self.config("dynalite_host", namespace="cis", default="localhost")
dynamodb_resource = self.boto_session.resource(
"dynamodb", endpoint_url="http://{}:{}".format(dynalite_host, dynalite_port)
)
table = dynamodb_resource.Table(self._generate_table_name())
else:
dynamodb_resource = boto3.resource("dynamodb", region_name=region)
table = dynamodb_resource.Table(self._generate_table_name())
return table
def find_or_create(self):
if self.find() is not None:
table = self.__get_table_resource()
else:
self.create()
table = self.__get_table_resource()
return table
def describe_indices(self):
return self.dynamodb_client.describe_table(TableName=self._generate_table_name())
def _has_stream(self):
result = self.dynamodb_client.describe_table(TableName=self._generate_table_name()).get("Table")
if result.get("StreamSpecification"):
return True
else:
return False
def setup_stream(self):
if self._has_stream() is False:
try:
return self.dynamodb_client.update_table(
TableName=self._generate_table_name(),
StreamSpecification={"StreamEnabled": True, "StreamViewType": "KEYS_ONLY"},
)
except ClientError as e:
logger.error("The table does not support streams: {}.".format(e))
return
except Exception as e:
logger.error("The table did not tag for an unknown reason: {}".format(e))
class RelationalIdentityVault(object):
"""Create a postgres model of the data that is in DynamoDb in order to support advanced search."""
def __init__(self):
self.config = get_config()
self.environment = self.config("environment", namespace="cis", default="testing")
self.postgres_host = self.config("postgres_host", namespace="cis", default="localhost")
self.postgres_port = int(self.config("postgres_port", namespace="cis", default="5432"))
self.db_name = self.config("identity_vault", namespace="cis", default=f"{self.environment}-identity-vault")
self.db_user = self.config("db_user", namespace="cis", default="cis_user")
self.db_password = self._db_password_from_ssm()
def _db_password_from_ssm(self):
password_from_environment = self.config("db_password", namespace="cis", default="None")
retries = 5
backoff = 15
if password_from_environment != "None":
return password_from_environment
else:
result = None
while result is None:
try:
self.ssm_client = boto3.client("ssm")
ssm_path = self.config("db_password_path", namespace="cis", default="/iam/development")
ssm_response = self.ssm_client.get_parameter(Name=ssm_path, WithDecryption=True)
result = ssm_response.get("Parameter").get("Value")
logger.debug("Secret manager SSM provider loading db_password: {}".format(ssm_path))
except ClientError as e:
retries = retries - 1
backoff = backoff + 1
logger.debug(
"Backing-off: fetch secret due to: {} retries {} backoff {}".format(e, retries, backoff)
)
time.sleep(backoff)
if retries <= 0:
break
else:
pass
return result
def _db_string(self):
proto = "postgresql+psycopg2://"
access_information = f"{self.db_user}:{self.db_password}"
connection_information = f"@{self.postgres_host}:{self.postgres_port}/{self.db_name}"
return proto + access_information + connection_information
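    # Illustrative result of _db_string() with the defaults above (values are
    # examples only; the real password comes from the environment or SSM):
    #   postgresql+psycopg2://cis_user:<password>@localhost:5432/testing-identity-vault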
def session(self):
return create_engine(self._db_string())
def engine(self):
engine = self.session().connect()
return engine
def create(self):
return rds.Base.metadata.create_all(self.engine())
def delete(self):
return rds.Base.metadata.drop_all(self.engine())
def table(self):
metadata = rds.Base.metadata
metadata.bind = self.engine()
return metadata.tables.get("people")
def find_or_create(self):
try:
self.table()
except OperationalError:
create_database(create_engine(self._db_string()).url)
self.create()
self.table()
return self.table()
| mpl-2.0 |
mozilla-iam/cis | python-modules/cis_notifications/tests/test_notifier.py | 1 | 1570 | import cis_notifications
import json
import mock
class TestNotifier(object):
@mock.patch("cis_notifications.event.Event._notify_via_post")
@mock.patch("cis_notifications.secret.Manager.secret")
@mock.patch("cis_notifications.secret.Manager.secretmgr")
@mock.patch("cis_notifications.secret.Manager.secretmgr_store")
@mock.patch("cis_notifications.secret.AuthZero.exchange_for_access_token")
def test_event_to_request(self, mock_authzero, mock_secretsmgr_store, mock_secretsmgr, mock_secrets, mock_request):
"""Test ingesting the event from the lambda function event handler and transforming it into a request.
Arguments:
object {[object]} -- [Takes an instance of the testNotifier object and asserts about the behavior.]
"""
mock_authzero.return_value = {"access_token": "dinopark", "expires_in": 86400}
mock_secrets.return_value = "is_pretty_cool"
mock_secretsmgr.return_value = mock_authzero.return_value
mock_secretsmgr_store.return_value = None
mock_request.return_value = 200
fh = open("tests/fixtures/event.json")
event_fixture = json.loads(fh.read())
fh.close()
for record in event_fixture["Records"]:
e = cis_notifications.event.Event(event=record)
notification = e.to_notification()
result = e.send(notification)
assert notification is not None
assert result is not None
assert result["https://dinopark.k8s.dev.sso.allizom.org/events/update"] == 200
| mpl-2.0 |
mozilla-iam/cis | python-modules/cis_identity_vault/integration_tests/test_scan_speed.py | 1 | 2425 | """Designed to run against the testing environment."""
import boto3
import logging
import os
from cis_identity_vault.models import user
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
dynamodb_client = boto3.client("dynamodb")
dynamodb_table = boto3.resource("dynamodb").Table("testing-identity-vault")
def setup_environment():
os.environ["CIS_ENVIRONMENT"] = "testing"
os.environ["CIS_REGION_NAME"] = "us-west-2"
os.environ["DEFAULT_AWS_REGION"] = "us-west-2"
def filtered_scan_wrapper():
setup_environment()
u = user.Profile(
dynamodb_table_resource=dynamodb_table,
dynamodb_client=dynamodb_client, transactions=True
)
connection_methods = ["github", "ad", "email", "oauth2", "google-oauth2"]
results = []
for conn_method in connection_methods:
result = u.all_filtered(connection_method=conn_method, active=True, next_page=None)
results.extend(result["users"])
while result.get("nextPage"):
logger.info("Trying to follow the page.")
logger.info("Next page currently is: {}".format(result.get("nextPage")))
result = u.all_filtered(connection_method=conn_method, active=True, next_page=result.get("nextPage"))
results.extend(result["users"])
logger.debug("Total records retrieved: {}".format(len(results)))
logger.debug("Total records retrieved: {}".format(len(results)))
def filtered_scan_wrapper_inactive():
setup_environment()
u = user.Profile(
dynamodb_table_resource=dynamodb_table,
dynamodb_client=dynamodb_client, transactions=True
)
results = []
result = u.all_filtered(connection_method="ad")
results.extend(result["users"])
while result.get("nextPage"):
result = u.all_filtered(connection_method="ad", next_page=result.get("nextPage"), active=False)
results.extend(result["users"])
logger.debug("Total records retrieved: {}".format(len(results)))
logger.debug("Total records retrieved: {}".format(len(results)))
def test_filtered_scan(benchmark):
benchmark.pedantic(filtered_scan_wrapper, iterations=1, rounds=1)
def test_filtered_scan_inactive(benchmark):
benchmark.pedantic(filtered_scan_wrapper_inactive, iterations=1, rounds=1)
| mpl-2.0 |
mozilla-iam/cis | python-modules/cis_crypto/cis_crypto/operation.py | 1 | 5491 | import json
import logging
import os
import yaml
from jose import jwk
from jose import jws
from jose.exceptions import JWSError
from cis_crypto import secret
from cis_crypto import common
logger = logging.getLogger(__name__)
# Note:
# These attrs on sign/verify could be refactored to use object inheritance. Leaving as is for now for readability.
class Sign(object):
def __init__(self):
self.config = common.get_config()
self.key_name = self.config("signing_key_name", namespace="cis", default="file")
self._jwk = None
self.secret_manager = self.config("secret_manager", namespace="cis", default="file")
self.payload = None
def load(self, data):
"""Loads a payload to the object and ensures that the thing is serializable."""
try:
data = yaml.safe_load(data)
except yaml.scanner.ScannerError:
logger.debug("This file is likely not YAML. Attempting JSON load.")
except AttributeError:
logger.debug("This file is likely not YAML. Attempting JSON load.")
if isinstance(data, str):
data = json.loads(data)
else:
pass
self.payload = data
return self.payload
def jws(self, keyname=None):
"""Assumes you loaded a payload. Returns a jws."""
# Override key name
if keyname is not None:
self.key_name = keyname
key_jwk = self._get_key()
sig = jws.sign(self.payload, key_jwk.to_dict(), algorithm="RS256")
return sig
def _get_key(self):
if self._jwk is None:
manager = secret.Manager(provider_type=self.secret_manager)
self._jwk = manager.get_key(key_name=self.key_name)
return self._jwk
class Verify(object):
def __init__(self):
self.config = common.get_config()
# Provide file or URL as opts.
self.well_known_mode = self.config("well_known_mode", namespace="cis", default="file")
self.public_key_name = None # Optional for use with file based well known mode
self.jws_signature = None
self.well_known = None # Well known JSON data
def load(self, jws_signature):
"""Takes data in the form of a dict() and a JWS sig."""
# Store the original form in the jws_signature attribute
self.jws_signature = jws_signature
def _get_public_key(self, keyname=None):
"""Returns a jwk construct for the public key and mode specified."""
if self.well_known_mode == "file":
key_dir = self.config(
"secret_manager_file_path",
namespace="cis",
default=("{}/.mozilla-iam/keys/".format(os.path.expanduser("~"))),
)
key_name = self.config("public_key_name", namespace="cis", default="access-file-key")
file_name = "{}".format(key_name)
fh = open((os.path.join(key_dir, file_name)), "rb")
key_content = fh.read()
key_construct = jwk.construct(key_content, "RS256")
return [key_construct.to_dict()]
elif self.well_known_mode == "http" or self.well_known_mode == "https":
logger.debug("Well known mode engaged. Reducing key structure.", extra={"well_known": self.well_known})
return self._reduce_keys(keyname)
def _reduce_keys(self, keyname):
access_file_keys = self.well_known["access_file"]["jwks"]["keys"]
publishers_supported = self.well_known["api"]["publishers_jwks"]
keys = []
if "access-file-key" in self.config("public_key_name", namespace="cis"):
logger.debug("This is an access file verification.")
return access_file_keys
else:
# If not an access key verification this will attempt to verify against any listed publisher.
keys = publishers_supported[keyname]["keys"]
logger.debug("Publisher based verification, will use {} public keys for verification.".format(keys))
return keys
def jws(self, keyname=None):
"""Assumes you loaded a payload. Return the same jws or raise a custom exception."""
key_material = self._get_public_key(keyname)
logger.debug(
"The key material for the payload was loaded for: {}".format(keyname), extra={"key_material": key_material}
)
if isinstance(key_material, list):
logger.debug("Multiple keys returned. Attempting match.")
for key in key_material:
try:
key.pop("x5t", None)
key.pop("x5c", None)
except AttributeError:
                    logger.warning("x5t and x5c attrs do not exist in key material.")
logger.debug("Attempting to match against: {}".format(key))
try:
sig = jws.verify(self.jws_signature, key, algorithms="RS256", verify=True)
logger.debug(
"Matched a verified signature for: {}".format(key), extra={"signature": self.jws_signature}
)
return sig
except JWSError as e:
logger.error(
"The signature was not valid for the payload.", extra={"signature": self.jws_signature}
)
logger.error(e)
raise JWSError("The signature could not be verified for any trusted key", key_material)
| mpl-2.0 |
mozilla-iam/cis | python-modules/cis_identity_vault/tests/test_rds.py | 1 | 5959 | import os
from moto import mock_ssm
from cis_profile import FakeUser
@mock_ssm
class TestRDS(object):
def setup(self, *args):
os.environ["CIS_ENVIRONMENT"] = "testing"
os.environ["CIS_REGION_NAME"] = "us-east-1"
os.environ["DEFAULT_AWS_REGION"] = "us-east-1"
# Mock a user profile using the faker to send to the database.
self.user_profile = FakeUser().as_dict()
def test_table_init(self):
os.environ["CIS_POSTGRES_HOST"] = "db"
os.environ["CIS_POSTGRES_PORT"] = "5432"
os.environ["CIS_DB_USER"] = "cis_user"
os.environ["CIS_DB_PASSWORD"] = "testing"
from cis_identity_vault import vault
v = vault.RelationalIdentityVault()
v.create()
assert v.table() is not None
v.delete()
def test_db_create(self):
os.environ["CIS_POSTGRES_HOST"] = "db"
os.environ["CIS_POSTGRES_PORT"] = "5432"
os.environ["CIS_DB_USER"] = "cis_user"
os.environ["CIS_DB_PASSWORD"] = "testing"
os.environ["CIS_IDENTITY_VAULT"] = "purple-unicorn"
from cis_identity_vault import vault
v = vault.RelationalIdentityVault()
v.find_or_create()
v.delete()
def test_user_create(self):
os.environ["CIS_POSTGRES_HOST"] = "db"
os.environ["CIS_POSTGRES_PORT"] = "5432"
os.environ["CIS_DB_USER"] = "cis_user"
os.environ["CIS_DB_PASSWORD"] = "testing"
from cis_identity_vault import vault
from cis_identity_vault.models import user
v = vault.RelationalIdentityVault()
v.create()
u = user.ProfileRDS()
res = u.create(user_profile=self.user_profile)
u.delete(user_profile=self.user_profile)
assert res is not None
v.delete()
def test_user_find(self):
os.environ["CIS_POSTGRES_HOST"] = "db"
os.environ["CIS_POSTGRES_PORT"] = "5432"
os.environ["CIS_DB_USER"] = "cis_user"
os.environ["CIS_DB_PASSWORD"] = "testing"
from cis_identity_vault import vault
from cis_identity_vault.models import user
v = vault.RelationalIdentityVault()
v.create()
u = user.ProfileRDS()
res = u.create(user_profile=self.user_profile)
positive_search_result = u.find(self.user_profile)
assert positive_search_result is not None
non_existant_user = FakeUser().as_dict()
negative_search_result = u.find(non_existant_user)
assert negative_search_result is None
u.delete(user_profile=self.user_profile)
assert res is not None
v.delete()
def test_user_delete(self):
os.environ["CIS_POSTGRES_HOST"] = "db"
os.environ["CIS_POSTGRES_PORT"] = "5432"
os.environ["CIS_DB_USER"] = "cis_user"
os.environ["CIS_DB_PASSWORD"] = "testing"
from cis_identity_vault import vault
from cis_identity_vault.models import user
v = vault.RelationalIdentityVault()
v.create()
u = user.ProfileRDS()
u.create(user_profile=self.user_profile)
u.delete(user_profile=self.user_profile)
assert u.find(self.user_profile) is None
def test_user_update(self):
os.environ["CIS_POSTGRES_HOST"] = "db"
os.environ["CIS_POSTGRES_PORT"] = "5432"
os.environ["CIS_DB_USER"] = "cis_user"
os.environ["CIS_DB_PASSWORD"] = "testing"
from cis_identity_vault import vault
from cis_identity_vault.models import user
v = vault.RelationalIdentityVault()
v.create()
u = user.ProfileRDS()
u.create(user_profile=self.user_profile)
mutated_user_profile = self.user_profile
mutated_user_profile["active"]["value"] = False
u.update(user_profile=mutated_user_profile)
u.delete(user_profile=self.user_profile)
def test_find_by_email(self):
os.environ["CIS_POSTGRES_HOST"] = "db"
os.environ["CIS_POSTGRES_PORT"] = "5432"
os.environ["CIS_DB_USER"] = "cis_user"
os.environ["CIS_DB_PASSWORD"] = "testing"
from cis_identity_vault import vault
from cis_identity_vault.models import user
v = vault.RelationalIdentityVault()
v.create()
u = user.ProfileRDS()
self.user_profile["primary_email"]["value"] = "bob@bob.com"
u.create(user_profile=self.user_profile)
primary_email = self.user_profile["primary_email"]["value"]
s = user.ProfileRDS()
search_result = s.find_by_email(primary_email)
assert len(search_result) > 0
def test_find_by_uuid(self):
os.environ["CIS_POSTGRES_HOST"] = "db"
os.environ["CIS_POSTGRES_PORT"] = "5432"
os.environ["CIS_DB_USER"] = "cis_user"
os.environ["CIS_DB_PASSWORD"] = "testing"
from cis_identity_vault import vault
from cis_identity_vault.models import user
v = vault.RelationalIdentityVault()
v.create()
u = user.ProfileRDS()
u.create(user_profile=self.user_profile)
s = user.ProfileRDS()
search_result = s.find_by_uuid(self.user_profile["uuid"]["value"])
assert search_result.profile["uuid"]["value"] == self.user_profile["uuid"]["value"]
def test_find_by_primary_username(self):
os.environ["CIS_POSTGRES_HOST"] = "db"
os.environ["CIS_POSTGRES_PORT"] = "5432"
os.environ["CIS_DB_USER"] = "cis_user"
os.environ["CIS_DB_PASSWORD"] = "testing"
from cis_identity_vault import vault
from cis_identity_vault.models import user
v = vault.RelationalIdentityVault()
v.create()
u = user.ProfileRDS()
u.create(user_profile=self.user_profile)
s = user.ProfileRDS()
search_result = s.find_by_username(self.user_profile["primary_username"]["value"])
assert search_result.profile["primary_username"]["value"] == self.user_profile["primary_username"]["value"]
| mpl-2.0 |
mozilla-iam/cis | python-modules/cis_logger/cis_logger/__init__.py | 1 | 1579 | import logging.handlers
from pythonjsonlogger import jsonlogger
import datetime
class JsonFormatter(jsonlogger.JsonFormatter, object):
def __init__(
self,
fmt="%(asctime) %(name) %(processName) %(filename) \
%(funcName) %(levelname) %(lineno) %(module) %(threadName) %(message)",
datefmt="%Y-%m-%dT%H:%M:%SZ%z",
style="%",
extra={},
*args,
**kwargs
):
self._extra = extra
jsonlogger.JsonFormatter.__init__(self, fmt=fmt, datefmt=datefmt, *args, **kwargs)
def process_log_record(self, log_record):
if "asctime" in log_record:
log_record["timestamp"] = log_record["asctime"]
else:
log_record["timestamp"] = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ%z")
if self._extra is not None:
for key, value in self._extra.items():
log_record[key] = value
return super(JsonFormatter, self).process_log_record(log_record)
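# Illustrative usage (the handler choice and extra fields are assumptions, not part
# of this module):
#   handler = logging.StreamHandler()
#   handler.setFormatter(JsonFormatter(extra={"app": "my-service"}))
#   logging.getLogger("my-service").addHandler(handler)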
class SysLogJsonHandler(logging.handlers.SysLogHandler, object):
def __init__(
self,
address=("localhost", logging.handlers.SYSLOG_UDP_PORT),
facility=logging.handlers.SysLogHandler.LOG_USER,
socktype=None,
prefix="",
):
super(SysLogJsonHandler, self).__init__(address, facility, socktype)
self._prefix = prefix
if self._prefix != "":
self._prefix = prefix + ": "
def format(self, record):
return self._prefix + super(SysLogJsonHandler, self).format(record)
| mpl-2.0 |
ibm-watson-iot/iot-python | test/test_device_command.py | 2 | 1862 | # *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import testUtils
import wiotp.sdk
import pytest
class FakePahoMessageCommand:
topic = "iot-2/cmd/commandid/fmt/json"
payload = b'{"a":4}'
class FakeFakePahoMessageCommand:
topic = "hi"
payload = b'{"a":4}'
class TestDeviceCommand(testUtils.AbstractTest):
def testCommand(self):
pahoMessage = FakePahoMessageCommand()
messageEncoderModules = {"json": wiotp.sdk.JsonCodec()}
command = wiotp.sdk.device.Command(pahoMessage, messageEncoderModules)
assert command.format == "json"
assert command.commandId == "commandid"
assert "a" in command.data
assert command.data["a"] == 4
def testCommandMissingCodec(self):
with pytest.raises(wiotp.sdk.MissingMessageDecoderException) as e:
pahoMessage = FakePahoMessageCommand()
messageEncoderModules = {"fidaa": wiotp.sdk.JsonCodec()}
command = wiotp.sdk.device.Command(pahoMessage, messageEncoderModules)
assert e.value.format == "json"
def testInvalidCommandTopic(self):
with pytest.raises(wiotp.sdk.InvalidEventException) as e:
pahoMessage = FakeFakePahoMessageCommand()
messageEncoderModules = {"b": wiotp.sdk.JsonCodec()}
command = wiotp.sdk.device.Command(pahoMessage, messageEncoderModules)
assert e.value.reason == "Received command on invalid topic: hi"
| epl-1.0 |
ibm-watson-iot/iot-python | src/wiotp/sdk/api/registry/devices.py | 2 | 15894 | # *****************************************************************************
# Copyright (c) 2018 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import iso8601
from datetime import datetime
import json
from collections import defaultdict
from wiotp.sdk.api.common import IterableList
from wiotp.sdk.exceptions import ApiException
from wiotp.sdk.api.registry.diag import DeviceLogs, DeviceErrorCodes
class LogEntry(defaultdict):
def __init__(self, **kwargs):
if not set(["message", "timestamp"]).issubset(kwargs):
raise Exception("message and timestamp are required properties for a LogEntry")
kwargs["timestamp"] = iso8601.parse_date(kwargs["timestamp"])
dict.__init__(self, **kwargs)
@property
def message(self):
return self["message"]
@property
def timestamp(self):
return self["timestamp"]
class DeviceUid(defaultdict):
def __init__(self, **kwargs):
if not set(["deviceId", "typeId"]).issubset(kwargs):
raise Exception("typeId and deviceId are required properties to uniquely identify a device")
dict.__init__(self, **kwargs)
@property
def typeId(self):
return self["typeId"]
@property
def deviceId(self):
return self["deviceId"]
def __str__(self):
return self["typeId"] + ":" + self["deviceId"]
def __repr__(self):
return json.dumps(self, sort_keys=True, indent=2)
class DeviceCreateRequest(defaultdict):
def __init__(self, typeId, deviceId, authToken=None, deviceInfo=None, location=None, metadata=None):
dict.__init__(
self,
typeId=typeId,
deviceId=deviceId,
authToken=authToken,
deviceInfo=deviceInfo,
location=location,
metadata=metadata,
)
@property
def typeId(self):
return self["typeId"]
@property
def deviceId(self):
return self["deviceId"]
@property
def authToken(self):
return self["authToken"]
@property
def deviceInfo(self):
return DeviceInfo(**self["deviceInfo"])
@property
def location(self):
return self["location"]
@property
def metadata(self):
return self["metadata"]
class DeviceLocation(defaultdict):
def __init__(self, **kwargs):
if not set(["latitude", "longitude"]).issubset(kwargs):
raise Exception("Data passed to Device is not correct: %s" % (json.dumps(kwargs, sort_keys=True)))
if "measuredDateTime" in kwargs and not isinstance(kwargs["measuredDateTime"], datetime):
kwargs["measuredDateTime"] = iso8601.parse_date(kwargs["measuredDateTime"])
if "updatedDateTime" in kwargs and not isinstance(kwargs["updatedDateTime"], datetime):
kwargs["updatedDateTime"] = iso8601.parse_date(kwargs["updatedDateTime"])
dict.__init__(self, **kwargs)
@property
def latitude(self):
return self["latitude"]
@property
def longitude(self):
return self["longitude"]
@property
def measuredDateTime(self):
return self.get("measuredDateTime", None)
@property
def updatedDateTime(self):
return self.get("updatedDateTime", None)
class DeviceCreateResponse(defaultdict):
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
@property
def typeId(self):
return self["typeId"]
@property
def deviceId(self):
return self["deviceId"]
@property
def success(self):
return self.get("success", None)
@property
def authToken(self):
return self["authToken"]
class DeviceInfo(defaultdict):
def __init__(
self,
description=None,
deviceClass=None,
fwVersion=None,
hwVersion=None,
manufacturer=None,
model=None,
serialNumber=None,
descriptiveLocation=None,
):
dict.__init__(
self,
description=description,
deviceClass=deviceClass,
fwVersion=fwVersion,
hwVersion=hwVersion,
manufacturer=manufacturer,
model=model,
serialNumber=serialNumber,
descriptiveLocation=descriptiveLocation,
)
@property
def description(self):
return self["description"]
@property
def deviceClass(self):
return self["deviceClass"]
@property
def fwVersion(self):
return self["fwVersion"]
@property
def hwVersion(self):
return self["hwVersion"]
@property
def manufacturer(self):
return self["manufacturer"]
@property
def model(self):
return self["model"]
@property
def serialNumber(self):
return self["serialNumber"]
@property
def descriptiveLocation(self):
return self["descriptiveLocation"]
class Device(defaultdict):
def __init__(self, apiClient, **kwargs):
self._apiClient = apiClient
if not set(["clientId", "deviceId", "typeId"]).issubset(kwargs):
raise Exception("Data passed to Device is not correct: %s" % (json.dumps(kwargs, sort_keys=True)))
self.diagLogs = DeviceLogs(self._apiClient, kwargs["typeId"], kwargs["deviceId"])
self.diagErrorCodes = DeviceErrorCodes(self._apiClient, kwargs["typeId"], kwargs["deviceId"])
dict.__init__(self, **kwargs)
# {u'clientId': u'xxxxxxxxx',
# u'deviceId': u'xxxxxxx',
# u'deviceInfo': {u'description': u'None (xxxxxxxx)',
# u'deviceClass': u'None',
# u'fwVersion': u'xxxxx',
# u'hwVersion': u'xxxxx',
# u'manufacturer': u'xxxx.',
# u'model': u'xxxx',
# u'serialNumber': u'xxxxxxxxx'},
# u'metadata': {},
# u'refs': {u'diag': {u'errorCodes': u'/api/v0002/device/types/xxx/devices/xxxx/diag/errorCodes',
# u'logs': u'/api/v0002/device/types/xxx/devices/xxxx/diag/logs'},
# u'location': u'/api/v0002/device/types/xxxx/devices/xxxx/location',
# u'mgmt': u'/api/v0002/device/types/xx/devices/xxxx/mgmt'},
# u'registration': {u'auth': {u'id': u'xxxxxx',
# u'type': u'person'},
# u'date': u'2015-09-18T06:44:02.000Z'},
# u'status': {u'alert': {u'enabled': False,
# u'timestamp': u'2016-01-21T02:25:55.543Z'}},
# u'typeId': u'vm'}
@property
def clientId(self):
return self["clientId"]
@property
def deviceId(self):
return self["deviceId"]
@property
def authToken(self):
if "authToken" in self:
return self["authToken"]
else:
return None
@property
def metadata(self):
if "metadata" in self:
return self["metadata"]
else:
return None
@property
def total_rows(self):
return self["total_rows"]
@property
def deviceInfo(self):
# Unpack the deviceInfo dictionary into keyword arguments so that we
# can return a DeviceInfo object instead of a plain dictionary
return DeviceInfo(**self["deviceInfo"])
@property
def typeId(self):
return self["typeId"]
def __str__(self):
return "[%s] %s" % (self.clientId, self.deviceInfo.description or "<No description>")
def __repr__(self):
return json.dumps(self, sort_keys=True, indent=2)
def json(self):
return dict(self)
# Extended properties
def getMgmt(self):
r = self._apiClient.get("api/v0002/device/types/%s/devices/%s/mgmt" % (self.typeId, self.deviceId))
if r.status_code == 200:
return r.json()
if r.status_code == 404:
# It's perfectly valid for a device to not have a location set, if this is the case, set response to None
return None
else:
raise ApiException(r)
def getLocation(self):
r = self._apiClient.get("api/v0002/device/types/%s/devices/%s/location" % (self.typeId, self.deviceId))
if r.status_code == 200:
return DeviceLocation(**r.json())
if r.status_code == 404:
# It's perfectly valid for a device to not have a location set, if this is the case, set response to None
return None
else:
raise ApiException(r)
def setLocation(self, value):
r = self._apiClient.put("api/v0002/device/types/%s/devices/%s/location" % (self.typeId, self.deviceId), value)
if r.status_code == 200:
return DeviceLocation(**r.json())
else:
raise ApiException(r)
def getConnectionLogs(self):
r = self._apiClient.get(
"api/v0002/logs/connection", parameters={"typeId": self.typeId, "deviceId": self.deviceId}
)
if r.status_code == 200:
responseList = []
for entry in r.json():
responseList.append(LogEntry(**entry))
return responseList
else:
raise ApiException(r)
class IterableDeviceList(IterableList):
def __init__(self, apiClient, typeId=None):
if typeId is None:
super(IterableDeviceList, self).__init__(apiClient, Device, "api/v0002/bulk/devices", "typeId,deviceId")
else:
super(IterableDeviceList, self).__init__(
apiClient, Device, "api/v0002/device/types/%s/devices/" % (typeId), "deviceId"
)
class Devices(defaultdict):
"""
    Use the global unique identifier of a device, its `clientId`, to address devices.
# Delete
```python
del devices["d:orgId:typeId:deviceId"]
```
# Get
    Use the global unique identifier of a device, its `clientId`.
```python
device = devices["d:orgId:typeId:deviceId"]
print(device.clientId)
    print(device)
    ```
    # Is a device registered?
```python
if "d:orgId:typeId:deviceId" in devices:
print("The device exists")
```
# Iterate through all registered devices
```python
for device in devices:
print(device)
```
"""
# https://docs.python.org/2/library/collections.html#defaultdict-objects
def __init__(self, apiClient, typeId=None):
self._apiClient = apiClient
self.typeId = typeId
def __contains__(self, key):
"""
Does a device exist?
"""
if self.typeId is None:
(classIdentifier, orgId, typeId, deviceId) = key.split(":")
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (typeId, deviceId)
else:
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (self.typeId, key)
r = self._apiClient.get(deviceUrl)
if r.status_code == 200:
return True
elif r.status_code == 404:
return False
else:
raise ApiException(r)
def __getitem__(self, key):
"""
Get a device from the registry
"""
if self.typeId is None:
(classIdentifier, orgId, typeId, deviceId) = key.split(":")
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (typeId, deviceId)
else:
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (self.typeId, key)
r = self._apiClient.get(deviceUrl)
if r.status_code == 200:
return Device(apiClient=self._apiClient, **r.json())
elif r.status_code == 404:
self.__missing__(key)
else:
raise ApiException(r)
def __setitem__(self, key, value):
"""
Register a new device - not currently supported via this interface, use: `registry.devices.create()`
"""
raise Exception("Unable to register or update a device via this interface at the moment.")
def __delitem__(self, key):
"""
Delete a device
"""
if self.typeId is None:
(classIdentifier, orgId, typeId, deviceId) = key.split(":")
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (typeId, deviceId)
else:
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (self.typeId, key)
r = self._apiClient.delete(deviceUrl)
if r.status_code == 404:
self.__missing__(key)
elif r.status_code != 204:
raise ApiException(r)
def __missing__(self, key):
"""
Device does not exist
"""
raise KeyError("Device %s does not exist" % (key))
def __iter__(self, *args, **kwargs):
"""
Iterate through all devices
"""
return IterableDeviceList(self._apiClient, self.typeId)
@property
def total_rows(self):
"""
Returns total devices
"""
return self["total_rows"]
def create(self, devices):
"""
Register one or more new devices, each request can contain a maximum of 512KB.
The response body will contain the generated authentication tokens for all devices.
You must make sure to record these tokens when processing the response.
We are not able to retrieve lost authentication tokens
        It accepts a list of devices (list of device dictionaries), or a single device.
        If you provide a list as the parameter it will return a list in response;
        if you provide a single device it will return a single response.
"""
if not isinstance(devices, list):
listOfDevices = [devices]
returnAsAList = False
else:
listOfDevices = devices
returnAsAList = True
r = self._apiClient.post("api/v0002/bulk/devices/add", listOfDevices)
if r.status_code in [201, 202]:
if returnAsAList:
responseList = []
for entry in r.json():
responseList.append(DeviceCreateResponse(**entry))
return responseList
else:
return DeviceCreateResponse(**r.json()[0])
else:
raise ApiException(r)
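    # Illustrative usage (typeId/deviceId values are examples only):
    #   response = registry.devices.create({"typeId": "myType", "deviceId": "001"})
    #   print(response.authToken)
    #   responses = registry.devices.create([{"typeId": "myType", "deviceId": "002"},
    #                                        {"typeId": "myType", "deviceId": "003"}])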
def update(self, deviceUid, metadata=None, deviceInfo=None, status=None):
"""
Update an existing device
"""
if not isinstance(deviceUid, DeviceUid) and isinstance(deviceUid, dict):
deviceUid = DeviceUid(**deviceUid)
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (deviceUid.typeId, deviceUid.deviceId)
data = {"status": status, "deviceInfo": deviceInfo, "metadata": metadata}
r = self._apiClient.put(deviceUrl, data)
if r.status_code == 200:
return Device(apiClient=self._apiClient, **r.json())
else:
raise ApiException(r)
def delete(self, devices):
"""
Delete one or more devices; each request can contain a maximum of 512KB.
Accepts either a single device or a list of devices (list of device dictionaries).
In case of failure it throws ApiException.
"""
if not isinstance(devices, list):
listOfDevices = [devices]
else:
listOfDevices = devices
r = self._apiClient.post("api/v0002/bulk/devices/remove", listOfDevices)
if r.status_code in [200, 202]:
return r.json()
else:
raise ApiException(r)
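# Hedged usage sketch (not part of the original module): illustrates the bulk
# create/delete flow documented above. `appClient` is assumed to be an already
# configured ApplicationClient exposing `registry.devices` (as referenced in the
# __setitem__ docstring); the `authToken` attribute name on the create response is
# an assumption based on the platform REST API, not a guarantee.
def _example_register_and_remove_device(appClient):
    # Register a single device; the generated auth token is only returned here,
    # so record it immediately -- it cannot be retrieved later.
    created = appClient.registry.devices.create(
        {"typeId": "my-device-type", "deviceId": "device-001"}
    )
    print(created.authToken)
    # Remove the same device again via the bulk remove endpoint.
    appClient.registry.devices.delete(
        {"typeId": "my-device-type", "deviceId": "device-001"}
    )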
| epl-1.0 |
ibm-watson-iot/iot-python | src/wiotp/sdk/device/config.py | 2 | 10296 | # *****************************************************************************
# Copyright (c) 2014, 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from collections import defaultdict
import os
import yaml
import logging
from wiotp.sdk import ConfigurationException
class DeviceClientConfig(defaultdict):
def __init__(self, **kwargs):
# Validate the arguments
if "identity" not in kwargs:
raise ConfigurationException("Missing identity from configuration")
if "orgId" not in kwargs["identity"] or kwargs["identity"]["orgId"] is None:
raise ConfigurationException("Missing identity.orgId from configuration")
if "typeId" not in kwargs["identity"] or kwargs["identity"]["typeId"] is None:
raise ConfigurationException("Missing identity.typeId from configuration")
if "deviceId" not in kwargs["identity"] or kwargs["identity"]["deviceId"] is None:
raise ConfigurationException("Missing identity.deviceId from configuration")
# Authentication is not supported for quickstart
if kwargs["identity"]["orgId"] == "quickstart":
if "auth" in kwargs:
raise ConfigurationException("Quickstart service does not support device authentication")
else:
if "auth" not in kwargs:
raise ConfigurationException("Missing auth from configuration")
if "token" not in kwargs["auth"] or kwargs["auth"]["token"] is None:
raise ConfigurationException("Missing auth.token from configuration")
if "options" in kwargs and "mqtt" in kwargs["options"]:
# validate port
if "port" in kwargs["options"]["mqtt"] and kwargs["options"]["mqtt"]["port"] is not None:
if not isinstance(kwargs["options"]["mqtt"]["port"], int):
raise ConfigurationException("Optional setting options.mqtt.port must be a number if provided")
# Validate cleanStart
if "cleanStart" in kwargs["options"]["mqtt"] and not isinstance(
kwargs["options"]["mqtt"]["cleanStart"], bool
):
raise ConfigurationException("Optional setting options.mqtt.cleanStart must be a boolean if provided")
# Set defaults for optional configuration
if "options" not in kwargs:
kwargs["options"] = {}
if "domain" not in kwargs["options"] or kwargs["options"]["domain"] is None:
kwargs["options"]["domain"] = "internetofthings.ibmcloud.com"
if "logLevel" not in kwargs["options"] or kwargs["options"]["logLevel"] is None:
kwargs["options"]["logLevel"] = logging.INFO
if "mqtt" not in kwargs["options"]:
kwargs["options"]["mqtt"] = {}
if "port" not in kwargs["options"]["mqtt"]:
kwargs["options"]["mqtt"]["port"] = None
if "transport" not in kwargs["options"]["mqtt"] or kwargs["options"]["mqtt"]["transport"] is None:
kwargs["options"]["mqtt"]["transport"] = "tcp"
if "cleanStart" not in kwargs["options"]["mqtt"]:
kwargs["options"]["mqtt"]["cleanStart"] = False
if "sessionExpiry" not in kwargs["options"]["mqtt"]:
kwargs["options"]["mqtt"]["sessionExpiry"] = 3600
if "keepAlive" not in kwargs["options"]["mqtt"]:
kwargs["options"]["mqtt"]["keepAlive"] = 60
if "caFile" not in kwargs["options"]["mqtt"]:
kwargs["options"]["mqtt"]["caFile"] = None
dict.__init__(self, **kwargs)
def isQuickstart(self):
return self["identity"]["orgId"] == "quickstart"
@property
def orgId(self):
return self["identity"]["orgId"]
@property
def typeId(self):
return self["identity"]["typeId"]
@property
def deviceId(self):
return self["identity"]["deviceId"]
@property
def clientId(self):
return "d:%s:%s:%s" % (self["identity"]["orgId"], self["identity"]["typeId"], self["identity"]["deviceId"])
@property
def username(self):
return "use-token-auth" if ("auth" in self) else None
@property
def password(self):
return self["auth"]["token"] if ("auth" in self) else None
@property
def domain(self):
return self["options"]["domain"]
@property
def logLevel(self):
return self["options"]["logLevel"]
@property
def port(self):
return self["options"]["mqtt"]["port"]
@property
def transport(self):
return self["options"]["mqtt"]["transport"]
@property
def cleanStart(self):
return self["options"]["mqtt"]["cleanStart"]
@property
def sessionExpiry(self):
return self["options"]["mqtt"]["sessionExpiry"]
@property
def keepAlive(self):
return self["options"]["mqtt"]["keepAlive"]
@property
def caFile(self):
return self["options"]["mqtt"]["caFile"]
def parseEnvVars():
"""
Parse environment variables into a Python dictionary suitable for passing to the
device client constructor as the `options` parameter
- `WIOTP_IDENTITY_ORGID`
- `WIOTP_IDENTITY_TYPEID`
- `WIOTP_IDENTITY_DEVICEID`
- `WIOTP_AUTH_TOKEN`
- `WIOTP_OPTIONS_DOMAIN` (optional)
- `WIOTP_OPTIONS_LOGLEVEL` (optional)
- `WIOTP_OPTIONS_MQTT_PORT` (optional)
- `WIOTP_OPTIONS_MQTT_TRANSPORT` (optional)
- `WIOTP_OPTIONS_MQTT_CAFILE` (optional)
- `WIOTP_OPTIONS_MQTT_CLEANSTART` (optional)
- `WIOTP_OPTIONS_MQTT_SESSIONEXPIRY` (optional)
- `WIOTP_OPTIONS_MQTT_KEEPALIVE` (optional)
"""
# Identity
orgId = os.getenv("WIOTP_IDENTITY_ORGID", None)
typeId = os.getenv("WIOTP_IDENTITY_TYPEID", None)
deviceId = os.getenv("WIOTP_IDENTITY_DEVICEID", None)
# Auth
authToken = os.getenv("WIOTP_AUTH_TOKEN", None)
# Options
domain = os.getenv("WIOTP_OPTIONS_DOMAIN", None)
logLevel = os.getenv("WIOTP_OPTIONS_LOGLEVEL", "info")
port = os.getenv("WIOTP_OPTIONS_MQTT_PORT", None)
transport = os.getenv("WIOTP_OPTIONS_MQTT_TRANSPORT", None)
caFile = os.getenv("WIOTP_OPTIONS_MQTT_CAFILE", None)
cleanStart = os.getenv("WIOTP_OPTIONS_MQTT_CLEANSTART", "False")
sessionExpiry = os.getenv("WIOTP_OPTIONS_MQTT_SESSIONEXPIRY", "3600")
keepAlive = os.getenv("WIOTP_OPTIONS_MQTT_KEEPALIVE", "60")
if orgId is None:
raise ConfigurationException("Missing WIOTP_IDENTITY_ORGID environment variable")
if typeId is None:
raise ConfigurationException("Missing WIOTP_IDENTITY_TYPEID environment variable")
if deviceId is None:
raise ConfigurationException("Missing WIOTP_IDENTITY_DEVICEID environment variable")
if orgId != "quickstart" and authToken is None:
raise ConfigurationException("Missing WIOTP_AUTH_TOKEN environment variable")
if port is not None:
try:
port = int(port)
except ValueError as e:
raise ConfigurationException("WIOTP_OPTIONS_MQTT_PORT must be a number")
try:
sessionExpiry = int(sessionExpiry)
except ValueError as e:
raise ConfigurationException("WIOTP_OPTIONS_MQTT_SESSIONEXPIRY must be a number")
try:
keepAlive = int(keepAlive)
except ValueError as e:
raise ConfigurationException("WIOTP_OPTIONS_MQTT_KEEPALIVE must be a number")
if logLevel not in ["error", "warning", "info", "debug"]:
raise ConfigurationException("WIOTP_OPTIONS_LOGLEVEL must be one of error, warning, info, debug")
else:
# Convert log levels from string to int (we need to upper case our strings from the config)
logLevel = logging.getLevelName(logLevel.upper())
cfg = {
"identity": {"orgId": orgId, "typeId": typeId, "deviceId": deviceId},
"options": {
"domain": domain,
"logLevel": logLevel,
"mqtt": {
"port": port,
"transport": transport,
"caFile": caFile,
"cleanStart": cleanStart in ["True", "true", "1"],
"sessionExpiry": sessionExpiry,
"keepAlive": keepAlive,
},
},
}
# Quickstart doesn't support auth, so ensure we only add this if it's defined
if authToken is not None:
cfg["auth"] = {"token": authToken}
return DeviceClientConfig(**cfg)
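# Hedged usage sketch (not part of the original module): shows how parseEnvVars()
# above builds a DeviceClientConfig from environment variables. The variable values
# are illustrative only.
def _example_config_from_environment():
    os.environ["WIOTP_IDENTITY_ORGID"] = "myorg6"
    os.environ["WIOTP_IDENTITY_TYPEID"] = "raspberry-pi-3"
    os.environ["WIOTP_IDENTITY_DEVICEID"] = "00ef08ac05"
    os.environ["WIOTP_AUTH_TOKEN"] = "my-device-token"
    config = parseEnvVars()
    # The properties below come straight from DeviceClientConfig defined above.
    print(config.clientId, config.port, config.keepAlive)
    return config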
def parseConfigFile(configFilePath):
"""
Parse a yaml configuration file into a Python dictionary suitable for passing to the
device client constructor as the `options` parameter
# Example Configuration File
identity:
orgId: org1id
typeId: raspberry-pi-3
deviceId: 00ef08ac05
auth:
token: Ab$76s)asj8_s5
options:
domain: internetofthings.ibmcloud.com
logLevel: error|warning|info|debug
mqtt:
port: 8883
transport: tcp
cleanStart: true
sessionExpiry: 3600
keepAlive: 60
caFile: /path/to/certificateAuthorityFile.pem
"""
try:
with open(configFilePath) as f:
data = yaml.full_load(f)
except (OSError, IOError) as e:
# In 3.3, IOError became an alias for OSError, and FileNotFoundError is a subclass of OSError
reason = "Error reading device configuration file '%s' (%s)" % (configFilePath, e)
raise ConfigurationException(reason)
if "options" in data and "logLevel" in data["options"]:
if data["options"]["logLevel"] not in ["error", "warning", "info", "debug"]:
raise ConfigurationException("Optional setting options.logLevel must be one of error, warning, info, debug")
else:
# Convert log levels from string to int (we need to upper case our strings from the config)
data["options"]["logLevel"] = logging.getLevelName(data["options"]["logLevel"].upper())
return DeviceClientConfig(**data)
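# Hedged usage sketch (not part of the original module): loads a YAML file with the
# structure shown in the parseConfigFile docstring above. The file name "device.yaml"
# is an illustrative assumption.
def _example_config_from_file():
    config = parseConfigFile("device.yaml")
    if not config.isQuickstart():
        # Report where this device would connect.
        print("Connecting %s to %s:%s" % (config.clientId, config.domain, config.port))
    return config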
| epl-1.0 |
ibm-watson-iot/iot-python | test/test_api_state_schemas.py | 2 | 7315 | # *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
#
import uuid
from datetime import datetime
import testUtils
import time
import pytest
from wiotp.sdk.exceptions import ApiException
import string
import json
@testUtils.oneJobOnlyTest
class TestSchemas(testUtils.AbstractTest):
testSchemaName = "python-api-test-schema"
updatedTestSchemaName = testSchemaName + "-updated"
testEventSchema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"title": "Sensor Event Schema",
"properties": {
"temperature": {
"description": "temperature in degrees Celsius",
"type": "number",
"minimum": -237.15,
"default": 0.0,
},
"humidity": {"description": "relative humidty (%)", "type": "number", "minimum": 0.0, "default": 0.0},
"publishTimestamp": {"description": "publishTimestamp", "type": "number", "minimum": 0.0, "default": 0.0},
},
"required": ["temperature", "humidity", "publishTimestamp"],
}
testEventSchemaUpdated = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"title": "Sensor Event Schema with temperature removed",
"properties": {
"humidity": {"description": "relative humidty (%)", "type": "number", "minimum": 0.0, "default": 0.0},
"publishTimestamp": {"description": "publishTimestamp", "type": "number", "minimum": 0.0, "default": 0.0},
},
"required": ["humidity", "publishTimestamp"],
}
# =========================================================================
# Set up services
# =========================================================================
def testCleanup(self):
# print("Cleaning up old test schema instances")
for a in self.appClient.state.draft.schemas:
if a.name == TestSchemas.testSchemaName:
# print("Deleting old test schema instance: %s" % (a))
del self.appClient.state.draft.schemas[a.id]
# TBD debug else:
# print("Found a non matching test schema instance: %s" % (a))
def checkSchema(self, schema, name, schemaFileName, schemaContents, description, version="draft"):
assert schema.name == name
assert schema.description == description
assert schema.schemaType == "json-schema"
assert schema.schemaFileName == schemaFileName
assert schema.contentType == "application/json"
assert schema.content == schemaContents
assert schema.version == version
assert isinstance(schema.created, datetime)
assert isinstance(schema.createdBy, str)
assert isinstance(schema.updated, datetime)
assert isinstance(schema.updatedBy, str)
def doesDraftSchemaNameExist(self, name):
for a in self.appClient.state.draft.schemas.find({"name": name}):
if a.name == name:
return True
return False
def doesActiveSchemaNameExist(self, name):
for a in self.appClient.state.active.schemas.find({"name": name}):
if a.name == name:
return True
return False
def createAndCheckSchema(self, name, schemaFileName, schemaContents, description):
jsonSchemaContents = json.dumps(schemaContents)
createdSchema = self.appClient.state.draft.schemas.create(name, schemaFileName, jsonSchemaContents, description)
self.checkSchema(createdSchema, name, schemaFileName, schemaContents, description)
# now actively refetch the schema to check it is stored
fetchedSchema = self.appClient.state.draft.schemas.__getitem__(createdSchema.id)
assert createdSchema == fetchedSchema
return createdSchema
def testCreateDeleteSchema1(self):
test_schema_name = TestSchemas.testSchemaName
assert self.doesDraftSchemaNameExist(test_schema_name) == False
assert self.doesActiveSchemaNameExist(test_schema_name) == False
# Create a schema
createdSchema = self.createAndCheckSchema(
test_schema_name, "eventSchema.json", TestSchemas.testEventSchema, "Test schema description"
)
# Can we search for it
assert self.doesDraftSchemaNameExist(test_schema_name) == True
# Creating the draft shouldn't create the active
assert self.doesActiveSchemaNameExist(test_schema_name) == False
# Delete the schema
del self.appClient.state.draft.schemas[createdSchema.id]
# It should be gone
assert self.doesDraftSchemaNameExist(test_schema_name) == False
def testCreateUpdateDeleteSchema1(self):
test_schema_name = TestSchemas.testSchemaName
assert self.doesDraftSchemaNameExist(test_schema_name) == False
# Create a schema
createdSchema = self.createAndCheckSchema(
test_schema_name, "eventSchema.json", TestSchemas.testEventSchema, "Test schema description"
)
# Can we search for it
assert self.doesDraftSchemaNameExist(test_schema_name) == True
# Creating the draft shouldn't create the active
assert self.doesActiveSchemaNameExist(test_schema_name) == False
# Update the schema
updated_schema_name = TestSchemas.updatedTestSchemaName
updatedSchema = self.appClient.state.draft.schemas.update(
createdSchema.id,
{"id": createdSchema.id, "name": updated_schema_name, "description": "Test schema updated description"},
)
self.checkSchema(
updatedSchema,
updated_schema_name,
"eventSchema.json",
TestSchemas.testEventSchema,
"Test schema updated description",
)
# Update the schema content
updated_schema_name = TestSchemas.updatedTestSchemaName
result = self.appClient.state.draft.schemas.updateContent(
createdSchema.id, "newEventSchema.json", TestSchemas.testEventSchemaUpdated
)
assert result == True
updatedSchema = self.appClient.state.draft.schemas[createdSchema.id]
self.checkSchema(
updatedSchema,
updated_schema_name,
"newEventSchema.json",
TestSchemas.testEventSchemaUpdated,
"Test schema updated description",
)
# Delete the schema
del self.appClient.state.draft.schemas[createdSchema.id]
# It should be gone
assert self.doesDraftSchemaNameExist(test_schema_name) == False
# ==================================================================================
# We'll test the presence of active schemas as part of device type activation tests.
# ==================================================================================
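# Hedged sketch (not part of the original test module): the minimal create/delete
# cycle exercised by the tests above, written as a standalone helper. `appClient`
# is assumed to be a configured ApplicationClient; schema names are illustrative.
def _example_draft_schema_roundtrip(appClient):
    content = json.dumps({"$schema": "http://json-schema.org/draft-04/schema#", "type": "object"})
    schema = appClient.state.draft.schemas.create(
        "example-schema", "exampleSchema.json", content, "Illustrative schema"
    )
    assert schema.name == "example-schema"
    # Clean up the draft schema again.
    del appClient.state.draft.schemas[schema.id]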
| epl-1.0 |
ibm-watson-iot/iot-python | src/wiotp/sdk/api/dsc/destinations.py | 2 | 4384 | # *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from collections import defaultdict
import iso8601
from wiotp.sdk.exceptions import ApiException
from wiotp.sdk.api.common import IterableList, RestApiDict
# See docs @ https://orgid.internetofthings.ibmcloud.com/docs/v0002/historian-connector.html
class Destination(defaultdict):
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
@property
def name(self):
# Unlike most other resources, name == the UUID; there is no separate id property
return self["name"]
@property
def destinationType(self):
return self["type"]
@property
def configuration(self):
return self["configuration"]
# EventStreams only configuration
@property
def partitions(self):
if self["type"] == "eventstreams":
return self["configuration"]["partitions"]
else:
return None
# Cloudant only configuration
@property
def bucketInterval(self):
if self["type"] == "cloudant":
return self["configuration"]["bucketInterval"]
else:
return None
# Cloudant only configuration
@property
def retentionDays(self):
# this is an optional parameter so check if it exists
if "configuration" in self and "retentionDays" in self["configuration"]:
return self["configuration"]["retentionDays"]
else:
return None
# DB2/Postgres only configuration
@property
def columns(self):
# this is an optional parameter so check if it exists
if "configuration" in self and "columns" in self["configuration"]:
return self["configuration"]["columns"]
else:
return None
class IterableDestinationList(IterableList):
def __init__(self, apiClient, url, filters=None):
# This API does not support sorting
super(IterableDestinationList, self).__init__(
apiClient, Destination, url, sort=None, filters=filters, passApiClient=False
)
class Destinations(RestApiDict):
def __init__(self, apiClient, connectorId, connectorType):
super(Destinations, self).__init__(
apiClient,
Destination,
IterableDestinationList,
"api/v0002/historianconnectors/%s/destinations" % connectorId,
)
self.connectorId = connectorId
self.connectorType = connectorType
self.allDestinationsUrl = "api/v0002/historianconnectors/%s/destinations" % connectorId
def find(self, nameFilter=None):
queryParms = {}
if nameFilter:
queryParms["name"] = nameFilter
return IterableDestinationList(self._apiClient, self.allDestinationsUrl, filters=queryParms)
def create(self, name, **kwargs):
if self.connectorType == "cloudant":
if "bucketInterval" not in kwargs.keys():
raise Exception("You must specify bucketInterval parameter on create for a Cloudant destination")
if self.connectorType == "eventstreams":
if "partitions" not in kwargs.keys():
raise Exception("You must specify partitions parameter on create for an EventStreams destination")
if self.connectorType == "db2" or self.connectorType == "postgres":
if "columns" not in kwargs.keys():
raise Exception("You must specify a columns parameter on create for a DB2 or Postgres destination")
destination = {"name": name, "type": self.connectorType, "configuration": kwargs}
r = self._apiClient.post(self.allDestinationsUrl, data=destination)
if r.status_code == 201:
return Destination(**r.json())
else:
raise ApiException(r)
def update(self, key, item):
"""
Update a destination - not supported by this API
"""
raise Exception("The API doesn't support updating a destination.")
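# Hedged usage sketch (not part of the original module): creating a destination for
# a Cloudant historian connector with the class above. The connectorId and the
# bucketInterval value "DAY" are illustrative assumptions.
def _example_create_cloudant_destination(apiClient, connectorId):
    destinations = Destinations(apiClient, connectorId, connectorType="cloudant")
    # Cloudant destinations require bucketInterval, as enforced in create() above.
    return destinations.create(name="all-events", bucketInterval="DAY")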
| epl-1.0 |
ibm-watson-iot/iot-python | src/wiotp/sdk/api/state/state.py | 2 | 2752 | # *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from collections import defaultdict
import iso8601
from wiotp.sdk.exceptions import ApiException
from wiotp.sdk.api.common import IterableList
from wiotp.sdk.api.common import RestApiDict
from wiotp.sdk.api.common import RestApiItemBase
from wiotp.sdk.api.common import RestApiDictReadOnly
# See docs @ https://orgid.internetofthings.ibmcloud.com/docs/v0002-beta/State-mgr-beta.html
class State(defaultdict):
def __init__(self, apiClient, url, **kwargs):
self._apiClient = apiClient
self._url = url
dict.__init__(self, **kwargs)
@property
def state(self):
return self["state"]
@property
def timestamp(self):
return iso8601.parse_date(self["timestamp"])
@property
def updated(self):
return iso8601.parse_date(self["updated"])
def __callPatchOperation__(self, body):
r = self._apiClient.patch(self._url, body)
if r.status_code == 200:
return r.json()
else:
raise Exception("Unexpected response from API (%s) = %s %s" % (self._url, r.status_code, r.text))
def reset(self):
return self.__callPatchOperation__({"operation": "reset-state"})
class States(RestApiDictReadOnly):
def __init__(self, apiClient, typeId, instanceId):
url = "api/v0002/device/types/%s/devices/%s/state" % (typeId, instanceId)
super(States, self).__init__(apiClient, State, None, url)
# TBD this method overrides the base class method to pass the state URL to the constructed state
# without this, we can't invoke the reset-state API call.
def __getitem__(self, key):
url = self._singleItemUrl % (key)
r = self._apiClient.get(url)
if r.status_code == 200:
return self._castToClass(apiClient=self._apiClient, url=url, **r.json())
if r.status_code == 404:
self.__missing__(key)
else:
raise ApiException(r)
# Override the standard iterator, as there is no API to get all state by iterating over LIs
def __iter__(self, *args, **kwargs):
raise Exception("Unable to iterate through device state. Retrieve it for a specific LI.")
def find(self, query_params={}):
raise Exception("Unable to find device state. Retrieve it for a specific LI.")
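# Hedged usage sketch (not part of the original module): fetching and resetting the
# state of one logical interface for a device. All identifiers are illustrative.
def _example_reset_state(apiClient, typeId, deviceId, logicalInterfaceId):
    states = States(apiClient, typeId, deviceId)
    state = states[logicalInterfaceId]
    print(state.state, state.updated)
    # Reset the state via the reset-state patch operation defined above.
    state.reset()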
| epl-1.0 |
ibm-watson-iot/iot-python | samples/simpleApp/simpleApp.py | 2 | 5201 | # *****************************************************************************
# Copyright (c) 2014, 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import getopt
import signal
import time
import sys
import json
try:
import wiotp.sdk
except ImportError:
# This part is only required to run the sample from within the samples
# directory when the module itself is not installed.
#
# If you have the module installed, just use "import wiotp.sdk"
import os
import inspect
cmd_subfolder = os.path.realpath(
os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "../../src"))
)
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import wiotp.sdk
tableRowTemplate = "%-33s%-30s%s"
def mySubscribeCallback(mid, qos):
if mid == statusMid:
print("<< Subscription established for status messages at qos %s >> " % qos[0])
elif mid == eventsMid:
print("<< Subscription established for event messages at qos %s >> " % qos[0])
def myEventCallback(event):
print("%-33s%-30s%s" % (event.timestamp.isoformat(), event.device, event.eventId + ": " + json.dumps(event.data)))
def myStatusCallback(status):
if status.action == "Disconnect":
summaryText = "%s %s (%s)" % (status.action, status.clientAddr, status.reason)
else:
summaryText = "%s %s" % (status.action, status.clientAddr)
print(tableRowTemplate % (status.time.isoformat(), status.device, summaryText))
def interruptHandler(signal, frame):
client.disconnect()
sys.exit(0)
def usage():
print(
"simpleApp: Basic application connected to the Watson Internet of Things Platform."
+ "\n"
+ "\n"
+ "Options: "
+ "\n"
+ " -h, --help Display help information"
+ "\n"
+ " -o, --organization Connect to the specified organization"
+ "\n"
+ " -i, --id Application identifier (must be unique within the organization)"
+ "\n"
+ " -k, --key API key"
+ "\n"
+ " -t, --token Authentication token for the API key specified"
+ "\n"
+ " -c, --config Load application configuration file (ignore -o, -i, -k, -t options)"
+ "\n"
+ " -T, --typeId Restrict subscription to events from devices of the specified type"
+ "\n"
+ " -I, --deviceId Restrict subscription to events from devices of the specified id"
+ "\n"
+ " -E, --event Restrict subscription to a specific event"
)
if __name__ == "__main__":
signal.signal(signal.SIGINT, interruptHandler)
try:
opts, args = getopt.getopt(
sys.argv[1:],
"h:o:i:k:t:c:T:I:E:",
["help", "org=", "id=", "key=", "token=", "config=", "typeId=", "deviceId=", "event="],
)
except getopt.GetoptError as err:
print(str(err))
usage()
sys.exit(2)
configFilePath = None
typeId = "+"
deviceId = "+"
event = "+"
for o, a in opts:
if o in ("-c", "--config"):
configFilePath = a
elif o in ("-T", "--typeId"):
typeId = a
elif o in ("-I", "--deviceId"):
deviceId = a
elif o in ("-E", "--event"):
event = a
elif o in ("-h", "--help"):
usage()
sys.exit()
else:
assert False, "unhandled option" + o
client = None
if configFilePath is not None:
options = wiotp.sdk.application.parseConfigFile(configFilePath)
else:
options = wiotp.sdk.application.parseEnvVars()
try:
client = wiotp.sdk.application.ApplicationClient(options)
# If you want to see more detail about what's going on, set log level to DEBUG
# import logging
# client.logger.setLevel(logging.DEBUG)
client.connect()
except wiotp.sdk.ConfigurationException as e:
print(str(e))
sys.exit()
except wiotp.sdk.UnsupportedAuthenticationMethod as e:
print(str(e))
sys.exit()
except wiotp.sdk.ConnectionException as e:
print(str(e))
sys.exit()
print("(Press Ctrl+C to disconnect)")
client.deviceEventCallback = myEventCallback
client.deviceStatusCallback = myStatusCallback
client.subscriptionCallback = mySubscribeCallback
statusMid = client.subscribeToDeviceStatus(typeId, deviceId)
eventsMid = client.subscribeToDeviceEvents(typeId, deviceId, event)
print("=============================================================================")
print(tableRowTemplate % ("Timestamp", "Device", "Event"))
print("=============================================================================")
while True:
time.sleep(1)
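# Hedged example invocation (not part of the original sample; identifiers and file
# names are illustrative). Subscribes to "status" events from one device using a
# configuration file:
#
#   python simpleApp.py -c application.yaml -T raspberry-pi-3 -I 00ef08ac05 -E status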
| epl-1.0 |
ibm-watson-iot/iot-python | src/wiotp/sdk/api/usage/__init__.py | 2 | 2241 | # *****************************************************************************
# Copyright (c) 2018 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from datetime import datetime
from collections import defaultdict
from wiotp.sdk.exceptions import ApiException
class DataTransferSummary(defaultdict):
def __init__(self, **kwargs):
daysAsObj = []
if "days" in kwargs and kwargs["days"] is not None:
for day in kwargs["days"]:
daysAsObj.append(DayDataTransfer(**day))
del kwargs["days"]
dict.__init__(self, days=daysAsObj, **kwargs)
@property
def start(self):
return datetime.strptime(self["start"], "%Y-%m-%d").date()
@property
def end(self):
return datetime.strptime(self["end"], "%Y-%m-%d").date()
@property
def average(self):
return self["average"]
@property
def total(self):
return self["total"]
@property
def days(self):
return self["days"]
class DayDataTransfer(defaultdict):
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
@property
def date(self):
return datetime.strptime(self["date"], "%Y-%m-%d").date()
@property
def total(self):
return self["total"]
class Usage:
def __init__(self, apiClient):
self._apiClient = apiClient
def dataTransfer(self, start, end, detail=False):
"""
Retrieve a day-by-day summary of the organization's data transfer between the specified start and end dates.
In case of failure it throws ApiException
"""
r = self._apiClient.get(
"api/v0002/usage/data-traffic?start=%s&end=%s&detail=%s"
% (start.strftime("%Y-%m-%d"), end.strftime("%Y-%m-%d"), detail)
)
if r.status_code == 200:
return DataTransferSummary(**r.json())
else:
raise ApiException(r)
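# Hedged usage sketch (not part of the original module): summarise the last week of
# data transfer. `apiClient` is assumed to be a configured API client instance.
def _example_last_week_of_traffic(apiClient):
    from datetime import date, timedelta
    usage = Usage(apiClient)
    end = date.today()
    summary = usage.dataTransfer(end - timedelta(days=7), end, detail=True)
    for day in summary.days:
        print(day.date, day.total)
    return summary.total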
| epl-1.0 |
ibm-watson-iot/iot-python | test/test_device_mgd.py | 2 | 11230 | # *****************************************************************************
# Copyright (c) 2016,2018 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import pytest
import testUtils
import uuid
import os
import wiotp.sdk
import time
from wiotp.sdk.device import ManagedDeviceClient
from wiotp.sdk import Utf8Codec
class TestDeviceMgd(testUtils.AbstractTest):
def testManagedDeviceQSException(self):
with pytest.raises(wiotp.sdk.ConfigurationException) as e:
options = {"identity": {"orgId": "quickstart", "typeId": "xxx", "deviceId": "xxx"}}
wiotp.sdk.device.ManagedDeviceClient(options)
assert "QuickStart does not support device management" == e.value.reason
def testManagedDeviceConnectException(self, device):
badOptions = {
"identity": {"orgId": self.ORG_ID, "typeId": device.typeId, "deviceId": device.deviceId},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
deviceInfoObj = wiotp.sdk.device.DeviceInfo()
managedDevice = wiotp.sdk.device.ManagedDeviceClient(badOptions, deviceInfo=deviceInfoObj)
assert isinstance(managedDevice, wiotp.sdk.device.ManagedDeviceClient)
with pytest.raises(wiotp.sdk.ConnectionException) as e:
managedDevice.connect()
assert managedDevice.isConnected() == False
def testManagedDeviceConnect(self, device):
badOptions = {
"identity": {"orgId": self.ORG_ID, "typeId": device.typeId, "deviceId": device.deviceId},
"auth": {"token": device.authToken},
}
deviceInfoObj = wiotp.sdk.device.DeviceInfo()
managedDevice = wiotp.sdk.device.ManagedDeviceClient(badOptions, deviceInfo=deviceInfoObj)
assert isinstance(managedDevice, wiotp.sdk.device.ManagedDeviceClient)
managedDevice.connect()
assert managedDevice.isConnected() == True
managedDevice.disconnect()
assert managedDevice.isConnected() == False
def testManagedDeviceSetPropertyNameNone(self):
with pytest.raises(Exception) as e:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
managedDeviceClientValue.setProperty(value=1)
assert "Unsupported property name: " in str(e.value)
def testManagedDeviceSetPropertyValue(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
testName = "model"
testValue = 2
test = managedDeviceClientValue.setProperty(name=testName, value=testValue)
assert managedDeviceClientValue._deviceInfo[testName] == testValue
except:
assert False == True
# TODO: cover the rest of setProperty and notifyFieldChange (onSubscribe put variables)
# The commented-out test below hangs when run, although it would improve coverage;
# revisit this later.
# def testManagedDeviceManageOnSubscribe(self):
# try:
# config = {
# "identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
# "auth": {"token": "xxxxxxxxxxxxxxxxxx"},
# }
# managedDeviceClientValue = ManagedDeviceClient(config)
# test = managedDeviceClientValue._onSubscribe(mqttc=1, userdata=2, mid=3, granted_qos=4)
# assert True
# except:
# assert False == True
def testManagedDeviceManageLifetimeValueZero(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.manage(lifetime=3000)
assert True
except:
assert False == True
def testManagedDeviceUnManage(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.unmanage()
assert True
except:
assert False == True
def testManagedDeviceSetLocationLongitude(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setLocation(longitude=1, latitude=2)
assert managedDeviceClientValue._location["longitude"] == 1
except:
assert False == True
def testManagedDeviceSetLocationLatitude(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setLocation(longitude=1, latitude=2)
assert managedDeviceClientValue._location["latitude"] == 2
except:
assert False == True
def testManagedDeviceSetLocationElevation(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setLocation(longitude=1, latitude=2, elevation=3)
assert managedDeviceClientValue._location["elevation"] == 3
except:
assert False == True
def testManagedDeviceSetLocationAccuracy(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setLocation(longitude=1, latitude=2, elevation=3, accuracy=4)
assert managedDeviceClientValue._location["accuracy"] == 4
except:
assert False == True
def testManagedDeviceSetErrorCodeNone(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setErrorCode(errorCode=None)
assert managedDeviceClientValue._errorCode == 0
except:
assert False == True
def testManagedDeviceSetErrorCode(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setErrorCode(errorCode=15)
assert True
except:
assert False == True
def testManagedDeviceClearErrorCodes(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.clearErrorCodes()
assert managedDeviceClientValue._errorCode == None
except:
assert False == True
def testManagedDeviceAddLog(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.addLog(msg="h", data="e")
assert True
except:
assert False == True
def testManagedDeviceClearLog(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.clearLog()
assert True
except:
assert False == True
def testManagedDeviceRespondDeviceAction(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.respondDeviceAction(reqId=1)
assert True
except:
assert False == True
# Do line 337 - 571
def testManagedDeviceSetState(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setState(status=1)
assert True
except:
assert False == True
def testManagedDeviceSetUpdateStatus(self):
try:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDeviceClientValue = ManagedDeviceClient(config)
test = managedDeviceClientValue.setUpdateStatus(status=1)
except:
assert False == True
# Use template for rest __functions
def testManagedDeviceMgmtResponseError(self):
with pytest.raises(Exception) as e:
config = {
"identity": {"orgId": "1", "typeId": "xxx", "deviceId": "xxx"},
"auth": {"token": "xxxxxxxxxxxxxxxxxx"},
}
managedDevice = ManagedDeviceClient(config)
testValue = "Test"
encodedPayload = Utf8Codec.encode(testValue)
managedDevice._ManagedDeviceClient__onDeviceMgmtResponse(client=1, userdata=2, pahoMessage=encodedPayload)
assert "Unable to parse JSON. payload=" " error" in str(e.value)
| epl-1.0 |
mbj4668/pyang | pyang/repository.py | 1 | 5853 | """A repository for searching and holding loaded pyang modules"""
import os
import sys
import io
from . import util
from . import syntax
class Repository(object):
"""Abstract base class that represents a module repository"""
def get_modules_and_revisions(self, ctx):
"""Return a list of all modules and their revisons
Returns a tuple (`modulename`, `revision`, `handle`), where
`handle' is used in the call to get_module_from_handle() to
retrieve the module.
"""
def get_module_from_handle(self, handle):
"""Return the raw module text from the repository
Returns (`ref`, `in_format`, `text`) if found, or None if not found.
`ref` is a string which is used to identify the source of
the text for the user; used in error messages.
`in_format` is one of 'yang' or 'yin' or None.
`text` is the raw text data
Raises `ReadError`
"""
class ReadError(Exception):
"""Signals that an error occured during module retrieval"""
class FileRepository(Repository):
def __init__(self, path="", use_env=True, no_path_recurse=False,
verbose=False):
"""Create a Repository which searches the filesystem for modules
`path` is a `os.pathsep`-separated string of directories
"""
Repository.__init__(self)
self.dirs = []
self.no_path_recurse = no_path_recurse
self.modules = None
self.verbose = verbose
for directory in path.split(os.pathsep):
self._add_directory(directory)
while use_env:
use_env = False
modpath = os.getenv('YANG_MODPATH')
if modpath is not None:
for directory in modpath.split(os.pathsep):
self._add_directory(directory)
home = os.getenv('HOME')
if home is not None:
self._add_directory(os.path.join(home, 'yang', 'modules'))
inst = os.getenv('YANG_INSTALL')
if inst is not None:
self._add_directory(os.path.join(inst, 'yang', 'modules'))
break # skip search if install location is indicated
default_install = os.path.join(
sys.prefix, 'share', 'yang', 'modules')
if os.path.exists(default_install):
self._add_directory(default_install)
break # end search if default location exists
# for some systems, sys.prefix returns `/usr`
# but the real location is `/usr/local`
# if the package is installed with pip
# this information can be easily retrieved
import pkgutil
if not pkgutil.find_loader('pip'):
break # abort search if pip is not installed
# hack below to handle pip 10 internals
# if someone knows pip and how to fix this, it would be great!
location = None
try:
import pip.locations as locations
location = locations.distutils_scheme('pyang')
except:
try:
import pip._internal.locations as locations
location = locations.distutils_scheme('pyang')
except:
pass
if location is not None:
self._add_directory(
os.path.join(location['data'], 'share', 'yang', 'modules'))
if verbose:
sys.stderr.write('# module search path: %s\n'
% os.pathsep.join(self.dirs))
def _add_directory(self, directory):
if (not directory
or directory in self.dirs
or not os.path.isdir(directory)):
return False
self.dirs.append(directory)
return True
def _setup(self, ctx):
# check all dirs for yang and yin files
self.modules = []
def add_files_from_dir(d):
try:
files = os.listdir(d)
except OSError:
files = []
for fname in files:
absfilename = os.path.join(d, fname)
if os.path.isfile(absfilename):
m = syntax.re_filename.search(fname)
if m is not None:
name, rev, in_format = m.groups()
if not os.access(absfilename, os.R_OK):
continue
if absfilename.startswith("./"):
absfilename = absfilename[2:]
handle = in_format, absfilename
self.modules.append((name, rev, handle))
elif (not self.no_path_recurse
and d != '.' and os.path.isdir(absfilename)):
add_files_from_dir(absfilename)
for d in self.dirs:
add_files_from_dir(d)
def get_modules_and_revisions(self, ctx):
if self.modules is None:
self._setup(ctx)
return self.modules
def get_module_from_handle(self, handle):
in_format, absfilename = handle
fd = None
try:
fd = io.open(absfilename, "r", encoding="utf-8")
text = fd.read()
if self.verbose:
util.report_file_read(absfilename)
except IOError as ex:
raise self.ReadError("%s: %s" % (absfilename, ex))
except UnicodeDecodeError as ex:
s = str(ex).replace('utf-8', 'utf8')
raise self.ReadError("%s: unicode error: %s" % (absfilename, s))
finally:
if fd is not None:
fd.close()
if in_format is None:
in_format = util.guess_format(text)
return absfilename, in_format, text
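# Hedged usage sketch (not part of the original module): enumerate the YANG modules
# visible from a search path and read each of them. The path is illustrative; ctx is
# not used by FileRepository._setup(), so None is passed here.
def _example_list_modules(path="modules"):
    repo = FileRepository(path, use_env=False, verbose=False)
    for name, rev, handle in repo.get_modules_and_revisions(ctx=None):
        ref, in_format, text = repo.get_module_from_handle(handle)
        print("%s@%s (%s, %d bytes)" % (name, rev, in_format, len(text)))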
| isc |
mbj4668/pyang | test/test_issues/test_i225/test_prefix_deviation.py | 1 | 3107 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=redefined-outer-name
"""
tests for PYANG data files
"""
import os
import sys
# hack to handle pip 10 internals
try:
import pip.locations as locations
except ImportError:
import pip._internal.locations as locations
from pyang.context import Context
from pyang.repository import FileRepository
EXISTING_MODULE = 'ietf-yang-types'
DEFAULT_OPTIONS = {
'format': 'yang',
'verbose': True,
'list_errors': True,
'print_error_code': True,
'yang_remove_unused_imports': True,
'yang_canonical': True,
'trim_yin': False,
'keep_comments': True,
'features': [],
'deviations': [],
'path': []
}
"""Default options for pyang command line"""
class objectify(object):
"""Utility for providing object access syntax (.attr) to dicts"""
def __init__(self, *args, **kwargs):
for entry in args:
self.__dict__.update(entry)
self.__dict__.update(kwargs)
def __getattr__(self, _):
return None
def __setattr__(self, attr, value):
self.__dict__[attr] = value
def create_context(path='.', *options, **kwargs):
"""Generates a pyang context
Arguments:
path (str): location of YANG modules.
*options: list of dicts, with options to be passed to context.
**kwargs: similar to ``options`` but have a higher precedence.
Returns:
pyang.Context: Context object for ``pyang`` usage
"""
opts = objectify(DEFAULT_OPTIONS, *options, **kwargs)
repo = FileRepository(path, no_path_recurse=opts.no_path_recurse)
ctx = Context(repo)
ctx.opts = opts
return ctx
def test_can_find_modules_with_pip_install():
"""
context should find the default installed modules even when pyang
is installed using pip
"""
# remove obfuscation from env vars
if os.environ.get('YANG_INSTALL'):
del os.environ['YANG_INSTALL']
if os.environ.get('YANG_MODPATH'):
del os.environ['YANG_MODPATH']
ctx = create_context()
module = ctx.search_module(None, EXISTING_MODULE)
assert module is not None
def test_can_find_modules_when_prefix_differ(monkeypatch):
"""
context should find the default installed modules, without the help
of environment variables, even of the pip install location
differs from ``sys.prefix``
"""
# store pip location.
# monkeypatching sys.prefix will side_effect scheme.
try:
scheme = locations.distutils_scheme('pyang')
monkeypatch.setattr(
locations, 'distutils_scheme', lambda *_: scheme)
except:
print("cannot get scheme from pip, skipping")
return
# simulate #225 description
monkeypatch.setattr(sys, 'prefix', '/usr')
# remove obfuscation from env vars
if os.environ.get('YANG_INSTALL'):
del os.environ['YANG_INSTALL']
if os.environ.get('YANG_MODPATH'):
del os.environ['YANG_MODPATH']
ctx = create_context()
module = ctx.search_module(None, EXISTING_MODULE)
assert module is not None
| isc |
mbj4668/pyang | pyang/plugins/omni.py | 1 | 11901 |
import optparse
from pyang import plugin
paths_in_module = []
leafrefs = []
key = ''
class_keywords = ["container", "list", "case", "choice", "augment"]
servicepoints = ["servicepoint", "productpoint"]
classnamecolor = " {0.113725, 0.352941, 0.670588}"
mandatoryconfig = " {0.600000, 0.152941, 0.152941}"
optionalconfig = " {0.129412, 0.501961, 0.254902}"
notconfig = " {0.549020, 0.486275, 0.133333}"
# which line style to use for containment; OmniGraffle defaults to a bezier, so override it
containsline = " tail type: \"FilledDiamond\", head type: \"None\", line type: \"Straight\" "
leafrefline = " line type: \"Straight\", head type: \"FilledArrow\" "
def pyang_plugin_init():
plugin.register_plugin(OmniPlugin())
class OmniPlugin(plugin.PyangPlugin):
def add_output_format(self, fmts):
self.multiple_modules = True
fmts['omni'] = self
def add_opts(self, optparser):
optlist = [
optparse.make_option("--omni-path",
dest="omni_tree_path",
help="Subtree to print"),
]
g = optparser.add_option_group("OmniGraffle output specific options")
g.add_options(optlist)
def setup_fmt(self, ctx):
ctx.implicit_errors = False
def emit(self, ctx, modules, fd):
if ctx.opts.omni_tree_path is not None:
path = ctx.opts.omni_tree_path.split('/')
if path[0] == '':
path = path[1:]
else:
path = None
print_omni_header(modules, fd, path, ctx)
emit_modules(modules, fd, path, ctx)
post_process(fd, ctx)
print_omni_footer(modules, fd, path, ctx)
def print_omni_header(modules, fd, path, ctx):
# Build doc name from module names
name = ''
for m in modules:
name += m.arg
name = name[:32]
fd.write("""
tell application id "com.omnigroup.OmniGraffle6"
activate
make new document with properties {name:\"%s\"}
set bounds of window 1 to {50, 50, 1200, 800}
tell first canvas of document \"%s\"
set canvasSize to {600, 600}
set name to \"YANG Model\"
set adjusts pages to true
make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {32.000000, 20.000000}, text: {size: 8, alignment: center, font: "HelveticaNeue", text: "leafref"}, origin: {2403.202333, 169.219094}}
make new line at end of graphics with properties {point list: {{2513.245592418806, 185.5962102698529}, {2373.745592418806, 185.3149602698529}}, draws shadow: false, head type: "FilledArrow"}
make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {105.000000, 20.000000}, text: {size: 8, alignment: center, font: "HelveticaNeue", text: "Schema tree, containment"}, origin: {2397.741930, 138.863190}}
make new line at end of graphics with properties {point list: {{2374.993645107464, 154.4881903780727}, {2514.493645107464, 154.4881903780727}}, draws shadow: false, tail type: "FilledDiamond"}
make new shape at end of graphics with properties {autosizing: vertically only, size: {139.500000, 14.000000}, text: {alignment: center, font: "Helvetica-Bold", text: "Legend"}, text placement: top, origin: {2366.929155, 43.937008}, vertical padding: 0}
make new shape at end of graphics with properties {autosizing: vertically only, size: {139.500000, 56.000000}, text: {{color: {0.600000, 0.152941, 0.152941}, text: "Mandatory config
"}, {color: {0.129412, 0.501961, 0.254902}, text: "Optional config
"}, {color: {0.129412, 0.501961, 0.254902}, text: "Key leaf", underlined: true}, {color: {0.129412, 0.501961, 0.254902}, text: "
"}, {color: {0.549020, 0.486275, 0.133333}, text: "Not config"}}, text placement: top, origin: {2366.929155, 57.937008}, vertical padding: 0}
assemble graphics -2 through -1 table shape { 2, 1 }
assemble graphics -5 through -1
""" %(name, name))
def post_process(fd, ctx):
for s in leafrefs:
# don't try to connect to a class that was not given as input to pyang
if s.strip().split(" to ")[1].split(" with ")[0] in paths_in_module:
fd.write(s)
def print_omni_footer(modules, fd, path, ctx):
fd.write("""
layout
end tell
end tell
""")
def print_module_info(module, fd, ctx):
title = module.arg
print_text(title, fd, ctx)
def emit_modules(modules, fd, path, ctx):
for module in modules:
print_module_info(module, fd, ctx)
chs = [ch for ch in module.i_children]
if path is not None and len(path) > 0:
chs = [ch for ch in chs
if ch.arg == path[0]]
path = path[1:]
# TEST
for ch in chs:
print_node(module, ch, module, fd, path, ctx, 'true')
for augment in module.search('augment'):
print_node(module, augment, module, fd, path, ctx, 'true')
def iterate_children(parent, s, module, fd, path, ctx):
if hasattr(s, 'i_children'):
for ch in s.i_children:
print_node(s, ch, module, fd, path, ctx)
def print_class_header(s, fd, ctx, root='false'):
global servicepoints
service = ""
for sub in s.substmts:
if sub.keyword[1] in servicepoints:
service = "SERVICE\n"
fd.write("make new shape at end of graphics with properties {autosizing: full, size: {187.500000, 14.000000}, text: {{alignment: center, font: \"Helvetica-Bold\", text: \"%s \"}, {alignment: center, color:%s, font: \"Helvetica-Bold\", text: \"%s \"}}, text placement: top, origin: {150.000000, 11.500000}, vertical padding: 0}\n" %(service + s.keyword, classnamecolor, s.arg))
def print_class_stuff(s, fd, ctx):
number = print_attributes(s, fd, ctx)
#print_actions(s,fd, ctx)
close_class(number, s, fd, ctx)
print_associations(s,fd, ctx)
def print_attributes(s,fd, ctx):
global key
if s.keyword == 'list':
keystring = s.search_one('key')
if keystring is not None:
key = keystring.arg.split(" ")
else:
key = ''
if hasattr(s, 'i_children'):
found_attrs = False
found_actions = False
index = False
# Search attrs
for ch in s.i_children:
index = False
if ch.keyword in ["leaf", "leaf-list"]:
if not found_attrs:
# first attr in attr section
fd.write("make new shape at end of graphics with properties {autosizing:full, size:{187.5, 28.0}, text:{")
found_attrs = True
else:
# comma before new textitem
fd.write(", ")
if ch.keyword == "leaf-list":
append = "[]"
else:
append = ""
if ch.arg in key:
index = True
print_leaf(ch, append, index, fd, ctx)
if found_attrs:
# close attr section
fd.write("}, text placement:top, origin:{150.0, 25.5}, vertical padding:0}\n")
# Search actions
for ch in s.i_children:
if ch.keyword == ('tailf-common', 'action'):
if not found_actions:
fd.write("make new shape at end of graphics with properties {autosizing:full, size:{187.5, 28.0}, text:{text:\"")
found_actions = True
print_action(ch, fd, ctx)
if found_actions:
fd.write("\"}, text placement:top, origin:{150.0, 25.5}, vertical padding:0}\n")
# return number of sections in class
return (found_attrs + found_actions) + 1
def close_class(number, s, fd, ctx):
fd.write("local %s\n" % fullpath(s))
fd.write("set %s to assemble ( graphics -%s through -1 ) table shape {%s, 1}\n"
% (fullpath(s), number, number))
def print_node(parent, s, module, fd, path, ctx, root='false'):
# We have a class
if s.keyword in class_keywords:
print_class_header(s, fd, ctx, root)
paths_in_module.append(fullpath(s))
print_class_stuff(s, fd, ctx)
# Do not try to create relationship to module
if parent != module:
presence = s.search_one("presence")
if presence is not None:
print_aggregation(parent, s, fd, "0", "1", ctx)
else:
print_aggregation(parent, s, fd, "1", "1", ctx)
iterate_children(parent, s, module, fd, path, ctx)
def print_associations(s, fd, ctx):
# find leafrefs and identityrefs
if hasattr(s, 'i_children'):
for ch in s.i_children:
if hasattr(ch, 'i_leafref_ptr') and (ch.i_leafref_ptr is not None):
to = ch.i_leafref_ptr[0]
print_association(s, to.parent, ch, to, "leafref", fd, ctx)
def print_aggregation(parent, this, fd, lower, upper, ctx):
fd.write("connect %s to %s with properties {%s} \n" %(fullpath(parent),fullpath(this), containsline))
def print_rpc(rpc, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s\' " %(fullpath(rpc), rpc.arg))
def print_action(action, fd, ctx, root='false'):
fd.write("%s()\n" %action.arg)
def print_notification(notification, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s\' " %(fullpath(notification), notification.arg))
def print_inout(parent, s, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s-%s\' " %(fullpath(s), parent.arg, s.keyword))
def print_leaf(leaf, append, index, fd, ctx):
if leaf.i_config:
c = '(rw)'
color = optionalconfig
else:
c = '(ro)'
color = notconfig
m = leaf.search_one('mandatory')
if m is None or m.arg == 'false':
mand = '?'
else:
mand = ''
color = mandatoryconfig
if not index:
fd.write("{font: \"Helvetica-Oblique\", color: %s, text: \"%s%s%s %s %s\n\"}"
% (color, leaf.arg, append, mand, c, get_typename(leaf)))
else:
fd.write("{font: \"Helvetica-Oblique\", color: %s, underlined: true, text: \"%s%s%s %s %s\n\"}"
% (color, leaf.arg, append, mand, c, get_typename(leaf)))
def print_association(fromclass, toclass, fromleaf, toleaf, association, fd, ctx):
leafrefs.append("connect " + (fullpath(fromclass)) + " to " + fullpath(toclass) + " with properties {" + leafrefline + "}\n", )
def print_text(t, fd, ctx):
fd.write("make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {57.000000, 30.000000}, text: {size: 16, alignment: center, font: \"HelveticaNeue\", text: \"%s\"}, origin: {100, 4.500000}}\n" %t)
def get_typename(s):
t = s.search_one('type')
if t is not None:
s = t.arg
# if t.arg == 'enumeration':
# s = s + ' : {'
# for enums in t.substmts[:10]:
# s = s + enums.arg + ','
# if len(t.substmts) > 3:
# s = s + "..."
# s = s + '}'
# elif t.arg == 'leafref':
# s = s + ' : '
# p = t.search_one('path')
# if p is not None:
# s = s + p.arg
return s
def fullpath(stmt):
pathsep = "_"
path = stmt.arg
# for augment paths we need to remove initial /
if path.startswith("/"):
path = path[1:]
else:
if stmt.keyword == 'case':
path = path + '-case'
elif stmt.keyword == 'grouping':
path = path + '-grouping'
while stmt.parent is not None:
stmt = stmt.parent
if stmt.arg is not None:
path = stmt.arg + pathsep + path
path = path.replace('-', '_')
path = path.replace(':', '_')
path = path.replace('/', '_')
return path
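# Hedged usage note (not part of the original plugin): the output format and option
# registered above are typically driven from the pyang command line, e.g.
#
#   pyang -f omni --omni-path /interfaces/interface ietf-interfaces.yang > model.applescript
#   osascript model.applescript   # asks OmniGraffle to draw the model
#
# The module name and output file name here are illustrative.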
| isc |
mbj4668/pyang | pyang/yacc.py | 1 | 137902 | # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2019
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Latest version: https://github.com/dabeaz/ply
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what one might
# consider to be good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
from __future__ import absolute_import # mbj: handle 'types' name collision
import re
import types
import sys
import os.path
import inspect
import warnings
__version__ = '3.11'
__tabversion__ = '3.10'
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = True # Debugging mode. If set, yacc generates a
# 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = False # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
# String type-checking compatibility
if sys.version_info[0] < 3:
string_types = basestring # noqa: pyflakes on py3 doesn't know this
else:
string_types = str
MAXINT = sys.maxsize
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
def __init__(self, f):
self.f = f
def debug(self, msg, *args, **kwargs):
self.f.write((msg % args) + '\n')
info = debug
def warning(self, msg, *args, **kwargs):
self.f.write('WARNING: ' + (msg % args) + '\n')
def error(self, msg, *args, **kwargs):
self.f.write('ERROR: ' + (msg % args) + '\n')
critical = debug
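# A minimal sketch of supplying a richer logger instead of PlyLogger, from
# user code that has imported this module as yacc. The yacc() entry point
# referenced below is defined later in this module; the log file name is an
# arbitrary example:
#
#     import logging
#     logging.basicConfig(level=logging.DEBUG, filename='parselog.txt')
#     log = logging.getLogger()
#     parser = yacc.yacc(debug=True, debuglog=log, errorlog=log)
#
# Any object with debug/info/warning/error/critical methods will do, which is
# exactly the interface PlyLogger mimics.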
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self, name):
return self
def __call__(self, *args, **kwargs):
return self
# Exception raised for yacc-related errors
class YaccError(Exception):
pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) > resultlimit:
repr_str = repr_str[:resultlimit] + ' ...'
result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str)
return result
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
repr_str = repr(r)
if '\n' in repr_str:
repr_str = repr(repr_str)
if len(repr_str) < 16:
return repr_str
else:
return '<%s @ 0x%x>' % (type(r).__name__, id(r))
# Panic mode error recovery support. This feature is being reworked--much of the
# code here is to offer a deprecation/backwards compatible transition
_errok = None
_token = None
_restart = None
_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
Instead, invoke the methods on the associated parser instance:
def p_error(p):
...
# Use parser.errok(), parser.token(), parser.restart()
...
parser = yacc.yacc()
'''
def errok():
warnings.warn(_warnmsg)
return _errok()
def restart():
warnings.warn(_warnmsg)
return _restart()
def token():
warnings.warn(_warnmsg)
return _token()
# Utility function to call the p_error() function with some deprecation hacks
def call_errorfunc(errorfunc, token, parser):
global _errok, _token, _restart
_errok = parser.errok
_token = parser.token
_restart = parser.restart
r = errorfunc(token)
try:
del _errok, _token, _restart
except NameError:
pass
return r
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
def __str__(self):
return self.type
def __repr__(self):
return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
def __init__(self, s, stack=None):
self.slice = s
self.stack = stack
self.lexer = None
self.parser = None
def __getitem__(self, n):
if isinstance(n, slice):
return [s.value for s in self.slice[n]]
elif n >= 0:
return self.slice[n].value
else:
return self.stack[n].value
def __setitem__(self, n, v):
self.slice[n].value = v
def __getslice__(self, i, j):
return [s.value for s in self.slice[i:j]]
def __len__(self):
return len(self.slice)
def lineno(self, n):
return getattr(self.slice[n], 'lineno', 0)
def set_lineno(self, n, lineno):
self.slice[n].lineno = lineno
def linespan(self, n):
startline = getattr(self.slice[n], 'lineno', 0)
endline = getattr(self.slice[n], 'endlineno', startline)
return startline, endline
def lexpos(self, n):
return getattr(self.slice[n], 'lexpos', 0)
def set_lexpos(self, n, lexpos):
self.slice[n].lexpos = lexpos
def lexspan(self, n):
startpos = getattr(self.slice[n], 'lexpos', 0)
endpos = getattr(self.slice[n], 'endlexpos', startpos)
return startpos, endpos
def error(self):
raise SyntaxError
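# Usage sketch (rule and token names are illustrative, not part of this
# module): inside a grammar rule function the YaccProduction instance is the
# single argument, so the helpers above are used like this:
#
#     def p_expr_plus(p):
#         'expr : expr PLUS term'
#         p[0] = p[1] + p[3]                  # read/assign .value of symbols
#         startline, endline = p.linespan(1)  # line range of the first 'expr'
#                                             # (meaningful with tracking on)
#         p.set_lineno(0, p.lineno(1))        # propagate position info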
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
def __init__(self, lrtab, errorf):
self.productions = lrtab.lr_productions
self.action = lrtab.lr_action
self.goto = lrtab.lr_goto
self.errorfunc = errorf
self.set_defaulted_states()
self.errorok = True
def errok(self):
self.errorok = True
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
# Defaulted state support.
# This method identifies parser states where there is only one possible reduction action.
# For such states, the parser can choose to make a rule reduction without consuming
# the next look-ahead token. This delayed invocation of the tokenizer can be useful in
# certain kinds of advanced parsing situations where the lexer and parser interact with
# each other or change states (i.e., manipulation of scope, lexer states, etc.).
#
# See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
def set_defaulted_states(self):
self.defaulted_states = {}
for state, actions in self.action.items():
rules = list(actions.values())
if len(rules) == 1 and rules[0] < 0:
self.defaulted_states[state] = rules[0]
def disable_defaulted_states(self):
self.defaulted_states = {}
def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
if debug or yaccdevel:
if isinstance(debug, int):
debug = PlyLogger(sys.stderr)
return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
elif tracking:
return self.parseopt(input, lexer, debug, tracking, tokenfunc)
else:
return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
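# Dispatch sketch showing how each implementation above is selected from user
# code (data is a placeholder input string):
#
#     result = parser.parse(data)                 # parseopt_notrack
#     result = parser.parse(data, tracking=True)  # parseopt
#     result = parser.parse(data, debug=True)     # parsedebug
#
# Passing debug=True (an int subclass) gets converted to a PlyLogger writing
# to sys.stderr, as shown in parse() above; a logger object can be passed
# directly instead.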
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. Optimized versions of this function
# are automatically created by the ply/ygen.py script. This script cuts out
# sections enclosed in markers such as this:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parsedebug-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
#--! DEBUG
debug.info('PLY: PARSE DEBUG START')
#--! DEBUG
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
#--! DEBUG
debug.debug('')
debug.debug('State : %s', state)
#--! DEBUG
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
#--! DEBUG
debug.debug('Defaulted state %s: Reduce using %d', state, -t)
#--! DEBUG
#--! DEBUG
debug.debug('Stack : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
#--! DEBUG
debug.debug('Action : Shift and goto state %s', t)
#--! DEBUG
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
#--! DEBUG
if plen:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
'['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
goto[statestack[-1-plen]][pname])
else:
debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
goto[statestack[-1]][pname])
#--! DEBUG
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
#--! DEBUG
debug.info('Result : %s', format_result(pslice[0]))
#--! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
#--! DEBUG
debug.info('Done : Returning %s', format_result(result))
debug.info('PLY: PARSE DEBUG END')
#--! DEBUG
return result
if t is None:
#--! DEBUG
debug.error('Error : %s',
('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
#--! DEBUG
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parsedebug-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY!
# This code is automatically generated by the ply/ygen.py script. Make
# changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
#--! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
#--! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
#--! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
#--! TRACKING
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
#--! TRACKING
if tracking:
sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
#--! TRACKING
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
#--! TRACKING
if tracking:
lookahead.lineno = sym.lineno
lookahead.lexpos = sym.lexpos
#--! TRACKING
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
# by the ply/ygen.py script. Make changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
#--! parseopt-notrack-start
lookahead = None # Current lookahead symbol
lookaheadstack = [] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
defaulted_states = self.defaulted_states # Local reference to defaulted states
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
from . import lex
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set the parser() token method (sometimes used in error recovery)
self.token = get_token
# Set up the state and symbol stacks
statestack = [] # Stack of parsing states
self.statestack = statestack
symstack = [] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while True:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if state not in defaulted_states:
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
else:
t = defaulted_states[state]
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount:
errorcount -= 1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
self.state = state
p.callable(pslice)
del statestack[-plen:]
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
symstack.extend(targ[1:-1]) # Put the production slice back on the stack
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
targ = [sym]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
self.state = state
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead) # Save the current lookahead token
statestack.pop() # Pop back one state (before the reduce)
state = statestack[-1]
sym.type = 'error'
sym.value = 'error'
lookahead = sym
errorcount = error_count
self.errorok = False
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n, 'value', None)
return result
if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
# In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = False
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
if errtoken and not hasattr(errtoken, 'lexer'):
errtoken.lexer = lexer
self.state = state
tok = call_errorfunc(self.errorfunc, errtoken, self)
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
if hasattr(errtoken, 'lineno'):
lineno = lookahead.lineno
else:
lineno = 0
if lineno:
sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
else:
sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
else:
sys.stderr.write('yacc: Parse error in input. EOF\n')
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
# Create the error symbol for the first time and make it the new lookahead symbol
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead, 'lineno'):
t.lineno = t.endlineno = lookahead.lineno
if hasattr(lookahead, 'lexpos'):
t.lexpos = t.endlexpos = lookahead.lexpos
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
sym = symstack.pop()
statestack.pop()
state = statestack[-1]
continue
# Call an error function here
raise RuntimeError('yacc: internal parser error!!!\n')
#--! parseopt-notrack-end
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are also defined:
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
reduced = 0
def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
self.name = name
self.prod = tuple(prod)
self.number = number
self.func = func
self.callable = None
self.file = file
self.line = line
self.prec = precedence
# Internal settings used during table construction
self.len = len(self.prod) # Length of the production
# Create a list of unique production symbols used in the production
self.usyms = []
for s in self.prod:
if s not in self.usyms:
self.usyms.append(s)
# List of all LR items for the production
self.lr_items = []
self.lr_next = None
# Create a string representation
if self.prod:
self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
self.str = '%s -> <empty>' % self.name
def __str__(self):
return self.str
def __repr__(self):
return 'Production(' + str(self) + ')'
def __len__(self):
return len(self.prod)
def __nonzero__(self):
return 1
def __getitem__(self, index):
return self.prod[index]
# Return the nth lr_item from the production (or None if at the end)
def lr_item(self, n):
if n > len(self.prod):
return None
p = LRItem(self, n)
# Precompute the list of productions immediately following.
try:
p.lr_after = self.Prodnames[p.prod[n+1]]
except (IndexError, KeyError):
p.lr_after = []
try:
p.lr_before = p.prod[n-1]
except IndexError:
p.lr_before = None
return p
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
# This class serves as a minimal stand-in for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
def __init__(self, str, name, len, func, file, line):
self.name = name
self.len = len
self.func = func
self.callable = None
self.file = file
self.line = line
self.str = str
def __str__(self):
return self.str
def __repr__(self):
return 'MiniProduction(%s)' % self.str
# Bind the production function name to a callable
def bind(self, pdict):
if self.func:
self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here are
# the basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
def __init__(self, p, n):
self.name = p.name
self.prod = list(p.prod)
self.number = p.number
self.lr_index = n
self.lookaheads = {}
self.prod.insert(n, '.')
self.prod = tuple(self.prod)
self.len = len(self.prod)
self.usyms = p.usyms
def __str__(self):
if self.prod:
s = '%s -> %s' % (self.name, ' '.join(self.prod))
else:
s = '%s -> <empty>' % self.name
return s
def __repr__(self):
return 'LRItem(' + str(self) + ')'
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
i = len(symbols) - 1
while i >= 0:
if symbols[i] in terminals:
return symbols[i]
i -= 1
return None
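# For example, with terminals {'PLUS', 'NUM'} (names are illustrative):
#
#     rightmost_terminal(['expr', 'PLUS', 'term'], terminals)  -> 'PLUS'
#     rightmost_terminal(['expr', 'term'], terminals)           -> None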
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError):
pass
class Grammar(object):
def __init__(self, terminals):
self.Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
self.Prodmap = {} # A dictionary that is only used to detect duplicate
# productions.
self.Terminals = {} # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
self.First = {} # A dictionary of precomputed FIRST(x) symbols
self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols
self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
self.UsedPrecedence = set() # Precedence rules that were actually used by the grammer.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
self.Start = None # Starting symbol for the grammar
def __len__(self):
return len(self.Productions)
def __getitem__(self, index):
return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
def set_precedence(self, term, assoc, level):
assert self.Productions == [None], 'Must call set_precedence() before add_production()'
if term in self.Precedence:
raise GrammarError('Precedence already specified for terminal %r' % term)
if assoc not in ['left', 'right', 'nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc, level)
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
# Precedence is determined by the precedence of the right-most terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
def add_production(self, prodname, syms, func=None, file='', line=0):
if prodname in self.Terminals:
raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
if prodname == 'error':
raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
if not _is_identifier.match(prodname):
raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))
# Look for literal tokens
for n, s in enumerate(syms):
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
(file, line, s, prodname))
if c not in self.Terminals:
self.Terminals[c] = []
syms[n] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))
# Determine the precedence level
if '%prec' in syms:
if syms[-1] == '%prec':
raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
if syms[-2] != '%prec':
raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
(file, line))
precname = syms[-1]
prodprec = self.Precedence.get(precname)
if not prodprec:
raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
else:
self.UsedPrecedence.add(precname)
del syms[-2:] # Drop %prec from the rule
else:
# If no %prec, precedence is determined by the rightmost terminal symbol
precname = rightmost_terminal(syms, self.Terminals)
prodprec = self.Precedence.get(precname, ('right', 0))
# See if the rule is already in the rulemap
map = '%s -> %s' % (prodname, syms)
if map in self.Prodmap:
m = self.Prodmap[map]
raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
'Previous definition at %s:%d' % (m.file, m.line))
# From this point on, everything is valid. Create a new Production instance
pnumber = len(self.Productions)
if prodname not in self.Nonterminals:
self.Nonterminals[prodname] = []
# Add the production number to Terminals and Nonterminals
for t in syms:
if t in self.Terminals:
self.Terminals[t].append(pnumber)
else:
if t not in self.Nonterminals:
self.Nonterminals[t] = []
self.Nonterminals[t].append(pnumber)
# Create a production and add it to the list of productions
p = Production(pnumber, prodname, syms, prodprec, func, file, line)
self.Productions.append(p)
self.Prodmap[map] = p
# Add to the global productions list
try:
self.Prodnames[prodname].append(p)
except KeyError:
self.Prodnames[prodname] = [p]
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self, start=None):
if not start:
start = self.Productions[1].name
if start not in self.Nonterminals:
raise GrammarError('start symbol %s undefined' % start)
self.Productions[0] = Production(0, "S'", [start])
self.Nonterminals[start].append(0)
self.Start = start
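# A minimal construction sketch tying the methods above together. Token and
# rule names are invented for illustration; in practice yacc.yacc() drives
# these calls from the user's p_* functions:
#
#     g = Grammar(['NUM', 'PLUS'])
#     g.set_precedence('PLUS', 'left', 1)      # must precede add_production()
#     g.add_production('expr', ['expr', 'PLUS', 'expr'])
#     g.add_production('expr', ['NUM'])
#     g.set_start('expr')                      # installs S' -> expr as rule 0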
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if s in reachable:
return
reachable.add(s)
for p in self.Prodnames.get(s, []):
for r in p.prod:
mark_reachable_from(r)
reachable = set()
mark_reachable_from(self.Productions[0].prod[0])
return [s for s in self.Nonterminals if s not in reachable]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = True
terminates['$end'] = True
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = False
# Then propagate termination until no change:
while True:
some_change = False
for (n, pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = False
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = True
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = True
some_change = True
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s, term) in terminates.items():
if not term:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
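# For instance, a grammar whose only rule is
#
#     expr : expr PLUS expr
#
# can never derive a string of terminals, so infinite_cycles() would report
# ['expr']; adding a rule such as  expr : NUM  makes it terminate. (Names are
# illustrative.)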
# -----------------------------------------------------------------------------
# undefined_symbols()
#
# Find all symbols that were used in the grammar, but not defined as tokens or
# grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p:
continue
for s in p.prod:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
result.append((s, p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar. Returns
# a list of all symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s, v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s, v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname, self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first(), the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self, beta):
# We are computing First(x1,x2,x3,...,xn)
result = []
for x in beta:
x_produces_empty = False
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = True
else:
if f not in result:
result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while True:
some_change = False
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append(f)
some_change = True
if not some_change:
break
return self.First
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
def compute_follow(self, start=None):
# If already computed, return the result
if self.Follow:
return self.Follow
# If first sets not computed yet, do that first.
if not self.First:
self.compute_first()
# Add '$end' to the follow list of the start symbol
for k in self.Nonterminals:
self.Follow[k] = []
if not start:
start = self.Productions[1].name
self.Follow[start] = ['$end']
while True:
didadd = False
for p in self.Productions[1:]:
# Here is the production set
for i, B in enumerate(p.prod):
if B in self.Nonterminals:
# Okay. We got a non-terminal in a production
fst = self._first(p.prod[i+1:])
hasempty = False
for f in fst:
if f != '<empty>' and f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if f == '<empty>':
hasempty = True
if hasempty or i == (len(p.prod)-1):
# Add elements of follow(a) to follow(b)
for f in self.Follow[p.name]:
if f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if not didadd:
break
return self.Follow
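# Worked example for the small illustrative grammar
#
#     expr : expr PLUS term
#     expr : term
#     term : NUM
#
# compute_first() gives FIRST(expr) = FIRST(term) = ['NUM'], and
# compute_follow() gives FOLLOW(expr) = FOLLOW(term) = ['$end', 'PLUS'],
# since 'term' at the end of a rule inherits everything in FOLLOW(expr).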
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
for p in self.Productions:
lastlri = p
i = 0
lr_items = []
while True:
if i > len(p):
lri = None
else:
lri = LRItem(p, i)
# Precompute the list of productions immediately following
try:
lri.lr_after = self.Prodnames[lri.prod[i+1]]
except (IndexError, KeyError):
lri.lr_after = []
try:
lri.lr_before = lri.prod[i-1]
except IndexError:
lri.lr_before = None
lastlri.lr_next = lri
if not lri:
break
lr_items.append(lri)
lastlri = lri
i += 1
p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError):
pass
class LRTable(object):
def __init__(self):
self.lr_action = None
self.lr_goto = None
self.lr_productions = None
self.lr_method = None
def read_table(self, module):
if isinstance(module, types.ModuleType):
parsetab = module
else:
exec('import %s' % module)
parsetab = sys.modules[module]
if parsetab._tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_action = parsetab._lr_action
self.lr_goto = parsetab._lr_goto
self.lr_productions = []
for p in parsetab._lr_productions:
self.lr_productions.append(MiniProduction(*p))
self.lr_method = parsetab._lr_method
return parsetab._lr_signature
def read_pickle(self, filename):
try:
import cPickle as pickle
except ImportError:
import pickle
if not os.path.exists(filename):
raise ImportError
in_f = open(filename, 'rb')
tabversion = pickle.load(in_f)
if tabversion != __tabversion__:
raise VersionError('yacc table file version is out of date')
self.lr_method = pickle.load(in_f)
signature = pickle.load(in_f)
self.lr_action = pickle.load(in_f)
self.lr_goto = pickle.load(in_f)
productions = pickle.load(in_f)
self.lr_productions = []
for p in productions:
self.lr_productions.append(MiniProduction(*p))
in_f.close()
return signature
# Bind all production function names to callable objects in pdict
def bind_callables(self, pdict):
for p in self.lr_productions:
p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X, R, FP):
N = {}
for x in X:
N[x] = 0
stack = []
F = {}
for x in X:
if N[x] == 0:
traverse(x, N, stack, F, X, R, FP)
return F
def traverse(x, N, stack, F, X, R, FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y, N, stack, F, X, R, FP)
N[x] = min(N[x], N[y])
for a in F.get(y, []):
if a not in F[x]:
F[x].append(a)
if N[x] == d:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
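# Tiny illustration of the fixed point these two functions compute (set names
# are made up): with X = ['a', 'b'], a relation R where only 'a' R 'b' holds,
# and FP(a) = ['1'], FP(b) = ['2'], digraph() returns
#
#     F = {'a': ['1', '2'], 'b': ['2']}
#
# i.e. F(a) absorbs everything in F(b) because a R b.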
class LALRError(YaccError):
pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self, grammar, method='LALR', log=None):
if method not in ['SLR', 'LALR']:
raise LALRError('Unsupported method %s' % method)
self.grammar = grammar
self.lr_method = method
# Set up the logger
if not log:
log = NullLogger()
self.log = log
# Internal attributes
self.lr_action = {} # Action table
self.lr_goto = {} # Goto table
self.lr_productions = grammar.Productions # Copy of grammar Production array
self.lr_goto_cache = {} # Cache of computed gotos
self.lr0_cidhash = {} # Cache of closures
self._add_count = 0 # Internal counter used to detect cycles
        # Diagnostic information filled in by the table generator
self.sr_conflict = 0
self.rr_conflict = 0
self.conflicts = [] # List of conflicts
self.sr_conflicts = []
self.rr_conflicts = []
# Build the tables
self.grammar.build_lritems()
self.grammar.compute_first()
self.grammar.compute_follow()
self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self, I):
self._add_count += 1
# Add everything in I to J
J = I[:]
didadd = True
while didadd:
didadd = False
for j in J:
for x in j.lr_after:
if getattr(x, 'lr0_added', 0) == self._add_count:
continue
# Add B --> .G to J
J.append(x.lr_next)
x.lr0_added = self._add_count
didadd = True
return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self, I, x):
# First we look for a previously cached entry
g = self.lr_goto_cache.get((id(I), x))
if g:
return g
# Now we generate the goto set in a way that guarantees uniqueness
# of the result
s = self.lr_goto_cache.get(x)
if not s:
s = {}
self.lr_goto_cache[x] = s
gs = []
for p in I:
n = p.lr_next
if n and n.lr_before == x:
s1 = s.get(id(n))
if not s1:
s1 = {}
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end')
if not g:
if gs:
g = self.lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
self.lr_goto_cache[(id(I), x)] = g
return g
# Compute the LR(0) sets of item function
def lr0_items(self):
C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
i = 0
for I in C:
self.lr0_cidhash[id(I)] = i
i += 1
        # Loop over the items in C and each grammar symbol
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = {}
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms:
g = self.lr0_goto(I, x)
if not g or id(g) in self.lr0_cidhash:
continue
self.lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
    # The method used here is due to DeRemer and Pennello (1982).
#
    # DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = set()
num_nullable = 0
while True:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable.add(p.name)
continue
for t in p.prod:
if t not in nullable:
break
else:
nullable.add(p.name)
if len(nullable) == num_nullable:
break
num_nullable = len(nullable)
return nullable
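    # For example (illustrative only): given the productions
    #     empty :
    #     opt_args : empty
    #     opt_args : args
    # both 'empty' and 'opt_args' end up in the returned nullable set (the
    # loop repeats until no new names are added), while 'args' stays out,
    # assuming it has no empty derivation of its own.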
# -----------------------------------------------------------------------------
    # find_nonterminal_transitions(C)
#
    # Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self, C):
trans = []
for stateno, state in enumerate(C):
for p in state:
if p.lr_index < p.len - 1:
t = (stateno, p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans:
trans.append(t)
return trans
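    # For example (illustrative only): if state 3 contains an item whose dot
    # sits immediately before the non-terminal 'expr', the tuple (3, 'expr')
    # is recorded; dots sitting before terminals contribute nothing here, and
    # duplicate tuples are skipped.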
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self, C, trans, nullable):
state, N = trans
terms = []
g = self.lr0_goto(C[state], N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms:
terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
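    # For example (illustrative only): if goto(C[state], N) contains an item
    # whose dot sits immediately before the terminal PLUS, then PLUS belongs
    # to DR(state, N), i.e. it can be read directly after the transition on N.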
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self, C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state], N)
j = self.lr0_cidhash.get(id(g), -1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j, a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self, C, trans, nullable):
lookdict = {} # Dictionary of lookback relations
includedict = {} # Dictionary of include relations
# Make a dictionary of non-terminal transitions
dtrans = {}
for t in trans:
dtrans[t] = 1
# Loop over all transitions and compute lookbacks and includes
for state, N in trans:
lookb = []
includes = []
for p in C[state]:
if p.name != N:
continue
# Okay, we have a name match. We now follow the production all the way
# through the state machine until we get the . on the right hand side
lr_index = p.lr_index
j = state
while lr_index < p.len - 1:
lr_index = lr_index + 1
t = p.prod[lr_index]
# Check to see if this symbol and state are a non-terminal transition
if (j, t) in dtrans:
# Yes. Okay, there is some chance that this is an includes relation
# the only way to know for certain is whether the rest of the
# production derives empty
li = lr_index + 1
while li < p.len:
if p.prod[li] in self.grammar.Terminals:
break # No forget it
if p.prod[li] not in nullable:
break
li = li + 1
else:
# Appears to be a relation between (j,t) and (state,N)
includes.append((j, t))
g = self.lr0_goto(C[j], t) # Go to next set
j = self.lr0_cidhash.get(id(g), -1) # Go to next state
# When we get here, j is the final state, now we have to locate the production
for r in C[j]:
if r.name != p.name:
continue
if r.len != p.len:
continue
i = 0
                    # This loop is comparing a production ". A B C" with "A B C ."
while i < r.lr_index:
if r.prod[i] != p.prod[i+1]:
break
i = i + 1
else:
lookb.append((j, r))
for i in includes:
if i not in includedict:
includedict[i] = []
includedict[i].append((state, N))
lookdict[(state, N)] = lookb
return lookdict, includedict
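    # For example (illustrative only): for a transition (p, N) and a production
    # 'N : A B C' present in state p, the dot is walked across A, B and C
    # through the LR(0) machine; if the walk ends in state q, the completed
    # item 'N : A B C .' found in q becomes a lookback of (p, N). An INCLUDES
    # entry is recorded whenever the walk crosses another non-terminal
    # transition (j, t) and everything after t in the production can derive
    # the empty string.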
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
FP = lambda x: self.dr_relation(C, x, nullable)
R = lambda x: self.reads_relation(C, x, nullable)
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x, [])
F = digraph(ntrans, R, FP)
return F
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
for trans, lb in lookbacks.items():
# Loop over productions in lookback
for state, p in lb:
if state not in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans, [])
for a in f:
if a not in p.lookaheads[state]:
p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
# Determine all of the nullable nonterminals
nullable = self.compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = self.find_nonterminal_transitions(C)
# Compute read sets
readsets = self.compute_read_sets(C, trans, nullable)
# Compute lookback/includes relations
lookd, included = self.compute_lookback_includes(C, trans, nullable)
# Compute LALR FOLLOW sets
followsets = self.compute_follow_sets(trans, readsets, included)
# Add all of the lookaheads
self.add_lookaheads(lookd, followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
Productions = self.grammar.Productions
Precedence = self.grammar.Precedence
goto = self.lr_goto # Goto array
action = self.lr_action # Action array
log = self.log # Logger for output
actionp = {} # Action production array (temporary)
log.info('Parsing method: %s', self.lr_method)
# Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
# This determines the number of states
C = self.lr0_items()
if self.lr_method == 'LALR':
self.add_lalr_lookaheads(C)
# Build the parser table, state by state
st = 0
for I in C:
# Loop over each production in I
actlist = [] # List of actions
st_action = {}
st_actionp = {}
st_goto = {}
log.info('')
log.info('state %d', st)
log.info('')
for p in I:
log.info(' (%d) %s', p.number, p)
log.info('')
for p in I:
if p.len == p.lr_index + 1:
if p.name == "S'":
# Start symbol. Accept!
st_action['$end'] = 0
st_actionp['$end'] = p
else:
# We are at the end of a production. Reduce!
if self.lr_method == 'LALR':
laheads = p.lookaheads[st]
else:
laheads = self.grammar.Follow[p.name]
for a in laheads:
actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
r = st_action.get(a)
if r is not None:
# Whoa. Have a shift/reduce or reduce/reduce conflict
if r > 0:
# Need to decide on shift or reduce here
# By default we favor shifting. Need to add
# some precedence rules here.
# Shift precedence comes from the token
sprec, slevel = Precedence.get(a, ('right', 0))
# Reduce precedence comes from rule being reduced (p)
rprec, rlevel = Productions[p.number].prec
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
st_action[a] = -p.number
st_actionp[a] = p
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
Productions[p.number].reduced += 1
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the shift
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif r < 0:
# Reduce/reduce conflict. In this case, we favor the rule
# that was defined first in the grammar file
oldp = Productions[-r]
pp = Productions[p.number]
if oldp.line > pp.line:
st_action[a] = -p.number
st_actionp[a] = p
chosenp, rejectp = pp, oldp
Productions[p.number].reduced += 1
Productions[oldp.number].reduced -= 1
else:
chosenp, rejectp = oldp, pp
self.rr_conflicts.append((st, chosenp, rejectp))
log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)',
a, st_actionp[a].number, st_actionp[a])
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = -p.number
st_actionp[a] = p
Productions[p.number].reduced += 1
else:
i = p.lr_index
a = p.prod[i+1] # Get symbol right after the "."
if a in self.grammar.Terminals:
g = self.lr0_goto(I, a)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
# We are in a shift state
actlist.append((a, p, 'shift and go to state %d' % j))
r = st_action.get(a)
if r is not None:
# Whoa have a shift/reduce or shift/shift conflict
if r > 0:
if r != j:
raise LALRError('Shift/shift conflict in state %d' % st)
elif r < 0:
# Do a precedence check.
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
# Shift precedence comes from the token
sprec, slevel = Precedence.get(a, ('right', 0))
# Reduce precedence comes from the rule that could have been reduced
rprec, rlevel = Productions[st_actionp[a].number].prec
if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
# We decide to shift here... highest precedence to shift
Productions[st_actionp[a].number].reduced -= 1
st_action[a] = j
st_actionp[a] = p
if not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as shift', a)
self.sr_conflicts.append((st, a, 'shift'))
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the reduce
if not slevel and not rlevel:
log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
self.sr_conflicts.append((st, a, 'reduce'))
else:
raise LALRError('Unknown conflict in state %d' % st)
else:
st_action[a] = j
st_actionp[a] = p
# Print the actions associated with each terminal
_actprint = {}
for a, p, m in actlist:
if a in st_action:
if p is st_actionp[a]:
log.info(' %-15s %s', a, m)
_actprint[(a, m)] = 1
log.info('')
# Print the actions that were not used. (debugging)
not_used = 0
for a, p, m in actlist:
if a in st_action:
if p is not st_actionp[a]:
if not (a, m) in _actprint:
log.debug(' ! %-15s [ %s ]', a, m)
not_used = 1
_actprint[(a, m)] = 1
if not_used:
log.debug('')
# Construct the goto table for this state
nkeys = {}
for ii in I:
for s in ii.usyms:
if s in self.grammar.Nonterminals:
nkeys[s] = None
for n in nkeys:
g = self.lr0_goto(I, n)
j = self.lr0_cidhash.get(id(g), -1)
if j >= 0:
st_goto[n] = j
log.info(' %-30s shift and go to state %d', n, j)
action[st] = st_action
actionp[st] = st_actionp
goto[st] = st_goto
st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self, tabmodule, outputdir='', signature=''):
if isinstance(tabmodule, types.ModuleType):
raise IOError("Won't overwrite existing tabmodule")
basemodulename = tabmodule.split('.')[-1]
filename = os.path.join(outputdir, basemodulename) + '.py'
try:
f = open(filename, 'w')
f.write('''
# %s
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = %r
_lr_method = %r
_lr_signature = %r
''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = {}
for s, nd in self.lr_action.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_action_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_action = {}
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = {}
_lr_action[_x][_k] = _y
del _lr_action_items
''')
else:
f.write('\n_lr_action = { ')
for k, v in self.lr_action.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
if smaller:
# Factor out names to try and make smaller
items = {}
for s, nd in self.lr_goto.items():
for name, v in nd.items():
i = items.get(name)
if not i:
i = ([], [])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write('\n_lr_goto_items = {')
for k, v in items.items():
f.write('%r:([' % k)
for i in v[0]:
f.write('%r,' % i)
f.write('],[')
for i in v[1]:
f.write('%r,' % i)
f.write(']),')
f.write('}\n')
f.write('''
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
for _x, _y in zip(_v[0], _v[1]):
if not _x in _lr_goto: _lr_goto[_x] = {}
_lr_goto[_x][_k] = _y
del _lr_goto_items
''')
else:
f.write('\n_lr_goto = { ')
for k, v in self.lr_goto.items():
f.write('(%r,%r):%r,' % (k[0], k[1], v))
f.write('}\n')
# Write production table
f.write('_lr_productions = [\n')
for p in self.lr_productions:
if p.func:
f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
p.func, os.path.basename(p.file), p.line))
else:
f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
f.write(']\n')
f.close()
except IOError as e:
raise
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self, filename, signature=''):
try:
import cPickle as pickle
except ImportError:
import pickle
with open(filename, 'wb') as outf:
pickle.dump(__tabversion__, outf, pickle_protocol)
pickle.dump(self.lr_method, outf, pickle_protocol)
pickle.dump(signature, outf, pickle_protocol)
pickle.dump(self.lr_action, outf, pickle_protocol)
pickle.dump(self.lr_goto, outf, pickle_protocol)
outp = []
for p in self.lr_productions:
if p.func:
outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line))
else:
outp.append((str(p), p.name, p.len, None, None, None))
pickle.dump(outp, outf, pickle_protocol)
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
f = sys._getframe(levels)
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
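# For example, when yacc() below is called without a module argument, it uses
# get_caller_module_dict(2) to capture the caller's globals (updated with its
# locals) so that 'tokens', 'precedence' and the p_* rule functions defined
# there can be discovered.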
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
grammar = []
# Split the doc string into lines
pstrings = doc.splitlines()
lastp = None
dline = line
for ps in pstrings:
dline += 1
p = ps.split()
if not p:
continue
try:
if p[0] == '|':
# This is a continuation of a previous rule
if not lastp:
raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
prodname = lastp
syms = p[1:]
else:
prodname = p[0]
lastp = prodname
syms = p[2:]
assign = p[1]
if assign != ':' and assign != '::=':
raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
grammar.append((file, dline, prodname, syms))
except SyntaxError:
raise
except Exception:
raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip()))
return grammar
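# For example (illustrative only): called with a doc string of
#
#     expression : expression PLUS term
#                | term
#
# parse_grammar(doc, file, line) returns
#     [(file, line + 1, 'expression', ['expression', 'PLUS', 'term']),
#      (file, line + 2, 'expression', ['term'])]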
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
def __init__(self, pdict, log=None):
self.pdict = pdict
self.start = None
self.error_func = None
self.tokens = None
self.modules = set()
self.grammar = []
self.error = False
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_start()
self.get_error_func()
self.get_tokens()
self.get_precedence()
self.get_pfunctions()
# Validate all of the information
def validate_all(self):
self.validate_start()
self.validate_error_func()
self.validate_tokens()
self.validate_precedence()
self.validate_pfunctions()
self.validate_modules()
return self.error
# Compute a signature over the grammar
def signature(self):
parts = []
try:
if self.start:
parts.append(self.start)
if self.prec:
parts.append(''.join([''.join(p) for p in self.prec]))
if self.tokens:
parts.append(' '.join(self.tokens))
for f in self.pfuncs:
if f[3]:
parts.append(f[3])
except (TypeError, ValueError):
pass
return ''.join(parts)
# -----------------------------------------------------------------------------
# validate_modules()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_modules(self):
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
for module in self.modules:
try:
lines, linen = inspect.getsourcelines(module)
except IOError:
continue
counthash = {}
for linen, line in enumerate(lines):
linen += 1
m = fre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
filename, linen, name, prev)
# Get the start symbol
def get_start(self):
self.start = self.pdict.get('start')
# Validate the start symbol
def validate_start(self):
if self.start is not None:
if not isinstance(self.start, string_types):
self.log.error("'start' must be a string")
# Look for error handler
def get_error_func(self):
self.error_func = self.pdict.get('p_error')
# Validate the error function
def validate_error_func(self):
if self.error_func:
if isinstance(self.error_func, types.FunctionType):
ismethod = 0
elif isinstance(self.error_func, types.MethodType):
ismethod = 1
else:
self.log.error("'p_error' defined, but is not a function or method")
self.error = True
return
eline = self.error_func.__code__.co_firstlineno
efile = self.error_func.__code__.co_filename
module = inspect.getmodule(self.error_func)
self.modules.add(module)
argcount = self.error_func.__code__.co_argcount - ismethod
if argcount != 1:
self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
self.error = True
# Get the tokens map
def get_tokens(self):
tokens = self.pdict.get('tokens')
if not tokens:
self.log.error('No token list is defined')
self.error = True
return
if not isinstance(tokens, (list, tuple)):
self.log.error('tokens must be a list or tuple')
self.error = True
return
if not tokens:
self.log.error('tokens is empty')
self.error = True
return
self.tokens = sorted(tokens)
# Validate the tokens
def validate_tokens(self):
# Validate the tokens.
if 'error' in self.tokens:
self.log.error("Illegal token name 'error'. Is a reserved word")
self.error = True
return
terminals = set()
for n in self.tokens:
if n in terminals:
self.log.warning('Token %r multiply defined', n)
terminals.add(n)
# Get the precedence map (if any)
def get_precedence(self):
self.prec = self.pdict.get('precedence')
# Validate and parse the precedence map
def validate_precedence(self):
preclist = []
if self.prec:
if not isinstance(self.prec, (list, tuple)):
self.log.error('precedence must be a list or tuple')
self.error = True
return
for level, p in enumerate(self.prec):
if not isinstance(p, (list, tuple)):
self.log.error('Bad precedence table')
self.error = True
return
if len(p) < 2:
self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
self.error = True
return
assoc = p[0]
if not isinstance(assoc, string_types):
self.log.error('precedence associativity must be a string')
self.error = True
return
for term in p[1:]:
if not isinstance(term, string_types):
self.log.error('precedence items must be strings')
self.error = True
return
preclist.append((term, assoc, level+1))
self.preclist = preclist
# Get all p_functions from the grammar
def get_pfunctions(self):
p_functions = []
for name, item in self.pdict.items():
if not name.startswith('p_') or name == 'p_error':
continue
if isinstance(item, (types.FunctionType, types.MethodType)):
line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno)
module = inspect.getmodule(item)
p_functions.append((line, module, name, item.__doc__))
# Sort all of the actions by line number; make sure to stringify
# modules to make them sortable, since `line` may not uniquely sort all
# p functions
p_functions.sort(key=lambda p_function: (
p_function[0],
str(p_function[1]),
p_function[2],
p_function[3]))
self.pfuncs = p_functions
# Validate all of the p_functions
def validate_pfunctions(self):
grammar = []
# Check for non-empty symbols
if len(self.pfuncs) == 0:
self.log.error('no rules of the form p_rulename are defined')
self.error = True
return
for line, module, name, doc in self.pfuncs:
file = inspect.getsourcefile(module)
func = self.pdict[name]
if isinstance(func, types.MethodType):
reqargs = 2
else:
reqargs = 1
if func.__code__.co_argcount > reqargs:
self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
self.error = True
elif func.__code__.co_argcount < reqargs:
self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
self.error = True
elif not func.__doc__:
self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
file, line, func.__name__)
else:
try:
parsed_g = parse_grammar(doc, file, line)
for g in parsed_g:
grammar.append((name, g))
except SyntaxError as e:
self.log.error(str(e))
self.error = True
# Looks like a valid grammar rule
# Mark the file in which defined.
self.modules.add(module)
# Secondary validation step that looks for p_ definitions that are not functions
# or functions that look like they might be grammar rules.
for n, v in self.pdict.items():
if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
continue
if n.startswith('t_'):
continue
if n.startswith('p_') and n != 'p_error':
self.log.warning('%r not defined as a function', n)
if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
(isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
if v.__doc__:
try:
doc = v.__doc__.split(' ')
if doc[1] == ':':
self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
v.__code__.co_filename, v.__code__.co_firstlineno, n)
except IndexError:
pass
self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
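# Typical usage (illustrative sketch, assuming a ply.lex lexer and the usual
# 'tokens' list, optional 'precedence' table and p_* rule functions are
# defined in the calling module):
#
#     import ply.yacc as yacc
#     parser = yacc.yacc()           # build or load the parsing tables
#     result = parser.parse(data)    # uses the most recently built lexer
#                                    # unless one is passed via lexer=
#
# The keyword arguments below control the table method, caching, debug output
# and where generated files are written.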
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
outputdir=None, debuglog=None, errorlog=None, picklefile=None):
if tabmodule is None:
tabmodule = tab_module
# Reference to the parsing method of the last built parser
global parse
# If pickling is enabled, table files are not created
if picklefile:
write_tables = 0
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
# Get the module dictionary used for the parser
if module:
_items = [(k, getattr(module, k)) for k in dir(module)]
pdict = dict(_items)
# If no __file__ or __package__ attributes are available, try to obtain them
# from the __module__ instead
if '__file__' not in pdict:
pdict['__file__'] = sys.modules[pdict['__module__']].__file__
if '__package__' not in pdict and '__module__' in pdict:
if hasattr(sys.modules[pdict['__module__']], '__package__'):
pdict['__package__'] = sys.modules[pdict['__module__']].__package__
else:
pdict = get_caller_module_dict(2)
if outputdir is None:
# If no output directory is set, the location of the output files
# is determined according to the following rules:
# - If tabmodule specifies a package, files go into that package directory
# - Otherwise, files go in the same directory as the specifying module
if isinstance(tabmodule, types.ModuleType):
srcfile = tabmodule.__file__
else:
if '.' not in tabmodule:
srcfile = pdict['__file__']
else:
parts = tabmodule.split('.')
pkgname = '.'.join(parts[:-1])
exec('import %s' % pkgname)
srcfile = getattr(sys.modules[pkgname], '__file__', '')
outputdir = os.path.dirname(srcfile)
    # Determine if the module providing the grammar is part of a package.
# If so, fix the tabmodule setting so that tables load correctly
pkg = pdict.get('__package__')
if pkg and isinstance(tabmodule, str):
if '.' not in tabmodule:
tabmodule = pkg + '.' + tabmodule
# Set start symbol if it's specified directly using an argument
if start is not None:
pdict['start'] = start
# Collect parser information from the dictionary
pinfo = ParserReflect(pdict, log=errorlog)
pinfo.get_all()
if pinfo.error:
raise YaccError('Unable to build parser')
# Check signature against table files (if any)
signature = pinfo.signature()
# Read the tables
try:
lr = LRTable()
if picklefile:
read_signature = lr.read_pickle(picklefile)
else:
read_signature = lr.read_table(tabmodule)
if optimize or (read_signature == signature):
try:
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
except Exception as e:
errorlog.warning('There was a problem loading the table file: %r', e)
except VersionError as e:
errorlog.warning(str(e))
except ImportError:
pass
if debuglog is None:
if debug:
try:
debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
except IOError as e:
errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
debuglog = NullLogger()
else:
debuglog = NullLogger()
debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__)
errors = False
# Validate the parser information
if pinfo.validate_all():
raise YaccError('Unable to build parser')
if not pinfo.error_func:
errorlog.warning('no p_error() function is defined')
# Create a grammar object
grammar = Grammar(pinfo.tokens)
# Set precedence level for terminals
for term, assoc, level in pinfo.preclist:
try:
grammar.set_precedence(term, assoc, level)
except GrammarError as e:
errorlog.warning('%s', e)
# Add productions to the grammar
for funcname, gram in pinfo.grammar:
file, line, prodname, syms = gram
try:
grammar.add_production(prodname, syms, funcname, file, line)
except GrammarError as e:
errorlog.error('%s', e)
errors = True
# Set the grammar start symbols
try:
if start is None:
grammar.set_start(pinfo.start)
else:
grammar.set_start(start)
except GrammarError as e:
errorlog.error(str(e))
errors = True
if errors:
raise YaccError('Unable to build parser')
# Verify the grammar structure
undefined_symbols = grammar.undefined_symbols()
for sym, prod in undefined_symbols:
errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
errors = True
unused_terminals = grammar.unused_terminals()
if unused_terminals:
debuglog.info('')
debuglog.info('Unused terminals:')
debuglog.info('')
for term in unused_terminals:
errorlog.warning('Token %r defined, but not used', term)
debuglog.info(' %s', term)
# Print out all productions to the debug log
if debug:
debuglog.info('')
debuglog.info('Grammar')
debuglog.info('')
for n, p in enumerate(grammar.Productions):
debuglog.info('Rule %-5d %s', n, p)
# Find unused non-terminals
unused_rules = grammar.unused_rules()
for prod in unused_rules:
errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)
if len(unused_terminals) == 1:
errorlog.warning('There is 1 unused token')
if len(unused_terminals) > 1:
errorlog.warning('There are %d unused tokens', len(unused_terminals))
if len(unused_rules) == 1:
errorlog.warning('There is 1 unused rule')
if len(unused_rules) > 1:
errorlog.warning('There are %d unused rules', len(unused_rules))
if debug:
debuglog.info('')
debuglog.info('Terminals, with rules where they appear')
debuglog.info('')
terms = list(grammar.Terminals)
terms.sort()
for term in terms:
debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))
debuglog.info('')
debuglog.info('Nonterminals, with rules where they appear')
debuglog.info('')
nonterms = list(grammar.Nonterminals)
nonterms.sort()
for nonterm in nonterms:
debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
debuglog.info('')
if check_recursion:
unreachable = grammar.find_unreachable()
for u in unreachable:
errorlog.warning('Symbol %r is unreachable', u)
infinite = grammar.infinite_cycles()
for inf in infinite:
errorlog.error('Infinite recursion detected for symbol %r', inf)
errors = True
unused_prec = grammar.unused_precedence()
for term, assoc in unused_prec:
errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
errors = True
if errors:
raise YaccError('Unable to build parser')
# Run the LRGeneratedTable on the grammar
if debug:
errorlog.debug('Generating %s tables', method)
lr = LRGeneratedTable(grammar, method, debuglog)
if debug:
num_sr = len(lr.sr_conflicts)
# Report shift/reduce and reduce/reduce conflicts
if num_sr == 1:
errorlog.warning('1 shift/reduce conflict')
elif num_sr > 1:
errorlog.warning('%d shift/reduce conflicts', num_sr)
num_rr = len(lr.rr_conflicts)
if num_rr == 1:
errorlog.warning('1 reduce/reduce conflict')
elif num_rr > 1:
errorlog.warning('%d reduce/reduce conflicts', num_rr)
# Write out conflicts to the output file
if debug and (lr.sr_conflicts or lr.rr_conflicts):
debuglog.warning('')
debuglog.warning('Conflicts:')
debuglog.warning('')
for state, tok, resolution in lr.sr_conflicts:
debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)
already_reported = set()
for state, rule, rejected in lr.rr_conflicts:
if (state, id(rule), id(rejected)) in already_reported:
continue
debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
debuglog.warning('rejected rule (%s) in state %d', rejected, state)
errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
errorlog.warning('rejected rule (%s) in state %d', rejected, state)
already_reported.add((state, id(rule), id(rejected)))
warned_never = []
for state, rule, rejected in lr.rr_conflicts:
if not rejected.reduced and (rejected not in warned_never):
debuglog.warning('Rule (%s) is never reduced', rejected)
errorlog.warning('Rule (%s) is never reduced', rejected)
warned_never.append(rejected)
# Write the table file if requested
if write_tables:
try:
lr.write_table(tabmodule, outputdir, signature)
if tabmodule in sys.modules:
del sys.modules[tabmodule]
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))
# Write a pickled version of the tables
if picklefile:
try:
lr.pickle_table(picklefile, signature)
except IOError as e:
errorlog.warning("Couldn't create %r. %s" % (picklefile, e))
# Build the parser
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr, pinfo.error_func)
parse = parser.parse
return parser
| isc |
rdegges/django-twilio | django_twilio/models.py | 1 | 1827 | # -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from phonenumber_field.modelfields import PhoneNumberField
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
class Caller(models.Model):
"""
A caller is defined uniquely by their phone number.
    :param bool blacklisted: Designates whether the caller is blocked from
        using our services.
:param char phone_number: Unique phone number in `E.164
<http://en.wikipedia.org/wiki/E.164>`_ format.
"""
blacklisted = models.BooleanField(default=False)
phone_number = PhoneNumberField(unique=True)
def __str__(self):
return '{phone_number}{blacklist_status}'.format(
phone_number=str(self.phone_number),
blacklist_status=' (blacklisted)' if self.blacklisted else '',
)
class Meta:
app_label = 'django_twilio'
class Credential(models.Model):
"""
A Credential model is a set of SID / AUTH tokens for the Twilio.com API
The Credential model can be used if a project uses more than one
Twilio account, or provides Users with access to Twilio powered
web apps that need their own custom credentials.
:param char name: The name used to distinguish this credential
:param char account_sid: The Twilio account_sid
:param char auth_token: The Twilio auth_token
:param key user: The user linked to this Credential
"""
def __str__(self):
return '{name} - {sid}'.format(name=self.name, sid=self.account_sid)
name = models.CharField(max_length=30)
user = models.OneToOneField(AUTH_USER_MODEL, on_delete=models.CASCADE)
account_sid = models.CharField(max_length=34)
auth_token = models.CharField(max_length=32)
class Meta:
app_label = 'django_twilio'
| unlicense |
rdegges/django-twilio | test_project/test_app/models.py | 1 | 1869 | # -*- coding: utf-8 -*-
from types import MethodType
from django.test import TestCase
from django.contrib.auth.models import User
from django_dynamic_fixture import G
from django_twilio.models import Caller, Credential
class CallerTestCase(TestCase):
"""
Run tests against the :class:`django_twilio.models.Caller` model.
"""
def setUp(self):
self.caller = G(
Caller,
phone_number='+15005550000',
blacklisted=False,
)
def test_has_str_method(self):
self.assertIsInstance(self.caller.__str__, MethodType)
def test_str_returns_a_string(self):
self.assertIsInstance(self.caller.__str__(), str)
def test_str_doesnt_contain_blacklisted(self):
self.assertNotIn('blacklisted', self.caller.__str__())
def test_unicode_contains_blacklisted(self):
self.caller.blacklisted = True
self.caller.save()
self.assertIn('blacklisted', self.caller.__str__())
class CredentialTests(TestCase):
def setUp(self):
self.user = G(User, username='test', password='pass')
self.credentials = G(
Credential,
name='Test Credentials',
account_sid='XXX',
auth_token='YYY',
user=self.user,
)
def test_str(self):
"""
Assert that str renders how we'd like it too
"""
self.assertEqual(
self.credentials.__str__(),
'Test Credentials - XXX',
)
def test_credentials_fields(self):
"""
Assert the fields are working correctly
"""
self.assertEqual(self.credentials.name, 'Test Credentials')
self.assertEqual(self.credentials.account_sid, 'XXX')
self.assertEqual(self.credentials.auth_token, 'YYY')
self.assertEqual(self.credentials.user, self.user)
| unlicense |
mozilla-services/buildhub | jobs/buildhub/s3_inventory_to_kinto.py | 1 | 12262 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import re
import asyncio
import datetime
import glob
import json
import logging
import os
import pkgutil
import tempfile
import time
import zlib
from concurrent.futures import ThreadPoolExecutor
import aiofiles
import aiobotocore
import botocore
from decouple import config, Csv
from aiohttp.client_exceptions import ClientPayloadError
import kinto_http
import raven
from raven.handlers.logging import SentryHandler
from ruamel import yaml
from kinto_wizard.async_kinto import AsyncKintoClient
from kinto_wizard.yaml2kinto import initialize_server
from buildhub.inventory_to_records import (
__version__,
NB_RETRY_REQUEST,
csv_to_records,
)
from buildhub.to_kinto import fetch_existing, main as to_kinto_main
from buildhub.configure_markus import get_metrics
REGION_NAME = 'us-east-1'
BUCKET = 'net-mozaws-prod-delivery-inventory-us-east-1'
FOLDER = (
'public/inventories/net-mozaws-prod-delivery-{inventory}/'
'delivery-{inventory}/'
)
CHUNK_SIZE = 1024 * 256 # 256 KB
MAX_CSV_DOWNLOAD_AGE = 60 * 60 * 24 * 2 # two days
INITIALIZE_SERVER = config('INITIALIZE_SERVER', default=True, cast=bool)
# Minimum number of hours old an entry in the CSV files need to be
# to NOT be skipped.
MIN_AGE_LAST_MODIFIED_HOURS = config(
'MIN_AGE_LAST_MODIFIED_HOURS', default=0, cast=int
)
CSV_DOWNLOAD_DIRECTORY = config(
'CSV_DOWNLOAD_DIRECTORY',
default=tempfile.gettempdir()
)
INVENTORIES = tuple(config(
'INVENTORIES',
default='firefox, archive',
cast=Csv()
))
LOG_LEVEL = config('LOG_LEVEL', default='INFO')
STORE_DAILY_MANIFEST = config('STORE_DAILY_MANIFEST', default=False, cast=bool)
# Optional Sentry with synchronous client.
SENTRY_DSN = config('SENTRY_DSN', default=None)
sentry = raven.Client(
SENTRY_DSN,
transport=raven.transport.http.HTTPTransport,
release=__version__,
)
logger = logging.getLogger() # root logger.
metrics = get_metrics('buildhub')
async def initialize_kinto(loop, kinto_client, bucket, collection):
"""
Initialize the remote server with the initialization.yml file.
"""
# Leverage kinto-wizard async client.
thread_pool = ThreadPoolExecutor()
async_client = AsyncKintoClient(kinto_client, loop, thread_pool)
initialization_manifest = pkgutil.get_data(
'buildhub',
'initialization.yml'
)
config = yaml.safe_load(initialization_manifest)
# Check that we push the records at the right place.
if bucket not in config:
raise ValueError(
f"Bucket '{bucket}' not specified in `initialization.yml`."
)
if collection not in config[bucket]['collections']:
raise ValueError(
f"Collection '{collection}' not specified in `initialization.yml`."
)
await initialize_server(async_client,
config,
bucket=bucket,
collection=collection,
force=False)
# A regular expression corresponding to the date format in use in
# delivery-firefox paths.
DATE_RE = re.compile(r'\d{4}-\d{2}-\d{2}T\d{2}-\d{2}Z')
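# For example, DATE_RE matches '2017-07-01T03-09Z' but not 'hive'.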
def ends_with_date(prefix):
"""Predicate to let us inspect prefixes such as:
public/inventories/net-mozaws-prod-delivery-firefox/delivery-firefox/2017-07-01T03-09Z/
while excluding those such as:
public/inventories/net-mozaws-prod-delivery-firefox/delivery-firefox/hive/
"""
parts = prefix.strip('/').split('/')
return DATE_RE.match(parts[-1])
async def list_manifest_entries(loop, s3_client, inventory):
"""Fetch the latest S3 inventory manifest, and the keys of every
*.csv.gz file it contains.
:param loop: asyncio event loop.
:param s3_client: Initialized S3 client.
:param inventory str: Either "archive" or "firefox".
"""
if STORE_DAILY_MANIFEST:
today_utc = datetime.datetime.utcnow().strftime('%Y%m%d')
manifest_content_file_path = f'.manifest-{today_utc}.json'
if STORE_DAILY_MANIFEST and os.path.isfile(manifest_content_file_path):
logger.info(f"Using stored manifest file {manifest_content_file_path}")
with open(manifest_content_file_path) as f:
manifest_content = json.load(f)
else:
prefix = FOLDER.format(inventory=inventory)
paginator = s3_client.get_paginator('list_objects')
manifest_folders = []
async for result in paginator.paginate(
Bucket=BUCKET,
Prefix=prefix,
Delimiter='/'
):
# Take latest inventory.
files = list(result.get('CommonPrefixes', []))
prefixes = [f['Prefix'] for f in files]
manifest_folders += [
prefix for prefix in prefixes if ends_with_date(prefix)
]
# Download latest manifest.json
last_inventory = sorted(manifest_folders)[-1]
logger.info('Latest inventory is {}'.format(last_inventory))
key = last_inventory + 'manifest.json'
manifest = await s3_client.get_object(Bucket=BUCKET, Key=key)
async with manifest['Body'] as stream:
body = await stream.read()
manifest_content = json.loads(body.decode('utf-8'))
if STORE_DAILY_MANIFEST:
logger.info(
f"Writing stored manifest file {manifest_content_file_path}"
)
with open(manifest_content_file_path, 'w') as f:
json.dump(manifest_content, f, indent=3)
for f in manifest_content['files']:
# Here, each 'f' is a dictionary that looks something like this:
#
# {
# "key" : "inventories/net-mozaw...f-b1a0-5fb25bb83752.csv.gz",
# "size" : 7945521,
# "MD5checksum" : "7454b0d773000f790f15b867ee152049"
# }
#
# We yield the whole thing. The key is used to download from S3.
# The MD5checksum is used to know how to store the file on
# disk for caching.
yield f
async def download_csv(
loop,
s3_client,
files_stream,
chunk_size=CHUNK_SIZE,
download_directory=CSV_DOWNLOAD_DIRECTORY,
):
"""
Download the S3 object of each key and return deflated data chunks (CSV).
:param loop: asyncio event loop.
:param s3_client: Initialized S3 client.
    :param files_stream async generator: manifest file entries (with key,
        size and MD5checksum) for the csv.gz inventory pieces.
"""
# Make sure the directory exists if it wasn't already created.
if not os.path.isdir(download_directory):
os.makedirs(download_directory, exist_ok=True)
# Look for old download junk in the download directory.
too_old = MAX_CSV_DOWNLOAD_AGE
for file_path in glob.glob(os.path.join(download_directory, '*.csv.gz')):
age = time.time() - os.stat(file_path).st_mtime
if age > too_old:
logger.info(
f'Delete old download file {file_path} '
f'({age} seconds old)'
)
os.remove(file_path)
async for files in files_stream:
# If it doesn't exist on disk, download to disk.
file_path = os.path.join(
download_directory,
files['MD5checksum'] + '.csv.gz'
)
        # Skip the download if the file already exists locally and has data.
if os.path.isfile(file_path) and os.stat(file_path).st_size:
logger.debug(f'{file_path} was already downloaded locally')
else:
key = 'public/' + files['key']
logger.info('Fetching inventory piece {}'.format(key))
file_csv_gz = await s3_client.get_object(Bucket=BUCKET, Key=key)
try:
async with aiofiles.open(file_path, 'wb') as destination:
async with file_csv_gz['Body'] as source:
while 'there are chunks to read':
gzip_chunk = await source.read(chunk_size)
if not gzip_chunk:
break # End of response.
await destination.write(gzip_chunk)
size = os.stat(file_path).st_size
logger.info(f'Downloaded {key} to {file_path} ({size} bytes)')
except ClientPayloadError:
if os.path.exists(file_path):
os.remove(file_path)
raise
# Now we expect the file to exist locally. Let's read it.
gzip = zlib.decompressobj(zlib.MAX_WBITS | 16)
async with aiofiles.open(file_path, 'rb') as stream:
while 'there are chunks to read':
gzip_chunk = await stream.read(chunk_size)
if not gzip_chunk:
break # End of response.
csv_chunk = gzip.decompress(gzip_chunk)
if csv_chunk:
                    # If the data received so far isn't enough to complete
                    # at least one block, the decompressor returns an
                    # empty string.
                    # A later chunk fed to the decompressor will then
                    # complete the block, it'll be decompressed and we
                    # get data then.
                    # Thanks Martijn Pieters http://bit.ly/2vbgQ3x
yield csv_chunk
async def main(loop, inventories=INVENTORIES):
"""
Trigger to populate kinto with the last inventories.
"""
server_url = config('SERVER_URL', default='http://localhost:8888/v1')
bucket = config('BUCKET', default='build-hub')
collection = config('COLLECTION', default='releases')
kinto_auth = tuple(config('AUTH', default='user:pass').split(':'))
kinto_client = kinto_http.Client(server_url=server_url, auth=kinto_auth,
bucket=bucket, collection=collection,
retry=NB_RETRY_REQUEST)
# Create bucket/collection and schemas.
if INITIALIZE_SERVER:
await initialize_kinto(loop, kinto_client, bucket, collection)
min_last_modified = None
# Convert the simple env var integer to a datetime.datetime instance.
if MIN_AGE_LAST_MODIFIED_HOURS:
assert MIN_AGE_LAST_MODIFIED_HOURS > 0, MIN_AGE_LAST_MODIFIED_HOURS
min_last_modified = datetime.datetime.utcnow() - datetime.timedelta(
hours=MIN_AGE_LAST_MODIFIED_HOURS
)
# Make it timezone aware (to UTC)
min_last_modified = min_last_modified.replace(
tzinfo=datetime.timezone.utc
)
# Fetch all existing records as a big dict from kinto
existing = fetch_existing(kinto_client)
# Download CSVs, deduce records and push to Kinto.
session = aiobotocore.get_session(loop=loop)
boto_config = botocore.config.Config(signature_version=botocore.UNSIGNED)
async with session.create_client(
's3', region_name=REGION_NAME, config=boto_config
) as client:
for inventory in inventories:
files_stream = list_manifest_entries(loop, client, inventory)
csv_stream = download_csv(loop, client, files_stream)
records_stream = csv_to_records(
loop,
csv_stream,
skip_incomplete=True,
min_last_modified=min_last_modified,
)
await to_kinto_main(
loop,
records_stream,
kinto_client,
existing=existing,
skip_existing=False
)
@metrics.timer_decorator('s3_inventory_to_kinto_run')
def run():
# Log everything to stderr.
logger.addHandler(logging.StreamHandler())
if LOG_LEVEL.lower() == 'debug':
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
    # Add Sentry (no-op if not configured).
handler = SentryHandler(sentry)
handler.setLevel(logging.ERROR)
logger.addHandler(handler)
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main(loop))
except Exception:
logger.exception('Aborted.')
raise
finally:
loop.close()
| mpl-2.0 |
pikepdf/pikepdf | src/pikepdf/_cpphelpers.py | 1 | 2965 | # SPDX-FileCopyrightText: 2022 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
"""Support functions called by the C++ library binding layer.
Not intended to be called from Python, and subject to change at any time.
"""
from __future__ import annotations
from typing import Callable
from warnings import warn
from pikepdf import Dictionary, Name, Pdf
def update_xmp_pdfversion(pdf: Pdf, version: str) -> None:
"""Update XMP metadata to specified PDF version."""
if Name.Metadata not in pdf.Root:
return # Don't create an empty XMP object just to store the version
with pdf.open_metadata(set_pikepdf_as_editor=False, update_docinfo=False) as meta:
if 'pdf:PDFVersion' in meta:
meta['pdf:PDFVersion'] = version
def _alpha(n: int) -> str:
"""Excel-style column numbering A..Z, AA..AZ..BA..ZZ.., AAA."""
if n < 1:
raise ValueError(f"Can't represent {n} in alphabetic numbering")
p = []
while n > 0:
n, r = divmod(n - 1, 26)
p.append(r)
base = ord('A')
ords = [(base + v) for v in reversed(p)]
return ''.join(chr(o) for o in ords)
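# For example: _alpha(1) == 'A', _alpha(26) == 'Z', _alpha(27) == 'AA' and
# _alpha(28) == 'AB'.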
def _roman(n: int) -> str:
"""Convert integer n to Roman numeral representation as a string."""
if not (1 <= n <= 5000):
raise ValueError(f"Can't represent {n} in Roman numerals")
roman_numerals = (
(1000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
)
roman = ""
for value, numeral in roman_numerals:
while n >= value:
roman += numeral
n -= value
return roman
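# For example: _roman(4) == 'IV', _roman(14) == 'XIV' and
# _roman(1999) == 'MCMXCIX'.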
LABEL_STYLE_MAP: dict[Name, Callable[[int], str]] = {
Name.D: str,
Name.A: _alpha,
Name.a: lambda x: _alpha(x).lower(),
Name.R: _roman,
Name.r: lambda x: _roman(x).lower(),
}
def label_from_label_dict(label_dict: int | Dictionary) -> str:
"""Convert a label dictionary returned by QPDF into a text string."""
if isinstance(label_dict, int):
return str(label_dict)
label = ''
if Name.P in label_dict:
prefix = label_dict[Name.P]
label += str(prefix)
# If there is no S, return only the P portion
if Name.S in label_dict:
# St defaults to 1
numeric_value = label_dict[Name.St] if Name.St in label_dict else 1
if not isinstance(numeric_value, int):
warn(
"Page label dictionary has invalid non-integer start value", UserWarning
)
numeric_value = 1
style = label_dict[Name.S]
if isinstance(style, Name):
style_fn = LABEL_STYLE_MAP[style]
value = style_fn(numeric_value)
label += value
else:
warn("Page label dictionary has invalid page label style", UserWarning)
return label
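# For example (illustrative only): label_from_label_dict(7) returns '7', and a
# dictionary such as Dictionary(S=Name.r, St=4, P='A-') yields 'A-iv'.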
| mpl-2.0 |
pikepdf/pikepdf | tests/test_image_access.py | 1 | 35498 | # SPDX-FileCopyrightText: 2022 James R. Barlow
# SPDX-License-Identifier: CC0-1.0
from __future__ import annotations
import subprocess
import zlib
from contextlib import contextmanager
from io import BytesIO
from math import ceil
from os import fspath
from pathlib import Path
from subprocess import run
from typing import NamedTuple, Sequence
import PIL
import pytest
from conftest import needs_python_v
from hypothesis import assume, given, note, settings
from hypothesis import strategies as st
from packaging.version import Version
from PIL import Image, ImageChops, ImageCms
from PIL import features as PIL_features
import pikepdf
from pikepdf import (
Array,
Dictionary,
Name,
Object,
Operator,
Pdf,
PdfError,
PdfImage,
PdfInlineImage,
Stream,
StreamDecodeLevel,
parse_content_stream,
)
from pikepdf.models._transcoding import _next_multiple, unpack_subbyte_pixels
from pikepdf.models.image import (
DependencyError,
NotExtractableError,
PdfJpxImage,
UnsupportedImageTypeError,
)
# pylint: disable=redefined-outer-name
def has_pdfimages():
try:
run(['pdfimages', '-v'], check=True, capture_output=True)
except FileNotFoundError:
return False
else:
return True
requires_pdfimages = pytest.mark.skipif(
not has_pdfimages(), reason="pdfimages not installed"
)
@pytest.fixture
def first_image_in(resources, request):
pdf = None
def opener(filename):
nonlocal pdf
pdf = Pdf.open(resources / filename)
pdfimagexobj = next(iter(pdf.pages[0].images.values()))
return pdfimagexobj, pdf
def closer():
if pdf:
pdf.close()
request.addfinalizer(closer)
return opener
@pytest.fixture
def congress(first_image_in):
return first_image_in('congress.pdf')
@pytest.fixture
def sandwich(first_image_in):
return first_image_in('sandwich.pdf')
@pytest.fixture
def jbig2(first_image_in):
return first_image_in('jbig2.pdf')
@pytest.fixture
def trivial(first_image_in):
return first_image_in('pal-1bit-trivial.pdf')
@pytest.fixture
def inline(resources):
with Pdf.open(resources / 'image-mono-inline.pdf') as pdf:
for operands, _command in parse_content_stream(pdf.pages[0]):
if operands and isinstance(operands[0], PdfInlineImage):
yield operands[0], pdf
break
def test_image_from_nonimage(resources):
with Pdf.open(resources / 'congress.pdf') as pdf:
contents = pdf.pages[0].Contents
with pytest.raises(TypeError):
PdfImage(contents)
def test_image(congress):
xobj, _ = congress
pdfimage = PdfImage(xobj)
pillowimage = pdfimage.as_pil_image()
assert pillowimage.mode == pdfimage.mode
assert pillowimage.size == pdfimage.size
def test_imagemask(congress):
xobj, _ = congress
assert not PdfImage(xobj).image_mask
def test_imagemask_colorspace(trivial):
xobj, _ = trivial
rawimage = xobj
rawimage.ImageMask = True
pdfimage = PdfImage(rawimage)
assert pdfimage.image_mask
assert pdfimage.colorspace is None
def test_malformed_palette(trivial):
xobj, _ = trivial
rawimage = xobj
rawimage.ColorSpace = [Name.Indexed, 'foo', 'bar']
pdfimage = PdfImage(rawimage)
with pytest.raises(ValueError, match="interpret this palette"):
pdfimage.palette # pylint: disable=pointless-statement
def test_image_eq(trivial, congress, inline):
xobj_trivial, _ = trivial
xobj_congress, _ = congress
inline_image, _ = inline
# Note: JPX equality is tested in test_jp2 (if we have a jpeg2000 codec)
assert PdfImage(xobj_trivial) == PdfImage(xobj_trivial)
assert PdfImage(xobj_trivial).__eq__(42) is NotImplemented
assert PdfImage(xobj_trivial) != PdfImage(xobj_congress)
assert inline_image != PdfImage(xobj_congress)
assert inline_image.__eq__(42) is NotImplemented
def test_image_replace(congress, outdir):
xobj, pdf = congress
pdfimage = PdfImage(xobj)
pillowimage = pdfimage.as_pil_image()
grayscale = pillowimage.convert('L')
grayscale = grayscale.resize((4, 4)) # So it is not obnoxious on error
xobj.write(zlib.compress(grayscale.tobytes()), filter=Name("/FlateDecode"))
xobj.ColorSpace = Name("/DeviceGray")
pdf.save(outdir / 'congress_gray.pdf')
def test_lowlevel_jpeg(congress):
xobj, _pdf = congress
raw_bytes = xobj.read_raw_bytes()
with pytest.raises(PdfError):
xobj.read_bytes()
im = Image.open(BytesIO(raw_bytes))
assert im.format == 'JPEG'
pim = PdfImage(xobj)
b = BytesIO()
pim.extract_to(stream=b)
b.seek(0)
im = Image.open(b)
assert im.size == (xobj.Width, xobj.Height)
assert im.mode == 'RGB'
def test_lowlevel_replace_jpeg(congress, outdir):
xobj, pdf = congress
# This test will modify the PDF so needs its own image
raw_bytes = xobj.read_raw_bytes()
im = Image.open(BytesIO(raw_bytes))
grayscale = im.convert('L')
grayscale = grayscale.resize((4, 4)) # So it is not obnoxious on error
xobj.write(zlib.compress(grayscale.tobytes()[:10]), filter=Name("/FlateDecode"))
xobj.ColorSpace = Name('/DeviceGray')
pdf.save(outdir / 'congress_gray.pdf')
def test_inline(inline):
iimage, pdf = inline
assert iimage.width == 8
assert not iimage.image_mask
assert iimage.mode == 'RGB'
assert iimage.colorspace == '/DeviceRGB'
assert 'PdfInlineImage' in repr(iimage)
unparsed = iimage.unparse()
assert b'/W 8' in unparsed, "inline images should have abbreviated metadata"
assert b'/Width 8' not in unparsed, "abbreviations expanded in inline image"
cs = pdf.make_stream(unparsed)
for operands, command in parse_content_stream(cs):
if operands and isinstance(operands[0], PdfInlineImage):
assert command == Operator('INLINE IMAGE')
reparsed_iim = operands[0]
assert reparsed_iim == iimage
def test_inline_extract(inline):
iimage, _pdf = inline
bio = BytesIO()
iimage.extract_to(stream=bio)
bio.seek(0)
im = Image.open(bio)
assert im.size == (8, 8) and im.mode == iimage.mode
def test_inline_read(inline):
iimage, _pdf = inline
assert iimage.read_bytes()[0:6] == b'\xff\xff\xff\x00\x00\x00'
def test_inline_to_pil(inline):
iimage, _pdf = inline
im = iimage.as_pil_image()
assert im.size == (8, 8) and im.mode == iimage.mode
def test_bits_per_component_missing(congress):
cong_im, _ = congress
del cong_im.stream_dict['/BitsPerComponent']
assert PdfImage(cong_im).bits_per_component == 8
class ImageSpec(NamedTuple):
bpc: int
width: int
height: int
colorspace: pikepdf.Name
imbytes: bytes
def pdf_from_image_spec(spec: ImageSpec):
pdf = pikepdf.new()
pdfw, pdfh = 36 * spec.width, 36 * spec.height
pdf.add_blank_page(page_size=(pdfw, pdfh))
imobj = Stream(
pdf,
spec.imbytes,
BitsPerComponent=spec.bpc,
ColorSpace=spec.colorspace,
Width=spec.width,
Height=spec.height,
Type=Name.XObject,
Subtype=Name.Image,
)
pdf.pages[0].Contents = Stream(pdf, b'%f 0 0 %f 0 0 cm /Im0 Do' % (pdfw, pdfh))
pdf.pages[0].Resources = Dictionary(XObject=Dictionary(Im0=imobj))
pdf.pages[0].MediaBox = Array([0, 0, pdfw, pdfh])
return pdf
@st.composite
def valid_random_image_spec(
draw,
bpcs=st.sampled_from([1, 2, 4, 8, 16]),
widths=st.integers(min_value=1, max_value=16),
heights=st.integers(min_value=1, max_value=16),
colorspaces=st.sampled_from([Name.DeviceGray, Name.DeviceRGB, Name.DeviceCMYK]),
):
bpc = draw(bpcs)
width = draw(widths)
height = draw(heights)
colorspace = draw(colorspaces)
min_imbytes = width * height * (2 if bpc == 16 else 1)
if colorspace == Name.DeviceRGB:
min_imbytes *= 3
elif colorspace == Name.DeviceCMYK:
min_imbytes *= 4
imbytes = draw(st.binary(min_size=min_imbytes, max_size=2 * min_imbytes))
return ImageSpec(bpc, width, height, colorspace, imbytes)
@given(spec=valid_random_image_spec(bpcs=st.sampled_from([1, 2, 4, 8])))
@settings(deadline=None) # For PyPy
def test_image_save_compare(tmp_path_factory, spec):
pdf = pdf_from_image_spec(spec)
image = pdf.pages[0].Resources.XObject['/Im0']
w = image.Width
h = image.Height
cs = str(image.ColorSpace)
bpc = image.BitsPerComponent
pixeldata = image.read_bytes()
assume((bpc < 8 and cs == '/DeviceGray') or (bpc == 8))
outdir = tmp_path_factory.mktemp('image_roundtrip')
outfile = outdir / f'test{w}{h}{cs[1:]}{bpc}.pdf'
pdf.save(
outfile, compress_streams=False, stream_decode_level=StreamDecodeLevel.none
)
with Pdf.open(outfile) as p2:
pim = PdfImage(p2.pages[0].Resources.XObject['/Im0'])
assert pim.bits_per_component == bpc
assert pim.colorspace == cs
assert pim.width == w
assert pim.height == h
if cs == '/DeviceRGB':
assert pim.mode == 'RGB'
elif cs == '/DeviceGray' and bpc == 8:
assert pim.mode == 'L'
elif cs == '/DeviceCMYK':
assert pim.mode == 'CMYK'
elif bpc == 1:
assert pim.mode == '1'
assert not pim.palette
assert pim.filters == []
assert pim.read_bytes() == pixeldata
outstream = BytesIO()
pim.extract_to(stream=outstream)
outstream.seek(0)
im = Image.open(outstream)
assert pim.mode == im.mode
@pytest.mark.parametrize(
'filename,bpc,filters,ext,mode,format_',
[
('sandwich.pdf', 1, ['/CCITTFaxDecode'], '.tif', '1', 'TIFF'),
('congress-gray.pdf', 8, ['/DCTDecode'], '.jpg', 'L', 'JPEG'),
('congress.pdf', 8, ['/DCTDecode'], '.jpg', 'RGB', 'JPEG'),
('cmyk-jpeg.pdf', 8, ['/DCTDecode'], '.jpg', 'CMYK', 'JPEG'),
],
)
def test_direct_extract(first_image_in, filename, bpc, filters, ext, mode, format_):
xobj, _pdf = first_image_in(filename)
pim = PdfImage(xobj)
assert pim.bits_per_component == bpc
assert pim.filters == filters
outstream = BytesIO()
outext = pim.extract_to(stream=outstream)
assert outext == ext, 'unexpected output file'
outstream.seek(0)
im = Image.open(outstream)
assert im.mode == mode
assert im.format == format_
def pack_2bit_row(row: Sequence[int]) -> bytes:
assert len(row) % 4 == 0
im76 = [s << 6 for s in row[0::4]]
im54 = [s << 4 for s in row[1::4]]
im32 = [s << 2 for s in row[2::4]]
im10 = [s << 0 for s in row[3::4]]
return bytes(sum(s) for s in zip(im76, im54, im32, im10))
def pack_4bit_row(row: Sequence[int]) -> bytes:
assert len(row) % 2 == 0
upper = [s << 4 for s in row[0::2]]
lower = row[1::2]
return bytes(sum(s) for s in zip(upper, lower))
@st.composite
def imagelike_data(draw, width, height, bpc, sample_range=None):
bits_per_byte = 8 // bpc
stride = _next_multiple(width, bits_per_byte)
if not sample_range:
sample_range = (0, 2**bpc - 1)
if bpc in (2, 4, 8):
intdata = draw(
st.lists(
st.lists(
st.integers(*sample_range),
min_size=stride,
max_size=stride,
),
min_size=height,
max_size=height,
)
)
if bpc == 8:
imbytes = b''.join(bytes(row) for row in intdata)
elif bpc == 4:
imbytes = b''.join(pack_4bit_row(row) for row in intdata)
elif bpc == 2:
imbytes = b''.join(pack_2bit_row(row) for row in intdata)
assert len(imbytes) > 0
elif bpc == 1:
imdata = draw(
st.lists(
st.integers(0, 255 if sample_range[1] > 0 else 0),
min_size=height * _next_multiple(width, 8),
max_size=height * _next_multiple(width, 8),
)
)
imbytes = bytes(imdata)
return imbytes
class PaletteImageSpec(NamedTuple):
bpc: int
width: int
height: int
hival: int
colorspace: pikepdf.Name
palette: bytes
imbytes: bytes
def pdf_from_palette_image_spec(spec: PaletteImageSpec):
pdf = pikepdf.new()
pdfw, pdfh = 36 * spec.width, 36 * spec.height
pdf.add_blank_page(page_size=(pdfw, pdfh))
imobj = Stream(
pdf,
spec.imbytes,
BitsPerComponent=spec.bpc,
ColorSpace=Array([Name.Indexed, spec.colorspace, spec.hival, spec.palette]),
Width=spec.width,
Height=spec.height,
Type=Name.XObject,
Subtype=Name.Image,
)
pdf.pages[0].Contents = Stream(pdf, b'%f 0 0 %f 0 0 cm /Im0 Do' % (pdfw, pdfh))
pdf.pages[0].Resources = Dictionary(XObject=Dictionary(Im0=imobj))
pdf.pages[0].MediaBox = Array([0, 0, pdfw, pdfh])
return pdf
@st.composite
def valid_random_palette_image_spec(
draw,
bpcs=st.sampled_from([1, 2, 4, 8]),
widths=st.integers(min_value=1, max_value=16),
heights=st.integers(min_value=1, max_value=16),
colorspaces=st.sampled_from([Name.DeviceGray, Name.DeviceRGB, Name.DeviceCMYK]),
palette=None,
):
bpc = draw(bpcs)
width = draw(widths)
height = draw(heights)
colorspace = draw(colorspaces)
hival = draw(st.integers(min_value=0, max_value=(2**bpc) - 1))
imbytes = draw(imagelike_data(width, height, bpc, (0, hival)))
channels = (
1
if colorspace == Name.DeviceGray
else 3
if colorspace == Name.DeviceRGB
else 4
if colorspace == Name.DeviceCMYK
else 0
)
if not palette:
palette = draw(
st.binary(min_size=channels * (hival + 1), max_size=channels * (hival + 1))
)
return PaletteImageSpec(bpc, width, height, hival, colorspace, palette, imbytes)
@pytest.mark.parametrize(
'filename,bpc,rgb',
[
('pal.pdf', 8, (0, 0, 255)),
('pal-1bit-trivial.pdf', 1, (255, 255, 255)),
('pal-1bit-rgb.pdf', 1, (255, 128, 0)),
],
)
def test_image_palette(resources, filename, bpc, rgb):
pdf = Pdf.open(resources / filename)
pim = PdfImage(next(iter(pdf.pages[0].images.values())))
assert pim.palette[0] == 'RGB'
assert pim.colorspace == '/DeviceRGB'
assert pim.mode == 'P'
assert pim.bits_per_component == bpc
outstream = BytesIO()
pim.extract_to(stream=outstream)
im_pal = pim.as_pil_image()
im = im_pal.convert('RGB')
assert im.getpixel((1, 1)) == rgb
@contextmanager
def first_image_from_pdfimages(pdf, tmpdir):
if not has_pdfimages():
pytest.skip("Need pdfimages for this test")
pdf.save(tmpdir / 'in.pdf')
run(
['pdfimages', '-q', '-png', fspath(tmpdir / 'in.pdf'), fspath('pdfimage')],
cwd=fspath(tmpdir),
check=True,
)
outpng = tmpdir / 'pdfimage-000.png'
assert outpng.exists()
with Image.open(outpng) as im:
yield im
@given(spec=valid_random_palette_image_spec())
def test_image_palette2(spec, tmp_path_factory):
pdf = pdf_from_palette_image_spec(spec)
pim = PdfImage(pdf.pages[0].Resources.XObject['/Im0'])
im1 = pim.as_pil_image()
with first_image_from_pdfimages(
pdf, tmp_path_factory.mktemp('test_image_palette2')
) as im2:
if pim.palette.base_colorspace == 'CMYK' and im1.size == im2.size:
return # Good enough - CMYK is hard...
if im1.mode == im2.mode:
diff = ImageChops.difference(im1, im2)
else:
diff = ImageChops.difference(im1.convert('RGB'), im2.convert('RGB'))
if diff.getbbox():
if pim.palette.base_colorspace in ('L', 'RGB', 'CMYK') and im2.mode == '1':
note("pdfimages bug - 1bit image stripped of palette")
return
assert (
not diff.getbbox()
), f"{diff.getpixel((0, 0))}, {im1.getpixel((0,0))}, {im2.getpixel((0,0))}"
def test_bool_in_inline_image():
piim = PdfInlineImage(image_data=b'', image_object=(Name.IM, True))
assert piim.image_mask
@pytest.mark.skipif(
not PIL_features.check_codec('jpg_2000'), reason='no JPEG2000 codec'
)
def test_jp2(first_image_in):
xobj, _pdf = first_image_in('pike-jp2.pdf')
pim = PdfImage(xobj)
assert isinstance(pim, PdfJpxImage)
assert '/JPXDecode' in pim.filters
assert pim.colorspace == '/DeviceRGB'
assert not pim.indexed
assert pim.mode == 'RGB'
assert pim.bits_per_component == 8
assert pim.__eq__(42) is NotImplemented
assert pim == PdfImage(xobj)
outstream = BytesIO()
pim.extract_to(stream=outstream)
del pim
del xobj.ColorSpace
# If there is no explicit ColorSpace metadata we should get it from the
# compressed data stream
pim = PdfImage(xobj)
assert pim.colorspace == '/DeviceRGB'
assert pim.bits_per_component == 8
def test_extract_filepath(congress, outdir):
xobj, _pdf = congress
pim = PdfImage(xobj)
result = pim.extract_to(fileprefix=(outdir / 'image'))
assert Path(result).exists()
assert (outdir / 'image.jpg').exists()
def test_extract_direct_fails_nondefault_colortransform(congress):
xobj, _pdf = congress
xobj.DecodeParms = Dictionary(
        ColorTransform=42  # Non-standard (nor allowed in the spec)
)
pim = PdfImage(xobj)
bio = BytesIO()
with pytest.raises(NotExtractableError):
pim._extract_direct(stream=bio)
with pytest.raises(UnsupportedImageTypeError):
pim.extract_to(stream=bio)
xobj.ColorSpace = Name.DeviceCMYK
pim = PdfImage(xobj)
with pytest.raises(NotExtractableError):
pim._extract_direct(stream=bio)
with pytest.raises(UnsupportedImageTypeError):
pim.extract_to(stream=bio)
def test_icc_use(first_image_in):
xobj, _pdf = first_image_in('1biticc.pdf')
pim = PdfImage(xobj)
assert pim.mode == 'L' # It may be 1 bit per pixel but it's more complex than that
assert pim.colorspace == '/ICCBased'
assert pim.bits_per_component == 1
assert pim.icc.profile.xcolor_space == 'GRAY'
def test_icc_extract(first_image_in):
xobj, _pdf = first_image_in('aquamarine-cie.pdf')
pim = PdfImage(xobj)
assert pim.as_pil_image().info['icc_profile'] == pim.icc.tobytes()
def test_icc_palette(first_image_in):
xobj, _pdf = first_image_in('pink-palette-icc.pdf')
pim = PdfImage(xobj)
assert pim.icc.profile.xcolor_space == 'RGB ' # with trailing space
b = BytesIO()
pim.extract_to(stream=b)
b.seek(0)
im = Image.open(b)
assert im.size == (xobj.Width, xobj.Height)
assert im.mode == 'P'
pil_icc = im.info.get('icc_profile')
pil_icc_stream = BytesIO(pil_icc)
pil_prf = ImageCms.ImageCmsProfile(pil_icc_stream)
assert pil_prf.tobytes() == pim.icc.tobytes()
def test_stacked_compression(first_image_in):
xobj, _pdf = first_image_in('pike-flate-jp2.pdf')
pim = PdfImage(xobj)
assert pim.mode == 'RGB'
assert pim.colorspace == '/DeviceRGB'
assert pim.bits_per_component == 8
assert pim.filters == ['/FlateDecode', '/JPXDecode']
def test_ccitt_photometry(sandwich):
xobj, _pdf = sandwich
pim = PdfImage(xobj)
im = pim.as_pil_image()
im = im.convert('L')
assert im.getpixel((0, 0)) == 255, "Expected white background"
xobj.DecodeParms.BlackIs1 = True
im = pim.as_pil_image()
im = im.convert('L')
assert im.getpixel((0, 0)) == 255, "Expected white background"
xobj.DecodeParms.BlackIs1 = False
im = pim.as_pil_image()
im = im.convert('L')
assert im.getpixel((0, 0)) == 255, "Expected white background"
def test_ccitt_encodedbytealign(sandwich):
xobj, _pdf = sandwich
    # Pretend this image is "EncodedByteAlign". We don't have a FOSS
# example of such an image.
xobj.DecodeParms.EncodedByteAlign = True
pim = PdfImage(xobj)
with pytest.raises(UnsupportedImageTypeError):
pim.as_pil_image()
def test_imagemagick_uses_rle_compression(first_image_in):
xobj, _rle = first_image_in('rle.pdf')
pim = PdfImage(xobj)
im = pim.as_pil_image()
assert im.getpixel((5, 5)) == (255, 128, 0)
# Unfortunately pytest cannot test for this using "with pytest.warns(...)".
# Suppression is the best we can manage
suppress_unraisable_jbigdec_error_warning = pytest.mark.filterwarnings(
"ignore:.*jbig2dec error.*:pytest.PytestUnraisableExceptionWarning"
)
@needs_python_v("3.8", reason="for pytest unraisable exception support")
@suppress_unraisable_jbigdec_error_warning
def test_jbig2_not_available(jbig2, monkeypatch):
xobj, _pdf = jbig2
pim = PdfImage(xobj)
class NotFoundJBIG2Decoder(pikepdf.jbig2.JBIG2DecoderInterface):
def check_available(self):
raise DependencyError('jbig2dec') from FileNotFoundError('jbig2dec')
def decode_jbig2(self, jbig2: bytes, jbig2_globals: bytes) -> bytes:
raise FileNotFoundError('jbig2dec')
monkeypatch.setattr(pikepdf.jbig2, 'get_decoder', NotFoundJBIG2Decoder)
assert not pikepdf.jbig2.get_decoder().available()
with pytest.raises(DependencyError):
pim.as_pil_image()
needs_jbig2dec = pytest.mark.skipif(
not pikepdf.jbig2.get_decoder().available(), reason="jbig2dec not installed"
)
@needs_jbig2dec
def test_jbig2_extractor(jbig2):
xobj, _pdf = jbig2
pikepdf.jbig2.get_decoder().decode_jbig2(xobj.read_raw_bytes(), b'')
@needs_jbig2dec
def test_jbig2(jbig2):
xobj, _pdf = jbig2
pim = PdfImage(xobj)
im = pim.as_pil_image()
assert im.size == (1000, 1520)
assert im.getpixel((0, 0)) == 0 # Ensure loaded
@needs_jbig2dec
def test_jbig2_decodeparms_null_issue317(jbig2):
xobj, _pdf = jbig2
xobj.stream_dict = Object.parse(
b'''<< /BitsPerComponent 1
/ColorSpace /DeviceGray
/Filter [ /JBIG2Decode ]
/DecodeParms null
/Height 1520
/Length 19350
/Subtype /Image
/Type /XObject
/Width 1000
>>'''
)
pim = PdfImage(xobj)
im = pim.as_pil_image()
assert im.size == (1000, 1520)
assert im.getpixel((0, 0)) == 0 # Ensure loaded
@needs_jbig2dec
def test_jbig2_global(first_image_in):
xobj, _pdf = first_image_in('jbig2global.pdf')
pim = PdfImage(xobj)
im = pim.as_pil_image()
assert im.size == (4000, 2864)
assert im.getpixel((0, 0)) == 255 # Ensure loaded
@needs_jbig2dec
def test_jbig2_global_palette(first_image_in):
xobj, _pdf = first_image_in('jbig2global.pdf')
xobj.ColorSpace = pikepdf.Array(
[Name.Indexed, Name.DeviceRGB, 1, b'\x00\x00\x00\xff\xff\xff']
)
pim = PdfImage(xobj)
im = pim.as_pil_image()
assert im.size == (4000, 2864)
assert im.getpixel((0, 0)) == 255 # Ensure loaded
@needs_python_v("3.8", reason="for pytest unraisable exception support")
@suppress_unraisable_jbigdec_error_warning
def test_jbig2_error(first_image_in, monkeypatch):
xobj, _pdf = first_image_in('jbig2global.pdf')
pim = PdfImage(xobj)
class BrokenJBIG2Decoder(pikepdf.jbig2.JBIG2DecoderInterface):
def check_available(self):
return
def decode_jbig2(self, jbig2: bytes, jbig2_globals: bytes) -> bytes:
raise subprocess.CalledProcessError(1, 'jbig2dec')
monkeypatch.setattr(pikepdf.jbig2, 'get_decoder', BrokenJBIG2Decoder)
pim = PdfImage(xobj)
with pytest.raises(PdfError, match="unfilterable stream"):
pim.as_pil_image()
@needs_python_v("3.8", reason="for pytest unraisable exception support")
@suppress_unraisable_jbigdec_error_warning
def test_jbig2_too_old(first_image_in, monkeypatch):
xobj, _pdf = first_image_in('jbig2global.pdf')
pim = PdfImage(xobj)
class OldJBIG2Decoder(pikepdf.jbig2.JBIG2Decoder):
def _version(self):
return Version('0.12')
monkeypatch.setattr(pikepdf.jbig2, 'get_decoder', OldJBIG2Decoder)
pim = PdfImage(xobj)
with pytest.raises(DependencyError, match='too old'):
pim.as_pil_image()
def test_ccitt_icc(first_image_in, resources):
xobj, pdf = first_image_in('sandwich.pdf')
pim = PdfImage(xobj)
assert pim.icc is None
bio = BytesIO()
output_type = pim.extract_to(stream=bio)
assert output_type == '.tif'
bio.seek(0)
assert b'GRAYXYZ' not in bio.read(1000)
bio.seek(0)
assert Image.open(bio)
icc_data = (resources / 'Gray.icc').read_bytes()
icc_stream = pdf.make_stream(icc_data)
icc_stream.N = 1
xobj.ColorSpace = pikepdf.Array([Name.ICCBased, icc_stream])
pim = PdfImage(xobj)
assert pim.icc.profile.xcolor_space == 'GRAY'
bio = BytesIO()
output_type = pim.extract_to(stream=bio)
assert output_type == '.tif'
bio.seek(0)
assert b'GRAYXYZ' in bio.read(1000)
bio.seek(0)
assert Image.open(bio)
def test_invalid_icc(first_image_in):
xobj, _pdf = first_image_in('pink-palette-icc.pdf')
cs = xobj.ColorSpace[1][1] # [/Indexed [/ICCBased <stream>]]
cs.write(b'foobar') # corrupt the ICC profile
with pytest.raises(
UnsupportedImageTypeError, match="ICC profile corrupt or not readable"
):
pim = PdfImage(xobj)
assert pim.icc is not None
def test_decodeparms_filter_alternates():
pdf = pikepdf.new()
imobj = Stream(
pdf,
b'dummy',
BitsPerComponent=1,
ColorSpace=Name.DeviceGray,
DecodeParms=Array(
[
Dictionary(
BlackIs1=False,
Columns=16,
K=-1,
)
]
),
Filter=Array([Name.CCITTFaxDecode]),
Height=16,
Width=16,
Type=Name.XObject,
Subtype=Name.Image,
)
pim = pikepdf.PdfImage(imobj)
assert pim.decode_parms[0].K == -1 # Check that array of dict is unpacked properly
CMYK_RED = b'\x00\xc0\xc0\x15'
CMYK_GREEN = b'\x90\x00\xc0\x15'
CMYK_BLUE = b'\xc0\xa0\x00\x15'
CMYK_PINK = b'\x04\xc0\x00\x15'
CMYK_PALETTE = CMYK_RED + CMYK_GREEN + CMYK_BLUE + CMYK_PINK
@pytest.mark.parametrize(
'base, hival, bits, palette, expect_type, expect_mode',
[
(Name.DeviceGray, 4, 8, b'\x00\x40\x80\xff', 'L', 'P'),
(Name.DeviceCMYK, 4, 8, CMYK_PALETTE, 'CMYK', 'P'),
(Name.DeviceGray, 4, 4, b'\x04\x08\x02\x0f', 'L', 'P'),
],
)
def test_palette_nonrgb(base, hival, bits, palette, expect_type, expect_mode):
pdf = pikepdf.new()
imobj = Stream(
pdf,
b'\x00\x01\x02\x03' * 16,
BitsPerComponent=bits,
ColorSpace=Array([Name.Indexed, base, hival, palette]),
Width=16,
Height=4,
Type=Name.XObject,
Subtype=Name.Image,
)
pim = pikepdf.PdfImage(imobj)
assert pim.palette == (expect_type, palette)
pim.extract_to(stream=BytesIO())
# To view images:
# pim.extract_to(fileprefix=f'palette_nonrgb_{expect_type}_{bits}')
assert pim.mode == expect_mode
def test_extract_to_mutex_params(sandwich):
pdfimage = PdfImage(sandwich[0])
with pytest.raises(ValueError, match="Cannot set both"):
pdfimage.extract_to(stream=BytesIO(), fileprefix='anything')
def test_separation():
# Manually construct a 2"x1" document with a Separation
    # colorspace that defines a single "spot" color channel named
# "LogoGreen". Define a conversion to standard CMYK that assigns
# CMYK equivalents. Copied example from PDF RM.
# LogoGreen is a teal-ish green. First panel is white to full green,
# second is green to full white. RGB ~= (31, 202, 113)
pdf = pikepdf.new()
pdf.add_blank_page(page_size=(144, 72))
# pikepdf does not interpret this - it is for the PDF viewer
# Explanation:
# X is implicitly loaded to stack
# dup: X X
# 0.84 mul: X 0.84X
# exch: 0.84X X
# 0.00: 0.84X X 0.00
# exch: 0.84X 0.00 X
# dup: 0.84X 0.00 X X
# 0.44 mul: 0.84X 0.00 X 0.44X
# exch: 0.84X 0.00 0.44X X
# 0.21mul: 0.84X 0.00 0.44X 0.21X
# X -> {0.84X, 0, 0.44X, 0.21X}
tint_transform_logogreen_to_cmyk = b'''
{
dup 0.84 mul
exch 0.00 exch dup 0.44 mul
exch 0.21 mul
}
'''
cs = Array(
[
Name.Separation,
Name.LogoGreen,
Name.DeviceCMYK,
Stream(
pdf,
tint_transform_logogreen_to_cmyk,
FunctionType=4,
Domain=[0.0, 1.0],
Range=[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0],
),
]
)
def check_pim(imobj, idx):
pim = pikepdf.PdfImage(imobj)
assert pim.mode == 'Separation'
assert pim.is_separation
assert not pim.is_device_n
assert pim.indexed == idx
assert repr(pim)
with pytest.raises(pikepdf.models.image.HifiPrintImageNotTranscodableError):
pim.extract_to(stream=BytesIO())
imobj0 = Stream(
pdf,
bytes(range(0, 256)),
BitsPerComponent=8,
ColorSpace=cs,
Width=16,
Height=16,
Type=Name.XObject,
Subtype=Name.Image,
)
check_pim(imobj0, idx=False)
imobj1 = Stream(
pdf,
bytes(range(0, 256)),
BitsPerComponent=8,
ColorSpace=Array([Name.Indexed, cs, 255, bytes(range(255, -1, -1))]),
Width=16,
Height=16,
Type=Name.XObject,
Subtype=Name.Image,
)
check_pim(imobj1, idx=True)
pdf.pages[0].Contents = Stream(
pdf, b'72 0 0 72 0 0 cm /Im0 Do 1 0 0 1 1 0 cm /Im1 Do'
)
pdf.pages[0].Resources = Dictionary(XObject=Dictionary(Im0=imobj0, Im1=imobj1))
# pdf.save("separation.pdf")
def test_devicen():
# Manually construct a 2"x1" document with a DeviceN
    # colorspace that defines a single "spot" color channel named
# "Black". Define a conversion to standard CMYK that assigns
# C=0 M=0 Y=0 and lets black through. The result should appear as a
# gradient from white (top left) to black (bottom right) in the
# left cell, and black to white in the right cell.
pdf = pikepdf.new()
pdf.add_blank_page(page_size=(144, 72))
# Postscript function to map X -> CMYK={0, 0, 0, X}
# Explanation:
# X is implicitly on the stack
# 0 0 0 <- load three zeros on to stack
# stack contains: X 0 0 0
# 4 -1 roll <- roll stack 4 elements -1 times, meaning the order is reversed
# stack contains: 0 0 0 X
# pikepdf currently does not interpret tint transformation functions. This
# is done so that the output test file can be checked in a PDF viewer.
tint_transform_k_to_cmyk = b'{0 0 0 4 -1 roll}'
cs = Array(
[
Name.DeviceN,
Array([Name.Black]),
Name.DeviceCMYK,
Stream(
pdf,
tint_transform_k_to_cmyk,
FunctionType=4,
Domain=[0.0, 1.0],
Range=[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0],
),
]
)
def check_pim(imobj, idx):
pim = pikepdf.PdfImage(imobj)
assert pim.mode == 'DeviceN'
assert pim.is_device_n
assert not pim.is_separation
assert pim.indexed == idx
assert repr(pim)
with pytest.raises(pikepdf.models.image.HifiPrintImageNotTranscodableError):
pim.extract_to(stream=BytesIO())
imobj0 = Stream(
pdf,
bytes(range(0, 256)),
BitsPerComponent=8,
ColorSpace=cs,
Width=16,
Height=16,
Type=Name.XObject,
Subtype=Name.Image,
)
check_pim(imobj0, idx=False)
imobj1 = Stream(
pdf,
bytes(range(0, 256)),
BitsPerComponent=8,
ColorSpace=Array([Name.Indexed, cs, 255, bytes(range(255, -1, -1))]),
Width=16,
Height=16,
Type=Name.XObject,
Subtype=Name.Image,
)
check_pim(imobj1, idx=True)
pdf.pages[0].Contents = Stream(
pdf, b'72 0 0 72 0 0 cm /Im0 Do 1 0 0 1 1 0 cm /Im1 Do'
)
pdf.pages[0].Resources = Dictionary(XObject=Dictionary(Im0=imobj0, Im1=imobj1))
# pdf.save('devicen.pdf')
@given(
spec=valid_random_image_spec(
bpcs=st.sampled_from([2, 4]),
colorspaces=st.just(Name.DeviceGray),
widths=st.integers(1, 7),
heights=st.integers(1, 7),
)
)
def test_grayscale_stride(spec):
pdf = pdf_from_image_spec(spec)
pim = PdfImage(pdf.pages[0].Resources.XObject.Im0)
assert pim.mode == 'L'
imdata = pim.read_bytes()
w = pim.width
imdata_unpacked_view, stride = unpack_subbyte_pixels(
imdata, pim.size, pim.bits_per_component
)
imdata_unpacked = bytes(imdata_unpacked_view)
bio = BytesIO()
pim.extract_to(stream=bio)
im = Image.open(bio)
assert im.mode == 'L' and im.size == pim.size
for n, pixel in enumerate(im.getdata()):
idx = stride * (n // w) + (n % w)
assert imdata_unpacked[idx] == pixel
@requires_pdfimages
@given(spec=valid_random_image_spec())
def test_random_image(spec, tmp_path_factory):
pdf = pdf_from_image_spec(spec)
pim = PdfImage(pdf.pages[0].Resources.XObject.Im0)
bio = BytesIO()
colorspace = pim.colorspace
width = pim.width
height = pim.height
bpc = pim.bits_per_component
imbytes = pim.read_bytes()
try:
result_extension = pim.extract_to(stream=bio)
assert result_extension in ('.png', '.tiff')
except ValueError as e:
if 'not enough image data' in str(e):
return
elif 'buffer is not large enough' in str(e):
ncomps = (
4
if colorspace == Name.DeviceCMYK
else 3
if colorspace == Name.DeviceRGB
else 1
)
assert ceil(bpc / 8) * width * height * ncomps > len(imbytes)
return
raise
except PIL.UnidentifiedImageError:
if len(imbytes) == 0:
return
raise
except UnsupportedImageTypeError:
if colorspace in (Name.DeviceRGB, Name.DeviceCMYK) and bpc < 8:
return
if bpc == 16:
return
raise
bio.seek(0)
im = Image.open(bio)
assert im.mode == pim.mode
assert im.size == pim.size
outprefix = f'{width}x{height}x{im.mode}-'
tmpdir = tmp_path_factory.mktemp(outprefix)
pdf.save(tmpdir / 'pdf.pdf')
# We don't have convenient CMYK checking tools
if im.mode == 'CMYK':
return
im.save(tmpdir / 'pikepdf.png')
Path(tmpdir / 'imbytes.bin').write_bytes(imbytes)
run(
[
'pdfimages',
'-png',
fspath('pdf.pdf'),
fspath('pdfimage'), # omit suffix
],
cwd=fspath(tmpdir),
check=True,
)
outpng = tmpdir / 'pdfimage-000.png'
assert outpng.exists()
im_roundtrip = Image.open(outpng)
assert im.size == im_roundtrip.size
diff = ImageChops.difference(im, im_roundtrip)
assert not diff.getbbox()
# if diff.getbbox():
# im.save('im1.png')
# im_roundtrip.save('im2.png')
# diff.save('imdiff.png')
# breakpoint()
# assert False
| mpl-2.0 |
pikepdf/pikepdf | tests/test_parsers.py | 1 | 9538 | # SPDX-FileCopyrightText: 2022 James R. Barlow
# SPDX-License-Identifier: CC0-1.0
from __future__ import annotations
import shutil
import sys
from subprocess import PIPE, run
import pytest
import pikepdf
from pikepdf import (
ContentStreamInlineImage,
ContentStreamInstruction,
Dictionary,
Name,
Object,
Operator,
Pdf,
PdfError,
PdfInlineImage,
PdfMatrix,
Stream,
_qpdf,
parse_content_stream,
unparse_content_stream,
)
from pikepdf._qpdf import StreamParser
from pikepdf.models import PdfParsingError
# pylint: disable=useless-super-delegation,redefined-outer-name
@pytest.fixture
def graph(resources):
yield Pdf.open(resources / 'graph.pdf')
@pytest.fixture
def inline(resources):
yield Pdf.open(resources / 'image-mono-inline.pdf')
class PrintParser(StreamParser):
def __init__(self):
super().__init__()
def handle_object(self, obj, *_args):
print(repr(obj))
def handle_eof(self):
print("--EOF--")
class ExceptionParser(StreamParser):
def __init__(self):
super().__init__()
def handle_object(self, obj, *_args): # pylint: disable=unused-argument
raise ValueError("I take exception to this")
def handle_eof(self):
print("--EOF--")
def slow_unparse_content_stream(instructions):
def encode(obj):
return _qpdf.unparse(obj)
def encode_iimage(iimage: PdfInlineImage):
return iimage.unparse()
def encode_operator(obj):
if isinstance(obj, Operator):
return obj.unparse()
return encode(Operator(obj))
def for_each_instruction():
for n, (operands, operator) in enumerate(instructions):
try:
if operator == Operator(b'INLINE IMAGE'):
iimage = operands[0]
if not isinstance(iimage, PdfInlineImage):
raise ValueError(
"Operator was INLINE IMAGE but operands were not "
"a PdfInlineImage"
)
line = encode_iimage(iimage)
else:
if operands:
line = b' '.join(encode(operand) for operand in operands)
line += b' ' + encode_operator(operator)
else:
line = encode_operator(operator)
except (PdfError, ValueError) as e:
raise PdfParsingError(line=n + 1) from e
yield line
return b'\n'.join(for_each_instruction())
def test_open_pdf(graph):
page = graph.pages[0]
Object._parse_stream(page.obj, PrintParser())
def test_parser_exception(graph):
stream = graph.pages[0]['/Contents']
with pytest.raises(ValueError):
Object._parse_stream(stream, ExceptionParser())
@pytest.mark.skipif(shutil.which('pdftotext') is None, reason="poppler not installed")
def test_text_filter(resources, outdir):
input_pdf = resources / 'veraPDF test suite 6-2-10-t02-pass-a.pdf'
    # Ensure the test PDF has text we can find
proc = run(
['pdftotext', str(input_pdf), '-'], check=True, stdout=PIPE, encoding='utf-8'
)
assert proc.stdout.strip() != '', "Need input test file that contains text"
with Pdf.open(input_pdf) as pdf:
page = pdf.pages[0]
keep = []
for operands, command in parse_content_stream(
page, """TJ Tj ' " BT ET Td TD Tm T* Tc Tw Tz TL Tf Tr Ts"""
):
if command == Operator('Tj'):
print("skipping Tj")
continue
keep.append((operands, command))
new_stream = Stream(pdf, pikepdf.unparse_content_stream(keep))
print(new_stream.read_bytes()) # pylint: disable=no-member
page['/Contents'] = new_stream
page['/Rotate'] = 90
pdf.save(outdir / 'notext.pdf', static_id=True)
proc = run(
['pdftotext', str(outdir / 'notext.pdf'), '-'],
check=True,
stdout=PIPE,
encoding='utf-8',
)
assert proc.stdout.strip() == '', "Expected text to be removed"
def test_invalid_stream_object():
with pytest.raises(TypeError, match="must be a pikepdf.Object"):
parse_content_stream(42)
with pytest.raises(TypeError, match="called on page or stream"):
parse_content_stream(Dictionary({"/Hi": 3}))
with pytest.raises(
TypeError, match="parse_content_stream called on non-stream Object"
):
false_page = Dictionary(Type=Name.Page, Contents=42)
parse_content_stream(false_page)
# @pytest.mark.parametrize(
# "test_file,expected",
# [
# ("fourpages.pdf", True),
# ("graph.pdf", False),
# ("veraPDF test suite 6-2-10-t02-pass-a.pdf", True),
# ("veraPDF test suite 6-2-3-3-t01-fail-c.pdf", False),
# ('sandwich.pdf', True),
# ],
# )
# def test_has_text(resources, test_file, expected):
# with Pdf.open(resources / test_file) as pdf:
# for p in pdf.pages:
# page = p
# assert page.has_text() == expected
def test_unparse_cs():
instructions = [
([], Operator('q')),
([*PdfMatrix.identity().shorthand], Operator('cm')),
([], Operator('Q')),
]
assert unparse_content_stream(instructions).strip() == b'q\n1 0 0 1 0 0 cm\nQ'
def test_unparse_failure():
instructions = [([float('nan')], Operator('cm'))]
with pytest.raises(PdfParsingError):
unparse_content_stream(instructions)
def test_parse_xobject(resources):
with Pdf.open(resources / 'formxobject.pdf') as pdf:
form1 = pdf.pages[0].Resources.XObject.Form1
instructions = parse_content_stream(form1)
assert instructions[0][1] == Operator('cm')
def test_parse_results(inline):
p0 = inline.pages[0]
cmds = parse_content_stream(p0)
assert isinstance(cmds[0], ContentStreamInstruction)
csi = cmds[0]
assert isinstance(csi.operands, _qpdf._ObjectList)
assert isinstance(csi.operator, Operator)
assert 'Operator' in repr(csi)
assert ContentStreamInstruction(cmds[0]).operator == cmds[0].operator
for cmd in cmds:
if isinstance(cmd, ContentStreamInlineImage):
assert cmd.operator == Operator("INLINE IMAGE")
assert isinstance(cmd.operands[0], PdfInlineImage)
assert 'INLINE' in repr(cmd)
assert cmd.operands[0] == cmd.iimage
def test_build_instructions():
cs = ContentStreamInstruction([1, 0, 0, 1, 0, 0], Operator('cm'))
assert 'cm' in repr(cs)
assert unparse_content_stream([cs]) == b'1 0 0 1 0 0 cm'
def test_unparse_interpret_operator():
commands = []
matrix = [2, 0, 0, 2, 0, 0]
commands.insert(0, (matrix, 'cm'))
commands.insert(0, (matrix, b'cm'))
commands.insert(0, (matrix, Operator('cm')))
unparsed = unparse_content_stream(commands)
assert (
unparsed
== b'2 0 0 2 0 0 cm\n2 0 0 2 0 0 cm\n2 0 0 2 0 0 cm'
== slow_unparse_content_stream(commands)
)
def test_unparse_inline(inline):
p0 = inline.pages[0]
cmds = parse_content_stream(p0)
unparsed = unparse_content_stream(cmds)
assert b'BI' in unparsed
assert unparsed == slow_unparse_content_stream(cmds)
def test_unparse_invalid_inline_image():
instructions = [((42,), Operator(b'INLINE IMAGE'))]
with pytest.raises(PdfParsingError):
unparse_content_stream(instructions)
def test_inline_copy(inline):
for instr in parse_content_stream(inline.pages[0].Contents):
if not isinstance(instr, ContentStreamInlineImage):
continue
csiimage = instr
_copy_of_csiimage = ContentStreamInlineImage(csiimage)
new_iimage = ContentStreamInlineImage(csiimage.iimage)
assert unparse_content_stream([new_iimage]).startswith(b'BI')
def test_end_inline_parse():
pdf = pikepdf.new()
pdf.add_blank_page(page_size=(1000, 1000))
stream = b"""
q 200 0 0 200 500 500 cm
BI
/W 1
/H 1
/BPC 8
/CS /RGB
ID \x80\x80\x80
EI Q
q 300 0 0 300 500 200 cm
BI
/W 2
/H 2
/BPC 8
/CS /RGB
ID \xff\x00\x00\x00\xff\x00\x00\xff\x00\x00\x00\xff
EI Q
"""
pdf.pages[0].Contents = pdf.make_stream(stream)
cs = parse_content_stream(pdf.pages[0])
assert unparse_content_stream(cs).split() == stream.split()
class TestMalformedContentStreamInstructions:
def test_rejects_not_list_of_pairs(self):
with pytest.raises(PdfParsingError):
unparse_content_stream([(1, 2, 3)])
def test_rejects_not_castable_to_object(self):
with pytest.raises(PdfParsingError, match="While unparsing"):
unparse_content_stream([(['one', 'two'], 42)]) # 42 is not an operator
def test_rejects_not_operator(self):
with pytest.raises(PdfParsingError, match="While unparsing"):
unparse_content_stream(
[(['one', 'two'], Name.FortyTwo)]
) # Name is not an operator
def test_rejects_inline_image_missing(self):
with pytest.raises(PdfParsingError):
unparse_content_stream(
[('should be a PdfInlineImage but is not', b'INLINE IMAGE')]
)
def test_accepts_all_lists(self):
unparse_content_stream([[[], b'Q']])
def test_accepts_all_tuples(self):
unparse_content_stream((((Name.Foo,), b'/Do'),))
| mpl-2.0 |
pikepdf/pikepdf | tests/test_matrix.py | 1 | 1141 | # SPDX-FileCopyrightText: 2022 James R. Barlow
# SPDX-License-Identifier: CC0-1.0
from __future__ import annotations
from math import isclose
import pytest
import pikepdf
from pikepdf.models import PdfMatrix
def test_init_6():
m = PdfMatrix(1, 0, 0, 1, 0, 0)
m2 = m.scaled(2, 2)
m2t = m2.translated(2, 3)
assert (
repr(m2t)
== 'pikepdf.PdfMatrix(((2.0, 0.0, 0.0), (0.0, 2.0, 0.0), (2.0, 3.0, 1.0)))'
)
m2tr = m2t.rotated(90)
assert isclose(m2tr.a, 0, abs_tol=1e-6)
assert isclose(m2tr.b, 2, abs_tol=1e-6)
assert isclose(m2tr.c, -2, abs_tol=1e-6)
assert isclose(m2tr.d, 0, abs_tol=1e-6)
assert isclose(m2tr.e, -3, abs_tol=1e-6)
assert isclose(m2tr.f, 2, abs_tol=1e-6)
def test_invalid_init():
with pytest.raises(ValueError, match='arguments'):
PdfMatrix('strings')
def test_matrix_from_matrix():
m = PdfMatrix(1, 0, 0, 1, 0, 0)
m_copy = PdfMatrix(m)
assert m == m_copy
assert m != 'not matrix'
def test_matrix_encode():
m = PdfMatrix(1, 0, 0, 1, 0, 0)
assert m.encode() == b'1.000000 0.000000 0.000000 1.000000 0.000000 0.000000'
| mpl-2.0 |
pikepdf/pikepdf | tests/test_decimal.py | 1 | 1961 | # SPDX-FileCopyrightText: 2022 James R. Barlow
# SPDX-License-Identifier: CC0-1.0
from __future__ import annotations
from decimal import Decimal, getcontext
import pytest
import pikepdf
from pikepdf.settings import get_decimal_precision, set_decimal_precision
encode = pikepdf._qpdf._encode
# pylint: disable=redefined-outer-name
def test_decimal_precision():
d = Decimal('0.1234567890123456789')
assert str(encode(d)) == '0.123456789012346'
def test_decimal_change_precision():
d = Decimal('0.1234567890123456789')
saved = get_decimal_precision()
try:
set_decimal_precision(10)
assert str(encode(d)) == '0.1234567890'
assert get_decimal_precision() == 10
finally:
set_decimal_precision(saved)
def test_decimal_independent_of_app():
d = Decimal('0.1234567890123456789')
pikepdf_prec = get_decimal_precision()
decimal_prec = getcontext().prec
try:
getcontext().prec = 6
set_decimal_precision(8)
assert str(encode(d)) == '0.12345679'
assert get_decimal_precision() != 6
finally:
set_decimal_precision(pikepdf_prec)
getcontext().prec = decimal_prec
@pytest.fixture
def pal(resources):
return pikepdf.open(resources / 'pal-1bit-trivial.pdf')
def test_output_rounded(pal, outdir):
pal.pages[0].MediaBox[2] = pal.pages[0].MediaBox[2] * Decimal(
'1.2345678912345678923456789123456789'
)
pal.save(outdir / 'round.pdf')
with pikepdf.open(outdir / 'round.pdf') as pdf:
assert len(str(pdf.pages[0].MediaBox[2])) == 16
def test_nonfinite(pal):
with pytest.raises(ValueError):
pal.pages[0].MediaBox[2] = Decimal('NaN')
with pytest.raises(ValueError):
pal.pages[0].MediaBox[2] = Decimal('Infinity')
with pytest.raises(ValueError):
pal.pages[0].MediaBox[2] = float('NaN')
with pytest.raises(ValueError):
pal.pages[0].MediaBox[2] = float('Infinity')
| mpl-2.0 |
pikepdf/pikepdf | src/pikepdf/models/_transcoding.py | 1 | 8054 | # SPDX-FileCopyrightText: 2022 James R. Barlow
# SPDX-License-Identifier: MPL-2.0
from __future__ import annotations
import struct
from typing import Any, Callable, NamedTuple, Union
from PIL import Image
from PIL.TiffTags import TAGS_V2 as TIFF_TAGS
BytesLike = Union[bytes, memoryview]
MutableBytesLike = Union[bytearray, memoryview]
def _next_multiple(n: int, k: int) -> int:
"""Return the multiple of k that is greater than or equal n.
>>> _next_multiple(101, 4)
104
>>> _next_multiple(100, 4)
100
"""
div, mod = divmod(n, k)
if mod > 0:
div += 1
return div * k
def unpack_subbyte_pixels(
packed: BytesLike, size: tuple[int, int], bits: int, scale: int = 0
) -> tuple[BytesLike, int]:
"""Unpack subbyte *bits* pixels into full bytes and rescale.
When scale is 0, the appropriate scale is calculated.
e.g. for 2-bit, the scale is adjusted so that
0b00 = 0.00 = 0x00
0b01 = 0.33 = 0x55
0b10 = 0.66 = 0xaa
0b11 = 1.00 = 0xff
When scale is 1, no scaling is applied, appropriate when
the bytes are palette indexes.
"""
width, height = size
bits_per_byte = 8 // bits
stride = _next_multiple(width, bits_per_byte)
buffer = bytearray(bits_per_byte * stride * height)
max_read = len(buffer) // bits_per_byte
if scale == 0:
scale = 255 / ((2**bits) - 1)
if bits == 4:
_4bit_inner_loop(packed[:max_read], buffer, scale)
elif bits == 2:
_2bit_inner_loop(packed[:max_read], buffer, scale)
# elif bits == 1:
# _1bit_inner_loop(packed[:max_read], buffer, scale)
else:
raise NotImplementedError(bits)
return memoryview(buffer), stride
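# Illustrative example (values chosen for demonstration): the packed byte
# 0x1b holds the four 2-bit pixels 0b00, 0b01, 0b10, 0b11, which unpack and
# rescale to 0, 85, 170, 255 with a stride of 4:
#   view, stride = unpack_subbyte_pixels(b'\x1b', size=(4, 1), bits=2)
#   bytes(view[:4])  # -> b'\x00\x55\xaa\xff'; stride == 4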
# def _1bit_inner_loop(in_: BytesLike, out: MutableBytesLike, scale: int) -> None:
# """Unpack 1-bit values to their 8-bit equivalents.
# Thus *out* must be 8x as long as *in*.
# """
# for n, val in enumerate(in_):
# out[8 * n + 0] = int((val >> 7) & 0b1) * scale
# out[8 * n + 1] = int((val >> 6) & 0b1) * scale
# out[8 * n + 2] = int((val >> 5) & 0b1) * scale
# out[8 * n + 3] = int((val >> 4) & 0b1) * scale
# out[8 * n + 4] = int((val >> 3) & 0b1) * scale
# out[8 * n + 5] = int((val >> 2) & 0b1) * scale
# out[8 * n + 6] = int((val >> 1) & 0b1) * scale
# out[8 * n + 7] = int((val >> 0) & 0b1) * scale
def _2bit_inner_loop(in_: BytesLike, out: MutableBytesLike, scale: int) -> None:
"""Unpack 2-bit values to their 8-bit equivalents.
    Thus *out* must be 4x as long as *in*.
"""
for n, val in enumerate(in_):
out[4 * n] = int((val >> 6) * scale)
out[4 * n + 1] = int(((val >> 4) & 0b11) * scale)
out[4 * n + 2] = int(((val >> 2) & 0b11) * scale)
out[4 * n + 3] = int((val & 0b11) * scale)
def _4bit_inner_loop(in_: BytesLike, out: MutableBytesLike, scale: int) -> None:
"""Unpack 4-bit values to their 8-bit equivalents.
    Thus *out* must be 2x as long as *in*.
"""
for n, val in enumerate(in_):
out[2 * n] = int((val >> 4) * scale)
out[2 * n + 1] = int((val & 0b1111) * scale)
def image_from_byte_buffer(buffer: BytesLike, size: tuple[int, int], stride: int):
"""Use Pillow to create one-component image from a byte buffer.
*stride* is the number of bytes per row, and is essential for packed bits
with odd image widths.
"""
ystep = 1 # image is top to bottom in memory
return Image.frombuffer('L', size, buffer, "raw", 'L', stride, ystep)
def _make_rgb_palette(gray_palette: bytes) -> bytes:
palette = b''
for entry in gray_palette:
palette += bytes([entry]) * 3
return palette
def _depalettize_cmyk(buffer: BytesLike, palette: BytesLike):
with memoryview(buffer) as mv:
output = bytearray(4 * len(mv))
for n, pal_idx in enumerate(mv):
output[4 * n : 4 * (n + 1)] = palette[4 * pal_idx : 4 * (pal_idx + 1)]
return output
def image_from_buffer_and_palette(
buffer: BytesLike,
size: tuple[int, int],
stride: int,
base_mode: str,
palette: BytesLike,
) -> Image.Image:
"""Construct an image from a byte buffer and apply the palette.
1/2/4-bit images must be unpacked (no scaling!) to byte buffers first, such
that every 8-bit integer is an index into the palette.
"""
# Reminder Pillow palette byte order unintentionally changed in 8.3.0
# https://github.com/python-pillow/Pillow/issues/5595
# 8.2.0: all aligned by channel (very nonstandard)
# 8.3.0: all channels for one color followed by the next color (e.g. RGBRGBRGB)
if base_mode == 'RGB':
im = image_from_byte_buffer(buffer, size, stride)
im.putpalette(palette, rawmode=base_mode)
elif base_mode == 'L':
# Pillow does not fully support palettes with rawmode='L'.
# Convert to RGB palette.
gray_palette = _make_rgb_palette(palette)
im = image_from_byte_buffer(buffer, size, stride)
im.putpalette(gray_palette, rawmode='RGB')
elif base_mode == 'CMYK':
# Pillow does not support CMYK with palettes; convert manually
output = _depalettize_cmyk(buffer, palette)
im = Image.frombuffer('CMYK', size, data=output, decoder_name='raw')
else:
raise NotImplementedError(f'palette with {base_mode}')
return im
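# Illustrative example (made-up buffer and palette): four palette indexes
# forming a 2x2 image, looked up in a two-entry RGB palette (black, white):
#   im = image_from_buffer_and_palette(
#       bytes([0, 1, 1, 0]), size=(2, 2), stride=2,
#       base_mode='RGB', palette=b'\x00\x00\x00\xff\xff\xff',
#   )
#   im.mode  # -> 'P'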
def fix_1bit_palette_image(
im: Image.Image, base_mode: str, palette: BytesLike
) -> Image.Image:
"""Apply palettes to 1-bit images."""
im = im.convert('P')
if base_mode == 'RGB' and len(palette) == 6:
# rgbrgb -> rgb000000...rgb
palette = palette[0:3] + (b'\x00\x00\x00' * (256 - 2)) + palette[3:6]
im.putpalette(palette, rawmode='RGB')
elif base_mode == 'L':
try:
im.putpalette(palette, rawmode='L')
except ValueError as e:
if 'unrecognized raw mode' in str(e):
rgb_palette = _make_rgb_palette(palette)
im.putpalette(rgb_palette, rawmode='RGB')
return im
def generate_ccitt_header(
size: tuple[int, int],
data_length: int,
ccitt_group: int,
photometry: int,
icc: bytes,
) -> bytes:
"""Generate binary CCITT header for image with given parameters."""
tiff_header_struct = '<' + '2s' + 'H' + 'L' + 'H'
tag_keys = {tag.name: key for key, tag in TIFF_TAGS.items()} # type: ignore
ifd_struct = '<HHLL'
class IFD(NamedTuple):
key: int
typecode: Any
count_: int
data: int | Callable[[], int | None]
ifds: list[IFD] = []
def header_length(ifd_count) -> int:
return (
struct.calcsize(tiff_header_struct)
+ struct.calcsize(ifd_struct) * ifd_count
+ 4
)
def add_ifd(tag_name: str, data: int | Callable[[], int | None], count: int = 1):
key = tag_keys[tag_name]
typecode = TIFF_TAGS[key].type # type: ignore
ifds.append(IFD(key, typecode, count, data))
image_offset = None
width, height = size
add_ifd('ImageWidth', width)
add_ifd('ImageLength', height)
add_ifd('BitsPerSample', 1)
add_ifd('Compression', ccitt_group)
add_ifd('PhotometricInterpretation', int(photometry))
add_ifd('StripOffsets', lambda: image_offset)
add_ifd('RowsPerStrip', height)
add_ifd('StripByteCounts', data_length)
icc_offset = 0
if icc:
add_ifd('ICCProfile', lambda: icc_offset, count=len(icc))
icc_offset = header_length(len(ifds))
image_offset = icc_offset + len(icc)
ifd_args = [(arg() if callable(arg) else arg) for ifd in ifds for arg in ifd]
tiff_header = struct.pack(
(tiff_header_struct + ifd_struct[1:] * len(ifds) + 'L'),
b'II', # Byte order indication: Little endian
42, # Version number (always 42)
8, # Offset to first IFD
len(ifds), # Number of tags in IFD
*ifd_args,
0, # Last IFD
)
if icc:
tiff_header += icc
return tiff_header
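# Illustrative sketch (parameter values are hypothetical; ccitt_bytes stands
# for the raw CCITT-encoded stream data): prepending this header turns bare
# CCITT data into a minimal single-strip TIFF that image readers can open:
#   header = generate_ccitt_header(
#       (1000, 1520), data_length=len(ccitt_bytes), ccitt_group=4,
#       photometry=0, icc=b'',
#   )
#   tiff = header + ccitt_bytes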
| mpl-2.0 |
marl/jams | tests/test_sonify.py | 1 | 4469 | #!/usr/bin/env python
# CREATED:2016-02-11 12:07:58 by Brian McFee <brian.mcfee@nyu.edu>
"""Sonification tests"""
import numpy as np
import pytest
from test_eval import create_hierarchy
import jams
@pytest.mark.xfail(raises=jams.NamespaceError)
def test_no_sonify():
ann = jams.Annotation(namespace='vector')
jams.sonify.sonify(ann)
@pytest.mark.xfail(raises=jams.SchemaError)
def test_bad_sonify():
ann = jams.Annotation(namespace='chord')
ann.append(time=0, duration=1, value='not a chord')
jams.sonify.sonify(ann)
@pytest.mark.parametrize('ns', ['segment_open', 'chord'])
@pytest.mark.parametrize('sr', [8000, 11025])
@pytest.mark.parametrize('duration', [None, 5.0, 1.0])
def test_duration(ns, sr, duration):
ann = jams.Annotation(namespace=ns)
ann.append(time=3, duration=1, value='C')
y = jams.sonify.sonify(ann, sr=sr, duration=duration)
if duration is not None:
assert len(y) == int(sr * duration)
def test_note_hz():
ann = jams.Annotation(namespace='note_hz')
ann.append(time=0, duration=1, value=261.0)
y = jams.sonify.sonify(ann, sr=8000, duration=2.0)
assert len(y) == 8000 * 2
def test_note_hz_nolength():
ann = jams.Annotation(namespace='note_hz')
ann.append(time=0, duration=1, value=261.0)
y = jams.sonify.sonify(ann, sr=8000)
assert len(y) == 8000 * 1
assert np.any(y)
def test_note_midi():
ann = jams.Annotation(namespace='note_midi')
ann.append(time=0, duration=1, value=60)
y = jams.sonify.sonify(ann, sr=8000, duration=2.0)
assert len(y) == 8000 * 2
@pytest.fixture(scope='module')
def ann_contour():
ann = jams.Annotation(namespace='pitch_contour')
duration = 5.0
fs = 0.01
# Generate a contour with deep vibrato and no voicing from 3s-4s
times = np.linspace(0, duration, num=int(duration / fs))
rate = 5
vibrato = 220 + 20 * np.sin(2 * np.pi * times * rate)
for t, v in zip(times, vibrato):
ann.append(time=t, duration=fs, value={'frequency': v,
'index': 0,
'voiced': (t < 3 or t > 4)})
return ann
@pytest.mark.parametrize('duration', [None, 5.0, 10.0])
@pytest.mark.parametrize('sr', [8000])
def test_contour(ann_contour, duration, sr):
y = jams.sonify.sonify(ann_contour, sr=sr, duration=duration)
if duration is not None:
assert len(y) == sr * duration
@pytest.mark.parametrize('namespace', ['chord', 'chord_harte'])
@pytest.mark.parametrize('sr', [8000])
@pytest.mark.parametrize('duration', [2.0])
@pytest.mark.parametrize('value', ['C:maj/5'])
def test_chord(namespace, sr, duration, value):
ann = jams.Annotation(namespace=namespace)
ann.append(time=0.5, duration=1.0, value=value)
y = jams.sonify.sonify(ann, sr=sr, duration=duration)
assert len(y) == sr * duration
@pytest.mark.parametrize('namespace, value',
[('beat', 1),
('segment_open', 'C'),
('onset', 1)])
@pytest.mark.parametrize('sr', [8000])
@pytest.mark.parametrize('duration', [2.0])
def test_event(namespace, sr, duration, value):
ann = jams.Annotation(namespace=namespace)
ann.append(time=0.5, duration=0, value=value)
y = jams.sonify.sonify(ann, sr=sr, duration=duration)
assert len(y) == sr * duration
@pytest.fixture(scope='module')
def beat_pos_ann():
ann = jams.Annotation(namespace='beat_position')
for i, t in enumerate(np.arange(0, 10, 0.25)):
ann.append(time=t, duration=0,
value=dict(position=1 + i % 4,
measure=1 + i // 4,
num_beats=4,
beat_units=4))
return ann
@pytest.mark.parametrize('sr', [8000])
@pytest.mark.parametrize('duration', [None, 5, 15])
def test_beat_position(beat_pos_ann, sr, duration):
yout = jams.sonify.sonify(beat_pos_ann, sr=sr, duration=duration)
if duration is not None:
assert len(yout) == duration * sr
@pytest.fixture(scope='module')
def ann_hier():
return create_hierarchy(values=['AB', 'abac', 'xxyyxxzz'], duration=30)
@pytest.mark.parametrize('sr', [8000])
@pytest.mark.parametrize('duration', [None, 15, 30])
def test_multi_segment(ann_hier, sr, duration):
y = jams.sonify.sonify(ann_hier, sr=sr, duration=duration)
if duration:
assert len(y) == duration * sr
| isc |
marl/jams | jams/sonify.py | 1 | 6973 | #!/usr/bin/env python
# CREATED:2015-12-12 18:20:37 by Brian McFee <brian.mcfee@nyu.edu>
r'''
Sonification
------------
.. autosummary::
:toctree: generated/
sonify
'''
from itertools import product
from collections import OrderedDict, defaultdict
import six
import numpy as np
import mir_eval.sonify
from mir_eval.util import filter_kwargs
from .eval import coerce_annotation, hierarchy_flatten
from .exceptions import NamespaceError
__all__ = ['sonify']
def mkclick(freq, sr=22050, duration=0.1):
'''Generate a click sample.
This replicates functionality from mir_eval.sonify.clicks,
but exposes the target frequency and duration.
'''
times = np.arange(int(sr * duration))
click = np.sin(2 * np.pi * times * freq / float(sr))
click *= np.exp(- times / (1e-2 * sr))
return click
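# Illustrative note: with the defaults, mkclick(880) yields about 0.1 s of a
# decaying 880 Hz sine, i.e. int(22050 * 0.1) == 2205 samples:
#   click = mkclick(880)
#   len(click)  # -> 2205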
def clicks(annotation, sr=22050, length=None, **kwargs):
'''Sonify events with clicks.
This uses mir_eval.sonify.clicks, and is appropriate for instantaneous
events such as beats or segment boundaries.
'''
interval, _ = annotation.to_interval_values()
return filter_kwargs(mir_eval.sonify.clicks, interval[:, 0],
fs=sr, length=length, **kwargs)
def downbeat(annotation, sr=22050, length=None, **kwargs):
'''Sonify beats and downbeats together.
'''
beat_click = mkclick(440 * 2, sr=sr)
downbeat_click = mkclick(440 * 3, sr=sr)
intervals, values = annotation.to_interval_values()
beats, downbeats = [], []
for time, value in zip(intervals[:, 0], values):
if value['position'] == 1:
downbeats.append(time)
else:
beats.append(time)
if length is None:
length = int(sr * np.max(intervals)) + len(beat_click) + 1
y = filter_kwargs(mir_eval.sonify.clicks,
np.asarray(beats),
fs=sr, length=length, click=beat_click)
y += filter_kwargs(mir_eval.sonify.clicks,
np.asarray(downbeats),
fs=sr, length=length, click=downbeat_click)
return y
def multi_segment(annotation, sr=22050, length=None, **kwargs):
'''Sonify multi-level segmentations'''
# Pentatonic scale, because why not
PENT = [1, 32./27, 4./3, 3./2, 16./9]
DURATION = 0.1
h_int, _ = hierarchy_flatten(annotation)
if length is None:
length = int(sr * (max(np.max(_) for _ in h_int) + 1. / DURATION) + 1)
y = 0.0
for ints, (oc, scale) in zip(h_int, product(range(3, 3 + len(h_int)),
PENT)):
click = mkclick(440.0 * scale * oc, sr=sr, duration=DURATION)
y = y + filter_kwargs(mir_eval.sonify.clicks,
np.unique(ints),
fs=sr, length=length,
click=click)
return y
def chord(annotation, sr=22050, length=None, **kwargs):
'''Sonify chords
This uses mir_eval.sonify.chords.
'''
intervals, chords = annotation.to_interval_values()
return filter_kwargs(mir_eval.sonify.chords,
chords, intervals,
fs=sr, length=length,
**kwargs)
def pitch_contour(annotation, sr=22050, length=None, **kwargs):
'''Sonify pitch contours.
This uses mir_eval.sonify.pitch_contour, and should only be applied
to pitch annotations using the pitch_contour namespace.
Each contour is sonified independently, and the resulting waveforms
are summed together.
'''
# Map contours to lists of observations
times = defaultdict(list)
freqs = defaultdict(list)
for obs in annotation:
times[obs.value['index']].append(obs.time)
freqs[obs.value['index']].append(obs.value['frequency'] *
(-1)**(~obs.value['voiced']))
y_out = 0.0
for ix in times:
y_out = y_out + filter_kwargs(mir_eval.sonify.pitch_contour,
np.asarray(times[ix]),
np.asarray(freqs[ix]),
fs=sr, length=length,
**kwargs)
if length is None:
length = len(y_out)
return y_out
def piano_roll(annotation, sr=22050, length=None, **kwargs):
'''Sonify a piano-roll
This uses mir_eval.sonify.time_frequency, and is appropriate
for sparse transcription data, e.g., annotations in the `note_midi`
namespace.
'''
intervals, pitches = annotation.to_interval_values()
# Construct the pitchogram
pitch_map = {f: idx for idx, f in enumerate(np.unique(pitches))}
gram = np.zeros((len(pitch_map), len(intervals)))
for col, f in enumerate(pitches):
gram[pitch_map[f], col] = 1
return filter_kwargs(mir_eval.sonify.time_frequency,
gram, pitches, intervals,
sr, length=length, **kwargs)
SONIFY_MAPPING = OrderedDict()
SONIFY_MAPPING['beat_position'] = downbeat
SONIFY_MAPPING['beat'] = clicks
SONIFY_MAPPING['multi_segment'] = multi_segment
SONIFY_MAPPING['segment_open'] = clicks
SONIFY_MAPPING['onset'] = clicks
SONIFY_MAPPING['chord'] = chord
SONIFY_MAPPING['note_hz'] = piano_roll
SONIFY_MAPPING['pitch_contour'] = pitch_contour
def sonify(annotation, sr=22050, duration=None, **kwargs):
'''Sonify a jams annotation through mir_eval
Parameters
----------
annotation : jams.Annotation
The annotation to sonify
sr = : positive number
The sampling rate of the output waveform
duration : float (optional)
Optional length (in seconds) of the output waveform
kwargs
Additional keyword arguments to mir_eval.sonify functions
Returns
-------
y_sonified : np.ndarray
The waveform of the sonified annotation
Raises
------
NamespaceError
If the annotation has an un-sonifiable namespace
'''
length = None
if duration is None:
duration = annotation.duration
if duration is not None:
length = int(duration * sr)
# If the annotation can be directly sonified, try that first
if annotation.namespace in SONIFY_MAPPING:
ann = coerce_annotation(annotation, annotation.namespace)
return SONIFY_MAPPING[annotation.namespace](ann,
sr=sr,
length=length,
**kwargs)
for namespace, func in six.iteritems(SONIFY_MAPPING):
try:
ann = coerce_annotation(annotation, namespace)
return func(ann, sr=sr, length=length, **kwargs)
except NamespaceError:
pass
raise NamespaceError('Unable to sonify annotation of namespace="{:s}"'
.format(annotation.namespace))
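# Illustrative usage sketch ('example.jams' is a hypothetical file containing
# at least one 'beat' annotation):
#   import jams
#   jam = jams.load('example.jams')
#   ann = jam.annotations.search(namespace='beat')[0]
#   y = sonify(ann, sr=22050, duration=jam.file_metadata.duration)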
| isc |
marl/jams | docs/examples/example_beat.py | 1 | 1897 | #!/usr/bin/env python
import librosa
import jams
def beat_track(infile, outfile):
# Load the audio file
y, sr = librosa.load(infile)
# Compute the track duration
track_duration = librosa.get_duration(y=y, sr=sr)
# Extract tempo and beat estimates
tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)
# Convert beat frames to time
beat_times = librosa.frames_to_time(beat_frames, sr=sr)
# Construct a new JAMS object and annotation records
jam = jams.JAMS()
# Store the track duration
jam.file_metadata.duration = track_duration
beat_a = jams.Annotation(namespace='beat')
beat_a.annotation_metadata = jams.AnnotationMetadata(data_source='librosa beat tracker')
# Add beat timings to the annotation record.
# The beat namespace does not require value or confidence fields,
# so we can leave those blank.
for t in beat_times:
beat_a.append(time=t, duration=0.0)
# Store the new annotation in the jam
jam.annotations.append(beat_a)
# Add tempo estimation to the annotation.
tempo_a = jams.Annotation(namespace='tempo', time=0, duration=track_duration)
tempo_a.annotation_metadata = jams.AnnotationMetadata(data_source='librosa tempo estimator')
# The tempo estimate is global, so it should start at time=0 and cover the full
# track duration.
# If we had a likelihood score on the estimation, it could be stored in
# `confidence`. Since we have no competing estimates, we'll set it to 1.0.
tempo_a.append(time=0.0,
duration=track_duration,
value=tempo,
confidence=1.0)
# Store the new annotation in the jam
jam.annotations.append(tempo_a)
# Save to disk
jam.save(outfile)
if __name__ == '__main__':
infile = librosa.util.example_audio_file()
beat_track(infile, 'output.jams')
| isc |
mail-in-a-box/mailinabox | tests/fail2ban.py | 1 | 6372 | # Test that a box's fail2ban settings are working
# correctly by attempting a bunch of failed logins.
#
# Specify an SSH login command (which we use to reset
# fail2ban after each test) and the hostname to
# try to log in to.
######################################################################
import sys, os, time, functools
# parse command line
if len(sys.argv) != 4:
print("Usage: tests/fail2ban.py \"ssh user@hostname\" hostname owncloud_user")
sys.exit(1)
ssh_command, hostname, owncloud_user = sys.argv[1:4]
# define some test types
import socket
socket.setdefaulttimeout(10)
class IsBlocked(Exception):
"""Tests raise this exception when it appears that a fail2ban
jail is in effect, i.e. on a connection refused error."""
pass
def smtp_test():
import smtplib
try:
server = smtplib.SMTP(hostname, 587)
except ConnectionRefusedError:
# looks like fail2ban worked
raise IsBlocked()
server.starttls()
server.ehlo_or_helo_if_needed()
try:
server.login("fakeuser", "fakepassword")
raise Exception("authentication didn't fail")
except smtplib.SMTPAuthenticationError:
		# authentication should fail
pass
try:
server.quit()
except:
# ignore errors here
pass
def imap_test():
import imaplib
try:
M = imaplib.IMAP4_SSL(hostname)
except ConnectionRefusedError:
# looks like fail2ban worked
raise IsBlocked()
try:
M.login("fakeuser", "fakepassword")
raise Exception("authentication didn't fail")
except imaplib.IMAP4.error:
# authentication should fail
pass
finally:
M.logout() # shuts down connection, has nothing to do with login()
def pop_test():
import poplib
try:
M = poplib.POP3_SSL(hostname)
except ConnectionRefusedError:
# looks like fail2ban worked
raise IsBlocked()
try:
M.user('fakeuser')
try:
M.pass_('fakepassword')
except poplib.error_proto as e:
# Authentication should fail.
M = None # don't .quit()
return
M.list()
raise Exception("authentication didn't fail")
finally:
if M:
M.quit()
def managesieve_test():
# We don't have a Python sieve client, so we'll
# just run the IMAP client and see what happens.
import imaplib
try:
M = imaplib.IMAP4(hostname, 4190)
except ConnectionRefusedError:
# looks like fail2ban worked
raise IsBlocked()
try:
M.login("fakeuser", "fakepassword")
raise Exception("authentication didn't fail")
except imaplib.IMAP4.error:
# authentication should fail
pass
finally:
M.logout() # shuts down connection, has nothing to do with login()
def http_test(url, expected_status, postdata=None, qsargs=None, auth=None):
import urllib.parse
import requests
from requests.auth import HTTPBasicAuth
# form request
url = urllib.parse.urljoin("https://" + hostname, url)
if qsargs: url += "?" + urllib.parse.urlencode(qsargs)
urlopen = requests.get if not postdata else requests.post
try:
# issue request
r = urlopen(
url,
auth=HTTPBasicAuth(*auth) if auth else None,
data=postdata,
headers={'User-Agent': 'Mail-in-a-Box fail2ban tester'},
timeout=8,
verify=False) # don't bother with HTTPS validation, it may not be configured yet
except requests.exceptions.ConnectTimeout as e:
raise IsBlocked()
except requests.exceptions.ConnectionError as e:
if "Connection refused" in str(e):
raise IsBlocked()
raise # some other unexpected condition
# return response status code
if r.status_code != expected_status:
r.raise_for_status() # anything but 200
raise IOError("Got unexpected status code %s." % r.status_code)
# define how to run a test
def restart_fail2ban_service(final=False):
# Log in over SSH to restart fail2ban.
command = "sudo fail2ban-client reload"
if not final:
# Stop recidive jails during testing.
command += " && sudo fail2ban-client stop recidive"
os.system("%s \"%s\"" % (ssh_command, command))
def testfunc_runner(i, testfunc, *args):
print(i+1, end=" ", flush=True)
testfunc(*args)
def run_test(testfunc, args, count, within_seconds, parallel):
# Run testfunc count times in within_seconds seconds (and actually
# within a little less time so we're sure we're under the limit).
#
# Because some services are slow, like IMAP, we can't necessarily
# run testfunc sequentially and still get to count requests within
# the required time. So we split the requests across threads.
import requests.exceptions
from multiprocessing import Pool
restart_fail2ban_service()
# Log.
print(testfunc.__name__, " ".join(str(a) for a in args), "...")
# Record the start time so we can know how to evenly space our
# calls to testfunc.
start_time = time.time()
with Pool(parallel) as p:
# Distribute the requests across the pool.
asyncresults = []
for i in range(count):
ar = p.apply_async(testfunc_runner, [i, testfunc] + list(args))
asyncresults.append(ar)
# Wait for all runs to finish.
p.close()
p.join()
# Check for errors.
for ar in asyncresults:
try:
ar.get()
except IsBlocked:
print("Test machine prematurely blocked!")
return False
# Did we make enough requests within the limit?
if (time.time()-start_time) > within_seconds:
raise Exception("Test failed to make %s requests in %d seconds." % (count, within_seconds))
# Wait a moment for the block to be put into place.
time.sleep(4)
# The next call should fail.
print("*", end=" ", flush=True)
try:
testfunc(*args)
except IsBlocked:
# Success -- this one is supposed to be refused.
print("blocked [OK]")
return True # OK
print("not blocked!")
return False
######################################################################
if __name__ == "__main__":
# run tests
# SMTP bans at 10 even though we say 20 in the config because we get
# doubled-up warnings in the logs, we'll let that be for now
run_test(smtp_test, [], 10, 30, 8)
# IMAP
run_test(imap_test, [], 20, 30, 4)
# POP
run_test(pop_test, [], 20, 30, 4)
# Managesieve
run_test(managesieve_test, [], 20, 30, 4)
# Mail-in-a-Box control panel
run_test(http_test, ["/admin/login", 200], 20, 30, 1)
# Munin via the Mail-in-a-Box control panel
run_test(http_test, ["/admin/munin/", 401], 20, 30, 1)
# ownCloud
run_test(http_test, ["/cloud/remote.php/webdav", 401, None, None, [owncloud_user, "aa"]], 20, 120, 1)
# restart fail2ban so that this client machine is no longer blocked
restart_fail2ban_service(final=True)
| cc0-1.0 |
mail-in-a-box/mailinabox | management/auth.py | 1 | 6049 | import base64, os, os.path, hmac, json, secrets
from datetime import timedelta
from expiringdict import ExpiringDict
import utils
from mailconfig import get_mail_password, get_mail_user_privileges
from mfa import get_hash_mfa_state, validate_auth_mfa
DEFAULT_KEY_PATH = '/var/lib/mailinabox/api.key'
DEFAULT_AUTH_REALM = 'Mail-in-a-Box Management Server'
class AuthService:
def __init__(self):
self.auth_realm = DEFAULT_AUTH_REALM
self.key_path = DEFAULT_KEY_PATH
self.max_session_duration = timedelta(days=2)
self.init_system_api_key()
self.sessions = ExpiringDict(max_len=64, max_age_seconds=self.max_session_duration.total_seconds())
	def init_system_api_key(self):
		"""Read the API key from a local file so local processes can use the API"""
with open(self.key_path, 'r') as file:
self.key = file.read()
def authenticate(self, request, env, login_only=False, logout=False):
"""Test if the HTTP Authorization header's username matches the system key, a session key,
or if the username/password passed in the header matches a local user.
Returns a tuple of the user's email address and list of user privileges (e.g.
('my@email', []) or ('my@email', ['admin']); raises a ValueError on login failure.
If the user used the system API key, the user's email is returned as None since
this key is not associated with a user."""
def parse_http_authorization_basic(header):
def decode(s):
return base64.b64decode(s.encode('ascii')).decode('ascii')
if " " not in header:
return None, None
scheme, credentials = header.split(maxsplit=1)
if scheme != 'Basic':
return None, None
credentials = decode(credentials)
if ":" not in credentials:
return None, None
username, password = credentials.split(':', maxsplit=1)
return username, password
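		# Worked example (illustrative; the email and password are made up): an
		# Authorization header value of "Basic dXNlckBleGFtcGxlLmNvbTpodW50ZXIy"
		# decodes to "user@example.com:hunter2", so this helper returns
		# ("user@example.com", "hunter2").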
username, password = parse_http_authorization_basic(request.headers.get('Authorization', ''))
if username in (None, ""):
raise ValueError("Authorization header invalid.")
if username.strip() == "" and password.strip() == "":
raise ValueError("No email address, password, session key, or API key provided.")
# If user passed the system API key, grant administrative privs. This key
# is not associated with a user.
if username == self.key and not login_only:
return (None, ["admin"])
# If the password corresponds with a session token for the user, grant access for that user.
if self.get_session(username, password, "login", env) and not login_only:
sessionid = password
session = self.sessions[sessionid]
if logout:
# Clear the session.
del self.sessions[sessionid]
else:
# Re-up the session so that it does not expire.
self.sessions[sessionid] = session
# If no password was given, but a username was given, we're missing some information.
elif password.strip() == "":
raise ValueError("Enter a password.")
else:
# The user is trying to log in with a username and a password
# (and possibly a MFA token). On failure, an exception is raised.
self.check_user_auth(username, password, request, env)
# Get privileges for authorization. This call should never fail because by this
# point we know the email address is a valid user --- unless the user has been
# deleted after the session was granted. On error the call will return a tuple
# of an error message and an HTTP status code.
privs = get_mail_user_privileges(username, env)
if isinstance(privs, tuple): raise ValueError(privs[0])
# Return the authorization information.
return (username, privs)
def check_user_auth(self, email, pw, request, env):
# Validate a user's login email address and password. If MFA is enabled,
# check the MFA token in the X-Auth-Token header.
#
# On login failure, raises a ValueError with a login error message. On
# success, nothing is returned.
# Authenticate.
try:
# Get the hashed password of the user. Raise a ValueError if the
# email address does not correspond to a user. But wrap it in the
# same exception as if a password fails so we don't easily reveal
# if an email address is valid.
pw_hash = get_mail_password(email, env)
# Use 'doveadm pw' to check credentials. doveadm will return
# a non-zero exit status if the credentials are no good,
# and check_call will raise an exception in that case.
utils.shell('check_call', [
"/usr/bin/doveadm", "pw",
"-p", pw,
"-t", pw_hash,
])
except:
# Login failed.
raise ValueError("Incorrect email address or password.")
# If MFA is enabled, check that MFA passes.
status, hints = validate_auth_mfa(email, request, env)
if not status:
			# MFA validation failed. Hints may have more info.
raise ValueError(",".join(hints))
def create_user_password_state_token(self, email, env):
# Create a token that changes if the user's password or MFA options change
# so that sessions become invalid if any of that information changes.
msg = get_mail_password(email, env).encode("utf8")
# Add to the message the current MFA state, which is a list of MFA information.
# Turn it into a string stably.
msg += b" " + json.dumps(get_hash_mfa_state(email, env), sort_keys=True).encode("utf8")
# Make a HMAC using the system API key as a hash key.
hash_key = self.key.encode('ascii')
return hmac.new(hash_key, msg, digestmod="sha256").hexdigest()
def create_session_key(self, username, env, type=None):
# Create a new session.
token = secrets.token_hex(32)
self.sessions[token] = {
"email": username,
"password_token": self.create_user_password_state_token(username, env),
"type": type,
}
return token
def get_session(self, user_email, session_key, session_type, env):
if session_key not in self.sessions: return None
session = self.sessions[session_key]
if session_type == "login" and session["email"] != user_email: return None
if session["type"] != session_type: return None
if session["password_token"] != self.create_user_password_state_token(session["email"], env): return None
return session
| cc0-1.0 |
mcedit/pymclevel | minecraft_server.py | 3 | 20215 | import atexit
import itertools
import logging
import os
from os.path import dirname, join, basename
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
import urllib
import infiniteworld
from mclevelbase import appSupportDir, exhaust, ChunkNotPresent
log = logging.getLogger(__name__)
__author__ = 'Rio'
# Thank you, Stackoverflow
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def which(program):
def is_exe(f):
return os.path.exists(f) and os.access(f, os.X_OK)
fpath, _fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
if sys.platform == "win32":
if "SYSTEMROOT" in os.environ:
root = os.environ["SYSTEMROOT"]
exe_file = os.path.join(root, program)
if is_exe(exe_file):
return exe_file
if "PATH" in os.environ:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
def sort_nicely(l):
""" Sort the given list in the way that humans expect.
"""
l.sort(key=alphanum_key)
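# Illustrative note: alphanum_key splits out numeric runs so that versions sort
# numerically, e.g.
#   sorted(["Beta 1.10", "Beta 1.2"], key=alphanum_key) == ["Beta 1.2", "Beta 1.10"]
# whereas a plain lexicographic sort would put "Beta 1.10" first.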
class ServerJarStorage(object):
defaultCacheDir = os.path.join(appSupportDir, u"ServerJarStorage")
def __init__(self, cacheDir=None):
if cacheDir is None:
cacheDir = self.defaultCacheDir
self.cacheDir = cacheDir
if not os.path.exists(self.cacheDir):
os.makedirs(self.cacheDir)
readme = os.path.join(self.cacheDir, "README.TXT")
if not os.path.exists(readme):
with file(readme, "w") as f:
f.write("""
About this folder:
This folder is used by MCEdit and pymclevel to store different versions of the
Minecraft Server to use for terrain generation. It should have one or more
subfolders, one for each version of the server. Each subfolder must hold at
least one file named minecraft_server.jar, and the subfolder's name should
have the server's version plus the names of any installed mods.
There may already be a subfolder here (for example, "Beta 1.7.3") if you have
used the Chunk Create feature in MCEdit to create chunks using the server.
Version numbers can be automatically detected. If you place one or more
minecraft_server.jar files in this folder, they will be placed automatically
into well-named subfolders the next time you run MCEdit. If a file's name
begins with "minecraft_server" and ends with ".jar", it will be detected in
this way.
""")
self.reloadVersions()
def reloadVersions(self):
cacheDirList = os.listdir(self.cacheDir)
self.versions = list(reversed(sorted([v for v in cacheDirList if os.path.exists(self.jarfileForVersion(v))], key=alphanum_key)))
if MCServerChunkGenerator.javaExe:
for f in cacheDirList:
p = os.path.join(self.cacheDir, f)
if f.startswith("minecraft_server") and f.endswith(".jar") and os.path.isfile(p):
print "Unclassified minecraft_server.jar found in cache dir. Discovering version number..."
self.cacheNewVersion(p)
os.remove(p)
print "Minecraft_Server.jar storage initialized."
print u"Each server is stored in a subdirectory of {0} named with the server's version number".format(self.cacheDir)
print "Cached servers: ", self.versions
def downloadCurrentServer(self):
print "Downloading the latest Minecraft Server..."
try:
(filename, headers) = urllib.urlretrieve("http://www.minecraft.net/download/minecraft_server.jar")
except Exception, e:
print "Error downloading server: {0!r}".format(e)
return
self.cacheNewVersion(filename, allowDuplicate=False)
def cacheNewVersion(self, filename, allowDuplicate=True):
""" Finds the version number from the server jar at filename and copies
it into the proper subfolder of the server jar cache folder"""
version = MCServerChunkGenerator._serverVersionFromJarFile(filename)
print "Found version ", version
versionDir = os.path.join(self.cacheDir, version)
i = 1
newVersionDir = versionDir
while os.path.exists(newVersionDir):
if not allowDuplicate:
return
newVersionDir = versionDir + " (" + str(i) + ")"
i += 1
os.mkdir(newVersionDir)
shutil.copy2(filename, os.path.join(newVersionDir, "minecraft_server.jar"))
if version not in self.versions:
self.versions.append(version)
def jarfileForVersion(self, v):
return os.path.join(self.cacheDir, v, "minecraft_server.jar").encode(sys.getfilesystemencoding())
def checksumForVersion(self, v):
jf = self.jarfileForVersion(v)
with file(jf, "rb") as f:
import hashlib
return hashlib.md5(f.read()).hexdigest()
broken_versions = ["Beta 1.9 Prerelease {0}".format(i) for i in (1, 2, 3)]
@property
def latestVersion(self):
if len(self.versions) == 0:
return None
return max((v for v in self.versions if v not in self.broken_versions), key=alphanum_key)
def getJarfile(self, version=None):
if len(self.versions) == 0:
print "No servers found in cache."
self.downloadCurrentServer()
version = version or self.latestVersion
if version not in self.versions:
return None
return self.jarfileForVersion(version)
class JavaNotFound(RuntimeError):
pass
class VersionNotFound(RuntimeError):
pass
def readProperties(filename):
if not os.path.exists(filename):
return {}
with file(filename) as f:
properties = dict((line.split("=", 2) for line in (l.strip() for l in f) if not line.startswith("#")))
return properties
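# Illustrative example (values are made up): a server.properties file containing
#   level-name=world
#   server-port=25565
# parses to {"level-name": "world", "server-port": "25565"}. Values stay strings,
# so callers must convert numbers themselves.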
def saveProperties(filename, properties):
with file(filename, "w") as f:
for k, v in properties.iteritems():
f.write("{0}={1}\n".format(k, v))
def findJava():
if sys.platform == "win32":
javaExe = which("java.exe")
if javaExe is None:
KEY_NAME = "HKLM\SOFTWARE\JavaSoft\Java Runtime Environment"
try:
p = subprocess.Popen(["REG", "QUERY", KEY_NAME, "/v", "CurrentVersion"], stdout=subprocess.PIPE, universal_newlines=True)
o, e = p.communicate()
lines = o.split("\n")
for l in lines:
l = l.strip()
if l.startswith("CurrentVersion"):
words = l.split(None, 2)
version = words[-1]
p = subprocess.Popen(["REG", "QUERY", KEY_NAME + "\\" + version, "/v", "JavaHome"], stdout=subprocess.PIPE, universal_newlines=True)
o, e = p.communicate()
lines = o.split("\n")
for l in lines:
l = l.strip()
if l.startswith("JavaHome"):
w = l.split(None, 2)
javaHome = w[-1]
javaExe = os.path.join(javaHome, "bin", "java.exe")
print "RegQuery: java.exe found at ", javaExe
break
except Exception, e:
print "Error while locating java.exe using the Registry: ", repr(e)
else:
javaExe = which("java")
return javaExe
class MCServerChunkGenerator(object):
"""Generates chunks using minecraft_server.jar. Uses a ServerJarStorage to
store different versions of minecraft_server.jar in an application support
folder.
from pymclevel import *
Example usage:
gen = MCServerChunkGenerator() # with no arguments, use the newest
# server version in the cache, or download
# the newest one automatically
level = loadWorldNamed("MyWorld")
gen.generateChunkInLevel(level, 12, 24)
Using an older version:
gen = MCServerChunkGenerator("Beta 1.6.5")
"""
defaultJarStorage = None
javaExe = findJava()
jarStorage = None
tempWorldCache = {}
def __init__(self, version=None, jarfile=None, jarStorage=None):
self.jarStorage = jarStorage or self.getDefaultJarStorage()
if self.javaExe is None:
raise JavaNotFound("Could not find java. Please check that java is installed correctly. (Could not find java in your PATH environment variable.)")
if jarfile is None:
jarfile = self.jarStorage.getJarfile(version)
if jarfile is None:
raise VersionNotFound("Could not find minecraft_server.jar for version {0}. Please make sure that a minecraft_server.jar is placed under {1} in a subfolder named after the server's version number.".format(version or "(latest)", self.jarStorage.cacheDir))
self.serverJarFile = jarfile
self.serverVersion = version or self._serverVersion()
@classmethod
def getDefaultJarStorage(cls):
if cls.defaultJarStorage is None:
cls.defaultJarStorage = ServerJarStorage()
return cls.defaultJarStorage
@classmethod
def clearWorldCache(cls):
cls.tempWorldCache = {}
for tempDir in os.listdir(cls.worldCacheDir):
t = os.path.join(cls.worldCacheDir, tempDir)
if os.path.isdir(t):
shutil.rmtree(t)
def createReadme(self):
readme = os.path.join(self.worldCacheDir, "README.TXT")
if not os.path.exists(readme):
with file(readme, "w") as f:
f.write("""
About this folder:
This folder is used by MCEdit and pymclevel to cache levels during terrain
generation. Feel free to delete it for any reason.
""")
worldCacheDir = os.path.join(tempfile.gettempdir(), "pymclevel_MCServerChunkGenerator")
def tempWorldForLevel(self, level):
# tempDir = tempfile.mkdtemp("mclevel_servergen")
tempDir = os.path.join(self.worldCacheDir, self.jarStorage.checksumForVersion(self.serverVersion), str(level.RandomSeed))
propsFile = os.path.join(tempDir, "server.properties")
properties = readProperties(propsFile)
tempWorld = self.tempWorldCache.get((self.serverVersion, level.RandomSeed))
if tempWorld is None:
if not os.path.exists(tempDir):
os.makedirs(tempDir)
self.createReadme()
worldName = "world"
worldName = properties.setdefault("level-name", worldName)
tempWorldDir = os.path.join(tempDir, worldName)
tempWorld = infiniteworld.MCInfdevOldLevel(tempWorldDir, create=True, random_seed=level.RandomSeed)
tempWorld.close()
tempWorldRO = infiniteworld.MCInfdevOldLevel(tempWorldDir, readonly=True)
self.tempWorldCache[self.serverVersion, level.RandomSeed] = tempWorldRO
if level.dimNo == 0:
properties["allow-nether"] = "false"
else:
tempWorld = tempWorld.getDimension(level.dimNo)
properties["allow-nether"] = "true"
properties["server-port"] = int(32767 + random.random() * 32700)
saveProperties(propsFile, properties)
return tempWorld, tempDir
def generateAtPosition(self, tempWorld, tempDir, cx, cz):
return exhaust(self.generateAtPositionIter(tempWorld, tempDir, cx, cz))
def generateAtPositionIter(self, tempWorld, tempDir, cx, cz, simulate=False):
tempWorldRW = infiniteworld.MCInfdevOldLevel(tempWorld.filename)
tempWorldRW.setPlayerSpawnPosition((cx * 16, 64, cz * 16))
tempWorldRW.saveInPlace()
tempWorldRW.close()
del tempWorldRW
tempWorld.unload()
startTime = time.time()
proc = self.runServer(tempDir)
while proc.poll() is None:
line = proc.stdout.readline().strip()
log.info(line)
yield line
# Forge and FML change stderr output, causing MCServerChunkGenerator to wait endlessly.
#
# Vanilla:
# 2012-11-13 11:29:19 [INFO] Done (9.962s)!
#
# Forge/FML:
# 2012-11-13 11:47:13 [INFO] [Minecraft] Done (8.020s)!
if "INFO" in line and "Done" in line:
if simulate:
duration = time.time() - startTime
simSeconds = max(8, int(duration) + 1)
for i in range(simSeconds):
# process tile ticks
yield "%2d/%2d: Simulating the world for a little bit..." % (i, simSeconds)
time.sleep(1)
proc.stdin.write("stop\n")
proc.wait()
break
if "FAILED TO BIND" in line:
proc.kill()
proc.wait()
raise RuntimeError("Server failed to bind to port!")
stdout, _ = proc.communicate()
if "Could not reserve enough space" in stdout and not MCServerChunkGenerator.lowMemory:
MCServerChunkGenerator.lowMemory = True
for i in self.generateAtPositionIter(tempWorld, tempDir, cx, cz):
yield i
(tempWorld.parentWorld or tempWorld).loadLevelDat() # reload version number
def copyChunkAtPosition(self, tempWorld, level, cx, cz):
if level.containsChunk(cx, cz):
return
try:
tempChunkBytes = tempWorld._getChunkBytes(cx, cz)
except ChunkNotPresent, e:
raise ChunkNotPresent, "While generating a world in {0} using server {1} ({2!r})".format(tempWorld, self.serverJarFile, e), sys.exc_info()[2]
level.worldFolder.saveChunk(cx, cz, tempChunkBytes)
level._allChunks = None
def generateChunkInLevel(self, level, cx, cz):
assert isinstance(level, infiniteworld.MCInfdevOldLevel)
tempWorld, tempDir = self.tempWorldForLevel(level)
self.generateAtPosition(tempWorld, tempDir, cx, cz)
self.copyChunkAtPosition(tempWorld, level, cx, cz)
minRadius = 5
maxRadius = 20
def createLevel(self, level, box, simulate=False, **kw):
return exhaust(self.createLevelIter(level, box, simulate, **kw))
def createLevelIter(self, level, box, simulate=False, **kw):
if isinstance(level, basestring):
filename = level
level = infiniteworld.MCInfdevOldLevel(filename, create=True, **kw)
assert isinstance(level, infiniteworld.MCInfdevOldLevel)
minRadius = self.minRadius
genPositions = list(itertools.product(
xrange(box.mincx, box.maxcx, minRadius * 2),
xrange(box.mincz, box.maxcz, minRadius * 2)))
for i, (cx, cz) in enumerate(genPositions):
log.info("Generating at %s" % ((cx, cz),))
parentDir = dirname(os.path.abspath(level.worldFolder.filename))
propsFile = join(parentDir, "server.properties")
props = readProperties(join(dirname(self.serverJarFile), "server.properties"))
props["level-name"] = basename(level.worldFolder.filename)
props["server-port"] = int(32767 + random.random() * 32700)
saveProperties(propsFile, props)
for p in self.generateAtPositionIter(level, parentDir, cx, cz, simulate):
yield i, len(genPositions), p
level.close()
def generateChunksInLevel(self, level, chunks):
return exhaust(self.generateChunksInLevelIter(level, chunks))
def generateChunksInLevelIter(self, level, chunks, simulate=False):
tempWorld, tempDir = self.tempWorldForLevel(level)
startLength = len(chunks)
minRadius = self.minRadius
maxRadius = self.maxRadius
chunks = set(chunks)
while len(chunks):
length = len(chunks)
centercx, centercz = chunks.pop()
chunks.add((centercx, centercz))
# assume the generator always generates at least an 11x11 chunk square.
centercx += minRadius
centercz += minRadius
# boxedChunks = [cPos for cPos in chunks if inBox(cPos)]
print "Generating {0} chunks out of {1} starting from {2}".format("XXX", len(chunks), (centercx, centercz))
yield startLength - len(chunks), startLength
# chunks = [c for c in chunks if not inBox(c)]
for p in self.generateAtPositionIter(tempWorld, tempDir, centercx, centercz, simulate):
yield startLength - len(chunks), startLength, p
i = 0
for cx, cz in itertools.product(
xrange(centercx - maxRadius, centercx + maxRadius),
xrange(centercz - maxRadius, centercz + maxRadius)):
if level.containsChunk(cx, cz):
chunks.discard((cx, cz))
elif ((cx, cz) in chunks
and all(tempWorld.containsChunk(ncx, ncz) for ncx, ncz in itertools.product(xrange(cx-1, cx+2), xrange(cz-1, cz+2)))
):
self.copyChunkAtPosition(tempWorld, level, cx, cz)
i += 1
chunks.discard((cx, cz))
yield startLength - len(chunks), startLength
if length == len(chunks):
print "No chunks were generated. Aborting."
break
level.saveInPlace()
def runServer(self, startingDir):
if isinstance(startingDir, unicode):
startingDir = startingDir.encode(sys.getfilesystemencoding())
return self._runServer(startingDir, self.serverJarFile)
lowMemory = False
@classmethod
def _runServer(cls, startingDir, jarfile):
log.info("Starting server %s in %s", jarfile, startingDir)
if cls.lowMemory:
memflags = []
else:
memflags = ["-Xmx1024M", "-Xms1024M", ]
proc = subprocess.Popen([cls.javaExe, "-Djava.awt.headless=true"] + memflags + ["-jar", jarfile],
executable=cls.javaExe,
cwd=startingDir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
atexit.register(proc.terminate)
return proc
def _serverVersion(self):
return self._serverVersionFromJarFile(self.serverJarFile)
@classmethod
def _serverVersionFromJarFile(cls, jarfile):
tempdir = tempfile.mkdtemp("mclevel_servergen")
proc = cls._runServer(tempdir, jarfile)
version = "Unknown"
# out, err = proc.communicate()
# for line in err.split("\n"):
while proc.poll() is None:
line = proc.stdout.readline()
if "Preparing start region" in line:
break
if "Starting minecraft server version" in line:
version = line.split("Starting minecraft server version")[1].strip()
break
if proc.returncode is None:
try:
proc.kill()
except WindowsError:
pass # access denied, process already terminated
proc.wait()
shutil.rmtree(tempdir)
if ";)" in version:
version = version.replace(";)", "") # Damnit, Jeb!
# Versions like "0.2.1" are alphas, and versions like "1.0.0" without "Beta" are releases
if version[0] == "0":
version = "Alpha " + version
try:
if int(version[0]) > 0:
version = "Release " + version
except ValueError:
pass
return version
| isc |
mcedit/pymclevel | block_fill.py | 3 | 3454 | import logging
import materials
log = logging.getLogger(__name__)
import numpy
from mclevelbase import exhaust
import blockrotation
from entity import TileEntity
def blockReplaceTable(blocksToReplace):
blocktable = numpy.zeros((materials.id_limit, 16), dtype='bool')
for b in blocksToReplace:
if b.hasVariants:
blocktable[b.ID, b.blockData] = True
else:
blocktable[b.ID] = True
return blocktable
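# Illustrative note: the table is meant for numpy fancy indexing, e.g.
#   mask = blocktable[chunk.Blocks[slices], chunk.Data[slices]]
# which yields a boolean mask the same shape as the block slice, True wherever a
# block's (ID, data) pair matches one of the blocks to replace.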
def fillBlocks(level, box, blockInfo, blocksToReplace=()):
return exhaust(level.fillBlocksIter(box, blockInfo, blocksToReplace))
def fillBlocksIter(level, box, blockInfo, blocksToReplace=()):
if box is None:
chunkIterator = level.getAllChunkSlices()
box = level.bounds
else:
chunkIterator = level.getChunkSlices(box)
# shouldRetainData = (not blockInfo.hasVariants and not any([b.hasVariants for b in blocksToReplace]))
# if shouldRetainData:
# log.info( "Preserving data bytes" )
shouldRetainData = False # xxx old behavior overwrote blockdata with 0 when e.g. replacing water with lava
log.info("Replacing {0} with {1}".format(blocksToReplace, blockInfo))
changesLighting = True
blocktable = None
if len(blocksToReplace):
blocktable = blockReplaceTable(blocksToReplace)
shouldRetainData = all([blockrotation.SameRotationType(blockInfo, b) for b in blocksToReplace])
newAbsorption = level.materials.lightAbsorption[blockInfo.ID]
oldAbsorptions = [level.materials.lightAbsorption[b.ID] for b in blocksToReplace]
changesLighting = False
for a in oldAbsorptions:
if a != newAbsorption:
changesLighting = True
newEmission = level.materials.lightEmission[blockInfo.ID]
oldEmissions = [level.materials.lightEmission[b.ID] for b in blocksToReplace]
for a in oldEmissions:
if a != newEmission:
changesLighting = True
i = 0
skipped = 0
replaced = 0
for (chunk, slices, point) in chunkIterator:
i += 1
if i % 100 == 0:
log.info(u"Chunk {0}...".format(i))
yield i, box.chunkCount
blocks = chunk.Blocks[slices]
data = chunk.Data[slices]
mask = slice(None)
needsLighting = changesLighting
if blocktable is not None:
mask = blocktable[blocks, data]
blockCount = mask.sum()
replaced += blockCount
# don't waste time relighting and copying if the mask is empty
if blockCount:
blocks[:][mask] = blockInfo.ID
if not shouldRetainData:
data[mask] = blockInfo.blockData
else:
skipped += 1
needsLighting = False
def include(tileEntity):
p = TileEntity.pos(tileEntity)
x, y, z = map(lambda a, b, c: (a - b) - c, p, point, box.origin)
return not ((p in box) and mask[x, z, y])
chunk.TileEntities[:] = filter(include, chunk.TileEntities)
else:
blocks[:] = blockInfo.ID
if not shouldRetainData:
data[:] = blockInfo.blockData
chunk.removeTileEntitiesInBox(box)
chunk.chunkChanged(needsLighting)
if len(blocksToReplace):
log.info(u"Replace: Skipped {0} chunks, replaced {1} blocks".format(skipped, replaced))
| isc |
mcedit/mcedit | filters/CreateSpawners.py | 1 | 1386 | # Feel free to modify and use this filter however you wish. If you do,
# please give credit to SethBling.
# http://youtube.com/SethBling
from pymclevel import TAG_Compound
from pymclevel import TAG_Int
from pymclevel import TAG_Short
from pymclevel import TAG_Byte
from pymclevel import TAG_String
from pymclevel import TAG_Float
from pymclevel import TAG_Double
from pymclevel import TAG_List
from pymclevel import TileEntity
displayName = "Create Spawners"
inputs = (
("Include position data", False),
)
def perform(level, box, options):
includePos = options["Include position data"]
entitiesToRemove = []
for (chunk, slices, point) in level.getChunkSlices(box):
for entity in chunk.Entities:
x = int(entity["Pos"][0].value)
y = int(entity["Pos"][1].value)
z = int(entity["Pos"][2].value)
if x >= box.minx and x < box.maxx and y >= box.miny and y < box.maxy and z >= box.minz and z < box.maxz:
entitiesToRemove.append((chunk, entity))
level.setBlockAt(x, y, z, 52)
spawner = TileEntity.Create("MobSpawner")
TileEntity.setpos(spawner, (x, y, z))
spawner["Delay"] = TAG_Short(120)
spawner["SpawnData"] = entity
if not includePos:
del spawner["SpawnData"]["Pos"]
spawner["EntityId"] = entity["id"]
chunk.TileEntities.append(spawner)
for (chunk, entity) in entitiesToRemove:
chunk.Entities.remove(entity)
| isc |
mcedit/mcedit | filters/surfacerepair.py | 1 | 2001 |
from numpy import zeros, array
import itertools
# naturally occurring materials
from pymclevel.level import extractHeights
blocktypes = [1, 2, 3, 7, 12, 13, 14, 15, 16, 56, 73, 74, 87, 88, 89]
blockmask = zeros((256,), dtype='bool')
#compute a truth table that we can index to find out whether a block
# is naturally occurring and should be considered in a heightmap
blockmask[blocktypes] = True
displayName = "Chunk Surface Repair"
inputs = (
("Repairs the backwards surfaces made by old versions of Minecraft.", "label"),
)
def perform(level, box, options):
#iterate through the slices of each chunk in the selection box
for chunk, slices, point in level.getChunkSlices(box):
# slicing the block array is straightforward. blocks will contain only
# the area of interest in this chunk.
blocks = chunk.Blocks
data = chunk.Data
# use indexing to look up whether or not each block in blocks is
        # naturally-occurring. these blocks will "count" for column height.
maskedBlocks = blockmask[blocks]
heightmap = extractHeights(maskedBlocks)
for x in range(heightmap.shape[0]):
for z in range(x + 1, heightmap.shape[1]):
h = heightmap[x, z]
h2 = heightmap[z, x]
b2 = blocks[z, x, h2]
if blocks[x, z, h] == 1:
h += 2 # rock surface - top 4 layers become 2 air and 2 rock
if blocks[z, x, h2] == 1:
h2 += 2 # rock surface - top 4 layers become 2 air and 2 rock
# topsoil is 4 layers deep
def swap(s1, s2):
a2 = array(s2)
s2[:] = s1[:]
s1[:] = a2[:]
swap(blocks[x, z, h - 3:h + 1], blocks[z, x, h2 - 3:h2 + 1])
swap(data[x, z, h - 3:h + 1], data[z, x, h2 - 3:h2 + 1])
# remember to do this to make sure the chunk is saved
chunk.chunkChanged()
| isc |
josephmisiti/awesome-machine-learning | scripts/pull_R_packages.py | 1 | 1150 | #!/usr/bin/python
"""
This script will scrape the r-project.org machine learning selection and
format the packages in github markdown style for this
awesome-machine-learning repo.
"""
from pyquery import PyQuery as pq
import urllib
import codecs
import random
text_file = codecs.open("Packages.txt", encoding='utf-8', mode="w")
d = pq(url='http://cran.r-project.org/web/views/MachineLearning.html',
opener=lambda url, **kw: urllib.urlopen(url).read())
for e in d("li").items():
package_name = e("a").html()
package_link = e("a")[0].attrib['href']
if '..' in package_link:
package_link = package_link.replace("..",
'http://cran.r-project.org/web')
dd = pq(url=package_link, opener=lambda url,
**kw: urllib.urlopen(url).read())
package_description = dd("h2").html()
    text_file.write("* [%s](%s) - %s \n" % (package_name, package_link,
package_description))
# print("* [%s](%s) - %s" % (package_name,package_link,
# package_description))
| cc0-1.0 |
mozilla-services/tecken | docs/exts/adr_log.py | 1 | 5841 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
"""Directive for generating an ADR log from a directory of ADRs.
Usage::
.. adrlog:: PATH
.. adrlog:: PATH
:urlroot: https://github.com/mozilla-services/socorro/tree/main/docs/adr
Required parameters:
* PATH: the path relative to the docs/ directory to the ADR directory
Optional parameters:
* urlroot: the absolute url where the ADR files are located
"""
import dataclasses
import os
import os.path
from typing import Dict
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from docutils.statemachine import ViewList
@dataclasses.dataclass(order=True)
class ADR:
adr_id: str
name: str
metadata: Dict[str, str]
def fetch_adr(filepath):
"""Parses an ADR at filepath and returns ADR
:param filepath: path to ADR file in Markdown format
:returns: ADR
"""
with open(filepath) as fp:
source = fp.read()
# NOTE(willkg): I didn't want to require a markdown parser, so this just looks at
# Socorro's ADR log structure which is a header followed by a list of meta
# information
adr_id = os.path.splitext(os.path.basename(filepath))[0]
name = ""
metadata = {}
STATE_DEFAULT, STATE_LIST = range(2)
state = STATE_DEFAULT
for line in source.splitlines():
line = line.rstrip()
if state == STATE_DEFAULT:
if not line:
continue
elif line.startswith("# "):
name = line[2:]
elif line.startswith("- "):
state = STATE_LIST
if ":" not in line:
continue
key, val = line.split(":", 1)
metadata[key[2:].strip()] = val.strip()
if state == STATE_LIST:
if not line:
# If we hit an empty line while parsing the first list, then we're done
# and we can stop parsing
break
if ":" not in line:
continue
key, val = line.split(":", 1)
metadata[key[2:].strip()] = val.strip()
return ADR(adr_id=adr_id, name=name, metadata=metadata)
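# Illustrative example (file name and field values are made up): an ADR file
# named 0001-use-postgresql.md beginning with
#   # Use PostgreSQL
#   - Status: accepted
#   - Date: 2021-06-01
# parses to ADR(adr_id="0001-use-postgresql", name="Use PostgreSQL",
# metadata={"Status": "accepted", "Date": "2021-06-01"}).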
def fetch_adrs(filepath):
"""Given a filepath to an ADRs directory, returns the log
:param filepath: the filepath to ADR directory
:returns: list of ADRs
"""
adrs = []
for fn in os.listdir(filepath):
if not fn.endswith(".md"):
continue
if fn in ["index.md", "README.md", "template.md"]:
continue
fn = os.path.join(filepath, fn)
adrs.append(fetch_adr(fn))
return adrs
def build_table(table):
"""Generates reST for a table.
:param table: a 2d array of rows and columns
:returns: list of strings
"""
output = []
col_size = [0] * len(table[0])
for row in table:
for i, col in enumerate(row):
col_size[i] = max(col_size[i], len(col))
col_size = [width + 2 for width in col_size]
# Build header
output.append(" ".join("=" * width for width in col_size))
output.append(
" ".join(
header + (" " * (width - len(header)))
for header, width in zip(table[0], col_size)
)
)
output.append(" ".join("=" * width for width in col_size))
# Iterate through rows
for row in table[1:]:
output.append(
" ".join(
col + (" " * (width - len(col)))
for col, width in zip(row, col_size)
)
)
output.append(" ".join("=" * width for width in col_size))
return output
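# Illustrative example (values are made up): build_table([["Date", "Status"],
# ["2021-06-01", "accepted"]]) returns lines roughly like
#   ============ ==========
#   Date         Status
#   ============ ==========
#   2021-06-01   accepted
#   ============ ==========
# (each column is as wide as its longest cell plus two).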
class ADRLogDirective(Directive):
"""Directive for showing an ADR log."""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
"urlroot": directives.unchanged_required,
}
def add_line(self, line, source, *lineno):
"""Add a line to the result"""
self.result.append(line, source, *lineno)
def generate_log(self, filepath, urlroot):
def linkify(adr_id, urlroot):
if urlroot:
return f"`{adr_id} <{urlroot}/{adr_id}.md>`_"
return adr_id
adrs = fetch_adrs(filepath)
adrs.sort(reverse=True) # key=lambda adr: adr.adr_id, reverse=True)
table = [["Date", "ADR id", "Status", "Name", "Deciders"]]
for adr in adrs:
table.append(
[
adr.metadata.get("Date", "Unknown"),
linkify(adr.adr_id, urlroot),
adr.metadata.get("Status", "Unknown"),
adr.name,
adr.metadata.get("Deciders", "Unknown"),
]
)
sourcename = "adrlog %s" % filepath
for line in build_table(table):
self.add_line(line, sourcename)
def run(self):
if "urlroot" in self.options:
urlroot = self.options["urlroot"]
else:
urlroot = ""
self.reporter = self.state.document.reporter
self.result = ViewList()
filepath = os.path.abspath(self.arguments[0]).rstrip("/")
self.generate_log(filepath, urlroot)
if not self.result:
return []
node = nodes.paragraph()
node.document = self.state.document
self.state.nested_parse(self.result, 0, node)
return node.children
def setup(app):
"""Register directive in Sphinx."""
app.add_directive("adrlog", ADRLogDirective)
return {
"version": "1.0",
"parallel_read_safe": True,
"parallel_write_safe": True,
}
| mpl-2.0 |
mozilla-services/tecken | tecken/useradmin/management/commands/is-blocked-in-auth0.py | 1 | 1430 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from urllib.parse import urlparse
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from tecken.librequests import session_with_retries
from tecken.useradmin.middleware import find_users
class Command(BaseCommand):
help = "Find out if a user is blocked in Auth0 on the command line"
def add_arguments(self, parser):
parser.add_argument("email")
def handle(self, *args, **options):
email = options["email"]
if " " in email or email.count("@") != 1:
raise CommandError(f"Invalid email {email!r}")
session = session_with_retries()
users = find_users(
settings.OIDC_RP_CLIENT_ID,
settings.OIDC_RP_CLIENT_SECRET,
urlparse(settings.OIDC_OP_USER_ENDPOINT).netloc,
email,
session,
)
for user in users:
if user.get("blocked"):
self.stdout.write(self.style.ERROR("BLOCKED!"))
else:
self.stdout.write(self.style.SUCCESS("NOT blocked!"))
break
else:
self.stdout.write(
self.style.WARNING(f"{email} could not be found in Auth0")
)
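# Illustrative usage (the email address is made up); the command is run through
# Django's manage.py entry point, e.g.:
#
#   python manage.py is-blocked-in-auth0 someone@example.com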
| mpl-2.0 |
mozilla-services/tecken | eliot-service/eliot/health_resource.py | 1 | 1629 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
"""
Application-health related Falcon resources.
"""
import json
from dockerflow.version import get_version
import falcon
import markus
METRICS = markus.get_metrics(__name__)
class BrokenResource:
"""Handle ``/__broken__`` endpoint."""
def on_get(self, req, resp):
"""Implement GET HTTP request."""
METRICS.incr("broken.count")
# This is intentional breakage
raise Exception("intentional exception")
class VersionResource:
"""Handle ``/__version__`` endpoint."""
def __init__(self, basedir):
self.basedir = basedir
def on_get(self, req, resp):
"""Implement GET HTTP request."""
METRICS.incr("version.count")
resp.status = falcon.HTTP_200
resp.text = json.dumps(get_version(self.basedir) or {})
class LBHeartbeatResource:
"""Handle ``/__lbheartbeat__`` to let the load balancing know application health."""
def on_get(self, req, resp):
"""Implement GET HTTP request."""
METRICS.incr("lbheartbeat.count")
resp.content_type = "application/json; charset=utf-8"
resp.status = falcon.HTTP_200
class HeartbeatResource:
"""Handle ``/__heartbeat__`` for app health."""
def on_get(self, req, resp):
"""Implement GET HTTP request."""
METRICS.incr("heartbeat.count")
resp.content_type = "application/json; charset=utf-8"
resp.status = falcon.HTTP_200
| mpl-2.0 |
mozilla-services/tecken | systemtests/bin/make-stacks.py | 1 | 4833 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Fetches processed crash data for given crash ids and generates
# stacks for use with the Symbolication API. This has two modes:
#
# * print: prints the stack for a single crash id to stdout
# * save: saves one or more stacks for specified crash ids to the file
# system
#
# Usage: ./bin/make-stacks.py print [CRASHID]
#
# Usage: ./bin/make-stacks.py save [OUTPUTDIR] [CRASHID] [CRASHID...]
import json
import os
import sys
import click
import requests
PROCESSED_CRASH_API = "https://crash-stats.mozilla.org/api/ProcessedCrash"
def fetch_crash_report(crashid):
"""Fetch processed crash data from crash-stats
:param crashid: the crash id
:returns: processed crash as a dict
"""
headers = {"User-Agent": "tecken-systemtests"}
resp = requests.get(
PROCESSED_CRASH_API, params={"crash_id": crashid}, headers=headers
)
resp.raise_for_status()
return resp.json()
def build_stack(data):
"""Convert processed crash to a Symbolicate API payload
:param data: the processed crash as a dict
:returns: Symbolicate API payload
"""
json_dump = data.get("json_dump") or {}
if not json_dump:
return {}
crashing_thread = json_dump.get("crashing_thread") or {}
if not crashing_thread:
return {}
modules = []
modules_list = []
for module in json_dump.get("modules") or []:
debug_file = module.get("debug_file") or ""
debug_id = module.get("debug_id") or ""
# Add the module information to the map
modules.append((debug_file, debug_id))
# Keep track of which modules are at which index
modules_list.append(module.get("filename") or "unknown")
stack = []
for frame in crashing_thread.get("frames") or []:
if frame.get("module"):
module_index = modules_list.index(frame["module"])
else:
# -1 indicates the module is unknown
module_index = -1
if frame.get("module_offset"):
module_offset = int(frame["module_offset"], base=16)
else:
# -1 indicates the module_offset is unknown
module_offset = -1
stack.append((module_index, module_offset))
return {
"stacks": [stack],
"memoryMap": modules,
# NOTE(willkg): we mark this as version 5 so we can use curl on the
# json files directly
"version": 5,
}
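# Illustrative sketch of the returned payload for a crash with one module and
# one frame (module name, debug id, and offset are made up):
#
#     {
#         "stacks": [[(0, 6699)]],
#         "memoryMap": [("xul.pdb", "44E4EC8C2F41492B9369D6B9A059577C2")],
#         "version": 5,
#     }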
@click.group()
def make_stacks_group():
"""Generate stacks for symbolication from existing processed crash data."""
@make_stacks_group.command("print")
@click.option(
"--pretty/--no-pretty", default=False, help="Whether or not to print it pretty."
)
@click.argument("crashid", nargs=1)
@click.pass_context
def make_stacks_print(ctx, pretty, crashid):
"""Generate a stack from a processed crash and print it to stdout."""
crashid = crashid.strip()
crash_report = fetch_crash_report(crashid)
stack = build_stack(crash_report)
if pretty:
kwargs = {"indent": 2}
else:
kwargs = {}
print(json.dumps(stack, **kwargs))
@make_stacks_group.command("save")
@click.argument("outputdir")
@click.argument("crashids", nargs=-1)
@click.pass_context
def make_stacks_save(ctx, outputdir, crashids):
"""Generate stacks from processed crashes and save to file-system."""
# Handle crash ids from stdin or command line
if not crashids and not sys.stdin.isatty():
crashids = list(click.get_text_stream("stdin").readlines())
if not crashids:
raise click.BadParameter(
"No crashids provided.", ctx=ctx, param="crashids", param_hint="crashids"
)
if not os.path.exists(outputdir):
raise click.BadParameter(
"Outputdir does not exist.",
ctx=ctx,
param="outputdir",
param_hint="outputdir",
)
click.echo(f"Creating stacks and saving them to {outputdir!r} ...")
for crashid in crashids:
crashid = crashid.strip()
if crashid.startswith("#"):
continue
print(f"{crashid} ...")
crash_report = fetch_crash_report(crashid)
try:
data = build_stack(crash_report)
except Exception as exc:
click.echo(f"Exception thrown: {exc!r}")
data = None
if not data or not data["stacks"][0]:
click.echo("Nothing to save.")
continue
with open(os.path.join(outputdir, "%s.json" % crashid), "w") as fp:
json.dump(data, fp, indent=2)
click.echo("Done!")
if __name__ == "__main__":
make_stacks_group()
| mpl-2.0 |
mozilla-services/tecken | tecken/tests/test_libboto.py | 1 | 1241 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import pickle
from tecken.libboto import (
OwnEndpointConnectionError,
OwnClientError,
)
def test_pickle_OwnEndpointConnectionError():
    """test that it's possible to pickle, and unpickle an instance
    of a OwnEndpointConnectionError exception class."""
    exception = OwnEndpointConnectionError(endpoint_url="http://example.com")
    pickled = pickle.dumps(exception)
    unpickled = pickle.loads(pickled)
    # The two instances can't be compared directly, so compare their attributes.
    assert unpickled.msg == exception.msg
    assert unpickled.kwargs == exception.kwargs
    assert unpickled.fmt == exception.fmt
def test_pickle_OwnClientError():
    """test that it's possible to pickle, and unpickle an instance
    of a OwnClientError exception class."""
    exception = OwnClientError({"Error": {"Code": "123"}}, "PutObject")
    pickled = pickle.dumps(exception)
    unpickled = pickle.loads(pickled)
    # The two instances can't be compared directly, so compare their attributes.
    assert unpickled.response == exception.response
    assert unpickled.operation_name == exception.operation_name
| mpl-2.0 |
mozilla-services/tecken | tecken/upload/forms.py | 1 | 4406 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import os
from urllib.parse import urlparse
from requests.exceptions import ConnectionError, RetryError
from django import forms
from django.conf import settings
from tecken.librequests import session_with_retries
class UploadByDownloadRemoteError(Exception):
"""Happens when the upload-by-download URL is failing in a "transient" way.
For example, if the URL (when GET'ing) causes a ConnectionError or if it works
but returns a >=500 error. In those cases, we want to make sure the client
is informed "more strongly" than just getting a "400 Bad Request".
As a note;
See https://dxr.mozilla.org/mozilla-central/rev/423bdf7a802b0d302244492b423609187de39f56/toolkit/crashreporter/tools/upload_symbols.py#116 # noqa
The Taskcluster symbol uploader knows to retry on any 5xx error. That's
meant to reflect 5xx in Tecken. But by carrying the 5xx from the
upload-by-download URL, we're doing them a favor.
"""
class UploadByDownloadForm(forms.Form):
url = forms.URLField()
def clean_url(self):
url = self.cleaned_data["url"]
# The URL has to be https:// to start with
parsed = urlparse(url)
if not settings.ALLOW_UPLOAD_BY_ANY_DOMAIN:
if parsed.scheme != "https":
raise forms.ValidationError("Insecure URL")
self._check_url_domain(url)
return url
@staticmethod
def _check_url_domain(url):
netloc_wo_port = urlparse(url).netloc.split(":")[0]
if not settings.ALLOW_UPLOAD_BY_ANY_DOMAIN:
if netloc_wo_port not in settings.ALLOW_UPLOAD_BY_DOWNLOAD_DOMAINS:
raise forms.ValidationError(
f"Not an allowed domain ({netloc_wo_port!r}) " "to download from."
)
def clean(self):
cleaned_data = super().clean()
if "url" in cleaned_data:
# In the main view code where the download actually happens,
# it'll follow any redirects automatically, but we want to
# do "recursive HEADs" to find out the size of the file.
# It also gives us an opportunity to record the redirect trail.
url = cleaned_data["url"]
parsed = urlparse(url)
response, redirect_urls = self.get_final_response(url)
content_length = response.headers["content-length"]
cleaned_data["upload"] = {
"name": os.path.basename(parsed.path),
"size": int(content_length),
"redirect_urls": redirect_urls,
}
return cleaned_data
@staticmethod
def get_final_response(initial_url, max_redirects=5):
"""return the final response when it 200 OK'ed and a list of URLs
that we had to go through redirects of."""
redirect_urls = [] # the mutable "store"
def get_response(url):
try:
response = session_with_retries().head(url)
status_code = response.status_code
except ConnectionError:
raise UploadByDownloadRemoteError(
f"ConnectionError trying to open {url}"
)
except RetryError:
raise UploadByDownloadRemoteError(f"RetryError trying to open {url}")
if status_code >= 500:
raise UploadByDownloadRemoteError(f"{url} errored ({status_code})")
if status_code >= 400:
raise forms.ValidationError(f"{url} can't be found ({status_code})")
if status_code >= 300 and status_code < 400:
redirect_url = response.headers["location"]
redirect_urls.append(redirect_url)
# Only do this if we haven't done it "too much" yet.
if len(redirect_urls) > max_redirects:
raise forms.ValidationError(
f"Too many redirects trying to open {initial_url}"
)
return get_response(redirect_url)
assert status_code >= 200 and status_code < 300, status_code
return response
final_response = get_response(initial_url)
return final_response, redirect_urls
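# Illustrative usage sketch (the URL is hypothetical and must point at an
# allowed download domain):
#
#     form = UploadByDownloadForm({"url": "https://symbols.example.com/build/symbols.zip"})
#     if form.is_valid():
#         upload = form.cleaned_data["upload"]
#         # -> {"name": "symbols.zip", "size": <Content-Length>, "redirect_urls": [...]}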
| mpl-2.0 |
pimutils/todoman | tests/test_ui.py | 1 | 4822 | from datetime import datetime
from unittest import mock
import pytest
import pytz
from freezegun import freeze_time
from urwid import ExitMainLoop
from todoman.interactive import TodoEditor
def test_todo_editor_priority(default_database, todo_factory, default_formatter):
todo = todo_factory(priority=1)
lists = list(default_database.lists())
editor = TodoEditor(todo, lists, default_formatter)
assert editor._priority.label == "high"
editor._priority.keypress(10, "right")
with pytest.raises(ExitMainLoop): # Look at editor._msg_text if this fails
editor._keypress("ctrl s")
assert todo.priority == 0
def test_todo_editor_list(default_database, todo_factory, default_formatter, tmpdir):
tmpdir.mkdir("another_list")
default_database.paths = [
str(tmpdir.join("default")),
str(tmpdir.join("another_list")),
]
default_database.update_cache()
todo = todo_factory()
lists = list(default_database.lists())
editor = TodoEditor(todo, lists, default_formatter)
default_list = next(filter(lambda x: x.label == "default", editor.list_selector))
another_list = next(
filter(lambda x: x.label == "another_list", editor.list_selector)
)
assert editor.current_list == todo.list
assert default_list.label == todo.list.name
another_list.set_state(True)
editor._save_inner()
assert editor.current_list == todo.list
assert another_list.label == todo.list.name
def test_todo_editor_summary(default_database, todo_factory, default_formatter):
todo = todo_factory()
lists = list(default_database.lists())
editor = TodoEditor(todo, lists, default_formatter)
assert editor._summary.edit_text == "YARR!"
editor._summary.edit_text = "Goodbye"
with pytest.raises(ExitMainLoop): # Look at editor._msg_text if this fails
editor._keypress("ctrl s")
assert todo.summary == "Goodbye"
@freeze_time("2017-03-04 14:00:00", tz_offset=4)
def test_todo_editor_due(default_database, todo_factory, default_formatter):
tz = pytz.timezone("CET")
todo = todo_factory(due=datetime(2017, 3, 4, 14))
lists = list(default_database.lists())
default_formatter.tz = tz
editor = TodoEditor(todo, lists, default_formatter)
assert editor._due.edit_text == "2017-03-04 14:00"
editor._due.edit_text = "2017-03-10 12:00"
with pytest.raises(ExitMainLoop): # Look at editor._msg_text if this fails
editor._keypress("ctrl s")
assert todo.due == datetime(2017, 3, 10, 12, tzinfo=tz)
def test_toggle_help(default_database, default_formatter, todo_factory):
todo = todo_factory()
lists = list(default_database.lists())
editor = TodoEditor(todo, lists, default_formatter)
editor._loop = mock.MagicMock()
assert editor._help_text not in editor.left_column.body.contents
editor._keypress("f1")
# Help text is made visible
assert editor._help_text in editor.left_column.body.contents
# Called event_loop.draw_screen
assert editor._loop.draw_screen.call_count == 1
assert editor._loop.draw_screen.call_args == mock.call()
editor._keypress("f1")
# Help text is made visible
assert editor._help_text not in editor.left_column.body.contents
# Called event_loop.draw_screen
assert editor._loop.draw_screen.call_count == 2
assert editor._loop.draw_screen.call_args == mock.call()
def test_show_save_errors(default_database, default_formatter, todo_factory):
todo = todo_factory()
lists = list(default_database.lists())
editor = TodoEditor(todo, lists, default_formatter)
# editor._loop = mock.MagicMock()
editor._due.set_edit_text("not a date")
editor._keypress("ctrl s")
assert (
editor.left_column.body.contents[2].get_text()[0]
== "Time description not recognized: not a date"
)
@pytest.mark.parametrize("completed", [True, False])
@pytest.mark.parametrize("check", [True, False])
def test_save_completed(check, completed, default_formatter, todo_factory):
todo = todo_factory()
if completed:
todo.complete()
editor = TodoEditor(todo, [todo.list], default_formatter)
editor._completed.state = check
with pytest.raises(ExitMainLoop):
editor._keypress("ctrl s")
assert todo.is_completed is check
def test_ctrl_c_clears(default_formatter, todo_factory):
todo = todo_factory()
editor = TodoEditor(todo, [todo.list], default_formatter)
# Simulate that ctrl+c gets pressed, since we can't *really* do that
# trivially inside unit tests.
with mock.patch(
"urwid.main_loop.MainLoop.run", side_effect=KeyboardInterrupt
), mock.patch(
"urwid.main_loop.MainLoop.stop",
) as mocked_stop:
editor.edit()
assert mocked_stop.call_count == 1
| isc |
pimutils/todoman | docs/source/conf.py | 1 | 2282 | #!/usr/bin/env python3
import todoman
from todoman.configuration import CONFIG_SPEC
from todoman.configuration import NO_DEFAULT
# -- Generate confspec.rst ----------------------------------------------
def confspec_rst():
"""Generator that returns lines for the confspec doc page."""
for name, type_, default, description, _validation in sorted(CONFIG_SPEC):
if default == NO_DEFAULT:
formatted_default = "None, this field is mandatory."
elif isinstance(default, str):
formatted_default = f'``"{default}"``'
else:
formatted_default = f"``{default}``"
yield f"\n.. _main-{name}:"
yield f"\n\n.. object:: {name}\n"
yield " " + "\n ".join(line for line in description.splitlines())
yield "\n\n"
if isinstance(type_, tuple):
yield f" :type: {type_[0].__name__}"
else:
yield f" :type: {type_.__name__}"
yield f"\n :default: {formatted_default}\n"
with open("confspec.tmp", "w") as file_:
file_.writelines(confspec_rst())
# -- General configuration ------------------------------------------------
extensions = [
"sphinx_click.ext",
"sphinx.ext.autodoc",
"sphinx_autorun",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx_rtd_theme",
]
source_suffix = ".rst"
master_doc = "index"
project = "Todoman"
copyright = "2015-2020, Hugo Osvaldo Barrera"
author = "Hugo Osvaldo Barrera <hugo@barrera.io>, et al"
# The short X.Y version.
version = todoman.__version__
# The full version, including alpha/beta/rc tags.
release = todoman.__version__
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
html_theme = "sphinx_rtd_theme"
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"man",
"todo",
"a simple, standards-based, cli todo manager",
[author],
1,
)
]
| isc |
mozilla-services/autopush | autopush/main.py | 1 | 10560 | """autopush/autoendpoint daemon scripts"""
import os
from argparse import Namespace # noqa
from twisted.application.internet import (
TCPServer,
TimerService,
SSLServer,
StreamServerEndpointService,
)
from twisted.application.service import MultiService
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.protocol import ServerFactory # noqa
from twisted.logger import Logger
from typing import ( # noqa
Any,
Dict,
Optional,
Sequence,
)
from autopush import constants
from autopush.http import (
InternalRouterHTTPFactory,
EndpointHTTPFactory,
MemUsageHTTPFactory,
agent_from_config
)
from autopush.config import AutopushConfig
from autopush.db import DatabaseManager, DynamoDBResource # noqa
from autopush.exceptions import InvalidConfig
from autopush.haproxy import HAProxyServerEndpoint
from autopush.logging import PushLogger
from autopush.main_argparse import parse_connection, parse_endpoint
from autopush.metrics import periodic_reporter
from autopush.router import routers_from_config
from autopush.ssl import (
monkey_patch_ssl_wrap_socket,
undo_monkey_patch_ssl_wrap_socket,
)
from autopush.websocket import (
ConnectionWSSite,
PushServerFactory,
)
from autopush.websocket import PushServerProtocol # noqa
log = Logger()
class AutopushMultiService(MultiService):
shared_config_files = (
'/etc/autopush_shared.ini',
'configs/autopush_shared.ini',
'~/.autopush_shared.ini',
'.autopush_shared.ini',
)
config_files = None # type: Sequence[str]
logger_name = None # type: str
def __init__(self, conf, resource=None):
# type: (AutopushConfig, DynamoDBResource) -> None
super(AutopushMultiService, self).__init__()
self.conf = conf
self.db = DatabaseManager.from_config(conf, resource=resource)
self.agent = agent_from_config(conf)
@staticmethod
def parse_args(config_files, args):
# type: (Sequence[str], Sequence[str]) -> Namespace
"""Parse command line args via argparse"""
raise NotImplementedError # pragma: nocover
def setup(self, rotate_tables=True):
# type: (bool) -> None
"""Initialize the services"""
if not self.conf.no_sslcontext_cache:
monkey_patch_ssl_wrap_socket()
def add_maybe_ssl(self, port, factory, ssl_cf):
# type: (int, ServerFactory, Optional[Any]) -> None
"""Add a Service from factory, optionally behind TLS"""
self.addService(
SSLServer(port, factory, contextFactory=ssl_cf, reactor=reactor)
if ssl_cf else
TCPServer(port, factory, reactor=reactor)
)
def add_timer(self, *args, **kwargs):
"""Add a TimerService"""
self.addService(TimerService(*args, **kwargs))
def add_memusage(self):
"""Add the memusage Service"""
factory = MemUsageHTTPFactory(self.conf, None)
self.addService(
TCPServer(self.conf.memusage_port, factory, reactor=reactor))
def run(self):
"""Start the services and run the reactor"""
reactor.suggestThreadPoolSize(constants.THREAD_POOL_SIZE)
self.startService()
reactor.run()
@inlineCallbacks
def stopService(self):
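        """Stop all services, closing the HTTP agent's cached connections."""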
yield self.agent._pool.closeCachedConnections()
yield super(AutopushMultiService, self).stopService()
if not self.conf.no_sslcontext_cache:
undo_monkey_patch_ssl_wrap_socket()
@classmethod
def _from_argparse(cls, ns, resource=None, **kwargs):
# type: (Namespace, DynamoDBResource, **Any) -> AutopushMultiService
"""Create an instance from argparse/additional kwargs"""
# Add some entropy to prevent potential conflicts.
postfix = os.urandom(4).encode('hex').ljust(8, '0')
conf = AutopushConfig.from_argparse(
ns,
debug=ns.debug,
preflight_uaid="deadbeef00000000deadbeef" + postfix,
**kwargs
)
return cls(conf, resource=resource)
@classmethod
def main(cls, args=None, use_files=True, resource=None):
# type: (Sequence[str], bool, DynamoDBResource) -> Any
"""Entry point to autopush's main command line scripts.
aka autopush/autoendpoint.
"""
ns = cls.parse_args(cls.config_files if use_files else [], args)
PushLogger.setup_logging(
cls.logger_name,
log_level=ns.log_level or ("debug" if ns.debug else "info"),
log_format="text" if ns.human_logs else "json",
log_output=ns.log_output,
sentry_dsn=bool(os.environ.get("SENTRY_DSN")),
firehose_delivery_stream=ns.firehose_stream_name,
no_aws=ns.no_aws
)
try:
app = cls.from_argparse(ns, resource=resource)
except InvalidConfig as e:
log.critical(str(e))
return 1
app.setup()
return app.run()
class EndpointApplication(AutopushMultiService):
"""The autoendpoint application"""
config_files = AutopushMultiService.shared_config_files + (
'/etc/autopush_endpoint.ini',
'configs/autopush_endpoint.ini',
'~/.autopush_endpoint.ini',
'.autopush_endpoint.ini'
)
parse_args = staticmethod(parse_endpoint) # type: ignore
logger_name = "Autoendpoint"
endpoint_factory = EndpointHTTPFactory
def __init__(self, conf, resource=None):
# type: (AutopushConfig, DynamoDBResource) -> None
super(EndpointApplication, self).__init__(conf, resource=resource)
self.routers = routers_from_config(conf, self.db, self.agent)
def setup(self, rotate_tables=True):
super(EndpointApplication, self).setup(rotate_tables)
self.db.setup(self.conf.preflight_uaid)
self.add_endpoint()
if self.conf.memusage_port:
self.add_memusage()
# Start the table rotation checker/updater
if rotate_tables:
self.add_timer(60, self.db.update_rotating_tables)
self.add_timer(15, periodic_reporter, self.db.metrics,
prefix='autoendpoint')
def add_endpoint(self):
"""Start the Endpoint HTTP router"""
conf = self.conf
factory = self.endpoint_factory(conf, self.db, self.routers)
factory.protocol.maxData = conf.max_data
factory.add_health_handlers()
ssl_cf = factory.ssl_cf()
self.add_maybe_ssl(conf.port, factory, ssl_cf)
if conf.proxy_protocol_port:
ep = HAProxyServerEndpoint(
reactor,
conf.proxy_protocol_port,
ssl_cf
)
self.addService(StreamServerEndpointService(ep, factory))
@classmethod
def from_argparse(cls, ns, resource=None):
# type: (Namespace, DynamoDBResource) -> AutopushMultiService
return super(EndpointApplication, cls)._from_argparse(
ns,
port=ns.port,
endpoint_scheme=ns.endpoint_scheme,
endpoint_hostname=ns.endpoint_hostname or ns.hostname,
endpoint_port=ns.endpoint_port,
cors=not ns.no_cors,
bear_hash_key=ns.auth_key,
proxy_protocol_port=ns.proxy_protocol_port,
aws_ddb_endpoint=ns.aws_ddb_endpoint,
resource=resource
)
class ConnectionApplication(AutopushMultiService):
"""The autopush application"""
config_files = AutopushMultiService.shared_config_files + (
'/etc/autopush_connection.ini',
'configs/autopush_connection.ini',
'~/.autopush_connection.ini',
'.autopush_connection.ini'
)
parse_args = staticmethod(parse_connection) # type: ignore
logger_name = "Autopush"
internal_router_factory = InternalRouterHTTPFactory
websocket_factory = PushServerFactory
websocket_site_factory = ConnectionWSSite
def __init__(self, conf, resource=None):
# type: (AutopushConfig, DynamoDBResource) -> None
super(ConnectionApplication, self).__init__(
conf,
resource=resource
)
self.clients = {} # type: Dict[str, PushServerProtocol]
def setup(self, rotate_tables=True):
super(ConnectionApplication, self).setup(rotate_tables)
self.db.setup(self.conf.preflight_uaid)
self.add_internal_router()
if self.conf.memusage_port:
self.add_memusage()
self.add_websocket()
# Start the table rotation checker/updater
if rotate_tables:
self.add_timer(60, self.db.update_rotating_tables)
self.add_timer(15, periodic_reporter, self.db.metrics)
def add_internal_router(self):
"""Start the internal HTTP notification router"""
factory = self.internal_router_factory(
self.conf, self.db, self.clients)
factory.add_health_handlers()
self.add_maybe_ssl(self.conf.router_port, factory, factory.ssl_cf())
def add_websocket(self):
"""Start the public WebSocket server"""
conf = self.conf
ws_factory = self.websocket_factory(conf, self.db, self.agent,
self.clients)
site_factory = self.websocket_site_factory(conf, ws_factory)
self.add_maybe_ssl(conf.port, site_factory, site_factory.ssl_cf())
@classmethod
def from_argparse(cls, ns, resource=None):
# type: (Namespace, DynamoDBResource) -> AutopushMultiService
return super(ConnectionApplication, cls)._from_argparse(
ns,
port=ns.port,
endpoint_scheme=ns.endpoint_scheme,
endpoint_hostname=ns.endpoint_hostname,
endpoint_port=ns.endpoint_port,
router_scheme="https" if ns.router_ssl_key else "http",
router_hostname=ns.router_hostname,
router_port=ns.router_port,
env=ns.env,
hello_timeout=ns.hello_timeout,
router_ssl=dict(
key=ns.router_ssl_key,
cert=ns.router_ssl_cert,
dh_param=ns.ssl_dh_param
),
auto_ping_interval=ns.auto_ping_interval,
auto_ping_timeout=ns.auto_ping_timeout,
max_connections=ns.max_connections,
close_handshake_timeout=ns.close_handshake_timeout,
aws_ddb_endpoint=ns.aws_ddb_endpoint,
resource=resource
)
| mpl-2.0 |
mozilla-services/autopush | autopush/tests/test_web_validation.py | 1 | 40559 | import time
import uuid
import base64
from hashlib import sha256
import ecdsa
from cryptography.fernet import InvalidToken
from cryptography.exceptions import InvalidSignature
from jose import jws, jwk
from marshmallow import Schema, fields
from mock import Mock, patch
import pytest
from twisted.internet.defer import inlineCallbacks
from twisted.trial import unittest
from autopush.metrics import SinkMetrics
from autopush.exceptions import (
InvalidRequest,
InvalidTokenException,
ItemNotFound
)
from autopush.tests.support import test_db
import autopush.utils as utils
dummy_uaid = str(uuid.UUID("abad1dea00000000aabbccdd00000000"))
dummy_chid = str(uuid.UUID("deadbeef00000000decafbad00000000"))
dummy_token = dummy_uaid + ":" + dummy_chid
class InvalidSchema(Schema):
afield = fields.Integer(required=True)
class TestThreadedValidate(unittest.TestCase):
def _make_fut(self, schema):
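        """Build the ThreadedValidate instance under test for ``schema``."""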
from autopush.web.base import ThreadedValidate
return ThreadedValidate(schema)
def _make_basic_schema(self):
class Basic(Schema):
pass
return Basic
def _make_dummy_request(self, method="GET", uri="/", **kwargs):
from cyclone.httpserver import HTTPRequest
req = HTTPRequest(method, uri, **kwargs)
req.connection = Mock()
return req
def _make_req_handler(self, request):
self._mock_errors = Mock()
from cyclone.web import RequestHandler
class ValidateRequest(RequestHandler):
def _write_validation_err(rh, errors):
self._mock_errors(errors)
# Minimal mocks needed for a cyclone app to work
app = Mock()
app.ui_modules = dict()
app.ui_methods = dict()
vr = ValidateRequest(app, request)
vr._timings = dict()
vr.conf = Mock()
vr.metrics = Mock()
vr.db = Mock()
vr.routers = Mock()
return vr
def _make_full(self, schema=None):
req = self._make_dummy_request()
if not schema:
schema = self._make_basic_schema()
tv = self._make_fut(schema)
rh = self._make_req_handler(req)
return tv, rh
def test_validate_load(self):
tv, rh = self._make_full()
d, errors = tv._validate_request(rh)
assert errors == {}
assert d == {}
def test_validate_invalid_schema(self):
tv, rh = self._make_full(schema=InvalidSchema)
d, errors = tv._validate_request(rh)
assert "afield" in errors
assert d == {}
def test_call_func_no_error(self):
mock_func = Mock()
tv, rh = self._make_full()
result = tv._validate_request(rh)
tv._call_func(result, mock_func, rh)
mock_func.assert_called()
def test_call_func_error(self):
mock_func = Mock()
tv, rh = self._make_full(schema=InvalidSchema)
result = tv._validate_request(rh)
tv._call_func(result, mock_func, rh)
self._mock_errors.assert_called()
assert len(mock_func.mock_calls) == 0
@inlineCallbacks
def test_decorator(self):
from autopush.http import EndpointHTTPFactory
from autopush.web.base import BaseWebHandler, threaded_validate
from autopush.tests.client import Client
schema = self._make_basic_schema()
class AHandler(BaseWebHandler):
def authenticate_peer_cert(self):
pass
@threaded_validate(schema)
def get(self):
self.write("done")
self.finish()
app = EndpointHTTPFactory(
Mock(),
db=test_db(),
routers=None,
handlers=[('/test', AHandler)]
)
client = Client(app)
resp = yield client.get('/test')
assert resp.content == "done"
class TestWebPushRequestSchema(unittest.TestCase):
def _make_fut(self):
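        """Build a WebPushRequestSchema with its context wired to mocks and a test db."""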
from autopush.web.webpush import WebPushRequestSchema
schema = WebPushRequestSchema()
schema.context.update(
conf=Mock(),
metrics=SinkMetrics(),
db=test_db(),
routers=Mock(),
log=Mock()
)
return schema
def _make_test_data(self, headers=None, body="", path_args=None,
path_kwargs=None, arguments=None):
return dict(
headers=headers or {},
body=body,
path_args=path_args or [],
path_kwargs=path_kwargs or {},
arguments=arguments or {},
)
def test_valid_data(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="gcm",
router_data=dict(creds=dict(senderID="bogus")),
)
result, errors = schema.load(self._make_test_data())
assert errors == {}
assert "notification" in result
assert str(result["subscription"]["uaid"]) == dummy_uaid
def test_no_headers(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="gcm",
router_data=dict(creds=dict(senderID="bogus")),
)
data = self._make_test_data(body="asdfasdf")
with pytest.raises(InvalidRequest) as cm:
schema.load(data)
assert cm.value.status_code == 400
assert cm.value.errno == 110
assert str(cm.value) == "Unknown Content-Encoding"
def test_invalid_token(self):
schema = self._make_fut()
def throw_item(*args, **kwargs):
raise InvalidTokenException("Not found")
schema.context["conf"].parse_endpoint.side_effect = throw_item
with pytest.raises(InvalidRequest) as cm:
schema.load(self._make_test_data())
assert cm.value.errno == 102
def test_invalid_fernet_token(self):
schema = self._make_fut()
def throw_item(*args, **kwargs):
raise InvalidToken
schema.context["conf"].parse_endpoint.side_effect = throw_item
with pytest.raises(InvalidRequest) as cm:
schema.load(self._make_test_data())
assert cm.value.errno == 102
def test_invalid_uaid_not_found(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
def throw_item(*args, **kwargs):
raise ItemNotFound("Not found")
schema.context["db"].router.get_uaid.side_effect = throw_item
with pytest.raises(InvalidRequest) as cm:
schema.load(self._make_test_data())
assert cm.value.errno == 103
def test_critical_failure(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="fcm",
critical_failure="Bad SenderID",
)
with pytest.raises(InvalidRequest) as cm:
schema.load(self._make_test_data())
assert cm.value.errno == 105
def test_invalid_header_combo(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="gcm",
router_data=dict(creds=dict(senderID="bogus")),
)
info = self._make_test_data(
headers={
"content-encoding": "aesgcm128",
"crypto-key": "dh=asdfjialsjdfiasjld",
"encryption-key": "dh=asdfjasidlfjaislf",
},
body="asdfasdf",
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.errno == 110
def test_invalid_header_combo_04(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="gcm",
router_data=dict(creds=dict(senderID="bogus")),
)
info = self._make_test_data(
headers={
"content-encoding": "aesgcm",
"encryption": "salt=ajisldjfi",
"crypto-key": "dh=asdfjialsjdfiasjld",
"encryption-key": "dh=asdfjasidlfjaislf",
},
body="asdfasdf",
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert str(cm.value) == (
"Encryption-Key header not valid for 02 "
"or later webpush-encryption")
assert cm.value.errno == 110
def test_missing_encryption_salt(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="gcm",
router_data=dict(creds=dict(senderID="bogus")),
)
info = self._make_test_data(
headers={
"content-encoding": "aesgcm128",
"encryption": "dh=asdfjasidlfjaislf",
"encryption-key": "dh=jilajsidfljasildjf",
},
body="asdfasdf",
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 400
assert cm.value.errno == 110
def test_missing_encryption_salt_04(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="gcm",
router_data=dict(creds=dict(senderID="bogus")),
)
info = self._make_test_data(
headers={
"content-encoding": "aesgcm",
"encryption": "dh=asdfjasidlfjaislf",
"crypto-key": "dh=jilajsidfljasildjf",
},
body="asdfasdf",
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 400
assert cm.value.errno == 110
def test_missing_encryption_key_dh(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="gcm",
router_data=dict(creds=dict(senderID="bogus")),
)
info = self._make_test_data(
headers={
"content-encoding": "aesgcm128",
"encryption": "salt=asdfjasidlfjaislf",
"encryption-key": "keyid=jialsjdifjlasd",
},
body="asdfasdf",
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 400
assert cm.value.errno == 110
def test_missing_crypto_key_dh(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="gcm",
uaid=dummy_uaid,
router_data=dict(creds=dict(senderID="bogus")),
)
info = self._make_test_data(
headers={
"content-encoding": "aesgcm",
"encryption": "salt=asdfjasidlfjaislf",
"crypto-key": "p256ecdsa=BA1Hxzyi1RUM1b5wjxsn7nGxAs",
},
body="asdfasdf",
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 400
assert cm.value.errno == 110
def test_invalid_data_size(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="gcm",
uaid=dummy_uaid,
router_data=dict(creds=dict(senderID="bogus")),
)
schema.context["conf"].max_data = 1
with pytest.raises(InvalidRequest) as cm:
schema.load(self._make_test_data(
headers={
"content-encoding": "aesgcm",
"crypto-key": "dh=asdfjialsjdfiasjld",
},
body="asdfasdfasdfasdfasd"))
assert cm.value.errno == 104
def test_invalid_data_must_have_crypto_headers(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="gcm",
router_data=dict(creds=dict(senderID="bogus")),
)
with pytest.raises(InvalidRequest) as cm:
schema.load(self._make_test_data(body="asdfasdfasdfasdfasd"))
assert cm.value.errno == 110
def test_valid_data_crypto_padding_stripped(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="gcm",
router_data=dict(creds=dict(senderID="bogus")),
)
padded_value = "asdfjiasljdf==="
info = self._make_test_data(
body="asdfasdfasdfasdf",
headers={
"content-encoding": "aesgcm128",
"encryption": "salt=" + padded_value,
"encryption-key": "dh=asdfasdfasdf",
}
)
result, errors = schema.load(info)
assert errors == {}
assert result["headers"]["encryption"] == "salt=asdfjiasljdf"
def test_invalid_dh_value_for_01_crypto(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="gcm",
router_data=dict(creds=dict(senderID="bogus")),
)
padded_value = "asdfjiasljdf==="
info = self._make_test_data(
body="asdfasdfasdfasdf",
headers={
"authorization": "not vapid",
"content-encoding": "aesgcm128",
"encryption": "salt=" + padded_value,
"crypto-key": "dh=asdfasdfasdf"
}
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 400
assert str(cm.value) == (
"dh value in Crypto-Key header not valid "
"for 01 or earlier webpush-encryption")
def test_invalid_vapid_crypto_header(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="gcm",
uaid=dummy_uaid,
router_data=dict(creds=dict(senderID="bogus")),
)
info = self._make_test_data(
body="asdfasdfasdfasdf",
headers={
"content-encoding": "aesgcm",
"encryption": "salt=ignored",
"authorization": "invalid",
"crypto-key": "dh=crap",
}
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 401
def test_invalid_topic(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="gcm",
uaid=dummy_uaid,
router_data=dict(creds=dict(senderID="bogus")),
)
info = self._make_test_data(
headers={
"topic": "asdfasdfasdfasdfasdfasdfasdfasdfasdfasdf",
}
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 400
assert cm.value.errno == 113
assert str(cm.value) == "Topic must be no greater than 32 characters"
info = self._make_test_data(
headers={
"topic": "asdf??asdf::;f",
}
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 400
assert cm.value.errno == 113
assert str(cm.value) == ("Topic must be URL and Filename "
"safe Base64 alphabet")
def test_no_current_month(self):
schema = self._make_fut()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="webpush",
uaid=dummy_uaid,
)
info = self._make_test_data()
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 410
assert cm.value.errno == 106
assert str(cm.value) == "No such subscription"
def test_old_current_month(self):
schema = self._make_fut()
schema.context["db"].message_tables = dict()
schema.context["conf"].parse_endpoint.return_value = dict(
uaid=dummy_uaid,
chid=dummy_chid,
public_key="",
)
schema.context["db"].router.get_uaid.return_value = dict(
router_type="webpush",
uaid=dummy_uaid,
current_month="message_2014_01",
)
info = self._make_test_data()
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 410
assert cm.value.errno == 106
assert str(cm.value) == "No such subscription"
class TestWebPushRequestSchemaUsingVapid(unittest.TestCase):
def _make_fut(self):
from autopush.config import AutopushConfig
from autopush.web.webpush import WebPushRequestSchema
conf = AutopushConfig(
hostname="localhost",
endpoint_scheme="http",
statsd_host=None,
)
db = test_db()
schema = WebPushRequestSchema()
schema.context.update(
conf=conf,
metrics=SinkMetrics(),
db=db,
routers=Mock(),
log=Mock()
)
db.router.get_uaid.return_value = dict(
router_type="gcm",
uaid=dummy_uaid,
router_data=dict(creds=dict(senderID="bogus")),
)
conf.fernet = self.fernet_mock = Mock()
return schema
def _make_test_data(self, headers=None, body="", path_args=None,
path_kwargs=None, arguments=None):
return dict(
headers=headers or {},
body=body,
path_args=path_args or [],
path_kwargs=path_kwargs or {"api_ver": "v2", "token": "xxx"},
arguments=arguments or {},
)
def _gen_jwt(self, header, payload):
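        """Sign ``payload`` with a fresh ES256 key; return (compact JWT, base64url public key)."""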
sk256p = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
vk = sk256p.get_verifying_key()
sig = jws.sign(payload, sk256p, algorithm="ES256").strip('=')
crypto_key = utils.base64url_encode(vk.to_string()).strip('=')
return sig, crypto_key
def test_valid_vapid_crypto_header(self):
schema = self._make_fut()
header = {"typ": "JWT", "alg": "ES256"}
payload = {"aud": "http://localhost",
"exp": int(time.time()) + 86400,
"sub": "mailto:admin@example.com"}
token, crypto_key = self._gen_jwt(header, payload)
auth = "Bearer %s" % token
ckey = 'keyid="a1"; dh="foo";p256ecdsa="%s"' % crypto_key
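        # Mock the Fernet-decrypted v2 token: 32 filler bytes (uaid + chid)
        # followed by the SHA-256 digest of the test VAPID public key, so the
        # token's embedded key hash matches the key that signed the JWT.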
self.fernet_mock.decrypt.return_value = ('a'*32) + \
sha256(utils.base64url_decode(crypto_key)).digest()
info = self._make_test_data(
body="asdfasdfasdfasdf",
path_kwargs=dict(
api_ver="v2",
token="asdfasdf",
),
headers={
"content-encoding": "aesgcm",
"encryption": "salt=stuff",
"authorization": auth,
"crypto-key": ckey
}
)
result, errors = schema.load(info)
assert errors == {}
assert "jwt" in result
def test_valid_vapid_crypto_header_webpush(self, use_crypto=False):
schema = self._make_fut()
schema.context["conf"].use_cryptography = use_crypto
header = {"typ": "JWT", "alg": "ES256"}
payload = {"aud": "http://localhost",
"exp": int(time.time()) + 86400,
"sub": "mailto:admin@example.com"}
token, crypto_key = self._gen_jwt(header, payload)
auth = "WebPush %s" % token
self.fernet_mock.decrypt.return_value = ('a'*32) + \
sha256(utils.base64url_decode(crypto_key)).digest()
ckey = 'keyid="a1"; dh="foo";p256ecdsa="%s"' % crypto_key
info = self._make_test_data(
body="asdfasdfasdfasdf",
path_kwargs=dict(
api_ver="v2",
token="asdfasdf",
),
headers={
"content-encoding": "aesgcm",
"encryption": "salt=stuff",
"authorization": auth,
"crypto-key": ckey
}
)
result, errors = schema.load(info)
assert errors == {}
assert "jwt" in result
def test_valid_vapid_crypto_header_webpush_crypto(self):
self.test_valid_vapid_crypto_header_webpush(use_crypto=True)
def test_valid_vapid_02_crypto_header_webpush(self):
schema = self._make_fut()
header = {"typ": "JWT", "alg": "ES256"}
payload = {"aud": "http://localhost",
"exp": int(time.time()) + 86400,
"sub": "mailto:admin@example.com"}
token, crypto_key = self._gen_jwt(header, payload)
auth = "vapid t={token},k={key}".format(token=token,
key=crypto_key)
self.fernet_mock.decrypt.return_value = ('a' * 32) + \
sha256(utils.base64url_decode(crypto_key)).digest()
info = self._make_test_data(
body="asdfasdfasdfasdf",
path_kwargs=dict(
api_ver="v2",
token="asdfasdf",
),
headers={
"content-encoding": "aesgcm",
"encryption": "salt=stuff",
"authorization": auth,
}
)
result, errors = schema.load(info)
assert errors == {}
assert "jwt" in result
assert payload == result['jwt']['jwt_data']
def test_valid_vapid_02_crypto_header_webpush_alt(self):
schema = self._make_fut()
header = {"typ": "JWT", "alg": "ES256"}
payload = {"aud": "http://localhost",
"exp": int(time.time()) + 86400,
"sub": "mailto:admin@example.com"}
token, crypto_key = self._gen_jwt(header, payload)
# Switch the params and add an extra, ignored parameter
auth = "vapid k={key}, t={token}, foo=bar".format(
token=token,
key=crypto_key)
self.fernet_mock.decrypt.return_value = ('a' * 32) + \
sha256(utils.base64url_decode(crypto_key)).digest()
info = self._make_test_data(
body="asdfasdfasdfasdf",
path_kwargs=dict(
api_ver="v2",
token="asdfasdf",
),
headers={
"content-encoding": "aesgcm",
"encryption": "salt=stuff",
"authorization": auth,
}
)
result, errors = schema.load(info)
assert errors == {}
assert "jwt" in result
assert payload == result['jwt']['jwt_data']
def test_bad_vapid_02_crypto_header(self):
schema = self._make_fut()
header = {"typ": "JWT", "alg": "ES256"}
payload = {"aud": "https://pusher_origin.example.com",
"exp": int(time.time()) + 86400,
"sub": "mailto:admin@example.com"}
token, crypto_key = self._gen_jwt(header, payload)
# Missing one of the two required parameters, t & k
auth = "vapid t={token},n={key}".format(token=token,
key=crypto_key)
self.fernet_mock.decrypt.return_value = ('a' * 32) + \
sha256(utils.base64url_decode(crypto_key)).digest()
info = self._make_test_data(
body="asdfasdfasdfasdf",
path_kwargs=dict(
api_ver="v2",
token="asdfasdf",
),
headers={
"content-encoding": "aesgcm",
"encryption": "salt=stuff",
"authorization": auth,
}
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 401
assert cm.value.errno == 109
def test_invalid_vapid_draft2_crypto_header(self):
schema = self._make_fut()
schema.context["conf"].use_cryptography = True
header = {"typ": "JWT", "alg": "ES256"}
payload = {"aud": "https://pusher_origin.example.com",
"exp": int(time.time()) + 86400,
"sub": "mailto:admin@example.com"}
token, crypto_key = self._gen_jwt(header, payload)
self.fernet_mock.decrypt.return_value = ('a'*32) + \
sha256(utils.base64url_decode(crypto_key)).digest()
# Corrupt the token so it fails. (Mock doesn't always catch)
auth = "vapid t={token},k={key}".format(token=token+"foo",
key=crypto_key)
info = self._make_test_data(
body="asdfasdfasdfasdf",
path_kwargs=dict(
api_ver="v2",
token="asdfasdf",
),
headers={
"content-encoding": "aesgcm",
"encryption": "salt=stuff",
"authorization": auth,
}
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 401
assert cm.value.errno == 109
@patch("autopush.web.webpush.extract_jwt")
def test_invalid_vapid_crypto_header(self, mock_jwt):
schema = self._make_fut()
schema.context["conf"].use_cryptography = True
mock_jwt.side_effect = ValueError("Unknown public key "
"format specified")
header = {"typ": "JWT", "alg": "ES256"}
payload = {"aud": "https://pusher_origin.example.com",
"exp": int(time.time()) + 86400,
"sub": "mailto:admin@example.com"}
token, crypto_key = self._gen_jwt(header, payload)
self.fernet_mock.decrypt.return_value = ('a'*32) + \
sha256(utils.base64url_decode(crypto_key)).digest()
auth = "WebPush %s" % token
ckey = 'keyid="a1"; dh="foo";p256ecdsa="%s"' % crypto_key
info = self._make_test_data(
body="asdfasdfasdfasdf",
path_kwargs=dict(
api_ver="v2",
token="asdfasdf",
),
headers={
"content-encoding": "aesgcm",
"encryption": "salt=stuff",
"authorization": auth,
"crypto-key": ckey
}
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 401
assert cm.value.errno == 109
def test_invalid_too_far_exp_vapid_crypto_header(self):
schema = self._make_fut()
header = {"typ": "JWT", "alg": "ES256"}
payload = {"aud": "https://pusher_origin.example.com",
"exp": int(time.time()) + 86400 + 86400,
"sub": "mailto:admin@example.com"}
token, crypto_key = self._gen_jwt(header, payload)
auth = "WebPush %s" % token
self.fernet_mock.decrypt.return_value = ('a'*32) + \
sha256(utils.base64url_decode(crypto_key)).digest()
ckey = 'keyid="a1"; dh="foo";p256ecdsa="%s"' % crypto_key
info = self._make_test_data(
body="asdfasdfasdfasdf",
path_kwargs=dict(
api_ver="v2",
token="asdfasdf",
),
headers={
"content-encoding": "aesgcm",
"encryption": "salt=stuff",
"authorization": auth,
"crypto-key": ckey
}
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 401
assert cm.value.errno == 109
def test_invalid_bad_exp_vapid_crypto_header(self):
schema = self._make_fut()
header = {"typ": "JWT", "alg": "ES256"}
payload = {"aud": "https://pusher_origin.example.com",
"exp": "bleh",
"sub": "mailto:admin@example.com"}
token, crypto_key = self._gen_jwt(header, payload)
auth = "WebPush %s" % token
self.fernet_mock.decrypt.return_value = ('a'*32) + \
sha256(utils.base64url_decode(crypto_key)).digest()
ckey = 'keyid="a1"; dh="foo";p256ecdsa="%s"' % crypto_key
info = self._make_test_data(
body="asdfasdfasdfasdf",
path_kwargs=dict(
api_ver="v2",
token="asdfasdf",
),
headers={
"content-encoding": "aesgcm",
"encryption": "salt=stuff",
"authorization": auth,
"crypto-key": ckey
}
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 401
assert cm.value.errno == 109
@patch("autopush.web.webpush.extract_jwt")
def test_invalid_encryption_header(self, mock_jwt):
schema = self._make_fut()
mock_jwt.side_effect = ValueError("Unknown public key "
"format specified")
header = {"typ": "JWT", "alg": "ES256"}
payload = {"aud": "https://pusher_origin.example.com",
"exp": int(time.time()) + 86400,
"sub": "mailto:admin@example.com"}
token, crypto_key = self._gen_jwt(header, payload)
self.fernet_mock.decrypt.return_value = ('a'*32) + \
sha256(utils.base64url_decode(crypto_key)).digest()
auth = "Bearer %s" % token
ckey = 'keyid="a1"; dh="foo";p256ecdsa="%s"' % crypto_key
info = self._make_test_data(
body="asdfasdfasdfasdf",
path_kwargs=dict(
api_ver="v2",
token="asdfasdf",
),
headers={
"content-encoding": "aesgcm",
"encryption": "salt=stuff",
"authorization": auth,
"crypto-key": ckey
}
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 401
assert cm.value.errno == 109
@patch("autopush.web.webpush.extract_jwt")
def test_invalid_encryption_jwt(self, mock_jwt):
schema = self._make_fut()
schema.context['conf'].use_cryptography = True
# use a deeply superclassed error to make sure that it gets picked up.
mock_jwt.side_effect = InvalidSignature("invalid signature")
header = {"typ": "JWT", "alg": "ES256"}
payload = {"aud": "https://push.example.com",
"exp": int(time.time()) + 86400,
"sub": "mailto:admin@example.com"}
token, crypto_key = self._gen_jwt(header, payload)
self.fernet_mock.decrypt.return_value = ('a'*32) + \
sha256(utils.base64url_decode(crypto_key)).digest()
auth = "Bearer %s" % token
ckey = 'keyid="a1"; dh="foo";p256ecdsa="%s"' % crypto_key
info = self._make_test_data(
body="asdfasdfasdfasdf",
path_kwargs=dict(
api_ver="v2",
token="asdfasdf",
),
headers={
"content-encoding": "aesgcm",
"encryption": "salt=stuff",
"authorization": auth,
"crypto-key": ckey
}
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 401
assert cm.value.errno == 109
@patch("autopush.web.webpush.extract_jwt")
def test_invalid_crypto_key_header_content(self, mock_jwt):
schema = self._make_fut()
mock_jwt.side_effect = ValueError("Unknown public key "
"format specified")
header = {"typ": "JWT", "alg": "ES256"}
payload = {"aud": "https://pusher_origin.example.com",
"exp": int(time.time()) + 86400,
"sub": "mailto:admin@example.com"}
token, crypto_key = self._gen_jwt(header, payload)
self.fernet_mock.decrypt.return_value = ('a'*32) + \
sha256(utils.base64url_decode(crypto_key)).digest()
auth = "Bearer %s" % token
ckey = 'keyid="a1";invalid="foo";p256ecdsa="%s"' % crypto_key
info = self._make_test_data(
body="asdfasdfasdfasdf",
path_kwargs=dict(
api_ver="v2",
token="asdfasdf",
),
headers={
"content-encoding": "aes128",
"encryption": "salt=stuff",
"authorization": auth,
"crypto-key": ckey
}
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 400
assert cm.value.errno == 110
def test_expired_vapid_header(self):
schema = self._make_fut()
schema.context["conf"].use_cryptography = True
header = {"typ": "JWT", "alg": "ES256"}
payload = {"aud": "https://pusher_origin.example.com",
"exp": 20,
"sub": "mailto:admin@example.com"}
token, crypto_key = self._gen_jwt(header, payload)
auth = "WebPush %s" % token
ckey = 'keyid="a1"; dh="foo";p256ecdsa="%s"' % crypto_key
self.fernet_mock.decrypt.return_value = ('a'*32) + \
sha256(utils.base64url_decode(crypto_key)).digest()
info = self._make_test_data(
body="asdfasdfasdfasdf",
path_kwargs=dict(
api_ver="v2",
token="asdfasdf",
),
headers={
"content-encoding": "aesgcm",
"encryption": "salt=stuff",
"authorization": auth,
"crypto-key": ckey
}
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 401
assert cm.value.errno == 109
def test_missing_vapid_header(self):
schema = self._make_fut()
header = {"typ": "JWT", "alg": "ES256"}
payload = {
"aud": "https://pusher_origin.example.com",
"exp": 20,
"sub": "mailto:admin@example.com"
}
token, crypto_key = self._gen_jwt(header, payload)
self.fernet_mock.decrypt.return_value = ('a'*32) + \
sha256(utils.base64url_decode(crypto_key)).digest()
ckey = 'keyid="a1"; dh="foo";p256ecdsa="%s"' % crypto_key
info = self._make_test_data(
body="asdfasdfasdfasdf",
path_kwargs=dict(
api_ver="v2",
token="asdfasdf",
),
headers={
"content-encoding": "aesgcm",
"encryption": "salt=stuff",
"crypto-key": ckey
}
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 401
assert cm.value.errno == 109
def test_bogus_vapid_header(self):
schema = self._make_fut()
schema.context["conf"].use_cryptography = True
header = {"typ": "JWT", "alg": "ES256"}
payload = {
"aud": "https://pusher_origin.example.com",
"exp": 20,
"sub": "mailto:admin@example.com"
}
token, crypto_key = self._gen_jwt(header, payload)
self.fernet_mock.decrypt.return_value = (
'a' * 32) + sha256(utils.base64url_decode(crypto_key)).digest()
ckey = 'keyid="a1"; dh="foo";p256ecdsa="%s"' % crypto_key
info = self._make_test_data(
body="asdfasdfasdfasdf",
path_kwargs=dict(
api_ver="v2",
token="asdfasdf",
),
headers={
"content-encoding": "aesgcm",
"encryption": "salt=stuff",
"crypto-key": ckey,
"authorization": "bogus crap"
}
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 401
assert cm.value.errno == 109
def test_null_vapid_header(self):
schema = self._make_fut()
schema.context["conf"].use_cryptography = True
def b64s(content):
return base64.urlsafe_b64encode(content).strip(b'=')
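        # Build a bogus JWT whose header and payload segments are just the
        # base64url encoding of the literal string "null".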
payload = b'.'.join([b64s("null"), b64s("null")])
# force sign the header, since jws will "fix" the invalid one.
sk256p = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
vk = sk256p.get_verifying_key()
key = jwk.construct(sk256p, "ES256")
signature = b64s(key.sign(payload))
token = b'.'.join([payload, signature])
crypto_key = b64s(vk.to_string())
self.fernet_mock.decrypt.return_value = (
'a' * 32) + sha256(utils.base64url_decode(crypto_key)).digest()
info = self._make_test_data(
body="asdfasdfasdfasdf",
path_kwargs=dict(
api_ver="v2",
token="asdfasdf",
),
headers={
"content-encoding": "aes128gcm",
"authorization": "vapid k={},t={}".format(crypto_key, token)
}
)
with pytest.raises(InvalidRequest) as cm:
schema.load(info)
assert cm.value.status_code == 401
assert cm.value.errno == 109
| mpl-2.0 |
mozilla-services/autopush | autopush/tests/test_ssl.py | 1 | 1186 | import socket
import ssl
from twisted.trial import unittest
from autopush.ssl import (
monkey_patch_ssl_wrap_socket,
ssl_wrap_socket_cached,
undo_monkey_patch_ssl_wrap_socket
)
class SSLContextCacheTestCase(unittest.TestCase):
def setUp(self):
        # XXX: test_main doesn't clean up after itself
undo_monkey_patch_ssl_wrap_socket()
def test_monkey_patch_ssl_wrap_socket(self):
assert ssl.wrap_socket is not ssl_wrap_socket_cached
orig = ssl.wrap_socket
monkey_patch_ssl_wrap_socket()
self.addCleanup(undo_monkey_patch_ssl_wrap_socket)
assert ssl.wrap_socket is ssl_wrap_socket_cached
undo_monkey_patch_ssl_wrap_socket()
assert ssl.wrap_socket is orig
def test_ssl_wrap_socket_cached(self):
monkey_patch_ssl_wrap_socket()
self.addCleanup(undo_monkey_patch_ssl_wrap_socket)
s1 = socket.create_connection(('search.yahoo.com', 443))
s2 = socket.create_connection(('google.com', 443))
ssl1 = ssl.wrap_socket(s1, do_handshake_on_connect=False)
ssl2 = ssl.wrap_socket(s2, do_handshake_on_connect=False)
assert ssl1.context is ssl2.context
| mpl-2.0 |
mozilla-services/autopush | autopush/tests/test_protocol.py | 1 | 1356 | from mock import Mock
from nose.tools import eq_
from twisted.trial import unittest
from twisted.web.client import Response
from autopush.protocol import IgnoreBody
class ProtocolTestCase(unittest.TestCase):
def test_ignore(self):
mock_reason = Mock()
mock_reason.check.return_value = True
def deliverBody(proto):
proto.dataReceived("some data to ignore")
proto.connectionLost(mock_reason)
mock_response = Mock(spec=Response)
mock_response.deliverBody.side_effect = deliverBody
d = IgnoreBody.ignore(mock_response)
def verifyResponse(result):
eq_(result, mock_response)
eq_(len(mock_reason.mock_calls), 1)
d.addCallback(verifyResponse)
return d
def test_ignore_check_false(self):
mock_reason = Mock()
mock_reason.check.return_value = False
def deliverBody(proto):
proto.dataReceived("some data to ignore")
proto.connectionLost(mock_reason)
mock_response = Mock(spec=Response)
mock_response.deliverBody.side_effect = deliverBody
d = IgnoreBody.ignore(mock_response)
def verifyResponse(result):
eq_(result.value, mock_reason)
eq_(len(mock_reason.mock_calls), 1)
d.addErrback(verifyResponse)
return d
| mpl-2.0 |
mozilla-services/autopush | autopush/base.py | 1 | 3544 | import sys
import uuid
from typing import TYPE_CHECKING
import cyclone.web
from twisted.logger import Logger
from twisted.python import failure
if TYPE_CHECKING: # pragma: nocover
from autopush.config import AutopushConfig # noqa
from autopush.db import DatabaseManager # noqa
from autopush.metrics import IMetrics # noqa
class BaseHandler(cyclone.web.RequestHandler):
"""Base cyclone RequestHandler for autopush"""
log = Logger()
def initialize(self):
"""Initialize info from the client"""
self._client_info = self._init_info()
@property
def conf(self):
# type: () -> AutopushConfig
return self.application.conf
@property
def db(self):
# type: () -> DatabaseManager
return self.application.db
@property
def metrics(self):
# type: () -> IMetrics
return self.db.metrics
def _init_info(self):
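        """Collect per-request client metadata attached to logs as ``client_info``."""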
return dict(
ami_id=self.conf.ami_id,
request_id=str(uuid.uuid4()),
user_agent=self.request.headers.get('user-agent', ""),
remote_ip=self.request.headers.get('x-forwarded-for',
self.request.remote_ip),
authorization=self.request.headers.get('authorization', ""),
message_ttl=self.request.headers.get('ttl', None),
uri=self.request.uri,
python_version=sys.version,
)
def write_error(self, code, **kwargs):
"""Write the error (otherwise unhandled exception when dealing with
unknown method specifications.)
This is a Cyclone API Override method used by endpoint and
websocket.
"""
try:
self.set_status(code)
if 'exc_info' in kwargs:
self.log.failure(
format=kwargs.get('format', "Exception"),
failure=failure.Failure(*kwargs['exc_info']),
client_info=self._client_info)
else:
self.log.error("Error in handler: %s" % code,
client_info=self._client_info)
self.finish()
except Exception as ex:
self.log.failure(
"error in write_error: {}:{} while printing {};{}".format(
code, ex, kwargs, self._client_info))
def authenticate_peer_cert(self):
"""Authenticate the client per the configured client_certs.
Aborts the request w/ a 401 on failure.
"""
cert = self.request.connection.transport.getPeerCertificate()
if cert:
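            # The certificate's SHA-256 fingerprint is the lookup key into the
            # configured client_certs mapping.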
cert_signature = cert.digest('sha256')
cn = cert.get_subject().CN
auth = self.conf.client_certs.get(cert_signature)
if auth is not None:
# TLS authenticated
self._client_info.update(tls_auth=auth,
tls_auth_sha256=cert_signature,
tls_auth_cn=cn)
return
self._client_info.update(tls_failed_sha256=cert_signature,
tls_failed_cn=cn)
self.log.warn("Failed TLS auth", client_info=self._client_info)
self.set_status(401)
# "Transport mode" isn't standard, inspired by:
# http://www6.ietf.org/mail-archive/web/tls/current/msg05589.html
self.set_header('WWW-Authenticate',
'Transport mode="tls-client-certificate"')
self.finish()
| mpl-2.0 |
mozilla-services/autopush | autopush/tests/test_db.py | 1 | 27490 | import os
import unittest
import uuid
from datetime import datetime, timedelta
from autopush.websocket import ms_time
from botocore.exceptions import ClientError
from mock import Mock, patch
import pytest
from autopush.config import DDBTableConfig
from autopush.db import (
get_rotating_message_tablename,
create_router_table,
preflight_check,
table_exists,
Message,
Router,
generate_last_connect,
make_rotating_tablename,
create_rotating_message_table,
_drop_table,
_make_table,
DatabaseManager,
DynamoDBResource
)
from autopush.exceptions import AutopushException, ItemNotFound
from autopush.metrics import SinkMetrics
from autopush.utils import WebPushNotification
# nose fails to import sessions correctly.
import autopush.tests
dummy_uaid = str(uuid.UUID("abad1dea00000000aabbccdd00000000"))
dummy_chid = str(uuid.UUID("deadbeef00000000decafbad00000000"))
test_router = None
def setup_module():
global test_router
config = DDBTableConfig("router_test")
test_router = Router(config, SinkMetrics(),
resource=autopush.tests.boto_resource)
def make_webpush_notification(uaid, chid, ttl=100):
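    """Build a WebPushNotification for tests with a fresh random message id."""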
message_id = str(uuid.uuid4())
return WebPushNotification(
uaid=uuid.UUID(uaid),
channel_id=uuid.UUID(chid),
update_id=message_id,
message_id=message_id,
ttl=ttl,
)
class DbUtilsTest(unittest.TestCase):
def test_make_table(self):
fake_resource = Mock()
fake_func = Mock()
fake_table = "DoesNotExist_{}".format(uuid.uuid4())
_make_table(fake_func, fake_table, 5, 10, boto_resource=fake_resource)
assert fake_func.call_args[0] == (fake_table, 5, 10, fake_resource)
def test_make_table_no_resource(self):
fake_func = Mock()
fake_table = "DoesNotExist_{}".format(uuid.uuid4())
with pytest.raises(AutopushException) as ex:
_make_table(fake_func, fake_table, 5, 10,
boto_resource=None)
assert str(ex.value) == "No boto3 resource provided for _make_table"
class DatabaseManagerTest(unittest.TestCase):
def fake_conf(self, table_name=""):
fake_conf = Mock()
fake_conf.statsd_host = "localhost"
fake_conf.statsd_port = 8125
fake_conf.allow_table_rotation = False
fake_conf.message_table = Mock()
fake_conf.message_table.tablename = table_name
fake_conf.message_table.read_throughput = 5
fake_conf.message_table.write_throughput = 5
return fake_conf
def test_init_with_resources(self):
from autopush.db import DynamoDBResource
dm = DatabaseManager(router_conf=Mock(),
message_conf=Mock(),
metrics=Mock(),
resource=None)
assert dm.resource is not None
assert isinstance(dm.resource, DynamoDBResource)
def test_init_with_no_rotate(self):
fake_conf = self.fake_conf("message_int_test")
dm = DatabaseManager.from_config(
fake_conf,
resource=autopush.tests.boto_resource)
dm.create_initial_message_tables()
assert dm.current_msg_month == \
autopush.tests.boto_resource.get_latest_message_tablename(
prefix=fake_conf.message_table.tablename
)
def test_init_with_no_rotate_create_table(self):
fake_conf = self.fake_conf("message_bogus")
dm = DatabaseManager.from_config(
fake_conf,
resource=autopush.tests.boto_resource)
try:
dm.create_initial_message_tables()
latest = autopush.tests.boto_resource.get_latest_message_tablename(
prefix=fake_conf.message_table.tablename
)
assert dm.current_msg_month == latest
assert dm.message_tables == [fake_conf.message_table.tablename]
finally:
# clean up the bogus table.
dm.resource._resource.meta.client.delete_table(
TableName=fake_conf.message_table.tablename)
class DdbResourceTest(unittest.TestCase):
@patch("boto3.resource")
def test_ddb_no_endpoint(self, mresource):
safe = os.getenv("AWS_LOCAL_DYNAMODB")
try:
            os.unsetenv("AWS_LOCAL_DYNAMODB")
del(os.environ["AWS_LOCAL_DYNAMODB"])
DynamoDBResource(region_name="us-east-1")
assert mresource.call_args[0] == ('dynamodb',)
resource = DynamoDBResource(endpoint_url="")
assert resource.conf == {}
finally:
if safe: # pragma: nocover
os.environ["AWS_LOCAL_DYNAMODB"] = safe
def test_ddb_env(self):
ddb_session_args = dict(
endpoint_url=os.getenv("AWS_LOCAL_DYNAMODB"),
aws_access_key_id="BogusKey",
aws_secret_access_key="BogusKey",
)
safe = os.getenv("AWS_DEFAULT_REGION")
try:
os.environ["AWS_DEFAULT_REGION"] = "us-west-2"
boto_resource = DynamoDBResource(**ddb_session_args)
assert boto_resource._resource.meta.client.meta.region_name == \
'us-west-2'
finally:
if safe: # pragma: nocover
os.environ["AWS_DEFAULT_REGION"] = safe
class DbCheckTestCase(unittest.TestCase):
def setUp(cls):
cls.resource = autopush.tests.boto_resource
cls.table_conf = DDBTableConfig("router_test")
cls.router = Router(cls.table_conf, SinkMetrics(),
resource=cls.resource)
def test_preflight_check_fail(self):
router = Router(self.table_conf, SinkMetrics(),
resource=self.resource)
message = Message(get_rotating_message_tablename(
boto_resource=self.resource),
boto_resource=self.resource)
def raise_exc(*args, **kwargs): # pragma: no cover
raise Exception("Oops")
router.clear_node = Mock()
router.clear_node.side_effect = raise_exc
with pytest.raises(Exception):
preflight_check(message, router, self.resource)
def test_preflight_check(self):
global test_router
message = Message(get_rotating_message_tablename(
boto_resource=self.resource),
boto_resource=self.resource)
pf_uaid = "deadbeef00000000deadbeef01010101"
preflight_check(message, test_router, pf_uaid)
# now check that the database reports no entries.
_, notifs = message.fetch_messages(uuid.UUID(pf_uaid))
assert len(notifs) == 0
with pytest.raises(ItemNotFound):
self.router.get_uaid(pf_uaid)
def test_preflight_check_wait(self):
global test_router
message = Message(
get_rotating_message_tablename(boto_resource=self.resource),
boto_resource=self.resource
)
values = ["PENDING", "ACTIVE"]
message.table_status = Mock(side_effect=values)
pf_uaid = "deadbeef00000000deadbeef01010101"
preflight_check(message, test_router, pf_uaid)
# now check that the database reports no entries.
_, notifs = message.fetch_messages(uuid.UUID(pf_uaid))
assert len(notifs) == 0
with pytest.raises(ItemNotFound):
self.router.get_uaid(pf_uaid)
def test_get_month(self):
from autopush.db import get_month
month0 = get_month(0)
month1 = get_month(1)
this_month = month0.month
next_month = 1 if this_month == 12 else this_month + 1
assert next_month == month1.month
def test_zero_fill_month(self):
from autopush.db import make_rotating_tablename
assert 'test_2016_03' == make_rotating_tablename(
'test', date=datetime(2016, 3, 15).date())
def test_hasher(self):
import autopush.db as db
db.key_hash = "SuperSikkret"
v = db.hasher("01234567123401234123456789ABCDEF")
assert v == ('0530bb351921e7b4be66831e4c126c6'
'd8f614d06cdd592cb8470f31177c8331a')
db.key_hash = ""
def test_normalize_id(self):
# Note, yes, we forbid dashes in UAIDs, and we add them here.
import autopush.db as db
abnormal = "deadbeef00000000decafbad00000000"
normal = "deadbeef-0000-0000-deca-fbad00000000"
assert db.normalize_id(abnormal) == normal
with pytest.raises(ValueError):
db.normalize_id("invalid")
assert db.normalize_id(abnormal.upper()) == normal
class MessageTestCase(unittest.TestCase):
def setUp(self):
self.resource = autopush.tests.boto_resource
table = get_rotating_message_tablename(
prefix="message_int_test",
boto_resource=self.resource)
self.real_table = table
self.uaid = uuid.uuid4().hex
def test_non_rotating_tables(self):
message_tablename = "message_int_test"
table_name = self.resource.get_latest_message_tablename(
prefix=message_tablename)
message = Message(table_name,
boto_resource=self.resource)
assert message.tablename == table_name
def test_register(self):
chid = str(uuid.uuid4())
m = get_rotating_message_tablename(boto_resource=self.resource)
message = Message(m, boto_resource=self.resource)
message.register_channel(self.uaid, chid)
lm = self.resource.Table(m)
# Verify it's in the db
response = lm.query(
KeyConditions={
'uaid': {
'AttributeValueList': [self.uaid],
'ComparisonOperator': 'EQ'
},
'chidmessageid': {
'AttributeValueList': ['02'],
'ComparisonOperator': 'LT'
}
},
ConsistentRead=True,
)
assert len(response.get('Items'))
def test_unregister(self):
chid = str(uuid.uuid4())
m = get_rotating_message_tablename(boto_resource=self.resource)
message = Message(m, boto_resource=self.resource)
message.register_channel(self.uaid, chid)
        lm = self.resource.Table(m)
        # Verify it's in the db
response = lm.query(
KeyConditions={
'uaid': {
'AttributeValueList': [self.uaid],
'ComparisonOperator': 'EQ'
},
'chidmessageid': {
'AttributeValueList': [" "],
'ComparisonOperator': 'EQ'
},
},
ConsistentRead=True,
)
results = list(response.get('Items'))
assert len(results) == 1
assert results[0]["chids"] == {chid}
message.unregister_channel(self.uaid, chid)
        # Verify it's not in the db
response = lm.query(
KeyConditions={
'uaid': {
'AttributeValueList': [self.uaid],
'ComparisonOperator': 'EQ'
},
'chidmessageid': {
'AttributeValueList': [" "],
'ComparisonOperator': 'EQ'
},
},
ConsistentRead=True,
)
results = list(response.get('Items'))
assert len(results) == 1
assert results[0].get("chids") is None
# Test for the very unlikely case that there's no 'chid'
mtable = Mock()
mtable.update_item = Mock(return_value={
'Attributes': {'uaid': self.uaid},
'ResponseMetaData': {}
})
message.table = mtable
r = message.unregister_channel(self.uaid, dummy_chid)
assert r is False
def test_all_channels(self):
chid = str(uuid.uuid4())
chid2 = str(uuid.uuid4())
m = get_rotating_message_tablename(boto_resource=self.resource)
message = Message(m, boto_resource=self.resource)
message.register_channel(self.uaid, chid)
message.register_channel(self.uaid, chid2)
_, chans = message.all_channels(self.uaid)
assert chid in chans
assert chid2 in chans
message.unregister_channel(self.uaid, chid2)
_, chans = message.all_channels(self.uaid)
assert chid2 not in chans
assert chid in chans
def test_all_channels_fail(self):
m = get_rotating_message_tablename(boto_resource=self.resource)
message = Message(m, boto_resource=self.resource)
mtable = Mock()
mtable.get_item.return_value = {
"ResponseMetadata": {
"HTTPStatusCode": 400
},
}
message.table = mtable
res = message.all_channels(self.uaid)
assert res == (False, set([]))
def test_save_channels(self):
chid = str(uuid.uuid4())
chid2 = str(uuid.uuid4())
m = get_rotating_message_tablename(boto_resource=self.resource)
message = Message(m, boto_resource=self.resource)
message.register_channel(self.uaid, chid)
message.register_channel(self.uaid, chid2)
exists, chans = message.all_channels(self.uaid)
new_uaid = uuid.uuid4().hex
message.save_channels(new_uaid, chans)
_, new_chans = message.all_channels(new_uaid)
assert chans == new_chans
def test_all_channels_no_uaid(self):
m = get_rotating_message_tablename(boto_resource=self.resource)
message = Message(m, boto_resource=self.resource)
exists, chans = message.all_channels(dummy_uaid)
assert chans == set([])
def test_message_storage(self):
chid = str(uuid.uuid4())
chid2 = str(uuid.uuid4())
m = get_rotating_message_tablename(boto_resource=self.resource)
message = Message(m, boto_resource=self.resource)
message.register_channel(self.uaid, chid)
message.register_channel(self.uaid, chid2)
# Ensure that sort keys are fetched from DB in order.
notifs = [make_webpush_notification(self.uaid, chid) for x in range(3)]
keys = [notif.sort_key for notif in notifs]
for msg in notifs:
message.store_message(msg)
_, all_messages = message.fetch_timestamp_messages(
uuid.UUID(self.uaid), " ")
assert len(all_messages) == len(notifs)
assert keys == [msg.sort_key for msg in all_messages]
def test_message_storage_overwrite(self):
"""Test that store_message can overwrite existing messages which
can occur in some reconnect cases but shouldn't error"""
chid = str(uuid.uuid4())
chid2 = str(uuid.uuid4())
notif1 = make_webpush_notification(self.uaid, chid)
notif2 = make_webpush_notification(self.uaid, chid)
notif3 = make_webpush_notification(self.uaid, chid2)
notif2.message_id = notif1.message_id
m = get_rotating_message_tablename(boto_resource=self.resource)
message = Message(m, boto_resource=self.resource)
message.register_channel(self.uaid, chid)
message.register_channel(self.uaid, chid2)
message.store_message(notif1)
message.store_message(notif2)
message.store_message(notif3)
all_messages = list(message.fetch_messages(
uuid.UUID(self.uaid)))
assert len(all_messages) == 2
def test_message_delete_fail_condition(self):
notif = make_webpush_notification(dummy_uaid, dummy_chid)
notif.message_id = notif.update_id = dummy_uaid
m = get_rotating_message_tablename(boto_resource=self.resource)
message = Message(m, boto_resource=self.resource)
def raise_condition(*args, **kwargs):
raise ClientError({}, 'delete_item')
m_de = Mock()
m_de.delete_item = Mock(side_effect=raise_condition)
message.table = m_de
result = message.delete_message(notif)
assert result is False
def test_message_rotate_table_with_date(self):
prefix = "message" + uuid.uuid4().hex
future = (datetime.today() + timedelta(days=32)).date()
tbl_name = make_rotating_tablename(prefix, date=future)
m = get_rotating_message_tablename(prefix=prefix, date=future,
boto_resource=self.resource)
assert m == tbl_name
# Clean up the temp table.
_drop_table(tbl_name, boto_resource=self.resource)
class RouterTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.resource = autopush.tests.boto_resource
cls.table_conf = DDBTableConfig("router_test")
cls.router = test_router
def _create_minimal_record(self):
data = {
"uaid": str(uuid.uuid4()),
"router_type": "webpush",
"last_connect": generate_last_connect(),
"connected_at": ms_time(),
"current_month": datetime.today().month,
}
return data
def test_old_mobile_user(self):
# Old mobile users (ones that use a bridge) don't regularly check
# in, or update their expiry record. It's important that we don't
# drop them because reconnecting requires a re-installation.
old_mobile = self._create_minimal_record()
old_mobile["expiry"] = None
m_user = old_mobile['uaid']
self.router.register_user(old_mobile)
# verify that fetching a user without an expiry still works.
# old mobile users don't have, and may never get, an expiry
user = self.router.get_uaid(m_user)
assert user["uaid"] == m_user
def test_custom_tablename(self):
db_name = "router_%s" % uuid.uuid4()
assert not table_exists(db_name, boto_resource=self.resource)
create_router_table(db_name, boto_resource=self.resource)
assert table_exists(db_name, boto_resource=self.resource)
# Clean up the temp table.
_drop_table(db_name, boto_resource=self.resource)
def test_create_rotating_cache(self):
mock_table = Mock()
mock_table.table_status = 'ACTIVE'
mock_resource = Mock()
mock_resource.Table = Mock(return_value=mock_table)
table = create_rotating_message_table(boto_resource=mock_resource)
assert table == mock_table
def test_provisioning(self):
db_name = "router_%s" % uuid.uuid4()
r = create_router_table(db_name, 3, 17,
boto_resource=self.resource)
assert r.provisioned_throughput.get('ReadCapacityUnits') == 3
assert r.provisioned_throughput.get('WriteCapacityUnits') == 17
def test_no_uaid_found(self):
uaid = str(uuid.uuid4())
with pytest.raises(ItemNotFound):
self.router.get_uaid(uaid)
def test_uaid_provision_failed(self):
router = Router(self.table_conf, SinkMetrics(),
resource=self.resource)
router.table = Mock()
def raise_condition(*args, **kwargs):
raise ClientError(
{'Error': {'Code': 'ProvisionedThroughputExceededException'}},
'mock_update_item'
)
mm = Mock()
mm.get_item = Mock(side_effect=raise_condition)
router.table = mm
with pytest.raises(ClientError) as ex:
router.get_uaid(uaid="asdf")
assert (ex.value.response['Error']['Code'] ==
"ProvisionedThroughputExceededException")
def test_register_user_provision_failed(self):
router = Router(self.table_conf, SinkMetrics(), resource=self.resource)
mm = Mock()
mm.client = Mock()
router.table = mm
def raise_condition(*args, **kwargs):
raise ClientError(
{'Error': {'Code': 'ProvisionedThroughputExceededException'}},
'mock_update_item'
)
mm.update_item = Mock(side_effect=raise_condition)
with pytest.raises(ClientError) as ex:
router.register_user(dict(uaid=dummy_uaid, node_id="me",
connected_at=1234,
router_type="webpush"))
assert (ex.value.response['Error']['Code'] ==
"ProvisionedThroughputExceededException")
def test_register_user_condition_failed(self):
router = Router(self.table_conf, SinkMetrics(),
resource=self.resource)
router.table.meta.client = Mock()
def raise_error(*args, **kwargs):
raise ClientError(
{'Error': {'Code': 'ConditionalCheckFailedException'}},
'mock_update_item'
)
mm = Mock()
mm.update_item = Mock(side_effect=raise_error)
router.table = mm
res = router.register_user(dict(uaid=dummy_uaid, node_id="me",
connected_at=1234,
router_type="webpush"))
assert res == (False, {})
def test_clear_node_provision_failed(self):
router = Router(self.table_conf, SinkMetrics(),
resource=self.resource)
def raise_condition(*args, **kwargs):
raise ClientError(
{'Error': {'Code': 'ProvisionedThroughputExceededException'}},
'mock_update_item'
)
mm = Mock()
mm.put_item = Mock(side_effect=raise_condition)
router.table = mm
with pytest.raises(ClientError) as ex:
router.clear_node(dict(uaid=dummy_uaid,
connected_at="1234",
node_id="asdf",
router_type="webpush"))
assert (ex.value.response['Error']['Code'] ==
"ProvisionedThroughputExceededException")
def test_clear_node_condition_failed(self):
router = Router(self.table_conf, SinkMetrics(),
resource=self.resource)
def raise_error(*args, **kwargs):
raise ClientError(
{'Error': {'Code': 'ConditionalCheckFailedException'}},
'mock_put_item'
)
mock_put = Mock()
mock_put.put_item = Mock(side_effect=raise_error)
router.table = mock_put
res = router.clear_node(dict(uaid=dummy_uaid,
connected_at="1234",
node_id="asdf",
router_type="webpush"))
assert res is False
def test_incomplete_uaid(self):
# Older records may be incomplete. We can't inject them using normal
# methods.
uaid = str(uuid.uuid4())
router = Router(self.table_conf, SinkMetrics(),
resource=self.resource)
mm = Mock()
mm.get_item = Mock()
mm.get_item.return_value = {
"ResponseMetadata": {
"HTTPStatusCode": 200
},
"Item": {
"uaid": uuid.uuid4().hex
}
}
mm.delete_item.return_value = {
"ResponseMetadata": {
"HTTPStatusCode": 200
},
}
router.table = mm
router.drop_user = Mock()
try:
router.register_user(dict(uaid=uaid))
except AutopushException:
pass
with pytest.raises(ItemNotFound):
router.get_uaid(uaid)
assert router.drop_user.called
def test_failed_uaid(self):
uaid = str(uuid.uuid4())
router = Router(self.table_conf, SinkMetrics(),
resource=self.resource)
mm = Mock()
mm.get_item = Mock()
mm.get_item.return_value = {
"ResponseMetadata": {
"HTTPStatusCode": 400
},
}
router.table = mm
router.drop_user = Mock()
with pytest.raises(ItemNotFound):
router.get_uaid(uaid)
def test_save_new(self):
router = Router(self.table_conf, SinkMetrics(),
resource=self.resource)
# Sadly, moto currently does not return an empty value like boto
# when not updating data.
mock_update = Mock()
mock_update.update_item = Mock(return_value={})
router.table = mock_update
result = router.register_user(dict(uaid=dummy_uaid,
node_id="me",
router_type="webpush",
connected_at=1234))
assert result[0] is True
def test_save_fail(self):
router = Router(self.table_conf, SinkMetrics(),
resource=self.resource)
def raise_condition(*args, **kwargs):
raise ClientError(
{'Error': {'Code': 'ConditionalCheckFailedException'}},
'mock_update_item'
)
mock_update = Mock()
mock_update.update_item = Mock(side_effect=raise_condition)
router.table = mock_update
router_data = dict(uaid=dummy_uaid, node_id="asdf", connected_at=1234,
router_type="webpush")
result = router.register_user(router_data)
assert result == (False, {})
def test_node_clear(self):
router = Router(self.table_conf, SinkMetrics(),
resource=self.resource)
# Register a node user
router.register_user(dict(uaid=dummy_uaid, node_id="asdf",
connected_at=1234,
router_type="webpush"))
# Verify
user = router.get_uaid(dummy_uaid)
assert user["node_id"] == "asdf"
assert user["connected_at"] == 1234
assert user["router_type"] == "webpush"
# Clear
router.clear_node(user)
# Verify
user = router.get_uaid(dummy_uaid)
assert user.get("node_id") is None
assert user["connected_at"] == 1234
assert user["router_type"] == "webpush"
def test_node_clear_fail(self):
router = Router(self.table_conf, SinkMetrics(),
resource=self.resource)
def raise_condition(*args, **kwargs):
raise ClientError(
{'Error': {'Code': 'ConditionalCheckFailedException'}},
'mock_update_item'
)
mock_put = Mock()
mock_put.put_item = Mock(side_effect=raise_condition)
router.table = mock_put
data = dict(uaid=dummy_uaid, node_id="asdf", connected_at=1234)
result = router.clear_node(data)
assert result is False
def test_drop_user(self):
uaid = str(uuid.uuid4())
router = Router(self.table_conf, SinkMetrics(),
resource=self.resource)
# Register a node user
router.register_user(dict(uaid=uaid, node_id="asdf",
router_type="webpush",
connected_at=1234))
result = router.drop_user(uaid)
assert result is True
# Deleting already deleted record should return false.
result = router.drop_user(uaid)
assert result is False
| mpl-2.0 |
mozilla-services/autopush | autopush/router/webpush.py | 1 | 10390 | """WebPush Style Autopush Router
This router handles notifications that should be dispatched to an Autopush
node, or stores each individual message, along with its data, in a Message
table for retrieval by the client.
"""
import json
import time
from StringIO import StringIO
from typing import Any # noqa
from botocore.exceptions import ClientError
from twisted.internet.threads import deferToThread
from twisted.web.client import FileBodyProducer
from twisted.internet.defer import (
inlineCallbacks,
returnValue,
CancelledError,
)
from twisted.internet.error import (
ConnectError,
ConnectionClosed,
ConnectionRefusedError,
)
from twisted.logger import Logger
from twisted.web._newclient import ResponseFailed
from twisted.web.http import PotentialDataLoss
from autopush.exceptions import ItemNotFound, RouterException
from autopush.metrics import make_tags
from autopush.protocol import IgnoreBody
from autopush.router.interface import RouterResponse
from autopush.types import JSONDict # noqa
TTL_URL = "https://webpush-wg.github.io/webpush-protocol/#rfc.section.6.2"
class WebPushRouter(object):
"""Implements :class: `autopush.router.interface.IRouter` for internal
routing to an autopush node
"""
log = Logger()
def __init__(self, conf, router_conf, db, agent):
"""Create a new Router"""
self.conf = conf
self.router_conf = router_conf
self.db = db
self.agent = agent
@property
def metrics(self):
return self.db.metrics
def register(self, uaid, router_data, app_id, *args, **kwargs):
# type: (str, JSONDict, str, *Any, **Any) -> None
"""No additional routing data"""
def amend_endpoint_response(self, response, router_data):
# type: (JSONDict, JSONDict) -> None
"""Stubbed out for this router"""
@inlineCallbacks
def route_notification(self, notification, uaid_data):
"""Route a notification to an internal node, and store it if the node
can't deliver immediately or is no longer a valid node
"""
# Determine if they're connected at the moment
node_id = uaid_data.get("node_id")
uaid = uaid_data["uaid"]
router = self.db.router
# Node_id is present, attempt delivery.
# - Send Notification to node
# - Success: Done, return 200
# - Error (Node busy): Jump to Save notification below
# - Error (Client gone, node gone/dead): Clear node entry for user
# - Both: Done, return 503
if node_id:
result = None
try:
result = yield self._send_notification(uaid, node_id,
notification)
except (ConnectError, ConnectionClosed, ResponseFailed,
CancelledError, PotentialDataLoss) as exc:
self.metrics.increment("updates.client.host_gone")
yield deferToThread(router.clear_node,
uaid_data).addErrback(self._eat_db_err)
if isinstance(exc, ConnectionRefusedError):
# Occurs if an IP record is now used by some other node
# in AWS or if the connection times out.
self.log.debug("Could not route message: {exc}", exc=exc)
if result and result.code == 200:
returnValue(self.delivered_response(notification))
# Save notification, node is not present or busy
# - Save notification
# - Success (older version): Done, return 202
# - Error (db error): Done, return 503
try:
yield self._save_notification(uaid_data, notification)
except ClientError as e:
log_exception = (e.response["Error"]["Code"] !=
"ProvisionedThroughputExceededException")
raise RouterException("Error saving to database",
status_code=503,
response_body="Retry Request",
log_exception=log_exception,
errno=201)
# - Lookup client again to get latest node state after save.
# - Success (node found): Notify node of new notification
# - Success: Done, return 200
# - Error (no client): Done, return 202
# - Error (no node): Clear node entry
# - Both: Done, return 202
# - Success (no node): Done, return 202
# - Error (db error): Done, return 202
# - Error (no client) : Done, return 404
try:
uaid_data = yield deferToThread(router.get_uaid, uaid)
except ClientError:
returnValue(self.stored_response(notification))
except ItemNotFound:
self.metrics.increment("updates.client.deleted")
raise RouterException("User was deleted",
status_code=410,
response_body="Invalid UAID",
log_exception=False,
errno=105)
# Verify there's a node_id in here, if not we're done
node_id = uaid_data.get("node_id")
if not node_id:
returnValue(self.stored_response(notification))
try:
result = yield self._send_notification_check(uaid, node_id)
except (ConnectError, ConnectionClosed, ResponseFailed) as exc:
self.metrics.increment("updates.client.host_gone")
if isinstance(exc, ConnectionRefusedError):
self.log.debug("Could not route message: {exc}", exc=exc)
yield deferToThread(
router.clear_node,
uaid_data).addErrback(self._eat_db_err)
returnValue(self.stored_response(notification))
if result.code == 200:
returnValue(self.delivered_response(notification))
else:
ret_val = self.stored_response(notification)
returnValue(ret_val)
def delivered_response(self, notification):
self.metrics.increment("notification.message_data",
notification.data_length,
tags=make_tags(destination='Direct'))
location = "%s/m/%s" % (self.conf.endpoint_url, notification.location)
return RouterResponse(status_code=201, response_body="",
headers={"Location": location,
"TTL": notification.ttl or 0},
logged_status=200)
def stored_response(self, notification):
self.metrics.increment("notification.message_data",
notification.data_length,
tags=make_tags(destination='Stored'))
location = "%s/m/%s" % (self.conf.endpoint_url, notification.location)
# RFC https://tools.ietf.org/html/rfc8030#section-5
# all responses should be 201, unless this is a push receipt request,
# which requires a 202 and a URL that can be checked later for UA
# acknowledgement. (We don't support that yet. See autopush-rs#244)
return RouterResponse(status_code=201, response_body="",
headers={"Location": location,
"TTL": notification.ttl},
logged_status=201)
#############################################################
# Blocking Helper Functions
#############################################################
def _send_notification(self, uaid, node_id, notification):
"""Send a notification to a specific node_id
This version of the overridden method includes the necessary crypto
headers for the notification.
:type notification: autopush.utils.WebPushNotification
"""
payload = notification.serialize()
payload["timestamp"] = int(time.time())
url = node_id + "/push/" + uaid
request = self.agent.request(
"PUT",
url.encode("utf8"),
bodyProducer=FileBodyProducer(StringIO(json.dumps(payload))),
)
request.addCallback(IgnoreBody.ignore)
return request
def _send_notification_check(self, uaid, node_id):
"""Send a command to the node to check for notifications"""
url = node_id + "/notif/" + uaid
return self.agent.request(
"PUT",
url.encode("utf8"),
).addCallback(IgnoreBody.ignore)
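# Illustrative sketch of the requests the two helpers above produce; the node host
# shown is hypothetical and only here for clarity:
#   PUT http://10.0.0.5:8080/push/<uaid>   body: JSON-serialized notification plus a "timestamp"
#   PUT http://10.0.0.5:8080/notif/<uaid>  no body; asks the node to re-check stored messages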
def _save_notification(self, uaid_data, notification):
"""Saves a notification, returns a deferred.
This version of the overridden method saves each individual message
to the message table along with relevant request headers if
available.
:type uaid_data: dict
"""
month_table = uaid_data["current_month"]
if notification.ttl is None:
# Note that this URL is temporary, as is this warning; we will
# eventually respond with 400 to all requests missing a TTL
raise RouterException(
"Missing TTL Header",
response_body="Missing TTL Header, see: %s" % TTL_URL,
status_code=400,
errno=111,
log_exception=False,
)
if notification.ttl == 0:
location = "%s/m/%s" % (self.conf.endpoint_url,
notification.version)
raise RouterException("Finished Routing", status_code=201,
log_exception=False,
headers={"TTL": str(notification.ttl),
"Location": location},
logged_status=204)
return deferToThread(
self.db.message_table(month_table).store_message,
notification=notification,
)
#############################################################
# Error Callbacks
#############################################################
def _eat_db_err(self, fail):
"""errBack for ignoring provisioned throughput errors"""
fail.trap(ClientError)
| mpl-2.0 |
mozilla-services/autopush | autopush/web/message.py | 1 | 1772 | from cryptography.fernet import InvalidToken
from marshmallow import Schema, fields, pre_load
from twisted.internet.threads import deferToThread
from twisted.internet.defer import Deferred # noqa
from autopush.exceptions import InvalidRequest, InvalidTokenException
from autopush.utils import WebPushNotification
from autopush.web.base import threaded_validate, BaseWebHandler
class MessageSchema(Schema):
notification = fields.Raw()
@pre_load
def extract_data(self, req):
message_id = req['path_kwargs'].get('message_id')
try:
notif = WebPushNotification.from_message_id(
bytes(message_id),
fernet=self.context['conf'].fernet,
)
except (InvalidToken, InvalidTokenException):
raise InvalidRequest("Invalid message ID",
status_code=400)
return dict(notification=notif)
class MessageHandler(BaseWebHandler):
cors_methods = "DELETE"
cors_response_headers = ("location",)
@threaded_validate(MessageSchema)
def delete(self, notification):
# type: (WebPushNotification) -> Deferred
"""Drops a pending message.
The message will only be removed from DynamoDB. Messages that were
successfully routed to a client as direct updates, but not delivered
yet, will not be dropped.
"""
d = deferToThread(self.db.message.delete_message, notification)
d.addCallback(self._delete_completed)
self._db_error_handling(d)
return d
def _delete_completed(self, *args, **kwargs):
self.log.debug(format="Message Deleted", status_code=204,
**self._client_info)
self.set_status(204)
self.finish()
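# Rough request/response sketch for the handler above (the /m/<message_id> path is an
# assumption based on the Location headers this codebase builds elsewhere):
#   DELETE /m/<message_id>  ->  204 No Content once the pending message is removed from DynamoDB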
| mpl-2.0 |
mozilla-services/autopush | autopush/gcdump.py | 1 | 3447 | #! /usr/bin/env python
"""
Prints a human-readable total out of a dumpfile produced
by gc.dump_rpy_heap(), and optionally a typeids.txt.
Syntax: dump.py <dumpfile> [<typeids.txt>]
By default, typeids.txt is loaded from the same dir as dumpfile.
"""
import array
import os
import struct
import sys
class Stat(object):
summary = {}
typeids = {0: '<GCROOT>'}
def summarize(self, filename, stream=None):
a = self.load_dump_file(filename)
self.summary = {} # {typenum: [count, totalsize]}
for obj in self.walk(a, stream=stream):
self.add_object_summary(obj[2], obj[3])
def load_typeids(self, filename_or_iter):
self.typeids = Stat.typeids.copy()
if isinstance(filename_or_iter, str):
iter = open(filename_or_iter)
else:
iter = filename_or_iter
for num, line in enumerate(iter):
if num == 0:
continue
if not line:
continue
words = line.split()
if words[0].startswith('member'):
del words[0]
if words[0] == 'GcStruct':
del words[0]
self.typeids[num] = ' '.join(words)
def get_type_name(self, num):
return self.typeids.get(num, '<typenum %d>' % num)
def print_summary(self, stream):
items = self.summary.items()
items.sort(key=lambda (typenum, stat): stat[1]) # sort by totalsize
totalsize = 0
for typenum, stat in items:
totalsize += stat[1]
stream.write('%8d %8.2fM %s\n' %
(stat[0],
stat[1] / (1024.0*1024.0),
self.get_type_name(typenum)))
stream.write('total %.1fM\n' % (totalsize / (1024.0*1024.0)))
def load_dump_file(self, filename):
f = open(filename, 'rb')
f.seek(0, 2)
end = f.tell()
f.seek(0)
a = array.array('l')
a.fromfile(f, end / struct.calcsize('l'))
f.close()
return a
def add_object_summary(self, typenum, sizeobj):
try:
stat = self.summary[typenum]
except KeyError:
stat = self.summary[typenum] = [0, 0]
stat[0] += 1
stat[1] += sizeobj
def walk(self, a, start=0, stop=None, stream=None):
assert a[-1] == -1, "invalid or truncated dump file (or 32/64-bit mix)"
assert a[-2] != -1, "invalid or truncated dump file (or 32/64-bit mix)"
if stream:
stream.write('walking...')
i = start
if stop is None:
stop = len(a)
while i < stop:
j = i + 3
while a[j] != -1:
j += 1
yield (i, a[i], a[i+1], a[i+2], a[i+3:j])
i = j + 1
if stream:
stream.write('done\n')
if __name__ == '__main__':
if len(sys.argv) <= 1:
print >> sys.stderr, __doc__
sys.exit(2)
stat = Stat()
stat.summarize(sys.argv[1], stream=sys.stderr)
#
if len(sys.argv) > 2:
typeid_name = sys.argv[2]
else:
typeid_name = os.path.join(os.path.dirname(sys.argv[1]), 'typeids.txt')
if os.path.isfile(typeid_name):
stat.load_typeids(typeid_name)
else:
import gc
import zlib
stat.load_typeids(zlib.decompress(gc.get_typeids_z()).split("\n"))
#
stat.print_summary(sys.stdout)
| mpl-2.0 |
mozilla-services/autopush | autopush/tests/test_logging.py | 1 | 7541 | import json
import os
import Queue
import sys
import StringIO
import cyclone.web
import twisted.internet
import twisted.trial.unittest
from mock import Mock, patch
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.logger import Logger
from twisted.python import failure
from autopush.logging import PushLogger, FirehoseProcessor
log = Logger()
class LocalSentryChomper(cyclone.web.RequestHandler):
def post(self):
self.logged.append(json.loads(self.request.body.decode("zlib")))
return ""
class SentryLogTestCase(twisted.trial.unittest.TestCase):
def setUp(self):
from autopush.http import skip_request_logging
twisted.internet.base.DelayedCall.debug = True
sentry = LocalSentryChomper
sentry.logged = []
site = cyclone.web.Application([
(r"/.*", sentry),
],
log_function=skip_request_logging
)
self.sentry = sentry
self._site = site
self._port = reactor.listenTCP(9999, site)
os.environ["SENTRY_DSN"] = "http://PUBKEY:SECKEY@localhost:9999/1"
def tearDown(self):
os.environ.pop("SENTRY_DSN", None)
reactor.removeAll()
def test_sentry_logging(self):
out = StringIO.StringIO()
pl = PushLogger.setup_logging("Autopush", sentry_dsn=True)
pl._output = out
_client_info = dict(key='value')
_timings = dict(key2='value', key3=True)
log.failure(format="error",
failure=failure.Failure(Exception("eek")),
client_info=_client_info,
timings=_timings)
self.flushLoggedErrors()
d = Deferred()
def check():
logged = self.sentry.logged
if not logged: # pragma: nocover
reactor.callLater(0, check)
return
assert len(logged) == 1
# Check that the sentry data has the client info as a sub dict
# Note: these are double quoted, single quote strings.
assert logged[0].get('extra').get('client_info') == {
u"'key'": u"'value'"}
# Check that the json written actually contains the client info
# collapsed up into 'Fields'.
out.seek(0)
payload = json.loads(out.readline())
assert payload['Fields']['key'] == 'value'
assert payload['Fields']['key2'] == 'value'
assert payload['Fields']['key3'] is True
self._port.stopListening()
pl.stop()
d.callback(True)
reactor.callLater(0, check)
return d
def test_include_stacktrace_when_no_tb(self):
pl = PushLogger.setup_logging("Autopush", sentry_dsn=True)
log.failure("foo", failure.Failure(ZeroDivisionError(), exc_tb=None))
self.flushLoggedErrors()
d = Deferred()
co = sys._getframe().f_code
filename = co.co_filename
testname = co.co_name
def check():
logged = self.sentry.logged
if not logged: # pragma: nocover
reactor.callLater(0, check)
return
assert len(logged) == 1
# Ensure a stacktrace was included w/ the current frame as
# the last entry
frames = logged[0]['stacktrace']['frames']
last = frames[-1]
assert last['abs_path'] == filename
assert last['function'] == testname
self._port.stopListening()
pl.stop()
d.callback(True)
reactor.callLater(0, check)
return d
class PushLoggerTestCase(twisted.trial.unittest.TestCase):
def test_custom_type(self):
obj = PushLogger.setup_logging("Autopush")
obj._output = mock_stdout = Mock()
log.info("omg!", Type=7)
assert len(mock_stdout.mock_calls) == 2
kwargs = mock_stdout.mock_calls[0][1][0]
assert "Type" in kwargs
obj.stop()
def test_human_logs(self):
obj = PushLogger.setup_logging("Autopush", log_format="text")
obj._output = mock_stdout = Mock()
log.info("omg!", Type=7)
assert len(mock_stdout.mock_calls) == 2
mock_stdout.reset_mock()
log.error("wtf!", Type=7)
assert len(mock_stdout.mock_calls) == 2
obj.stop()
def test_start_stop(self):
obj = PushLogger.setup_logging("Autopush")
obj.start()
obj.stop()
def test_file_output(self):
try:
os.unlink("testfile.txt")
except OSError: # pragma: nocover
pass
obj = PushLogger.setup_logging("Autoput", log_output="testfile.txt")
obj.start()
log.info("wow")
obj.stop()
with open("testfile.txt") as f:
lines = f.readlines()
assert len(lines) == 1
@patch("autopush.logging.boto3")
def test_firehose_only_output(self, mock_boto3):
obj = PushLogger("Autoput", log_output="none",
firehose_delivery_stream="test")
obj.firehose = Mock(spec=FirehoseProcessor)
obj.start()
log.info("wow")
obj.stop()
assert len(obj.firehose.mock_calls) == 3
assert len(obj.firehose.process.mock_calls) == 1
class FirehoseProcessorTestCase(twisted.trial.unittest.TestCase):
def setUp(self):
patcher = patch("autopush.logging.boto3")
self.patcher = patcher
self.mock_boto = patcher.start()
def tearDown(self):
self.patcher.stop()
def test_full_queue(self):
proc = FirehoseProcessor("test", 1)
proc.process("test")
assert proc._records.full() is True
proc.process("another")
assert proc._records.qsize() == 1
assert proc._records.get() == "test"
def test_message_max_size(self):
proc = FirehoseProcessor("test")
proc.MAX_REQUEST_SIZE = 1
# Setup the mock
proc._client.put_record_batch.return_value = dict(FailedPutCount=0)
# Start and log
proc.start()
proc.process("a decently larger message")
proc.stop()
assert len(self.mock_boto.mock_calls) == 2
assert len(proc._client.put_record_batch.mock_calls) == 1
def test_message_max_batch(self):
proc = FirehoseProcessor("test")
proc.MAX_RECORD_BATCH = 1
# Setup the mock
proc._client.put_record_batch.return_value = dict(FailedPutCount=0)
# Start and log
proc.start()
proc.process("a decently larger message")
proc.stop()
assert len(self.mock_boto.mock_calls) == 2
assert len(proc._client.put_record_batch.mock_calls) == 1
def test_queue_timeout(self):
proc = FirehoseProcessor("test")
proc.MAX_INTERVAL = 0
proc._records.get = mock_get = Mock()
proc._send_record_batch = mock_send = Mock()
mock_get.side_effect = (Queue.Empty, None)
proc.start()
proc.stop()
mock_send.assert_called()
def test_batch_send_failure(self):
proc = FirehoseProcessor("test")
proc.MAX_RECORD_BATCH = 1
# Setup the mock
proc._client.put_record_batch.return_value = dict(FailedPutCount=1)
# Start and log
proc.start()
proc.process("a decently larger message")
proc.stop()
assert len(self.mock_boto.mock_calls) == 4
assert len(proc._client.put_record_batch.mock_calls) == 3
| mpl-2.0 |
dbr/tvnamer | tvnamer/_titlecase.py | 1 | 3442 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# fmt: off
"""
Original Perl version by: John Gruber http://daringfireball.net/ 10 May 2008
Python version by Stuart Colville http://muffinresearch.co.uk
License: http://www.opensource.org/licenses/mit-license.php
"""
import re
__all__ = ['titlecase']
__version__ = '0.5.2'
SMALL = 'a|an|and|as|at|but|by|en|for|if|in|of|on|or|the|to|v\\.?|via|vs\\.?'
PUNCT = r"""!"#$%&'’()*+,\-./:;?@[\\\]_`{|}~"""
SMALL_WORDS = re.compile(r'^(%s)$' % SMALL, re.I)
INLINE_PERIOD = re.compile(r'[a-z][.][a-z]', re.I)
UC_ELSEWHERE = re.compile(r'[%s]*?[a-zA-Z]+[A-Z]+?' % PUNCT)
CAPFIRST = re.compile(r"^[%s]*?([A-Za-z])" % PUNCT)
SMALL_FIRST = re.compile(r'^([%s]*)(%s)\b' % (PUNCT, SMALL), re.I)
SMALL_LAST = re.compile(r'\b(%s)[%s]?$' % (SMALL, PUNCT), re.I)
SUBPHRASE = re.compile(r'([:.;?!][ ])(%s)' % SMALL)
APOS_SECOND = re.compile(r"^[dol]{1}['’]{1}[a-z]+$", re.I)
ALL_CAPS = re.compile(r'^[A-Z\s%s]+$' % PUNCT)
UC_INITIALS = re.compile(r"^(?:[A-Z]{1}\.{1}|[A-Z]{1}\.{1}[A-Z]{1})+$")
MAC_MC = re.compile(r"^([Mm]a?c)(\w+)")
def titlecase(text):
"""
Titlecases input text
This filter changes all words to Title Caps, and attempts to be clever
about *un*capitalizing SMALL words like a/an/the in the input.
The list of "SMALL words" which are not capped comes from
the New York Times Manual of Style, plus 'vs' and 'v'.
"""
lines = re.split('[\r\n]+', text)
processed = []
for line in lines:
all_caps = ALL_CAPS.match(line)
words = re.split('[\t ]', line)
tc_line = []
for word in words:
if all_caps:
if UC_INITIALS.match(word):
tc_line.append(word)
continue
else:
word = word.lower()
if APOS_SECOND.match(word):
word = word.replace(word[0], word[0].upper())
word = word.replace(word[2], word[2].upper())
tc_line.append(word)
continue
if INLINE_PERIOD.search(word) or UC_ELSEWHERE.match(word):
tc_line.append(word)
continue
if SMALL_WORDS.match(word):
tc_line.append(word.lower())
continue
match = MAC_MC.match(word)
if match:
tc_line.append("%s%s" % (match.group(1).capitalize(),
match.group(2).capitalize()))
continue
if "/" in word and "//" not in word:
slashed = []
for item in word.split('/'):
slashed.append(CAPFIRST.sub(lambda m: m.group(0).upper(), item))
tc_line.append("/".join(slashed))
continue
hyphenated = []
for item in word.split('-'):
hyphenated.append(CAPFIRST.sub(lambda m: m.group(0).upper(), item))
tc_line.append("-".join(hyphenated))
result = " ".join(tc_line)
result = SMALL_FIRST.sub(lambda m: '%s%s' % (
m.group(1),
m.group(2).capitalize()
), result)
result = SMALL_LAST.sub(lambda m: m.group(0).capitalize(), result)
result = SUBPHRASE.sub(lambda m: '%s%s' % (
m.group(1),
m.group(2).capitalize()
), result)
processed.append(result)
return "\n".join(processed)
| unlicense |
dbr/tvnamer | tvnamer/main.py | 1 | 17600 | #!/usr/bin/env python
"""Main tvnamer utility functionality
"""
import os
import sys
import logging
import warnings
try:
import readline
except ImportError:
pass
import json
import tvdb_api
from typing import List, Union, Optional
from tvnamer import cliarg_parser, __version__
from tvnamer.config_defaults import defaults
from tvnamer.config import Config
from .files import FileFinder, FileParser, Renamer, _apply_replacements_input
from .utils import (
warn,
format_episode_numbers,
make_valid_filename,
)
from tvnamer.data import (
BaseInfo,
EpisodeInfo,
DatedEpisodeInfo,
NoSeasonEpisodeInfo,
)
from tvnamer.tvnamer_exceptions import (
ShowNotFound,
SeasonNotFound,
EpisodeNotFound,
EpisodeNameNotFound,
UserAbort,
InvalidPath,
NoValidFilesFoundError,
SkipBehaviourAbort,
InvalidFilename,
DataRetrievalError,
)
LOG = logging.getLogger(__name__)
# Key for use in tvnamer only - other keys can easily be registered at https://thetvdb.com/api-information
TVNAMER_API_KEY = "fb51f9b848ffac9750bada89ecba0225"
def get_move_destination(episode):
# type: (BaseInfo) -> str
"""Constructs the location to move/copy the file
"""
# TODO: Write functional test to ensure this valid'ifying works
def wrap_validfname(fname):
# type: (str) -> str
"""Wrap the make_valid_filename function as it's called twice
and this is slightly long..
"""
if Config["move_files_lowercase_destination"]:
fname = fname.lower()
return make_valid_filename(
fname,
windows_safe=Config["windows_safe_filenames"],
custom_blacklist=Config["custom_filename_character_blacklist"],
replace_with=Config["replace_invalid_characters_with"],
)
# Calls make_valid_filename on series name, as it must valid for a filename
if isinstance(episode, DatedEpisodeInfo):
dest_dir = Config["move_files_destination_date"] % {
"seriesname": make_valid_filename(episode.seriesname),
"year": episode.episodenumbers[0].year,
"month": episode.episodenumbers[0].month,
"day": episode.episodenumbers[0].day,
"originalfilename": episode.originalfilename,
}
elif isinstance(episode, NoSeasonEpisodeInfo):
dest_dir = Config["move_files_destination"] % {
"seriesname": wrap_validfname(episode.seriesname),
"episodenumbers": wrap_validfname(
format_episode_numbers(episode.episodenumbers)
),
"originalfilename": episode.originalfilename,
}
elif isinstance(episode, EpisodeInfo):
dest_dir = Config["move_files_destination"] % {
"seriesname": wrap_validfname(episode.seriesname),
"seasonnumber": episode.seasonnumber,
"episodenumbers": wrap_validfname(
format_episode_numbers(episode.episodenumbers)
),
"originalfilename": episode.originalfilename,
}
else:
raise RuntimeError("Unhandled episode subtype of %s" % type(episode))
return dest_dir
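# Sketch of how the destination template is filled in; the template string and series
# name below are hypothetical, not shipped defaults:
#   Config["move_files_destination"] = "/tv/%(seriesname)s/Season %(seasonnumber)d"
#   get_move_destination(episode)  ->  "/tv/Scrubs/Season 2"  for an EpisodeInfo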
def do_rename_file(cnamer, new_name):
# type: (Renamer, str) -> None
"""Renames the file. cnamer should be Renamer instance,
new_name should be string containing new filename.
"""
try:
cnamer.new_path(
new_fullpath=new_name,
force=Config["overwrite_destination_on_rename"],
leave_symlink=Config["leave_symlink"],
)
except OSError as e:
if Config["skip_behaviour"] == "exit":
warn("Exiting due to error: %s" % e)
raise SkipBehaviourAbort()
warn("Skipping file due to error: %s" % e)
def do_move_file(cnamer, dest_dir=None, dest_filepath=None, get_path_preview=False):
# type: (Renamer, Optional[str], Optional[str], bool) -> Optional[str]
"""Moves file to dest_dir, or to dest_filepath
"""
if (dest_dir, dest_filepath).count(None) != 1:
raise ValueError("Specify only dest_dir or dest_filepath")
if not Config["move_files_enable"]:
raise ValueError("move_files feature is disabled but do_move_file was called")
if Config["move_files_destination"] is None:
raise ValueError(
"Config value for move_files_destination cannot be None if move_files_enabled is True"
)
try:
return cnamer.new_path(
new_path=dest_dir,
new_fullpath=dest_filepath,
always_move=Config["always_move"],
leave_symlink=Config["leave_symlink"],
get_path_preview=get_path_preview,
force=Config["overwrite_destination_on_move"],
)
except OSError as e:
if Config["skip_behaviour"] == "exit":
warn("Exiting due to error: %s" % e)
raise SkipBehaviourAbort()
warn("Skipping file due to error: %s" % e)
return None
def confirm(question, options, default="y"):
# type: (str, List[str], str) -> str
"""Takes a question (string), list of options and a default value (used
when user simply hits enter).
Asks until valid option is entered.
"""
# Highlight default option with [ ]
options_chunks = []
for x in options:
if x == default:
x = "[%s]" % x
if x != "":
options_chunks.append(x)
options_str = "/".join(options_chunks)
while True:
print(question)
print("(%s) " % (options_str), end="")
try:
ans = input().strip()
except KeyboardInterrupt as errormsg:
print("\n", errormsg)
raise UserAbort(errormsg)
if ans in options:
return ans
elif ans == "":
return default
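# e.g. confirm("Rename?", options=["y", "n", "a", "q"], default="y") keeps prompting until
# one of those letters is entered (a bare Enter returns the default "y").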
def process_file(tvdb_instance, episode):
# type: (tvdb_api.Tvdb, BaseInfo) -> None
"""Gets episode name, prompts user for input
"""
print("#" * 20)
print("# Processing file: %s" % episode.fullfilename)
if len(Config["input_filename_replacements"]) > 0:
replaced = _apply_replacements_input(episode.fullfilename)
print("# With custom replacements: %s" % (replaced))
# Use force_name option. Done after input_filename_replacements so
# it can be used to skip the replacements easily
if Config["force_name"] is not None:
episode.seriesname = Config["force_name"]
print("# Detected series: %s (%s)" % (episode.seriesname, episode.number_string()))
try:
episode.populate_from_tvdb(
tvdb_instance,
force_name=Config["force_name"],
series_id=Config["series_id"],
)
except (DataRetrievalError, ShowNotFound) as errormsg:
if Config["always_rename"] and Config["skip_file_on_error"] is True:
if Config["skip_behaviour"] == "exit":
warn("Exiting due to error: %s" % errormsg)
raise SkipBehaviourAbort()
warn("Skipping file due to error: %s" % errormsg)
return
else:
warn("%s" % (errormsg))
except (SeasonNotFound, EpisodeNotFound, EpisodeNameNotFound) as errormsg:
# Show was found, so use corrected series name
if Config["always_rename"] and Config["skip_file_on_error"]:
if Config["skip_behaviour"] == "exit":
warn("Exiting due to error: %s" % errormsg)
raise SkipBehaviourAbort()
warn("Skipping file due to error: %s" % errormsg)
return
warn("%s" % (errormsg))
cnamer = Renamer(episode.fullpath)
should_rename = False
if Config["move_files_only"]:
new_name = episode.fullfilename
should_rename = True
else:
new_name = episode.generate_filename()
if new_name == episode.fullfilename:
print("#" * 20)
print("Existing filename is correct: %s" % episode.fullfilename)
print("#" * 20)
should_rename = True
else:
print("#" * 20)
print("Old filename: %s" % episode.fullfilename)
if len(Config["output_filename_replacements"]) > 0:
# Show filename without replacements
print(
"Before custom output replacements: %s"
% (episode.generate_filename(preview_orig_filename=True))
)
print("New filename: %s" % new_name)
if Config["dry_run"]:
print("%s will be renamed to %s" % (episode.fullfilename, new_name))
if Config["move_files_enable"]:
print(
"%s will be moved to %s"
% (new_name, get_move_destination(episode))
)
return
elif Config["always_rename"]:
do_rename_file(cnamer, new_name)
if Config["move_files_enable"]:
if Config["move_files_destination_is_filepath"]:
do_move_file(
cnamer=cnamer, dest_filepath=get_move_destination(episode)
)
else:
do_move_file(cnamer=cnamer, dest_dir=get_move_destination(episode))
return
ans = confirm("Rename?", options=["y", "n", "a", "q"], default="y")
if ans == "a":
print("Always renaming")
Config["always_rename"] = True
should_rename = True
elif ans == "q":
print("Quitting")
raise UserAbort("User exited with q")
elif ans == "y":
print("Renaming")
should_rename = True
elif ans == "n":
print("Skipping")
else:
print("Invalid input, skipping")
if should_rename:
do_rename_file(cnamer, new_name)
if should_rename and Config["move_files_enable"]:
new_path = get_move_destination(episode)
if Config["dry_run"]:
print("%s will be moved to %s" % (new_name, get_move_destination(episode)))
return
if Config["move_files_destination_is_filepath"]:
do_move_file(cnamer=cnamer, dest_filepath=new_path, get_path_preview=True)
else:
do_move_file(cnamer=cnamer, dest_dir=new_path, get_path_preview=True)
if not Config["batch"] and Config["move_files_confirmation"]:
ans = confirm("Move file?", options=["y", "n", "q"], default="y")
else:
ans = "y"
if ans == "y":
print("Moving file")
do_move_file(cnamer, new_path)
elif ans == "q":
print("Quitting")
raise UserAbort("user exited with q")
def find_files(paths):
# type: (List[str]) -> List[str]
"""Takes an array of paths, returns all files found
"""
valid_files = []
for cfile in paths:
cur = FileFinder(
cfile,
with_extension=Config["valid_extensions"],
filename_blacklist=Config["filename_blacklist"],
recursive=Config["recursive"],
)
try:
valid_files.extend(cur.find_files())
except InvalidPath:
warn("Invalid path: %s" % cfile)
if len(valid_files) == 0:
raise NoValidFilesFoundError()
# Remove duplicate files (all paths from FileFinder are absolute)
valid_files = list(set(valid_files))
return valid_files
def tvnamer(paths):
# type: (List[str]) -> None
"""Main tvnamer function, takes an array of paths, does stuff.
"""
print("#" * 20)
print("# Starting tvnamer")
episodes_found = []
for cfile in find_files(paths):
parser = FileParser(cfile)
try:
episode = parser.parse()
except InvalidFilename as e:
warn("Invalid filename: %s" % e)
else:
if (
episode.seriesname is None
and Config["force_name"] is None
and Config["series_id"] is None
):
warn(
"Parsed filename did not contain series name (and --name or --series-id not specified), skipping: %s"
% cfile
)
else:
episodes_found.append(episode)
if len(episodes_found) == 0:
raise NoValidFilesFoundError()
print(
"# Found %d episode" % len(episodes_found) + ("s" * (len(episodes_found) > 1))
)
# Sort episodes by series name, season and episode number
episodes_found.sort(key=lambda x: x.sortable_info())
# episode sort order
if Config["order"] == "dvd":
dvdorder = True
else:
dvdorder = False
if Config["tvdb_api_key"] is not None:
LOG.debug("Using custom API key from config")
api_key = Config["tvdb_api_key"]
else:
LOG.debug("Using tvnamer default API key")
api_key = TVNAMER_API_KEY
if os.getenv("TVNAMER_TEST_MODE", "0") == "1":
from .test_cache import get_test_cache_session
cache = get_test_cache_session()
else:
cache = True
tvdb_instance = tvdb_api.Tvdb(
interactive=not Config["select_first"],
search_all_languages=Config["search_all_languages"],
language=Config["language"],
dvdorder=dvdorder,
cache=cache,
apikey=api_key,
)
for episode in episodes_found:
process_file(tvdb_instance, episode)
print("")
print("#" * 20)
print("# Done")
def main():
# type: () -> None
"""Parses command line arguments, displays errors from tvnamer in terminal
"""
opter = cliarg_parser.get_cli_parser(defaults)
opts, args = opter.parse_args()
if opts.show_version:
print("tvnamer version: %s" % (__version__,))
print("tvdb_api version: %s" % (tvdb_api.__version__,))
print("python version: %s" % (sys.version,))
sys.exit(0)
if opts.verbose:
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
else:
logging.basicConfig()
# If a config is specified, load it, update the defaults using the loaded
# values, then reparse the options with the updated defaults.
default_configuration = os.path.expanduser("~/.config/tvnamer/tvnamer.json")
old_default_configuration = os.path.expanduser("~/.tvnamer.json")
if opts.loadconfig is not None:
# Command line overrides loading ~/.config/tvnamer/tvnamer.json
config_to_load = opts.loadconfig
elif os.path.isfile(default_configuration):
# No --config arg, so load default config if it exists
config_to_load = default_configuration
elif os.path.isfile(old_default_configuration):
# No --config arg and no new default config, so load the old version if it exists
config_to_load = old_default_configuration
else:
# No arg, nothing at default config location, don't load anything
config_to_load = None
if config_to_load is not None:
LOG.info("Loading config: %s" % (config_to_load))
if os.path.isfile(old_default_configuration):
LOG.warning("WARNING: you have a config at deprecated ~/.tvnamer.json location.")
LOG.warning("Config must be moved to new location: ~/.config/tvnamer/tvnamer.json")
try:
loaded_config = json.load(open(os.path.expanduser(config_to_load)))
except ValueError as e:
LOG.error("Error loading config: %s" % e)
opter.exit(1)
else:
# Config loaded, update optparser's defaults and reparse
defaults.update(loaded_config)
opter = cliarg_parser.get_cli_parser(defaults)
opts, args = opter.parse_args()
# Save config argument
if opts.saveconfig is not None:
LOG.info("Saving config: %s" % (opts.saveconfig))
config_to_save = dict(opts.__dict__)
del config_to_save["saveconfig"]
del config_to_save["loadconfig"]
del config_to_save["showconfig"]
json.dump(
config_to_save,
open(os.path.expanduser(opts.saveconfig), "w+"),
sort_keys=True,
indent=4,
)
opter.exit(0)
# Show config argument
if opts.showconfig:
print(json.dumps(opts.__dict__, sort_keys=True, indent=2))
return
# Process values
if opts.batch:
opts.select_first = True
opts.always_rename = True
# Update global config object
Config.update(opts.__dict__)
if Config["move_files_only"] and not Config["move_files_enable"]:
opter.error(
"Parameter move_files_enable cannot be set to false while parameter move_only is set to true."
)
if Config["titlecase_filename"] and Config["lowercase_filename"]:
warnings.warn(
"Setting 'lowercase_filename' clobbers 'titlecase_filename' option"
)
if len(args) == 0:
opter.error("No filenames or directories supplied")
try:
tvnamer(paths=sorted(args))
except NoValidFilesFoundError:
opter.error("No valid files were supplied")
except UserAbort as errormsg:
opter.error(errormsg)
except SkipBehaviourAbort as errormsg:
opter.error(errormsg)
if __name__ == "__main__":
main()
| unlicense |
fedspendingtransparency/data-act-broker-backend | tests/unit/dataactcore/factories/fsrs.py | 1 | 10488 | from datetime import date, datetime, timezone
import factory
from factory import fuzzy
from dataactcore.models import fsrs
class _FSRSAttributes(factory.Factory):
duns = fuzzy.FuzzyText()
uei_number = fuzzy.FuzzyText()
dba_name = fuzzy.FuzzyText()
principle_place_city = fuzzy.FuzzyText()
principle_place_street = None
principle_place_state = fuzzy.FuzzyText()
principle_place_state_name = fuzzy.FuzzyText()
principle_place_country = fuzzy.FuzzyText()
principle_place_zip = fuzzy.FuzzyText()
principle_place_district = None
parent_duns = fuzzy.FuzzyText()
funding_agency_id = fuzzy.FuzzyText()
funding_agency_name = fuzzy.FuzzyText()
top_paid_fullname_1 = None
top_paid_amount_1 = None
top_paid_fullname_2 = None
top_paid_amount_2 = None
top_paid_fullname_3 = None
top_paid_amount_3 = None
top_paid_fullname_4 = None
top_paid_amount_4 = None
top_paid_fullname_5 = None
top_paid_amount_5 = None
class _ContractAttributes(_FSRSAttributes):
company_name = fuzzy.FuzzyText()
bus_types = fuzzy.FuzzyText()
company_address_city = fuzzy.FuzzyText()
company_address_street = None
company_address_state = fuzzy.FuzzyText()
company_address_state_name = fuzzy.FuzzyText()
company_address_country = fuzzy.FuzzyText()
company_address_zip = fuzzy.FuzzyText()
company_address_district = None
parent_company_name = fuzzy.FuzzyText()
naics = fuzzy.FuzzyText()
funding_office_id = fuzzy.FuzzyText()
funding_office_name = fuzzy.FuzzyText()
recovery_model_q1 = fuzzy.FuzzyChoice((False, True))
recovery_model_q2 = fuzzy.FuzzyChoice((False, True))
class _GrantAttributes(_FSRSAttributes):
dunsplus4 = None
awardee_name = fuzzy.FuzzyText()
awardee_address_city = fuzzy.FuzzyText()
awardee_address_street = None
awardee_address_state = fuzzy.FuzzyText()
awardee_address_state_name = fuzzy.FuzzyText()
awardee_address_country = fuzzy.FuzzyText()
awardee_address_zip = fuzzy.FuzzyText()
awardee_address_district = None
cfda_numbers = fuzzy.FuzzyText()
project_description = fuzzy.FuzzyText()
compensation_q1 = fuzzy.FuzzyChoice((False, True))
compensation_q2 = fuzzy.FuzzyChoice((False, True))
federal_agency_id = fuzzy.FuzzyText()
federal_agency_name = fuzzy.FuzzyText()
class _PrimeAwardAttributes(factory.Factory):
internal_id = fuzzy.FuzzyText()
date_submitted = fuzzy.FuzzyDateTime(datetime(2010, 1, 1, tzinfo=timezone.utc))
report_period_mon = fuzzy.FuzzyText()
report_period_year = fuzzy.FuzzyText()
class FSRSProcurementFactory(_ContractAttributes, _PrimeAwardAttributes):
class Meta:
model = fsrs.FSRSProcurement
contract_number = fuzzy.FuzzyText()
idv_reference_number = None
report_type = fuzzy.FuzzyText()
contract_agency_code = fuzzy.FuzzyText()
contract_idv_agency_code = None
contracting_office_aid = fuzzy.FuzzyText()
contracting_office_aname = fuzzy.FuzzyText()
contracting_office_id = fuzzy.FuzzyText()
contracting_office_name = fuzzy.FuzzyText()
treasury_symbol = fuzzy.FuzzyText()
dollar_obligated = fuzzy.FuzzyText()
date_signed = fuzzy.FuzzyDate(date(2010, 1, 1))
transaction_type = fuzzy.FuzzyText()
program_title = fuzzy.FuzzyText()
subawards = []
class FSRSSubcontractFactory(_ContractAttributes):
class Meta:
model = fsrs.FSRSSubcontract
subcontract_amount = fuzzy.FuzzyText()
subcontract_date = fuzzy.FuzzyDate(date(2010, 1, 1))
subcontract_num = fuzzy.FuzzyText()
overall_description = fuzzy.FuzzyText()
recovery_subcontract_amt = None
class FSRSGrantFactory(_GrantAttributes, _PrimeAwardAttributes):
class Meta:
model = fsrs.FSRSGrant
fain = fuzzy.FuzzyText()
total_fed_funding_amount = fuzzy.FuzzyText()
obligation_date = fuzzy.FuzzyDate(date(2010, 1, 1))
class FSRSSubgrantFactory(_GrantAttributes):
class Meta:
model = fsrs.FSRSSubgrant
subaward_amount = fuzzy.FuzzyText()
subaward_date = fuzzy.FuzzyDate(date(2010, 1, 1))
subaward_num = fuzzy.FuzzyText()
class SubawardFactory(factory.Factory):
class Meta:
model = fsrs.Subaward
unique_award_key = fuzzy.FuzzyText()
award_id = fuzzy.FuzzyText()
parent_award_id = fuzzy.FuzzyText()
award_amount = fuzzy.FuzzyText()
action_date = fuzzy.FuzzyDate(date(2010, 1, 1))
fy = fuzzy.FuzzyText()
awarding_agency_code = fuzzy.FuzzyText()
awarding_agency_name = fuzzy.FuzzyText()
awarding_sub_tier_agency_c = fuzzy.FuzzyText()
awarding_sub_tier_agency_n = fuzzy.FuzzyText()
awarding_office_code = fuzzy.FuzzyText()
awarding_office_name = fuzzy.FuzzyText()
funding_agency_code = fuzzy.FuzzyText()
funding_agency_name = fuzzy.FuzzyText()
funding_sub_tier_agency_co = fuzzy.FuzzyText()
funding_sub_tier_agency_na = fuzzy.FuzzyText()
funding_office_code = fuzzy.FuzzyText()
funding_office_name = fuzzy.FuzzyText()
awardee_or_recipient_uei = fuzzy.FuzzyText()
awardee_or_recipient_uniqu = fuzzy.FuzzyText()
awardee_or_recipient_legal = fuzzy.FuzzyText()
dba_name = fuzzy.FuzzyText()
ultimate_parent_uei = fuzzy.FuzzyText()
ultimate_parent_unique_ide = fuzzy.FuzzyText()
ultimate_parent_legal_enti = fuzzy.FuzzyText()
legal_entity_country_code = fuzzy.FuzzyText()
legal_entity_country_name = fuzzy.FuzzyText()
legal_entity_address_line1 = fuzzy.FuzzyText()
legal_entity_city_name = fuzzy.FuzzyText()
legal_entity_state_code = fuzzy.FuzzyText()
legal_entity_state_name = fuzzy.FuzzyText()
legal_entity_zip = fuzzy.FuzzyText()
legal_entity_congressional = fuzzy.FuzzyText()
legal_entity_foreign_posta = fuzzy.FuzzyText()
business_types = fuzzy.FuzzyText()
place_of_perform_city_name = fuzzy.FuzzyText()
place_of_perform_state_code = fuzzy.FuzzyText()
place_of_perform_state_name = fuzzy.FuzzyText()
place_of_performance_zip = fuzzy.FuzzyText()
place_of_perform_congressio = fuzzy.FuzzyText()
place_of_perform_country_co = fuzzy.FuzzyText()
place_of_perform_country_na = fuzzy.FuzzyText()
award_description = fuzzy.FuzzyText()
naics = fuzzy.FuzzyText()
naics_description = fuzzy.FuzzyText()
cfda_numbers = fuzzy.FuzzyText()
cfda_titles = fuzzy.FuzzyText()
subaward_type = fuzzy.FuzzyText()
subaward_report_year = fuzzy.FuzzyText()
subaward_report_month = fuzzy.FuzzyText()
subaward_number = fuzzy.FuzzyText()
subaward_amount = fuzzy.FuzzyText()
sub_action_date = fuzzy.FuzzyDate(date(2010, 1, 1))
sub_awardee_or_recipient_uei = fuzzy.FuzzyText()
sub_awardee_or_recipient_uniqu = fuzzy.FuzzyText()
sub_awardee_or_recipient_legal = fuzzy.FuzzyText()
sub_dba_name = fuzzy.FuzzyText()
sub_ultimate_parent_uei = fuzzy.FuzzyText()
sub_ultimate_parent_unique_ide = fuzzy.FuzzyText()
sub_ultimate_parent_legal_enti = fuzzy.FuzzyText()
sub_legal_entity_country_code = fuzzy.FuzzyText()
sub_legal_entity_country_name = fuzzy.FuzzyText()
sub_legal_entity_address_line1 = fuzzy.FuzzyText()
sub_legal_entity_city_name = fuzzy.FuzzyText()
sub_legal_entity_state_code = fuzzy.FuzzyText()
sub_legal_entity_state_name = fuzzy.FuzzyText()
sub_legal_entity_zip = fuzzy.FuzzyText()
sub_legal_entity_congressional = fuzzy.FuzzyText()
sub_legal_entity_foreign_posta = fuzzy.FuzzyText()
sub_business_types = fuzzy.FuzzyText()
sub_place_of_perform_city_name = fuzzy.FuzzyText()
sub_place_of_perform_state_code = fuzzy.FuzzyText()
sub_place_of_perform_state_name = fuzzy.FuzzyText()
sub_place_of_performance_zip = fuzzy.FuzzyText()
sub_place_of_perform_congressio = fuzzy.FuzzyText()
sub_place_of_perform_country_co = fuzzy.FuzzyText()
sub_place_of_perform_country_na = fuzzy.FuzzyText()
subaward_description = fuzzy.FuzzyText()
sub_high_comp_officer1_full_na = fuzzy.FuzzyText()
sub_high_comp_officer1_amount = fuzzy.FuzzyText()
sub_high_comp_officer2_full_na = fuzzy.FuzzyText()
sub_high_comp_officer2_amount = fuzzy.FuzzyText()
sub_high_comp_officer3_full_na = fuzzy.FuzzyText()
sub_high_comp_officer3_amount = fuzzy.FuzzyText()
sub_high_comp_officer4_full_na = fuzzy.FuzzyText()
sub_high_comp_officer4_amount = fuzzy.FuzzyText()
sub_high_comp_officer5_full_na = fuzzy.FuzzyText()
sub_high_comp_officer5_amount = fuzzy.FuzzyText()
prime_id = fuzzy.FuzzyInteger(0, 100)
internal_id = fuzzy.FuzzyText()
date_submitted = fuzzy.FuzzyDateTime(datetime(2010, 1, 1, tzinfo=timezone.utc))
report_type = fuzzy.FuzzyText()
transaction_type = fuzzy.FuzzyText()
program_title = fuzzy.FuzzyText()
contract_agency_code = fuzzy.FuzzyText()
contract_idv_agency_code = fuzzy.FuzzyText()
grant_funding_agency_id = fuzzy.FuzzyText()
grant_funding_agency_name = fuzzy.FuzzyText()
federal_agency_name = fuzzy.FuzzyText()
treasury_symbol = fuzzy.FuzzyText()
dunsplus4 = fuzzy.FuzzyText()
recovery_model_q1 = fuzzy.FuzzyText()
recovery_model_q2 = fuzzy.FuzzyText()
compensation_q1 = fuzzy.FuzzyText()
compensation_q2 = fuzzy.FuzzyText()
high_comp_officer1_full_na = fuzzy.FuzzyText()
high_comp_officer1_amount = fuzzy.FuzzyText()
high_comp_officer2_full_na = fuzzy.FuzzyText()
high_comp_officer2_amount = fuzzy.FuzzyText()
high_comp_officer3_full_na = fuzzy.FuzzyText()
high_comp_officer3_amount = fuzzy.FuzzyText()
high_comp_officer4_full_na = fuzzy.FuzzyText()
high_comp_officer4_amount = fuzzy.FuzzyText()
high_comp_officer5_full_na = fuzzy.FuzzyText()
high_comp_officer5_amount = fuzzy.FuzzyText()
sub_id = fuzzy.FuzzyInteger(0, 100)
sub_parent_id = fuzzy.FuzzyInteger(0, 100)
sub_federal_agency_id = fuzzy.FuzzyText()
sub_federal_agency_name = fuzzy.FuzzyText()
sub_funding_agency_id = fuzzy.FuzzyText()
sub_funding_agency_name = fuzzy.FuzzyText()
sub_funding_office_id = fuzzy.FuzzyText()
sub_funding_office_name = fuzzy.FuzzyText()
sub_naics = fuzzy.FuzzyText()
sub_cfda_numbers = fuzzy.FuzzyText()
sub_dunsplus4 = fuzzy.FuzzyText()
sub_recovery_subcontract_amt = fuzzy.FuzzyText()
sub_recovery_model_q1 = fuzzy.FuzzyText()
sub_recovery_model_q2 = fuzzy.FuzzyText()
sub_compensation_q1 = fuzzy.FuzzyText()
sub_compensation_q2 = fuzzy.FuzzyText()
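# Typical test usage is a sketch like the following; the field values are illustrative
# overrides of the fuzzy defaults above:
#   sub = SubawardFactory(unique_award_key="CONT_AWD_123", subaward_amount="5000")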
| cc0-1.0 |
fedspendingtransparency/data-act-broker-backend | tests/unit/dataactvalidator/test_fabs47.py | 1 | 1670 | from tests.unit.dataactcore.factories.staging import FABSFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs47'
def test_column_headers(database):
expected_subset = {'row_number', 'funding_opportunity_number', 'assistance_type',
'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Test FundingOpportunityNumber must be blank for non-grants/non-cooperative agreements
(AssistanceType = 06, 07, 08, 09, 10, or 11).
"""
fabs_1 = FABSFactory(funding_opportunity_number='', assistance_type='06', correction_delete_indicatr='C')
fabs_2 = FABSFactory(funding_opportunity_number=None, assistance_type='09', correction_delete_indicatr=None)
# Ignored for other assistance types
fabs_3 = FABSFactory(funding_opportunity_number='123', assistance_type='03', correction_delete_indicatr='C')
# Ignored for CorrectionDeleteIndicator of D
fabs_4 = FABSFactory(funding_opportunity_number='123', assistance_type='08', correction_delete_indicatr='d')
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2, fabs_3, fabs_4])
assert errors == 0
def test_failure(database):
""" Test failure FundingOpportunityNumber must be blank for non-grants/non-cooperative agreements
(AssistanceType = 06, 07, 08, 09, 10, or 11).
"""
fabs_1 = FABSFactory(funding_opportunity_number='123', assistance_type='06', correction_delete_indicatr='C')
errors = number_of_errors(_FILE, database, models=[fabs_1])
assert errors == 1
| cc0-1.0 |
fedspendingtransparency/data-act-broker-backend | dataactcore/aws/s3Handler.py | 1 | 4256 | import boto3
import logging
from botocore.exceptions import ClientError
from dataactcore.config import CONFIG_BROKER
logger = logging.getLogger(__name__)
class S3Handler:
""" This class acts a wrapper for S3 URL Signing
Attributes:
bucketRoute: The name of the bucket to be used
Constants:
BASE_URL: The start of the urls generated by S3Handler
ENABLE_S3: whether to use S3 or not
URL_LIFETIME: Length of time before s3 URLs expire in seconds
"""
BASE_URL = "https://files-broker-nonprod.usaspending.gov"
ENABLE_S3 = True
URL_LIFETIME = 60
def __init__(self, name=None):
""" Creates the object for signing URLS
Args:
name: Name of the S3 bucket
"""
if name is None:
self.bucketRoute = CONFIG_BROKER['aws_bucket']
else:
self.bucketRoute = name
def _sign_url(self, path, file_name, bucket_route, url_mapping=None, method="put_object"):
""" Creates the object for signing URLS
Args:
path: Path to folder
file_name: Name of file to get signed URL for.
bucket_route: Name of the bucket being accessed
url_mapping: The mapping to replace the S3 URL before giving it to the user
method: method to create signed url for
Returns:
A string containing the signed URL to the file
"""
if S3Handler.ENABLE_S3:
s3 = boto3.client('s3', region_name=CONFIG_BROKER['aws_region'])
s3_params = {'Bucket': bucket_route,
'Key': (path + "/" + file_name) if path else file_name}
presigned_url = s3.generate_presigned_url(method, s3_params, ExpiresIn=S3Handler.URL_LIFETIME)
if url_mapping:
presigned_url = presigned_url.replace(presigned_url.split('/')[2],
CONFIG_BROKER['proxy_url'] + '/' + url_mapping[1])
return presigned_url
return S3Handler.BASE_URL + "/" + self.bucketRoute + "/" + path + "/" + file_name
def get_signed_url(self, path, file_name, bucket_route=None, url_mapping=None):
""" Signs a URL
Args:
path: Path to folder
file_name: Name of file to get signed URL for.
bucket_route: Name of the bucket being accessed
url_mapping: The mapping to replace the S3 URL before giving it to the user
Returns:
A string containing the signed URL to the file
"""
bucket_route = self.bucketRoute if bucket_route is None else bucket_route
return self._sign_url(path, file_name, bucket_route, url_mapping, 'get_object')
@staticmethod
def get_file_size(filename):
""" Get the size of the specified file from the submission bucket
Args:
filename: Name of the file in the submission bucket to check the size of
Returns:
File size in number of bytes for specified filename, or 0 if file doesn't exist
"""
s3_reso = boto3.resource('s3', region_name=CONFIG_BROKER['aws_region'])
obj_info = s3_reso.ObjectSummary(CONFIG_BROKER['aws_bucket'], filename)
try:
return obj_info.size
except ClientError:
logger.warning("File doesn't exist on AWS: %s", filename)
return 0
@staticmethod
def copy_file(original_bucket, new_bucket, original_path, new_path):
""" Copies a file from one bucket to another.
Args:
original_bucket: Name of the bucket to copy from
new_bucket: Name of the bucket to copy to
original_path: Path and filename of the original file
new_path: Path and filename for the copied file
"""
s3 = boto3.resource('s3', region_name=CONFIG_BROKER['aws_region'])
source_info = {
'Bucket': original_bucket,
'Key': original_path
}
s3.meta.client.copy(source_info, new_bucket, new_path)
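# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal example of driving this wrapper, assuming CONFIG_BROKER is populated and AWS credentials
# are available in the environment; the path and file names below are placeholders, not real broker keys.
if __name__ == '__main__':
    handler = S3Handler()
    # Time-limited download URL for a hypothetical submission file
    url = handler.get_signed_url(path='submissions/1234', file_name='file_a.csv')
    logger.info('Signed URL (expires in %s seconds): %s', S3Handler.URL_LIFETIME, url)
    # Size check for the same object; 0 is returned if the key does not exist
    size = S3Handler.get_file_size('submissions/1234/file_a.csv')
    logger.info('File size: %s bytes', size)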
| cc0-1.0 |
fedspendingtransparency/data-act-broker-backend | dataactcore/migrations/versions/4d8408c33fee_add_frec_to_user_model.py | 1 | 2519 | """add FREC to user model
Revision ID: 4d8408c33fee
Revises: da2e50d423ff
Create Date: 2017-07-06 13:19:01.155328
"""
# revision identifiers, used by Alembic.
revision = '4d8408c33fee'
down_revision = 'da2e50d423ff'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user_affiliation', sa.Column('frec_id', sa.Integer(), nullable=True))
op.add_column('user_affiliation', sa.Column('user_affiliation_id', sa.Integer(), nullable=False, primary_key=True))
op.create_index(op.f('ix_user_affiliation_cgac_id'), 'user_affiliation', ['cgac_id'], unique=False)
op.create_index(op.f('ix_user_affiliation_frec_id'), 'user_affiliation', ['frec_id'], unique=False)
op.create_index(op.f('ix_user_affiliation_user_id'), 'user_affiliation', ['user_id'], unique=False)
op.create_foreign_key('user_affiliation_frec_fk', 'user_affiliation', 'frec', ['frec_id'], ['frec_id'], ondelete='CASCADE')
op.drop_constraint('user_affiliation_pkey', 'user_affiliation', type_='primary')
op.create_primary_key('user_affiliation_pkey', 'user_affiliation', ['user_affiliation_id'])
op.alter_column('user_affiliation', 'cgac_id',
existing_type=sa.INTEGER(),
nullable=True)
### end Alembic commands ###
def downgrade_data_broker():
op.execute("DELETE FROM user_affiliation "
"WHERE cgac_id IS NULL")
### commands auto generated by Alembic - please adjust! ###
op.alter_column('user_affiliation', 'cgac_id',
existing_type=sa.INTEGER(),
nullable=False)
op.drop_constraint('user_affiliation_pkey', 'user_affiliation', type_='primary')
op.create_primary_key('user_affiliation_pkey', 'user_affiliation', ['user_id', 'cgac_id'])
op.drop_constraint('user_affiliation_frec_fk', 'user_affiliation', type_='foreignkey')
op.drop_index(op.f('ix_user_affiliation_user_id'), table_name='user_affiliation')
op.drop_index(op.f('ix_user_affiliation_frec_id'), table_name='user_affiliation')
op.drop_index(op.f('ix_user_affiliation_cgac_id'), table_name='user_affiliation')
op.drop_column('user_affiliation', 'user_affiliation_id')
op.drop_column('user_affiliation', 'frec_id')
### end Alembic commands ###
| cc0-1.0 |
fedspendingtransparency/data-act-broker-backend | dataactcore/migrations/versions/812387580a0b_rename_user_permissions_column.py | 1 | 1298 | """rename user permissions column
Revision ID: 812387580a0b
Revises: a97dabbd44f4
Create Date: 2016-11-09 11:40:11.657516
"""
# revision identifiers, used by Alembic.
revision = '812387580a0b'
down_revision = 'a97dabbd44f4'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.execute('TRUNCATE permission_type')
op.add_column('users', sa.Column('permission_type_id', sa.Integer(), nullable=True))
op.create_foreign_key('user_permission_type_fk', 'users', 'permission_type', ['permission_type_id'], ['permission_type_id'])
op.drop_column('users', 'permissions')
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('permissions', sa.INTEGER(), autoincrement=False, nullable=True))
op.drop_constraint('user_permission_type_fk', 'users', type_='foreignkey')
op.drop_column('users', 'permission_type_id')
op.execute('TRUNCATE permission_type')
### end Alembic commands ###
| cc0-1.0 |
fedspendingtransparency/data-act-broker-backend | tests/unit/dataactvalidator/test_fabs37_3.py | 1 | 1435 | from tests.unit.dataactcore.factories.staging import FABSFactory
from dataactcore.models.domainModels import CFDAProgram
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs37_3'
def test_column_headers(database):
expected_subset = {'row_number', 'cfda_number', 'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Test that no errors occur when the cfda_number exists. """
cfda = CFDAProgram(program_number=12.340)
fabs_1 = FABSFactory(cfda_number='12.340', correction_delete_indicatr='')
# Ignore correction delete indicator of D
fabs_2 = FABSFactory(cfda_number='AB.CDE', correction_delete_indicatr='d')
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2, cfda])
assert errors == 0
def test_failure(database):
""" Test that it fails when the cfda_number does not exist. """
# test for cfda_number that doesn't exist in the table
cfda = CFDAProgram(program_number=12.340)
fabs_1 = FABSFactory(cfda_number='54.321', correction_delete_indicatr='')
fabs_2 = FABSFactory(cfda_number='AB.CDE', correction_delete_indicatr='c')
fabs_3 = FABSFactory(cfda_number='11.111', correction_delete_indicatr=None)
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2, fabs_3, cfda])
assert errors == 3
| cc0-1.0 |
fedspendingtransparency/data-act-broker-backend | tests/unit/dataactvalidator/test_fabs6.py | 1 | 1333 | from tests.unit.dataactcore.factories.staging import FABSFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs6'
def test_column_headers(database):
expected_subset = {'row_number', 'record_type', 'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Tests success for when Record type is required and cannot be blank. It must be 1, 2, or 3 """
fabs_1 = FABSFactory(record_type=1, correction_delete_indicatr=None)
fabs_2 = FABSFactory(record_type=2, correction_delete_indicatr='')
fabs_3 = FABSFactory(record_type=3, correction_delete_indicatr='c')
# Ignore correction delete indicator of D
fabs_4 = FABSFactory(record_type=0, correction_delete_indicatr='d')
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2, fabs_3, fabs_4])
assert errors == 0
def test_failure(database):
""" Tests failure for when Record type is required and cannot be blank. It must be 1, 2, or 3 """
fabs_1 = FABSFactory(record_type=0, correction_delete_indicatr=None)
fabs_2 = FABSFactory(record_type=None, correction_delete_indicatr='c')
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2])
assert errors == 2
| cc0-1.0 |
fedspendingtransparency/data-act-broker-backend | tests/unit/dataactvalidator/test_fabsreq9.py | 1 | 1607 | from tests.unit.dataactcore.factories.staging import FABSFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabsreq9'
def test_column_headers(database):
expected_subset = {'row_number', 'awardee_or_recipient_legal', 'correction_delete_indicatr',
'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" Test AwardeeOrRecipientLegalEntityName is required for all submissions except delete records. """
fabs = FABSFactory(correction_delete_indicatr='C', awardee_or_recipient_legal='REDACTED')
fabs_2 = FABSFactory(correction_delete_indicatr='', awardee_or_recipient_legal='Name')
# Test ignoring for D records
fabs_3 = FABSFactory(correction_delete_indicatr='d', awardee_or_recipient_legal=None)
fabs_4 = FABSFactory(correction_delete_indicatr='D', awardee_or_recipient_legal='')
fabs_5 = FABSFactory(correction_delete_indicatr='d', awardee_or_recipient_legal='Name')
errors = number_of_errors(_FILE, database, models=[fabs, fabs_2, fabs_3, fabs_4, fabs_5])
assert errors == 0
def test_failure(database):
""" Test fail AwardeeOrRecipientLegalEntityName is required for all submissions except delete records. """
fabs = FABSFactory(correction_delete_indicatr='c', awardee_or_recipient_legal=None)
fabs_2 = FABSFactory(correction_delete_indicatr=None, awardee_or_recipient_legal='')
errors = number_of_errors(_FILE, database, models=[fabs, fabs_2])
assert errors == 2
| cc0-1.0 |
fedspendingtransparency/data-act-broker-backend | dataactcore/migrations/versions/11338a6b7e77_adding_business_categories_derivation_.py | 1 | 4287 | """Adding business categories derivation function
Revision ID: 11338a6b7e77
Revises: e26d14b0d235
Create Date: 2021-12-09 09:42:10.715687
"""
# revision identifiers, used by Alembic.
revision = '11338a6b7e77'
down_revision = 'e26d14b0d235'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("""
create or replace function compile_fabs_business_categories(business_types text)
returns text[]
immutable parallel safe
as $$
declare
bc_arr text[];
begin
-- BUSINESS (FOR-PROFIT ORGANIZATION)
if business_types ~ '(R|23)'
then
bc_arr := bc_arr || array['small_business'];
end if;
if business_types ~ '(Q|22)'
then
bc_arr := bc_arr || array['other_than_small_business'];
end if;
if bc_arr && array['small_business', 'other_than_small_business']
then
bc_arr := bc_arr || array['category_business'];
end if;
-- NON-PROFIT
if business_types ~ '(M|N|12)'
then
bc_arr := bc_arr || array['nonprofit'];
end if;
-- HIGHER EDUCATION
if business_types ~ '(H|06)'
then
bc_arr := bc_arr || array['public_institution_of_higher_education'];
end if;
if business_types ~ '(O|20)'
then
bc_arr := bc_arr || array['private_institution_of_higher_education'];
end if;
if business_types ~ '(T|U|V|S)'
then
bc_arr := bc_arr || array['minority_serving_institution_of_higher_education'];
end if;
if bc_arr && array[
'public_institution_of_higher_education',
'private_institution_of_higher_education',
'minority_serving_institution_of_higher_education'
]
then
bc_arr := bc_arr || array['higher_education'];
end if;
-- GOVERNMENT
if business_types ~ '(A|00)'
then
bc_arr := bc_arr || array['regional_and_state_government'];
end if;
if business_types ~ '(E)'
then
bc_arr := bc_arr || array['regional_organization'];
end if;
if business_types ~ '(F)'
then
bc_arr := bc_arr || array['us_territory_or_possession'];
end if;
if business_types ~ '(B|C|D|G|01|02|04|05)'
then
bc_arr := bc_arr || array['local_government'];
end if;
if business_types ~ '(I|J|K|11)'
then
bc_arr := bc_arr || array['indian_native_american_tribal_government'];
end if;
if business_types ~ '(L)'
then
bc_arr := bc_arr || array['authorities_and_commissions'];
end if;
if bc_arr && array[
'regional_and_state_government',
'us_territory_or_possession',
'local_government',
'indian_native_american_tribal_government',
'authorities_and_commissions',
'regional_organization'
]
then
bc_arr := bc_arr || array['government'];
end if;
-- INDIVIDUALS
if business_types ~ '(P|21)'
then
bc_arr := bc_arr || array['individuals'];
end if;
-- Sort and return the array.
return array(select unnest(bc_arr) order by 1);
end;
$$ language plpgsql;
""")
# ### end Alembic commands ###
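    # Editor's note (illustrative, not executed by this migration): once created, the function can be
    # spot-checked with a query such as
    #     SELECT compile_fabs_business_categories('R M A');
    # which should return the sorted array
    #     {category_business,government,nonprofit,regional_and_state_government,small_business}
    # since 'R' maps to small_business, 'M' to nonprofit, and 'A' to regional_and_state_government,
    # with the category_business and government roll-ups added on top.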
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.execute(""" DROP FUNCTION IF EXISTS compile_fabs_business_categories(TEXT) """)
# ### end Alembic commands ###
| cc0-1.0 |
fedspendingtransparency/data-act-broker-backend | tests/unit/dataactvalidator/test_b5_object_class_program_activity_2.py | 1 | 1853 | from tests.unit.dataactcore.factories.staging import ObjectClassProgramActivityFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'b5_object_class_program_activity_2'
def test_column_headers(database):
expected_subset = {'row_number', 'gross_outlay_amount_by_pro_cpe', 'gross_outlays_undelivered_cpe',
'gross_outlays_delivered_or_cpe', 'difference', 'uniqueid_TAS',
'uniqueid_DisasterEmergencyFundCode', 'uniqueid_ProgramActivityCode', 'uniqueid_ObjectClass'}
actual = set(query_columns(_FILE, database))
assert (actual & expected_subset) == expected_subset
def test_success(database):
""" Tests that Object Class Program activity gross_outlays_delivered_or_cpe + gross_outlays_undelivered_cpe
equals gross_outlay_amount_by_pro_cpe for the same TAS/DEFC combination
"""
op = ObjectClassProgramActivityFactory(gross_outlay_amount_by_pro_cpe=2, gross_outlays_undelivered_cpe=2,
gross_outlays_undelivered_fyb=1, gross_outlays_delivered_or_cpe=2,
gross_outlays_delivered_or_fyb=1)
assert number_of_errors(_FILE, database, models=[op]) == 0
def test_failure(database):
""" Tests that Object Class Program activity gross_outlays_delivered_or_cpe + gross_outlays_undelivered_cpe
doesn't equal gross_outlay_amount_by_pro_cpe for the same TAS/DEFC combination
"""
op = ObjectClassProgramActivityFactory(gross_outlay_amount_by_pro_cpe=2, gross_outlays_undelivered_cpe=1,
gross_outlays_undelivered_fyb=1, gross_outlays_delivered_or_cpe=1,
gross_outlays_delivered_or_fyb=1)
assert number_of_errors(_FILE, database, models=[op]) == 1
| cc0-1.0 |
fedspendingtransparency/data-act-broker-backend | dataactcore/migrations/versions/d45dde2ba15b_alter_detached_regular_award_procurement.py | 1 | 4837 | """Alter and add many columns in DetachedAwardProcurement and AwardProcurement
Revision ID: d45dde2ba15b
Revises: 001758a1ab82
Create Date: 2018-03-09 14:08:13.058669
"""
# revision identifiers, used by Alembic.
revision = 'd45dde2ba15b'
down_revision = '001758a1ab82'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.execute("ALTER TABLE award_procurement RENAME COLUMN walsh_healey_act TO materials_supplies_article")
op.execute("ALTER TABLE award_procurement RENAME COLUMN service_contract_act TO labor_standards")
op.execute("ALTER TABLE award_procurement RENAME COLUMN davis_bacon_act TO construction_wage_rate_req")
op.execute("ALTER TABLE award_procurement RENAME COLUMN government_furnished_equip TO government_furnished_prope")
op.add_column('award_procurement', sa.Column('cage_code', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('inherently_government_func', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('organizational_type', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('number_of_employees', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('annual_revenue', sa.Text(), nullable=True))
op.add_column('award_procurement', sa.Column('total_obligated_amount', sa.Text(), nullable=True))
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN walsh_healey_act TO materials_supplies_article")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN walsh_healey_act_descrip TO materials_supplies_descrip")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN service_contract_act TO labor_standards")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN service_contract_act_desc TO labor_standards_descrip")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN davis_bacon_act TO construction_wage_rate_req")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN davis_bacon_act_descrip TO construction_wage_rat_desc")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN government_furnished_equip TO government_furnished_prope")
op.add_column('detached_award_procurement', sa.Column('cage_code', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('inherently_government_func', sa.Text(), nullable=True))
op.add_column('detached_award_procurement', sa.Column('organizational_type', sa.Text(), nullable=True))
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.execute("ALTER TABLE award_procurement RENAME COLUMN materials_supplies_article TO walsh_healey_act")
op.execute("ALTER TABLE award_procurement RENAME COLUMN labor_standards TO service_contract_act")
op.execute("ALTER TABLE award_procurement RENAME COLUMN construction_wage_rate_req TO davis_bacon_act")
op.execute("ALTER TABLE award_procurement RENAME COLUMN government_furnished_prope TO government_furnished_equip")
op.drop_column('award_procurement', 'cage_code')
op.drop_column('award_procurement', 'inherently_government_func')
op.drop_column('award_procurement', 'organizational_type')
op.drop_column('award_procurement', 'number_of_employees')
op.drop_column('award_procurement', 'annual_revenue')
op.drop_column('award_procurement', 'total_obligated_amount')
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN materials_supplies_article TO walsh_healey_act")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN materials_supplies_descrip TO walsh_healey_act_descrip")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN labor_standards TO service_contract_act")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN labor_standards_descrip TO service_contract_act_desc")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN construction_wage_rate_req TO davis_bacon_act")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN construction_wage_rat_desc TO davis_bacon_act_descrip")
op.execute("ALTER TABLE detached_award_procurement RENAME COLUMN government_furnished_prope TO government_furnished_equip")
op.drop_column('detached_award_procurement', 'cage_code')
op.drop_column('detached_award_procurement', 'inherently_government_func')
op.drop_column('detached_award_procurement', 'organizational_type')
### end Alembic commands ###
| cc0-1.0 |
fedspendingtransparency/data-act-broker-backend | dataactcore/migrations/versions/df2f541291a5_create_gtas_submission_window_table.py | 1 | 1146 | """Create gtas_submission_window table
Revision ID: df2f541291a5
Revises: 427320dea2ab
Create Date: 2017-07-06 12:06:53.946926
"""
# revision identifiers, used by Alembic.
revision = 'df2f541291a5'
down_revision = '427320dea2ab'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_table('gtas_submission_window',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('window_id', sa.Integer(), nullable=False),
sa.Column('start_date', sa.Date(), nullable=True),
sa.Column('end_date', sa.Date(), nullable=True),
sa.PrimaryKeyConstraint('window_id')
)
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('gtas_submission_window')
### end Alembic commands ###
| cc0-1.0 |
fedspendingtransparency/data-act-broker-backend | dataactcore/migrations/versions/4d66a8d6e11b_create_filerequest_table_for_d_file_.py | 1 | 2841 | """create FileRequest table for D file generation
Revision ID: 4d66a8d6e11b
Revises: bcdf1134f0df
Create Date: 2017-10-19 14:28:03.788883
"""
# revision identifiers, used by Alembic.
revision = '4d66a8d6e11b'
down_revision = 'bcdf1134f0df'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_table('file_request',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('file_request_id', sa.Integer(), nullable=False),
sa.Column('request_date', sa.Date(), nullable=False),
sa.Column('job_id', sa.Integer(), nullable=False),
sa.Column('parent_job_id', sa.Integer(), nullable=True),
sa.Column('start_date', sa.Date(), nullable=False),
sa.Column('end_date', sa.Date(), nullable=False),
sa.Column('agency_code', sa.Text(), nullable=False),
sa.Column('file_type', sa.Text(), nullable=False),
sa.Column('is_cached_file', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(['job_id'], ['job.job_id'], name='fk_file_request_job_id'),
sa.PrimaryKeyConstraint('file_request_id')
)
op.create_index(op.f('ix_file_request_agency_code'), 'file_request', ['agency_code'], unique=False)
op.create_index(op.f('ix_file_request_end_date'), 'file_request', ['end_date'], unique=False)
op.create_index(op.f('ix_file_request_file_type'), 'file_request', ['file_type'], unique=False)
op.create_index(op.f('ix_file_request_parent_job_id'), 'file_request', ['parent_job_id'], unique=False)
op.create_index(op.f('ix_file_request_request_date'), 'file_request', ['request_date'], unique=False)
op.create_index(op.f('ix_file_request_start_date'), 'file_request', ['start_date'], unique=False)
op.add_column('job', sa.Column('from_cached', sa.Boolean(), server_default='False', nullable=False))
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('job', 'from_cached')
op.drop_index(op.f('ix_file_request_start_date'), table_name='file_request')
op.drop_index(op.f('ix_file_request_request_date'), table_name='file_request')
op.drop_index(op.f('ix_file_request_parent_job_id'), table_name='file_request')
op.drop_index(op.f('ix_file_request_file_type'), table_name='file_request')
op.drop_index(op.f('ix_file_request_end_date'), table_name='file_request')
op.drop_index(op.f('ix_file_request_agency_code'), table_name='file_request')
op.drop_table('file_request')
### end Alembic commands ###
| cc0-1.0 |
fedspendingtransparency/data-act-broker-backend | dataactcore/migrations/versions/7597deb348fb_fabs_created_at_and_fpds_updated_at_.py | 1 | 1251 | """FABS created_at and FPDS updated_at indexes
Revision ID: 7597deb348fb
Revises: b168f0cdc5a8
Create Date: 2018-02-06 16:08:20.985202
"""
# revision identifiers, used by Alembic.
revision = '7597deb348fb'
down_revision = 'b168f0cdc5a8'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_detached_award_procurement_updated_at'), 'detached_award_procurement', ['updated_at'], unique=False)
op.create_index(op.f('ix_published_award_financial_assistance_created_at'), 'published_award_financial_assistance', ['created_at'], unique=False)
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_detached_award_procurement_updated_at'), table_name='detached_award_procurement')
op.drop_index(op.f('ix_published_award_financial_assistance_created_at'), table_name='published_award_financial_assistance')
### end Alembic commands ###
| cc0-1.0 |
fedspendingtransparency/data-act-broker-backend | dataactcore/scripts/load_duns_exec_comp.py | 1 | 14861 | import argparse
import datetime
import logging
import os
import re
import json
import tempfile
import boto3
import requests
from dataactcore.config import CONFIG_BROKER
from dataactcore.interfaces.db import GlobalDB
from dataactcore.interfaces.function_bag import update_external_data_load_date
from dataactcore.broker_logging import configure_logging
from dataactcore.models.domainModels import SAMRecipient
from dataactcore.utils.sam_recipient import (parse_sam_recipient_file, update_sam_recipient, parse_exec_comp_file,
update_missing_parent_names, request_sam_csv_api,
is_nonexistent_file_error)
from dataactvalidator.health_check import create_app
logger = logging.getLogger(__name__)
SAM_FILE_FORMAT = 'SAM_{data_type}_UTF-8_{period}{version}_%Y%m%d.ZIP'
DATA_TYPES = {
'DUNS': 'FOUO',
'Executive Compensation': 'EXECCOMP'
}
PERIODS = ['MONTHLY', 'DAILY']
VERSIONS = {
'v1': '', # V1 files simply exclude the version
'v2': '_V2'
}
S3_ARCHIVE = CONFIG_BROKER['sam']['duns']['csv_archive_bucket']
S3_ARCHIVE_PATH = '{data_type}/{version}/{file_name}'
def load_from_sam(data_type, sess, historic, local=None, metrics=None, reload_date=None):
""" Process the script arguments to figure out which files to process in which order
Args:
data_type: data type to load (DUNS or executive compensation)
sess: the database connection
historic: whether to load in monthly file and daily files after, or just the latest daily files
local: path to local directory to process, if None, it will go though the remote SAM service
metrics: dictionary representing metrics data for the load
reload_date: specific date to force reload from
"""
if not metrics:
metrics = {}
# Figure out what files we have available based on our local or remote setup
if local:
local_files = os.listdir(local)
monthly_v1_files = sorted([monthly_file for monthly_file in local_files
if re.match('SAM_{}_UTF-8_MONTHLY_\d+\.ZIP'.format(DATA_TYPES[data_type]),
monthly_file.upper())])
monthly_v2_files = sorted([monthly_file for monthly_file in local_files
if re.match('SAM_{}_UTF-8_MONTHLY_V2_\d+\.ZIP'.format(DATA_TYPES[data_type]),
monthly_file.upper())])
daily_v1_files = sorted([daily_file for daily_file in local_files
if re.match('SAM_{}_UTF-8_DAILY_\d+\.ZIP'.format(DATA_TYPES[data_type]),
daily_file.upper())])
daily_v2_files = sorted([daily_file for daily_file in local_files
if re.match('SAM_{}_UTF-8_DAILY_V2_\d+\.ZIP'.format(DATA_TYPES[data_type]),
daily_file.upper())])
else:
# TODO: the SAM API currently doesn't list available files and doesn't include historic ones,
# so we're pulling files from the CSV_ARCHIVE_BUCKET bucket up and then use the API.
# Rework this if SAM includes these historic files in the API and list what files are available
monthly_v1_files = list_s3_archive_files(data_type, 'MONTHLY', 'v1')
monthly_v2_files = list_s3_archive_files(data_type, 'MONTHLY', 'v2')
daily_v1_files = list_s3_archive_files(data_type, 'DAILY', 'v1')
daily_v2_files = list_s3_archive_files(data_type, 'DAILY', 'v2')
# Extracting the dates from these to figure out which files to process where
# For both monthly and daily files, we only want to process v1 files until the equivalent v2 files are available
monthly_v1_dates = extract_dates_from_list(monthly_v1_files, data_type, 'MONTHLY', 'v1')
monthly_v2_dates = extract_dates_from_list(monthly_v2_files, data_type, 'MONTHLY', 'v2')
monthly_v1_dates = [monthly_v1_date for monthly_v1_date in monthly_v1_dates
if monthly_v1_date not in monthly_v2_dates]
if historic:
earliest_date = sorted(monthly_v1_dates + monthly_v2_dates)[0]
daily_v1_dates = extract_dates_from_list(daily_v1_files, data_type, 'DAILY', 'v1')
daily_v2_dates = extract_dates_from_list(daily_v2_files, data_type, 'DAILY', 'v2')
daily_v1_dates = [daily_v1_dates for daily_v1_dates in daily_v1_dates
if daily_v1_dates not in daily_v2_dates]
latest_date = sorted(daily_v1_dates + daily_v2_dates)[-1]
# For any dates after the latest date we have in the archive, use the API
daily_v2_api_dates = [latest_date + datetime.timedelta(days=i)
for i in range(1, (datetime.date.today() - latest_date).days + 1)]
# determine which daily files to load in by setting the start load date
if historic:
load_date = earliest_date
elif reload_date:
# a bit redundant but also date validation
load_date = datetime.datetime.strptime(reload_date, '%Y-%m-%d').date()
else:
sam_field = SAMRecipient.last_sam_mod_date if data_type == 'DUNS' else SAMRecipient.last_exec_comp_mod_date
load_date = sess.query(sam_field).filter(sam_field.isnot(None)).order_by(sam_field.desc()).first()
if not load_date:
field = 'sam' if data_type == 'DUNS' else 'executive compensation'
raise Exception('No last {} mod date found in DUNS table. Please run historic loader first.'.format(field))
load_date = load_date[0]
# only load in the daily files after the load date
daily_v1_dates = list(filter(lambda daily_date: daily_date >= load_date, daily_v1_dates))
daily_v2_dates = list(filter(lambda daily_date: daily_date >= load_date, daily_v2_dates))
daily_v2_api_dates = list(filter(lambda daily_date: daily_date >= load_date, daily_v2_api_dates))
if historic:
# load in the earliest monthly file and all daily files after
version = 'v1' if earliest_date in monthly_v1_dates else 'v2'
process_sam_file(data_type, 'MONTHLY', version, earliest_date, sess, local=local, metrics=metrics)
for daily_v1_date in daily_v1_dates:
process_sam_file(data_type, 'DAILY', 'v1', daily_v1_date, sess, local=local, metrics=metrics)
for daily_v2_date in daily_v2_dates:
process_sam_file(data_type, 'DAILY', 'v2', daily_v2_date, sess, local=local, metrics=metrics)
if not local:
for daily_api_v2_date in daily_v2_api_dates:
try:
process_sam_file(data_type, 'DAILY', 'v2', daily_api_v2_date, sess, local=local, api=True,
metrics=metrics)
except requests.exceptions.HTTPError as e:
if is_nonexistent_file_error(e):
logger.warning('No file found for {}, continuing'.format(daily_api_v2_date))
continue
else:
logger.exception(e.response.content.decode('utf-8'))
raise e
if data_type == 'DUNS':
updated_date = datetime.date.today()
metrics['parent_rows_updated'] = update_missing_parent_names(sess, updated_date=updated_date)
metrics['parent_update_date'] = str(updated_date)
if historic:
logger.info('Despite the historical load being done, the UEI will most likely be out of date. '
'Please manually update using the UEI crosswalk file and SQL.')
def extract_dates_from_list(sam_files, data_type, period, version):
""" Given a list of SAM files, extract the dates the files refer to
Args:
sam_files: list of sam file names to extract dates from
data_type: data type to load (DUNS or executive compensation)
period: monthly or daily
version: v1 or v2
Returns:
sorted list of dates corresponding to the files
"""
sam_filename_format = SAM_FILE_FORMAT.format(data_type=DATA_TYPES[data_type], period=period,
version=VERSIONS[version])
return sorted([datetime.datetime.strptime(sam_file, sam_filename_format).date() for sam_file in sam_files])
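# Illustrative example (assumed file name, not a real archive listing):
#     extract_dates_from_list(['SAM_FOUO_UTF-8_MONTHLY_20200601.ZIP'], 'DUNS', 'MONTHLY', 'v1')
# returns [datetime.date(2020, 6, 1)], because the v1 monthly pattern resolves to
# 'SAM_FOUO_UTF-8_MONTHLY_%Y%m%d.ZIP'.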
def list_s3_archive_files(data_type, period, version):
""" Given the requested fields, provide a list of available files from the remote S3 archive
Args:
data_type: data type to load (DUNS or executive compensation)
period: monthly or daily
version: v1 or v2
Returns:
list of available files in the S3 archive
"""
s3_resource = boto3.resource('s3', region_name='us-gov-west-1')
archive_bucket = s3_resource.Bucket(S3_ARCHIVE)
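    # SAM_FILE_FORMAT[:30] is 'SAM_{data_type}_UTF-8_{period}', i.e. the file name prefix without the
    # version/date suffix, so the listing below matches every dated file under the requested version folder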
file_name = SAM_FILE_FORMAT[:30].format(data_type=DATA_TYPES[data_type], period=period)
prefix = S3_ARCHIVE_PATH.format(data_type=data_type, version=version, file_name=file_name)
return [os.path.basename(object.key) for object in archive_bucket.objects.filter(Prefix=prefix)]
def download_sam_file(root_dir, file_name, api=False):
""" Downloads the requested SAM file to root_dir
Args:
root_dir: the folder to download the SAM file into
file_name: the name of the SAM file
api: whether to use the SAM CSV API or not
Raises:
requests.exceptions.HTTPError if the SAM HTTP API doesn't have the file requested
"""
logger.info('Pulling {} via {}'.format(file_name, 'API' if api else 'archive'))
if api:
request_sam_csv_api(root_dir, file_name)
else:
s3_client = boto3.client('s3', region_name='us-gov-west-1')
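        # The second underscore-delimited token of the file name (FOUO or EXECCOMP) identifies the data
        # type, so invert DATA_TYPES to recover the archive folder name ('DUNS' or 'Executive Compensation')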
reverse_map = {v: k for k, v in DATA_TYPES.items()}
data_type = reverse_map[file_name.split('_')[1]]
version = 'v2' if 'V2' in file_name else 'v1'
key = S3_ARCHIVE_PATH.format(data_type=data_type, version=version, file_name=file_name)
s3_client.download_file(S3_ARCHIVE, key, os.path.join(root_dir, file_name))
def process_sam_file(data_type, period, version, date, sess, local=None, api=False, metrics=None):
""" Process the SAM file found locally or remotely
Args:
data_type: data type to load (DUNS or executive compensation)
period: monthly or daily
version: v1 or v2
date: the date of the SAM file to load
sess: the database connection
local: path to local directory to process, if None, it will go though the remote SAM service
api: whether to use the SAM CSV API or not
metrics: dictionary representing metrics data for the load
Raises:
requests.exceptions.HTTPError if the SAM HTTP API doesn't have the file requested
"""
if not metrics:
metrics = {}
root_dir = local if local else tempfile.gettempdir()
file_name_format = SAM_FILE_FORMAT.format(data_type=DATA_TYPES[data_type], period=period, version=VERSIONS[version])
file_name = date.strftime(file_name_format)
if not local:
download_sam_file(root_dir, file_name, api=api)
file_path = os.path.join(root_dir, file_name)
includes_uei = version == 'v2'
if data_type == 'DUNS':
add_update_data, delete_data = parse_sam_recipient_file(file_path, metrics=metrics)
if add_update_data is not None:
update_sam_recipient(sess, add_update_data, metrics=metrics, includes_uei=includes_uei)
if delete_data is not None:
update_sam_recipient(sess, delete_data, metrics=metrics, deletes=True, includes_uei=includes_uei)
else:
exec_comp_data = parse_exec_comp_file(file_path, metrics=metrics)
update_sam_recipient(sess, exec_comp_data, metrics=metrics, includes_uei=includes_uei)
if not local:
os.remove(file_path)
if __name__ == '__main__':
now = datetime.datetime.now()
configure_logging()
parser = argparse.ArgumentParser(description='Get data from SAM and update SAM Recipient/exec comp tables')
parser.add_argument("-t", "--data_type", choices=['duns', 'exec_comp', 'both'], default='both',
help='Select data type to load')
scope = parser.add_mutually_exclusive_group(required=True)
scope.add_argument("-a", "--historic", action="store_true", help='Reload from the first monthly file on')
scope.add_argument("-u", "--update", action="store_true", help='Load daily files since latest last_sam_mod_date')
environ = parser.add_mutually_exclusive_group(required=True)
environ.add_argument("-l", "--local", type=str, default=None, help='Local directory to work from')
environ.add_argument("-r", "--remote", action="store_true", help='Work from a remote directory (SAM)')
parser.add_argument("-f", "--reload_date", type=str, default=None, help='Force update from a specific date'
' (YYYY-MM-DD)')
args = parser.parse_args()
data_type = args.data_type
historic = args.historic
update = args.update
local = args.local
reload_date = args.reload_date
metrics = {
'script_name': 'load_duns_exec_comp.py',
'start_time': str(now),
'files_processed': [],
'records_received': 0,
'records_processed': 0,
'adds_received': 0,
'updates_received': 0,
'deletes_received': 0,
'added_uei': [],
'updated_uei': [],
'records_added': 0,
'records_updated': 0,
'parent_rows_updated': 0,
'parent_update_date': None
}
with create_app().app_context():
sess = GlobalDB.db().session
if data_type in ('duns', 'both'):
start_time = datetime.datetime.now()
load_from_sam('DUNS', sess, historic, local, metrics=metrics, reload_date=reload_date)
update_external_data_load_date(start_time, datetime.datetime.now(), 'recipient')
if data_type in ('exec_comp', 'both'):
start_time = datetime.datetime.now()
load_from_sam('Executive Compensation', sess, historic, local, metrics=metrics, reload_date=reload_date)
update_external_data_load_date(start_time, datetime.datetime.now(), 'executive_compensation')
sess.close()
metrics['records_added'] = len(set(metrics['added_uei']))
metrics['records_updated'] = len(set(metrics['updated_uei']) - set(metrics['added_uei']))
del metrics['added_uei']
del metrics['updated_uei']
logger.info('Added {} records and updated {} records'.format(metrics['records_added'], metrics['records_updated']))
metrics['duration'] = str(datetime.datetime.now() - now)
with open('load_duns_exec_comp_metrics.json', 'w+') as metrics_file:
json.dump(metrics, metrics_file)
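# Illustrative invocations (editor's sketch; flags follow the argparse setup above, paths are placeholders):
#     python load_duns_exec_comp.py --historic --remote --data_type duns
#         reload DUNS data from the earliest archived monthly file onward, using the S3 archive and SAM API
#     python load_duns_exec_comp.py -u -l /data/sam_files -t exec_comp
#         load only the daily executive compensation files found in a local directory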
| cc0-1.0 |
fedspendingtransparency/data-act-broker-backend | tests/unit/dataactvalidator/test_fabs33_1.py | 1 | 1700 | from tests.unit.dataactcore.factories.staging import FABSFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs33_1'
def test_column_headers(database):
expected_subset = {'row_number', 'period_of_performance_curr', 'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" PeriodOfPerformanceCurrentEndDate is an optional field, but when provided, must follow YYYYMMDD format """
fabs_1 = FABSFactory(period_of_performance_curr='19990131', correction_delete_indicatr='')
fabs_2 = FABSFactory(period_of_performance_curr=None, correction_delete_indicatr='c')
fabs_3 = FABSFactory(period_of_performance_curr='', correction_delete_indicatr=None)
# Ignore correction delete indicator of D
fabs_4 = FABSFactory(period_of_performance_curr='1234', correction_delete_indicatr='d')
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2, fabs_3, fabs_4])
assert errors == 0
def test_failure(database):
""" PeriodOfPerformanceCurrentEndDate is an optional field, but when provided, must follow YYYYMMDD format """
fabs_1 = FABSFactory(period_of_performance_curr='19990132', correction_delete_indicatr='')
fabs_2 = FABSFactory(period_of_performance_curr='19991331', correction_delete_indicatr=None)
fabs_3 = FABSFactory(period_of_performance_curr='1234', correction_delete_indicatr='c')
fabs_4 = FABSFactory(period_of_performance_curr='200912', correction_delete_indicatr='C')
errors = number_of_errors(_FILE, database, models=[fabs_1, fabs_2, fabs_3, fabs_4])
assert errors == 4
| cc0-1.0 |