# File: normandy/recipes/api/filters.py (mozilla/normandy, MPL-2.0)

import django_filters
from rest_framework import serializers
from normandy.recipes.models import Recipe
class EnabledStateFilter(django_filters.Filter):
"""A special case filter for filtering recipes by their enabled state"""
def filter(self, qs, value):
if value is not None:
lc_value = value.lower()
if lc_value in ["true", "1"]:
return qs.only_enabled()
elif lc_value in ["false", "0"]:
return qs.only_disabled()
return qs
class ApprovalStateFilter(django_filters.Filter):
"""A special case filter for filtering approval requests by their approval state"""
def filter(self, qs, value):
if value is None:
return qs
lc_value = value.lower()
if lc_value in ["true", "1", "approved"]:
return qs.filter(approved=True)
elif lc_value in ["false", "0", "rejected"]:
return qs.filter(approved=False)
elif lc_value in ["null", "pending"]:
return qs.filter(approved=None)
class BaselineCapabilitiesFilter(django_filters.Filter):
"""Filters recipe by whether they use only baseline capabilities, defaulting to only baseline."""
def __init__(self, *args, default_only_baseline=False, **kwargs):
super().__init__(*args, **kwargs)
self.default_only_baseline = default_only_baseline
def filter(self, qs, value):
baseline_only = self.default_only_baseline
if value is not None:
lc_value = value.lower()
baseline_only = lc_value in ["true", "1"]
if baseline_only:
recipes = list(qs)
if not all(isinstance(recipe, Recipe) for recipe in recipes):
raise TypeError("BaselineCapabilitiesFilter can only be used to filter recipes")
match_ids = []
for recipe in recipes:
if (
recipe.approved_revision
and recipe.approved_revision.uses_only_baseline_capabilities()
):
match_ids.append(recipe.id)
return Recipe.objects.filter(id__in=match_ids)
return qs
class CharSplitFilter(django_filters.CharFilter):
"""Custom CharFilter class that splits the value (if it's set) by `,` into a list
and uses the `__in` operator."""
def filter(self, qs, value):
if value:
qs = qs.filter(**{"{}__in".format(self.field_name): value.split(",")})
return qs
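
# A minimal sketch (hypothetical field name and value) of the lookup kwargs the
# CharSplitFilter above builds; this helper is illustrative only and is not used
# by the filters themselves.
def _char_split_example():
    field_name, value = "channels", "release,beta"
    return {"{}__in".format(field_name): value.split(",")}
    # -> {"channels__in": ["release", "beta"]}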
class FilterObjectFieldFilter(django_filters.Filter):
"""
Find recipes that have a filter object with the given field
Format for the filter's value is `key1:value1,key2:value2`. This would
include recipes that have a filter object that has a field `key1` that
contains the value `value1`, and that have a filter object with a field
`key2` that contains `value2`. The two filter objects do not have to be
the same, but may be.
"""
def filter(self, qs, value):
if value is None:
return qs
needles = []
for segment in value.split(","):
if ":" not in segment:
raise serializers.ValidationError(
{"filter_object": "Filters must be of the format `key1:val1,key2:val2,..."}
)
key, val = segment.split(":", 1)
needles.append((key, val))
# Let the database do a first pass filter
for k, v in needles:
qs = qs.filter(latest_revision__filter_object_json__contains=k)
qs = qs.filter(latest_revision__filter_object_json__contains=v)
recipes = list(qs)
if not all(isinstance(recipe, Recipe) for recipe in recipes):
raise TypeError("FilterObjectFieldFilter can only be used to filter recipes")
# For every recipe that contains the right substrings, look through
# their filter objects for an actual match
match_ids = []
for recipe in recipes:
recipe_matches = True
            # Recipes need to have all the keys and values in the needles
for k, v in needles:
for filter_object in recipe.latest_revision.filter_object:
# Don't consider invalid filter objects
if not filter_object.is_valid():
continue
if k in filter_object.data and v in str(filter_object.data[k]):
# Found a match
break
else:
                    # Did not break, so no match was found
recipe_matches = False
break
if recipe_matches:
match_ids.append(recipe.id)
return Recipe.objects.filter(id__in=match_ids)
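
# A minimal sketch (hypothetical query value) of how FilterObjectFieldFilter
# parses its `key1:val1,key2:val2` format into needles; illustrative only.
def _filter_object_field_example():
    value = "type:channel,channels:release"
    needles = []
    for segment in value.split(","):
        key, val = segment.split(":", 1)
        needles.append((key, val))
    return needles  # [("type", "channel"), ("channels", "release")]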

# File: normandy/recipes/migrations/0008_auto_20180510_2252.py (mozilla/normandy, MPL-2.0)

# Generated by Django 2.0.5 on 2018-05-10 22:52
# flake8: noqa
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("recipes", "0007_convert_simple_filters_to_filter_objects"),
]
operations = [
migrations.CreateModel(
name="EnabledState",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("created", models.DateTimeField(default=django.utils.timezone.now)),
("enabled", models.BooleanField(default=False)),
(
"creator",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="enabled_states",
to=settings.AUTH_USER_MODEL,
),
),
(
"revision",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="enabled_states",
to="recipes.RecipeRevision",
),
),
],
options={"ordering": ("-created",)},
),
migrations.AddField(
model_name="reciperevision",
name="enabled_state",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="current_for_revision",
to="recipes.EnabledState",
),
),
]

# File: normandy/recipes/tests/test_filters.py (mozilla/normandy, MPL-2.0)

from datetime import datetime
import factory.fuzzy
import pytest
import re
from collections import defaultdict
from rest_framework import serializers
from normandy.base.jexl import get_normandy_jexl
from normandy.recipes import filters
from normandy.recipes.tests import (
ChannelFactory,
CountryFactory,
LocaleFactory,
RecipeRevisionFactory,
WindowsVersionFactory,
)
@pytest.mark.django_db
class FilterTestsBase:
"""Common tests for all filter object types"""
should_be_baseline = True
def create_basic_filter(self):
"""To be overwritten by subclasses to create test filters"""
raise NotImplementedError
def create_revision(self, *args, **kwargs):
return RecipeRevisionFactory(*args, **kwargs)
def test_it_can_be_constructed(self):
self.create_basic_filter()
def test_has_capabilities(self):
filter = self.create_basic_filter()
# Would throw if not defined
assert isinstance(filter.get_capabilities(), set)
def test_jexl_works(self):
filter = self.create_basic_filter()
rev = self.create_revision()
# Would throw if not defined
expr = filter.to_jexl(rev)
assert isinstance(expr, str)
jexl = get_normandy_jexl()
errors = jexl.validate(expr)
assert list(errors) == []
def test_uses_only_baseline_capabilities(self, settings):
if self.should_be_baseline == "skip":
return
filter = self.create_basic_filter()
capabilities = filter.get_capabilities()
if self.should_be_baseline:
assert capabilities <= settings.BASELINE_CAPABILITIES
else:
assert capabilities - settings.BASELINE_CAPABILITIES
def test_it_is_in_the_by_type_list(self):
filter_instance = self.create_basic_filter()
filter_class = filter_instance.__class__
assert filter_class in filters.by_type.values()
def test_its_type_is_camelcase(self):
filter_instance = self.create_basic_filter()
assert re.match("[a-zA-Z]+", filter_instance.type)
assert "_" not in filter_instance.type
class TestProfileCreationDateFilter(FilterTestsBase):
def create_basic_filter(self, direction="olderThan", date="2020-02-01"):
return filters.ProfileCreateDateFilter.create(direction=direction, date=date)
def test_generates_jexl_older_than(self):
filter = self.create_basic_filter(direction="olderThan", date="2020-07-30")
assert (
filter.to_jexl(self.create_revision())
== "(normandy.telemetry.main.environment.profile.creationDate<=18473)"
)
def test_generates_jexl_newer_than(self):
filter = self.create_basic_filter(direction="newerThan", date="2020-02-01")
assert set(filter.to_jexl(self.create_revision()).split("||")) == {
"(!normandy.telemetry.main)",
"(normandy.telemetry.main.environment.profile.creationDate>18293)",
}
def test_issue_2242(self):
"""Make sure that dates are parsed correctly"""
epoch = datetime.utcfromtimestamp(0)
datetime_factory = factory.fuzzy.FuzzyNaiveDateTime(epoch)
dt = datetime_factory.fuzz()
# Profile Creation Date is measured in days since epoch.
daystamp = (dt - epoch).days
filter = self.create_basic_filter(date=f"{dt.year}-{dt.month}-{dt.day}")
assert str(daystamp) in filter.to_jexl(self.create_revision())
def test_throws_error_on_bad_direction(self):
filter = self.create_basic_filter(direction="newer", date="2020-02-01")
with pytest.raises(serializers.ValidationError):
filter.to_jexl(self.create_revision())
def test_throws_error_on_bad_date(self):
with pytest.raises(AssertionError):
self.create_basic_filter(direction="newerThan", date="Jan 7, 2020")
class TestVersionFilter(FilterTestsBase):
should_be_baseline = False
def create_basic_filter(self, versions=None):
if versions is None:
versions = [72, 74]
return filters.VersionFilter.create(versions=versions)
def test_generates_jexl(self):
filter = self.create_basic_filter(versions=[72, 74])
assert set(filter.to_jexl(self.create_revision()).split("||")) == {
'(env.version|versionCompare("72.!")>=0)&&(env.version|versionCompare("72.*")<0)',
'(env.version|versionCompare("74.!")>=0)&&(env.version|versionCompare("74.*")<0)',
}
class TestVersionRangeFilter(FilterTestsBase):
should_be_baseline = False
def create_basic_filter(self, min_version="72.0b2", max_version="72.0b8"):
return filters.VersionRangeFilter.create(min_version=min_version, max_version=max_version)
def test_generates_jexl(self):
filter = self.create_basic_filter(min_version="72.0b2", max_version="75.0a1")
assert set(filter.to_jexl(self.create_revision()).split("&&")) == {
'(env.version|versionCompare("72.0b2")>=0)',
'(env.version|versionCompare("75.0a1")<0)',
}
class TestDateRangeFilter(FilterTestsBase):
def create_basic_filter(
self, not_before="2020-02-01T00:00:00Z", not_after="2020-03-01T00:00:00Z"
):
return filters.DateRangeFilter.create(not_before=not_before, not_after=not_after)
def test_generates_jexl(self):
filter = self.create_basic_filter()
assert set(filter.to_jexl(self.create_revision()).split("&&")) == {
'(normandy.request_time>="2020-02-01T00:00:00Z"|date)',
'(normandy.request_time<"2020-03-01T00:00:00Z"|date)',
}
class TestWindowsBuildNumberFilter(FilterTestsBase):
def create_basic_filter(self, value=12345, comparison="equal"):
return filters.WindowsBuildNumberFilter.create(value=value, comparison=comparison)
@pytest.mark.parametrize(
"comparison,symbol",
[
("equal", "=="),
("greater_than", ">"),
("greater_than_equal", ">="),
("less_than", "<"),
("less_than_equal", "<="),
],
)
def test_generates_jexl_number_ops(self, comparison, symbol):
filter = self.create_basic_filter(comparison=comparison)
assert (
filter.to_jexl(self.create_revision())
== f"(normandy.os.isWindows && normandy.os.windowsBuildNumber {symbol} 12345)"
)
def test_generates_jexl_error_on_bad_comparison(self):
filter = self.create_basic_filter(comparison="typo")
with pytest.raises(serializers.ValidationError):
filter.to_jexl(self.create_revision())
class TestWindowsVersionFilter(FilterTestsBase):
def create_basic_filter(self, versions_list=[6.1]):
WindowsVersionFactory(nt_version=6.1)
return filters.WindowsVersionFilter.create(versions_list=versions_list)
def test_generates_jexl(self):
filter = self.create_basic_filter()
assert (
filter.to_jexl(self.create_revision())
== "(normandy.os.isWindows && normandy.os.windowsVersion in [6.1])"
)
def test_generates_jexl_error_on_bad_version(self):
with pytest.raises(AssertionError):
filters.WindowsVersionFilter.create(versions_list=[8.9])
class TestChannelFilter(FilterTestsBase):
def create_basic_filter(self, channels=None):
if channels:
channel_objs = [ChannelFactory(slug=slug) for slug in channels]
else:
channel_objs = [ChannelFactory()]
return filters.ChannelFilter.create(channels=[c.slug for c in channel_objs])
def test_generates_jexl(self):
filter = self.create_basic_filter(channels=["release", "beta"])
assert filter.to_jexl(self.create_revision()) == 'normandy.channel in ["release","beta"]'
class TestLocaleFilter(FilterTestsBase):
def create_basic_filter(self, locales=None):
if locales:
locale_objs = [LocaleFactory(code=code) for code in locales]
else:
locale_objs = [LocaleFactory()]
return filters.LocaleFilter.create(locales=[locale.code for locale in locale_objs])
def test_generates_jexl(self):
filter = self.create_basic_filter(locales=["en-US", "en-CA"])
assert filter.to_jexl(self.create_revision()) == 'normandy.locale in ["en-US","en-CA"]'
class TestCountryFilter(FilterTestsBase):
def create_basic_filter(self, countries=None):
if countries:
country_objs = [CountryFactory(code=code) for code in countries]
else:
country_objs = [CountryFactory()]
return filters.CountryFilter.create(countries=[c.code for c in country_objs])
def test_generates_jexl(self):
filter = self.create_basic_filter(countries=["SV", "MX"])
assert filter.to_jexl(self.create_revision()) == 'normandy.country in ["SV","MX"]'
class TestPlatformFilter(FilterTestsBase):
def create_basic_filter(self, platforms=["all_mac", "all_windows"]):
return filters.PlatformFilter.create(platforms=platforms)
def test_generates_jexl_list_of_two(self):
filter = self.create_basic_filter()
assert set(filter.to_jexl(self.create_revision()).split("||")) == {
"normandy.os.isMac",
"normandy.os.isWindows",
}
def test_generates_jexl_list_of_one(self):
filter = self.create_basic_filter(platforms=["all_linux"])
assert set(filter.to_jexl(self.create_revision()).split("||")) == {"normandy.os.isLinux"}
def test_throws_error_on_bad_platform(self):
filter = self.create_basic_filter(platforms=["all_linu"])
with pytest.raises(serializers.ValidationError):
filter.to_jexl(self.create_revision())
class TestNegateFilter(FilterTestsBase):
def create_basic_filter(self):
data_for_filter = {"type": "channel", "channels": ["release", "beta"]}
return filters.NegateFilter.create(filter_to_negate=data_for_filter)
def test_generates_jexl(self):
negate_filter = self.create_basic_filter()
assert (
negate_filter.to_jexl(self.create_revision())
== '!(normandy.channel in ["release","beta"])'
)
class TestAndFilter(FilterTestsBase):
def create_basic_filter(self, subfilters=None):
if subfilters is None:
subfilters = [
{"type": "channel", "channels": ["release", "beta"]},
{"type": "locale", "locales": ["en-US", "de"]},
]
return filters.AndFilter.create(subfilters=subfilters)
def test_generates_jexl_zero_subfilters(self):
with pytest.raises(AssertionError) as excinfo:
self.create_basic_filter(subfilters=[])
assert "has at least 1 element" in str(excinfo.value)
def test_generates_jexl_one_subfilter(self):
negate_filter = self.create_basic_filter(
subfilters=[{"type": "channel", "channels": ["release"]}]
)
assert negate_filter.to_jexl(self.create_revision()) == '(normandy.channel in ["release"])'
def test_generates_jexl_two_subfilters(self):
negate_filter = self.create_basic_filter(
subfilters=[
{"type": "channel", "channels": ["release"]},
{"type": "locale", "locales": ["en-US"]},
]
)
assert (
negate_filter.to_jexl(self.create_revision())
== '(normandy.channel in ["release"]&&normandy.locale in ["en-US"])'
)
class TestOrFilter(FilterTestsBase):
def create_basic_filter(self, subfilters=None):
if subfilters is None:
subfilters = [
{"type": "channel", "channels": ["release", "beta"]},
{"type": "locale", "locales": ["en-US", "de"]},
]
return filters.OrFilter.create(subfilters=subfilters)
def test_generates_jexl_zero_subfilters(self):
with pytest.raises(AssertionError) as excinfo:
self.create_basic_filter(subfilters=[])
assert "has at least 1 element" in str(excinfo.value)
def test_generates_jexl_one_subfilter(self):
negate_filter = self.create_basic_filter(
subfilters=[{"type": "channel", "channels": ["release"]}]
)
assert negate_filter.to_jexl(self.create_revision()) == '(normandy.channel in ["release"])'
def test_generates_jexl_two_subfilters(self):
negate_filter = self.create_basic_filter(
subfilters=[
{"type": "channel", "channels": ["release"]},
{"type": "locale", "locales": ["en-US"]},
]
)
assert (
negate_filter.to_jexl(self.create_revision())
== '(normandy.channel in ["release"]||normandy.locale in ["en-US"])'
)
class TestAddonInstalledFilter(FilterTestsBase):
def create_basic_filter(self, addons=["@abcdef", "ghijk@lmnop"], any_or_all="any"):
return filters.AddonInstalledFilter.create(addons=addons, any_or_all=any_or_all)
def test_generates_jexl_installed_any(self):
filter = self.create_basic_filter()
assert set(filter.to_jexl(self.create_revision()).split("||")) == {
'normandy.addons["@abcdef"]',
'normandy.addons["ghijk@lmnop"]',
}
def test_generates_jexl_installed_all(self):
filter = self.create_basic_filter(any_or_all="all")
assert set(filter.to_jexl(self.create_revision()).split("&&")) == {
'normandy.addons["@abcdef"]',
'normandy.addons["ghijk@lmnop"]',
}
def test_throws_error_on_bad_any_or_all(self):
filter = self.create_basic_filter(any_or_all="error")
with pytest.raises(serializers.ValidationError):
filter.to_jexl(self.create_revision())
class TestAddonActiveFilter(FilterTestsBase):
def create_basic_filter(self, addons=["@abcdef", "ghijk@lmnop"], any_or_all="any"):
return filters.AddonActiveFilter.create(addons=addons, any_or_all=any_or_all)
def test_generates_jexl_active_any(self):
filter = self.create_basic_filter()
assert set(filter.to_jexl(self.create_revision()).split("||")) == {
'normandy.addons["@abcdef"].isActive',
'normandy.addons["ghijk@lmnop"].isActive',
}
def test_generates_jexl_active_all(self):
filter = self.create_basic_filter(any_or_all="all")
assert set(filter.to_jexl(self.create_revision()).split("&&")) == {
'normandy.addons["@abcdef"].isActive',
'normandy.addons["ghijk@lmnop"].isActive',
}
def test_throws_error_on_bad_any_or_all(self):
filter = self.create_basic_filter(any_or_all="error")
with pytest.raises(serializers.ValidationError):
filter.to_jexl(self.create_revision())
class TestPrefCompareFilter(FilterTestsBase):
def create_basic_filter(
self, pref="browser.urlbar.maxRichResults", value=10, comparison="equal"
):
return filters.PrefCompareFilter.create(pref=pref, value=value, comparison=comparison)
def test_generates_jexl(self):
filter = self.create_basic_filter()
assert (
filter.to_jexl(self.create_revision())
== "'browser.urlbar.maxRichResults'|preferenceValue == 10"
)
@pytest.mark.parametrize(
"comparison,symbol",
[
("greater_than", ">"),
("greater_than_equal", ">="),
("less_than", "<"),
("less_than_equal", "<="),
],
)
def test_generates_jexl_number_ops(self, comparison, symbol):
filter = self.create_basic_filter(comparison=comparison)
assert (
filter.to_jexl(self.create_revision())
== f"'browser.urlbar.maxRichResults'|preferenceValue {symbol} 10"
)
def test_generates_jexl_boolean(self):
filter = self.create_basic_filter(value=False)
assert (
filter.to_jexl(self.create_revision())
== "'browser.urlbar.maxRichResults'|preferenceValue == false"
)
def test_generates_jexl_string_in(self):
filter = self.create_basic_filter(value="default", comparison="contains")
assert (
filter.to_jexl(self.create_revision())
== "\"default\" in 'browser.urlbar.maxRichResults'|preferenceValue"
)
def test_generates_jexl_error(self):
filter = self.create_basic_filter(comparison="invalid")
with pytest.raises(serializers.ValidationError):
filter.to_jexl(self.create_revision())
class TestPrefExistsFilter(FilterTestsBase):
def create_basic_filter(self, pref="browser.urlbar.maxRichResults", value=True):
return filters.PrefExistsFilter.create(pref=pref, value=value)
def test_generates_jexl_pref_exists_true(self):
filter = self.create_basic_filter()
assert (
filter.to_jexl(self.create_revision())
== "'browser.urlbar.maxRichResults'|preferenceExists"
)
def test_generates_jexl_pref_exists_false(self):
filter = self.create_basic_filter(value=False)
assert (
filter.to_jexl(self.create_revision())
== "!('browser.urlbar.maxRichResults'|preferenceExists)"
)
class TestPrefUserSetFilter(FilterTestsBase):
def create_basic_filter(self, pref="browser.urlbar.maxRichResults", value=True):
return filters.PrefUserSetFilter.create(pref=pref, value=value)
def test_generates_jexl_is_user_set_true(self):
filter = self.create_basic_filter()
assert (
filter.to_jexl(self.create_revision())
== "'browser.urlbar.maxRichResults'|preferenceIsUserSet"
)
def test_generates_jexl_is_user_set_false(self):
filter = self.create_basic_filter(value=False)
assert (
filter.to_jexl(self.create_revision())
== "!('browser.urlbar.maxRichResults'|preferenceIsUserSet)"
)
class TestBucketSampleFilter(FilterTestsBase):
def create_basic_filter(self, input=None, start=123, count=10, total=1_000):
if input is None:
input = ["normandy.clientId"]
return filters.BucketSampleFilter.create(
input=input, start=start, count=count, total=total
)
def test_generates_jexl(self):
filter = self.create_basic_filter(input=["A"], start=10, count=20, total=1_000)
assert filter.to_jexl(self.create_revision()) == "[A]|bucketSample(10,20,1000)"
def test_supports_floats(self):
filter = self.create_basic_filter(input=["A"], start=10, count=0.5, total=1_000)
assert filter.to_jexl(self.create_revision()) == "[A]|bucketSample(10,0.5,1000)"
class TestStableSampleFilter(FilterTestsBase):
def create_basic_filter(self, input=None, rate=0.01):
if input is None:
input = ["normandy.clientId"]
return filters.StableSampleFilter.create(input=input, rate=rate)
def test_generates_jexl(self):
filter = self.create_basic_filter(input=["A"], rate=0.1)
assert filter.to_jexl(self.create_revision()) == "[A]|stableSample(0.1)"
class TestNamespaceSampleFilter(FilterTestsBase):
def create_basic_filter(self, namespace="global-v42", start=123, count=10):
return filters.NamespaceSampleFilter.create(namespace=namespace, start=start, count=count)
def test_generates_jexl(self):
filter = self.create_basic_filter(namespace="fancy-rollout", start=10, count=20)
assert (
filter.to_jexl(self.create_revision())
== '["fancy-rollout",normandy.userId]|bucketSample(10,20,10000)'
)
def test_supports_floats(self):
filter = self.create_basic_filter(namespace="risky-experiment", start=123, count=0.5)
assert (
filter.to_jexl(self.create_revision())
== '["risky-experiment",normandy.userId]|bucketSample(123,0.5,10000)'
)
class TestJexlFilter(FilterTestsBase):
should_be_baseline = "skip"
def create_basic_filter(self, expression="true", capabilities=None, comment="a comment"):
if capabilities is None:
capabilities = ["capabilities-v1"]
return filters.JexlFilter.create(
expression=expression, capabilities=capabilities, comment=comment
)
def test_generates_jexl(self):
filter = self.create_basic_filter(expression="2 + 2")
assert filter.to_jexl(self.create_revision()) == "(2 + 2)"
def test_it_rejects_invalid_jexl(self):
filter = self.create_basic_filter(expression="this is an invalid expression")
with pytest.raises(serializers.ValidationError):
filter.to_jexl(self.create_revision())
def test_it_has_capabilities(self):
filter = self.create_basic_filter(capabilities=["a.b", "c.d"])
assert filter.get_capabilities() == {"a.b", "c.d"}
def test_empty_capabilities_is_ok(self):
filter = self.create_basic_filter(capabilities=[])
assert filter.get_capabilities() == set()
filter.to_jexl(None) # should not throw
def test_throws_error_on_non_iterable_capabilities(self):
with pytest.raises(AssertionError) as excinfo:
self.create_basic_filter(capabilities=5)
assert excinfo.value.args[0]["capabilities"][0].code == "not_a_list"
def test_throws_error_on_non_list_capabilities(self):
with pytest.raises(AssertionError) as excinfo:
self.create_basic_filter(capabilities="a mistake")
assert excinfo.value.args[0]["capabilities"][0].code == "not_a_list"
class TestPresetFilter(FilterTestsBase):
def create_basic_filter(self, name="pocket-1"):
return filters.PresetFilter.create(name=name)
def test_all_choices_have_generators(self):
f = filters.PresetFilter()
choices = f.preset_choices
for choice in choices:
identifier = choice.replace("-", "_")
generator_name = f"_get_subfilters_{identifier}"
getattr(f, generator_name)()
def test_pocket_1(self):
filter_object = self.create_basic_filter(name="pocket-1")
# The preset is an and filter
assert filter_object._get_operator() == "&&"
# Pull out the first level subfilters
        subfilters = defaultdict(list)
for filter in filter_object._get_subfilters():
subfilters[type(filter)].append(filter)
# There should be one or filter
or_filters = subfilters.pop(filters.OrFilter)
assert len(or_filters) == 1
or_subfilters = or_filters[0]._get_subfilters()
# It should be made up of negative PrefUserSet filters
for f in or_subfilters:
assert isinstance(f, filters.PrefUserSetFilter)
assert f.initial_data["value"] is False
        # And it should use the expected prefs
assert set(f.initial_data["pref"] for f in or_subfilters) == set(
["browser.newtabpage.enabled", "browser.startup.homepage"]
)
# There should be a bunch more negative PrefUserSet filters at the top level
pref_subfilters = subfilters.pop(filters.PrefUserSetFilter)
for f in pref_subfilters:
assert f.initial_data["value"] is False
# and they should be the expected prefs
assert set(f.initial_data["pref"] for f in pref_subfilters) == set(
[
"browser.newtabpage.activity-stream.showSearch",
"browser.newtabpage.activity-stream.feeds.topsites",
"browser.newtabpage.activity-stream.feeds.section.topstories",
"browser.newtabpage.activity-stream.feeds.section.highlights",
]
)
# There should be no other filters
assert subfilters == {}, "no unexpected filters"
class TestQaOnlyFilter(FilterTestsBase):
def create_basic_filter(self):
return filters.QaOnlyFilter.create()
def create_revision(self, *args, **kwargs):
kwargs.setdefault("action__name", "multi-preference-experiment")
return super().create_revision(*args, **kwargs)
def test_it_works_for_multi_preference_experiment(self):
rev = self.create_revision(action__name="multi-preference-experiment")
filter = self.create_basic_filter()
slug = rev.arguments["slug"]
assert (
filter.to_jexl(rev)
== f"\"{slug}\" in 'app.normandy.testing-for-recipes'|preferenceValue"
)
def test_it_works_for_branched_addon_study(self):
rev = self.create_revision(action__name="branched-addon-study")
filter = self.create_basic_filter()
slug = rev.arguments["slug"]
assert (
filter.to_jexl(rev)
== f"\"{slug}\" in 'app.normandy.testing-for-recipes'|preferenceValue"
)
def test_it_works_for_preference_rollout(self):
rev = self.create_revision(action__name="preference-rollout")
filter = self.create_basic_filter()
slug = rev.arguments["slug"]
assert (
filter.to_jexl(rev)
== f"\"{slug}\" in 'app.normandy.testing-for-recipes'|preferenceValue"
)
def test_it_works_for_heartbeat(self):
rev = self.create_revision(action__name="show-heartbeat")
filter = self.create_basic_filter()
slug = rev.arguments["surveyId"]
assert (
filter.to_jexl(rev)
== f"\"{slug}\" in 'app.normandy.testing-for-recipes'|preferenceValue"
)

# File: normandy/recipes/migrations/0009_auto_20180510_2328.py (mozilla/normandy, MPL-2.0)

# Generated by Django 2.0.5 on 2018-05-10 23:28
from django.db import migrations
def enabled_to_enabled_state(apps, schema_editor):
Recipe = apps.get_model("recipes", "Recipe")
EnabledState = apps.get_model("recipes", "EnabledState")
for recipe in Recipe.objects.filter(enabled=True):
if recipe.approved_revision:
es = EnabledState.objects.create(revision=recipe.approved_revision, enabled=True)
es.current_for_revision.add(recipe.approved_revision)
def enabled_state_to_enabled(apps, schema_editor):
Recipe = apps.get_model("recipes", "Recipe")
for recipe in Recipe.objects.exclude(approved_revision=None):
enabled_state = recipe.approved_revision.enabled_state
if enabled_state and enabled_state.enabled:
recipe.enabled = True
recipe.save()
class Migration(migrations.Migration):
dependencies = [("recipes", "0008_auto_20180510_2252")]
operations = [migrations.RunPython(enabled_to_enabled_state, enabled_state_to_enabled)]

# File: normandy/studies/tests/__init__.py (mozilla/normandy, MPL-2.0)

import factory
import json
import tempfile
import zipfile
from factory.django import DjangoModelFactory
from faker import Faker
from normandy.base.tests import FuzzyUnicode
from normandy.studies.models import Extension
INSTALL_RDF_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?>
<RDF xmlns="http://w3.org/1999/02/22-rdf-syntax-ns#" xmlns:em="http://mozilla.org/2004/em-rdf#">
<Description about="urn:mozilla:install-manifest">
<em:type>2</em:type>
<em:bootstrap>true</em:bootstrap>
<em:unpack>false</em:unpack>
<em:multiprocessCompatible>true</em:multiprocessCompatible>
{}
<em:targetApplication>
<Description>
<em:id>{{ec8030f7-c20a-464f-9b0e-13a3a9e97384}}</em:id>
<em:minVersion>52.0</em:minVersion>
<em:maxVersion>*</em:maxVersion>
</Description>
</em:targetApplication>
</Description>
</RDF>
"""
class XPIFileFactory(object):
def __init__(self, signed=True):
# Generate a unique random path for the new XPI file
f, self._path = tempfile.mkstemp(suffix=".xpi")
# Create a blank zip file on disk
zf = zipfile.ZipFile(self.path, mode="w")
zf.close()
if signed:
self.add_file("META-INF/manifest.mf", b"")
self.add_file("META-INF/mozilla.rsa", b"")
self.add_file("META-INF/mozilla.sf", b"")
@property
def path(self):
return self._path
def add_file(self, filename, data):
with zipfile.ZipFile(self.path, mode="a") as zf:
with zf.open(filename, mode="w") as f:
f.write(data)
def open(self, mode="rb"):
return open(self.path, mode="rb")
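
# A minimal usage sketch (hypothetical file name and content) for the factory
# above; the three META-INF entries added in __init__ are what mark the
# archive as "signed" for these tests.
def _xpi_factory_example():
    xpi = XPIFileFactory(signed=True)
    xpi.add_file("manifest.json", b"{}")
    with xpi.open() as f:
        return f.read(4)  # zip magic bytes, b"PK\x03\x04"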
class WebExtensionFileFactory(XPIFileFactory):
def __init__(self, signed=True, from_file=None, gecko_id=None, overwrite_data=None):
super().__init__(signed=signed)
if not gecko_id:
gecko_id = f"{Faker().md5()}@normandy.mozilla.org"
if from_file:
self._manifest = json.load(from_file)
else:
self._manifest = {
"manifest_version": 2,
"name": "normandy test addon",
"version": "0.1",
"description": "This is an add-on for us in Normandy's tests",
"applications": {"gecko": {"id": gecko_id}},
}
if overwrite_data:
self._manifest.update(overwrite_data)
self.save_manifest()
@property
def manifest(self):
return self._manifest
def save_manifest(self):
self.add_file("manifest.json", json.dumps(self.manifest).encode())
def update_manifest(self, data):
self._manifest.update(data)
self.save_manifest()
def replace_manifest(self, data):
self._manifest = data
self.save_manifest()
class LegacyAddonFileFactory(XPIFileFactory):
def __init__(self, signed=True, from_file=None, addon_id=None, overwrite_data=None):
super().__init__(signed=signed)
if not addon_id:
name = Faker().md5()
addon_id = f"{name}@normandy.mozilla.org"
if from_file:
with open(from_file, "rb") as f:
self.add_file("install.rdf", f.read())
else:
data = {
"id": addon_id,
"version": "0.1",
"name": "Signed Bootstrap Mozilla Extension Example",
"description": "Example of a bootstrapped addon",
}
if overwrite_data:
data.update(overwrite_data)
self.generate_install_rdf(data)
def generate_install_rdf(self, data):
insert = ""
for k in data:
insert += "<em:{}>{}</em:{}>\n".format(k, data[k], k)
self.add_file("install.rdf", INSTALL_RDF_TEMPLATE.format(insert).encode())
class ExtensionFactory(DjangoModelFactory):
name = FuzzyUnicode()
xpi = factory.django.FileField(from_func=lambda: WebExtensionFileFactory().open())
class Meta:
model = Extension

# File: normandy/recipes/management/commands/initial_data.py (mozilla/normandy, MPL-2.0)

from django.core.management.base import BaseCommand
from django_countries import countries
from normandy.recipes.models import Channel, Country, WindowsVersion
class Command(BaseCommand):
"""
Adds some helpful initial data to the site's database. If matching
data already exists, it should _not_ be overwritten, making this
safe to run multiple times.
This exists instead of data migrations so that test runs do not load
this data into the test database.
If this file grows too big, we should consider finding a library or
coming up with a more robust way of adding this data.
"""
help = "Adds initial data to database"
def handle(self, *args, **options):
self.add_release_channels()
self.add_countries()
self.add_windows_versions()
def add_release_channels(self):
self.stdout.write("Adding Release Channels...", ending="")
channels = {
"release": "Release",
"beta": "Beta",
"aurora": "Developer Edition",
"nightly": "Nightly",
}
for slug, name in channels.items():
Channel.objects.update_or_create(slug=slug, defaults={"name": name})
self.stdout.write("Done")
def add_countries(self):
self.stdout.write("Adding Countries...", ending="")
for code, name in countries:
Country.objects.update_or_create(code=code, defaults={"name": name})
self.stdout.write("Done")
def add_windows_versions(self):
self.stdout.write("Adding Windows Versions...", ending="")
versions = [
(6.1, "Windows 7"),
(6.2, "Windows 8"),
(6.3, "Windows 8.1"),
(10.0, "Windows 10"),
]
for nt_version, name in versions:
WindowsVersion.objects.update_or_create(nt_version=nt_version, defaults={"name": name})
self.stdout.write("Done")

# File: contract-tests/v1_api/test_performance.py (mozilla/normandy, MPL-2.0)

from urllib.parse import urljoin
import html5lib
import pytest
"""These are paths hit by self repair that need to be very fast"""
HOT_PATHS = [
"/en-US/repair",
"/en-US/repair/",
"/api/v1/recipe/?enabled=1",
"/api/v1/recipe/signed/?enabled=1",
"/api/v1/action/",
]
@pytest.mark.parametrize("path", HOT_PATHS)
class TestHotPaths(object):
"""
Test for performance-enhancing properties of the site.
This file does not test performance by measuring runtimes and throughput.
Instead it tests for markers of features that would speed up or slow down the
site, such as cache headers.
"""
def test_no_redirects(self, conf, requests_session, path):
r = requests_session.get(conf.getoption("server") + path)
r.raise_for_status()
assert 200 <= r.status_code < 300
def test_no_vary_cookie(self, conf, requests_session, path, only_readonly):
r = requests_session.get(conf.getoption("server") + path)
r.raise_for_status()
assert "cookie" not in r.headers.get("vary", "").lower()
def test_cache_headers(self, conf, requests_session, path, only_readonly):
if path.startswith("/api/"):
pytest.xfail("caching temporarily hidden on api by nginx")
r = requests_session.get(conf.getoption("server") + path)
r.raise_for_status()
cache_control = r.headers.get("cache-control")
assert cache_control is not None
# parse cache-control header.
parts = [part.strip() for part in cache_control.split(",")]
max_age = [part for part in parts if part.startswith("max-age=")][0]
max_age_seconds = int(max_age.split("=")[1])
assert "public" in parts
assert max_age_seconds > 0
def test_static_cache_headers(conf, requests_session):
"""Test that all scripts included from self-repair have long lived cache headers"""
req = requests_session.get(conf.getoption("server") + "/en-US/repair")
req.raise_for_status()
document = html5lib.parse(req.content, treebuilder="dom")
scripts = document.getElementsByTagName("script")
for script in scripts:
src = script.getAttribute("src")
url = urljoin(conf.getoption("server"), src)
script_req = requests_session.get(url)
script_req.raise_for_status()
cache_control = parse_cache_control(script_req.headers["cache-control"])
assert cache_control["public"], f"Cache-control: public for {url}"
ONE_YEAR = 31_536_000
assert cache_control["max-age"] >= ONE_YEAR, f"Cache-control: max-age > 1 year for {url}"
assert cache_control["immutable"], f"Cache-control: immutable for {url}"
def parse_cache_control(header):
parsed = {}
parts = header.split(",")
for part in parts:
part = part.strip()
if "=" in part:
key, val = part.split("=", 1)
try:
val = int(val)
except ValueError:
pass
parsed[key] = val
else:
parsed[part] = True
return parsed
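
# A quick sketch (hypothetical header value) of the parser above.
def _parse_cache_control_example():
    parsed = parse_cache_control("public, max-age=31536000, immutable")
    assert parsed == {"public": True, "max-age": 31536000, "immutable": True}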

# File: normandy/recipes/migrations/0004_auto_20180502_2340.py (mozilla/normandy, MPL-2.0)

# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-02 23:40
# flake8: noqa
from __future__ import unicode_literals
import hashlib
from django.db import migrations
def create_tmp_from_revision(apps, revision, parent=None):
ApprovalRequest = apps.get_model("recipes", "ApprovalRequest")
TmpRecipeRevision = apps.get_model("recipes", "TmpRecipeRevision")
tmp = TmpRecipeRevision(
created=revision.created,
updated=revision.updated,
comment=revision.comment,
name=revision.name,
arguments_json=revision.arguments_json,
extra_filter_expression=revision.extra_filter_expression,
identicon_seed=revision.identicon_seed,
action=revision.action,
parent=parent,
recipe=revision.recipe,
user=revision.user,
)
tmp.save()
if revision.approved_for_recipe.count():
tmp.approved_for_recipe.add(revision.approved_for_recipe.get())
if revision.latest_for_recipe.count():
tmp.latest_for_recipe.add(revision.latest_for_recipe.get())
try:
approval_request = revision.approval_request
approval_request.tmp_revision = tmp
approval_request.save()
except ApprovalRequest.DoesNotExist:
pass
for channel in revision.channels.all():
tmp.channels.add(channel)
for country in revision.countries.all():
tmp.countries.add(country)
for locale in revision.locales.all():
tmp.locales.add(locale)
return tmp
def copy_revisions_to_tmp(apps, schema_editor):
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
for revision in RecipeRevision.objects.filter(parent=None):
current_rev = revision
parent_tmp = create_tmp_from_revision(apps, current_rev)
try:
while current_rev.child:
parent_tmp = create_tmp_from_revision(apps, current_rev.child, parent=parent_tmp)
current_rev = current_rev.child
except RecipeRevision.DoesNotExist:
pass
def get_filter_expression(revision):
parts = []
if revision.locales.count():
locales = ", ".join(["'{}'".format(l.code) for l in revision.locales.all()])
parts.append("normandy.locale in [{}]".format(locales))
if revision.countries.count():
countries = ", ".join(["'{}'".format(c.code) for c in revision.countries.all()])
parts.append("normandy.country in [{}]".format(countries))
if revision.channels.count():
channels = ", ".join(["'{}'".format(c.slug) for c in revision.channels.all()])
parts.append("normandy.channel in [{}]".format(channels))
if revision.extra_filter_expression:
parts.append(revision.extra_filter_expression)
expression = ") && (".join(parts)
return "({})".format(expression) if len(parts) > 1 else expression
def hash(revision):
data = "{}{}{}{}{}{}".format(
revision.recipe.id,
revision.created,
revision.name,
revision.action.id,
revision.arguments_json,
get_filter_expression(revision),
)
return hashlib.sha256(data.encode()).hexdigest()
def create_revision_from_tmp(apps, tmp, parent=None):
ApprovalRequest = apps.get_model("recipes", "ApprovalRequest")
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
rev = RecipeRevision(
created=tmp.created,
updated=tmp.updated,
comment=tmp.comment,
name=tmp.name,
arguments_json=tmp.arguments_json,
extra_filter_expression=tmp.extra_filter_expression,
identicon_seed=tmp.identicon_seed,
action=tmp.action,
parent=parent,
recipe=tmp.recipe,
user=tmp.user,
)
initial_id = hash(tmp)
rev.id = initial_id
rev.save()
if tmp.approved_for_recipe.count():
rev.approved_for_recipe.add(tmp.approved_for_recipe.get())
if tmp.latest_for_recipe.count():
rev.latest_for_recipe.add(tmp.latest_for_recipe.get())
try:
approval_request = tmp.approval_request
approval_request.revision = rev
approval_request.save()
except ApprovalRequest.DoesNotExist:
pass
for channel in tmp.channels.all():
rev.channels.add(channel)
for country in tmp.countries.all():
rev.countries.add(country)
for locale in tmp.locales.all():
rev.locales.add(locale)
return rev
def copy_tmp_to_revisions(apps, schema_editor):
TmpRecipeRevision = apps.get_model("recipes", "TmpRecipeRevision")
for tmp in TmpRecipeRevision.objects.filter(parent=None):
current_tmp = tmp
parent_rev = create_revision_from_tmp(apps, current_tmp)
try:
while current_tmp.child:
parent_rev = create_revision_from_tmp(apps, current_tmp.child, parent=parent_rev)
current_tmp = current_tmp.child
except TmpRecipeRevision.DoesNotExist:
pass
class Migration(migrations.Migration):
dependencies = [("recipes", "0003_tmpreciperevision")]
operations = [migrations.RunPython(copy_revisions_to_tmp, copy_tmp_to_revisions)]

# File: normandy/recipes/api/v3/serializers.py (mozilla/normandy, MPL-2.0)

from rest_framework import serializers
from factory.fuzzy import FuzzyText
from normandy.base.api.v3.serializers import UserSerializer
from normandy.base.jexl import get_normandy_jexl
from normandy.recipes import filters
from normandy.recipes.api.fields import (
ActionImplementationHyperlinkField,
FilterObjectField,
)
from normandy.recipes.models import (
Action,
ApprovalRequest,
EnabledState,
Recipe,
RecipeRevision,
Signature,
)
from normandy.recipes.validators import JSONSchemaValidator
class CustomizableSerializerMixin:
"""Serializer Mixin that allows callers to exclude fields on instance of this serializer."""
def __init__(self, *args, **kwargs):
exclude_fields = kwargs.pop("exclude_fields", [])
super().__init__(*args, **kwargs)
if exclude_fields:
for field in exclude_fields:
self.fields.pop(field)
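
# Example taken from this module's own usage further down (RecipeRevisionSerializer):
#   EnabledStateSerializer(many=True, exclude_fields=["revision_id"])
# drops a field per call site without defining a separate serializer class.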
class ActionSerializer(serializers.ModelSerializer):
arguments_schema = serializers.JSONField()
implementation_url = ActionImplementationHyperlinkField()
class Meta:
model = Action
fields = ["arguments_schema", "name", "id", "implementation_url"]
class ApprovalRequestSerializer(serializers.ModelSerializer):
approver = UserSerializer()
created = serializers.DateTimeField(read_only=True)
creator = UserSerializer()
revision = serializers.SerializerMethodField(read_only=True)
class Meta:
model = ApprovalRequest
fields = ["approved", "approver", "comment", "created", "creator", "id", "revision"]
def get_revision(self, instance):
serializer = RecipeRevisionLinkSerializer(instance.revision)
return serializer.data
class EnabledStateSerializer(CustomizableSerializerMixin, serializers.ModelSerializer):
creator = UserSerializer()
class Meta:
model = EnabledState
fields = ["id", "revision_id", "created", "creator", "enabled", "carryover_from"]
class RecipeRevisionSerializer(serializers.ModelSerializer):
action = serializers.SerializerMethodField(read_only=True)
approval_request = ApprovalRequestSerializer(read_only=True)
capabilities = serializers.ListField(read_only=True)
comment = serializers.CharField(required=False)
creator = UserSerializer(source="user", read_only=True)
date_created = serializers.DateTimeField(source="created", read_only=True)
enabled_states = EnabledStateSerializer(many=True, exclude_fields=["revision_id"])
filter_object = serializers.ListField(child=FilterObjectField())
recipe = serializers.SerializerMethodField(read_only=True)
class Meta:
model = RecipeRevision
fields = [
"action",
"approval_request",
"arguments",
"experimenter_slug",
"capabilities",
"comment",
"creator",
"date_created",
"enabled_states",
"enabled",
"extra_capabilities",
"extra_filter_expression",
"filter_expression",
"filter_object",
"id",
"identicon_seed",
"metadata",
"name",
"recipe",
"updated",
]
def get_recipe(self, instance):
serializer = RecipeLinkSerializer(instance.recipe)
return serializer.data
def get_action(self, instance):
serializer = ActionSerializer(
instance.action, read_only=True, context={"request": self.context.get("request")}
)
return serializer.data
class SignatureSerializer(serializers.ModelSerializer):
timestamp = serializers.DateTimeField(read_only=True)
signature = serializers.ReadOnlyField()
x5u = serializers.ReadOnlyField()
public_key = serializers.ReadOnlyField()
class Meta:
model = Signature
fields = ["timestamp", "signature", "x5u", "public_key"]
class RecipeSerializer(CustomizableSerializerMixin, serializers.ModelSerializer):
# read-only fields
approved_revision = RecipeRevisionSerializer(read_only=True)
latest_revision = RecipeRevisionSerializer(read_only=True)
signature = SignatureSerializer(read_only=True)
uses_only_baseline_capabilities = serializers.BooleanField(
source="latest_revision.uses_only_baseline_capabilities", read_only=True
)
# write-only fields
action_id = serializers.PrimaryKeyRelatedField(
source="action", queryset=Action.objects.all(), write_only=True
)
arguments = serializers.JSONField(write_only=True)
extra_filter_expression = serializers.CharField(
required=False, allow_blank=True, write_only=True
)
filter_object = serializers.ListField(
child=FilterObjectField(), required=False, write_only=True
)
name = serializers.CharField(write_only=True)
identicon_seed = serializers.CharField(required=False, write_only=True)
comment = serializers.CharField(required=False, write_only=True)
experimenter_slug = serializers.CharField(
required=False, write_only=True, allow_null=True, allow_blank=True
)
extra_capabilities = serializers.ListField(required=False, write_only=True)
class Meta:
model = Recipe
fields = [
# read-only
"approved_revision",
"id",
"latest_revision",
"signature",
"uses_only_baseline_capabilities",
# write-only
"action_id",
"arguments",
"extra_filter_expression",
"filter_object",
"name",
"identicon_seed",
"comment",
"experimenter_slug",
"extra_capabilities",
]
def get_action(self, instance):
serializer = ActionSerializer(
instance.latest_revision.action,
read_only=True,
context={"request": self.context.get("request")},
)
return serializer.data
def update(self, instance, validated_data):
request = self.context.get("request")
if request and request.user:
validated_data["user"] = request.user
instance.revise(**validated_data)
return instance
def create(self, validated_data):
request = self.context.get("request")
if request and request.user:
validated_data["user"] = request.user
if "identicon_seed" not in validated_data:
validated_data["identicon_seed"] = f"v1:{FuzzyText().fuzz()}"
recipe = Recipe.objects.create()
return self.update(recipe, validated_data)
def validate_extra_filter_expression(self, value):
if value:
jexl = get_normandy_jexl()
errors = list(jexl.validate(value))
if errors:
raise serializers.ValidationError(errors)
return value
def validate(self, data):
data = super().validate(data)
action = data.get("action")
if action is None:
action = self.instance.latest_revision.action
arguments = data.get("arguments")
if arguments is not None:
# Ensure the value is a dict
if not isinstance(arguments, dict):
raise serializers.ValidationError({"arguments": "Must be an object."})
# Get the schema associated with the selected action
schema = action.arguments_schema
schemaValidator = JSONSchemaValidator(schema)
errorResponse = {}
errors = sorted(schemaValidator.iter_errors(arguments), key=lambda e: e.path)
# Loop through ValidationErrors returned by JSONSchema
# Each error contains a message and a path attribute
# message: string human-readable error explanation
# path: list containing path to offending element
for error in errors:
currentLevel = errorResponse
# Loop through the path of the current error
# e.g. ['surveys'][0]['weight']
for index, path in enumerate(error.path):
# If this key already exists in our error response, step into it
if path in currentLevel:
currentLevel = currentLevel[path]
continue
else:
# If we haven't reached the end of the path, add this path
# as a key in our error response object and step into it
if index < len(error.path) - 1:
currentLevel[path] = {}
currentLevel = currentLevel[path]
continue
# If we've reached the final path, set the error message
else:
currentLevel[path] = error.message
if errorResponse:
raise serializers.ValidationError({"arguments": errorResponse})
if self.instance is None:
if data.get("extra_filter_expression", "").strip() == "":
if not data.get("filter_object"):
raise serializers.ValidationError(
"one of extra_filter_expression or filter_object is required"
)
else:
if "extra_filter_expression" in data or "filter_object" in data:
# If either is attempted to be updated, at least one of them must be truthy.
if not data.get("extra_filter_expression", "").strip() and not data.get(
"filter_object"
):
raise serializers.ValidationError(
"if extra_filter_expression is blank, "
"at least one filter_object is required"
)
return data
def validate_filter_object(self, value):
if not isinstance(value, list):
raise serializers.ValidationError(
{"non field errors": ["filter_object must be a list."]}
)
errors = {}
for i, obj in enumerate(value):
if not isinstance(obj, dict):
errors[i] = {"non field errors": ["filter_object members must be objects."]}
continue
if "type" not in obj:
errors[i] = {"type": ["This field is required."]}
break
Filter = filters.by_type.get(obj["type"])
if Filter is not None:
filter = Filter(data=obj)
if not filter.is_valid():
errors[i] = filter.errors
else:
errors[i] = {"type": [f'Unknown filter object type "{obj["type"]}".']}
if errors:
raise serializers.ValidationError(errors)
return value
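
# Illustration (hypothetical schema violation) of the nested error shape built
# in validate() above: an error at arguments["surveys"][0]["weight"] surfaces as
#   {"arguments": {"surveys": {0: {"weight": "'x' is not of type 'number'"}}}}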
class RecipeLinkSerializer(RecipeSerializer):
class Meta(RecipeSerializer.Meta):
fields = ["approved_revision_id", "id", "latest_revision_id"]
class RecipeRevisionLinkSerializer(RecipeRevisionSerializer):
recipe_id = serializers.SerializerMethodField(read_only=True)
class Meta(RecipeSerializer.Meta):
fields = ["id", "recipe_id"]
def get_recipe_id(self, instance):
return instance.recipe.id

# File: contract-tests/v3_api/test_group_delete.py (mozilla/normandy, MPL-2.0)

import uuid
from support.assertions import assert_valid_schema
from urllib.parse import urljoin
def test_group_delete(conf, requests_session, headers):
# Create a new group
data = {"name": str(uuid.uuid4())}
response = requests_session.post(
urljoin(conf.getoption("server"), "/api/v3/group/"), headers=headers, data=data
)
assert response.status_code == 201
assert_valid_schema(response.json())
group_data = response.json()
group_id = group_data["id"]
# Verify group was stored and contains expected data
response = requests_session.get(
urljoin(conf.getoption("server"), "/api/v3/group/{}/".format(group_id)), headers=headers
)
group_data = response.json()
assert response.status_code == 200
assert_valid_schema(response.json())
# Delete the group
response = requests_session.delete(
urljoin(conf.getoption("server"), "/api/v3/group/{}/".format(group_id)), headers=headers
)
assert response.status_code == 204
# Verify that it no longer exists
response = requests_session.get(
urljoin(conf.getoption("server"), "/api/v3/group/{}/".format(group_id)), headers=headers
)
assert response.status_code == 404

# File: plugins/lastfm.py (rmmh/skybot, Unlicense)

"""
The Last.fm API key is retrieved from the bot config file.
"""
from util import hook, http
api_url = "http://ws.audioscrobbler.com/2.0/?format=json"
@hook.api_key("lastfm")
@hook.command(autohelp=False)
def lastfm(inp, chan="", nick="", reply=None, api_key=None, db=None):
".lastfm <username> [dontsave] | @<nick> -- gets current or last played " "track from lastfm"
db.execute(
"create table if not exists "
"lastfm(chan, nick, user, primary key(chan, nick))"
)
if inp[0:1] == "@":
nick = inp[1:].strip()
user = None
dontsave = True
else:
user = inp
dontsave = user.endswith(" dontsave")
if dontsave:
user = user[:-9].strip().lower()
if not user:
user = db.execute(
"select user from lastfm where chan=? and nick=lower(?)", (chan, nick)
).fetchone()
if not user:
return lastfm.__doc__
user = user[0]
response = http.get_json(
api_url, method="user.getrecenttracks", api_key=api_key, user=user, limit=1
)
if "error" in response:
return "error: %s" % response["message"]
if (
not "track" in response["recenttracks"]
or len(response["recenttracks"]["track"]) == 0
):
return "no recent tracks for user \x02%s\x0F found" % user
tracks = response["recenttracks"]["track"]
    if isinstance(tracks, list):
# if the user is listening to something, the tracks entry is a list
# the first item is the current track
track = tracks[0]
status = "current track"
    elif isinstance(tracks, dict):
# otherwise, they aren't listening to anything right now, and
# the tracks entry is a dict representing the most recent track
track = tracks
status = "last track"
else:
return "error parsing track listing"
title = track["name"]
album = track["album"]["#text"]
artist = track["artist"]["#text"]
ret = "\x02%s\x0F's %s - \x02%s\x0f" % (user, status, title)
if artist:
ret += " by \x02%s\x0f" % artist
if album:
ret += " on \x02%s\x0f" % album
reply(ret)
if inp and not dontsave:
db.execute(
"insert or replace into lastfm(chan, nick, user) " "values (?, ?, ?)",
(chan, nick.lower(), inp),
)
db.commit()
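
# Response-shape sketch (assumed from the handling above): recenttracks["track"]
# is a list while the user is listening (first item = current track), e.g.
#   [{"name": "...", "artist": {"#text": "..."}, "album": {"#text": "..."}}]
# and a single dict of the same shape for the most recent track otherwise.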

# File: plugins/google.py (rmmh/skybot, Unlicense)

from __future__ import unicode_literals
import random
from util import hook, http
def api_get(query, key, is_image=None, num=1):
url = (
"https://www.googleapis.com/customsearch/v1?cx=007629729846476161907:ud5nlxktgcw"
"&fields=items(title,link,snippet)&safe=off&nfpr=1"
+ ("&searchType=image" if is_image else "")
)
return http.get_json(url, key=key, q=query, num=num)
@hook.api_key("google")
@hook.command("can i get a picture of")
@hook.command("can you grab me a picture of")
@hook.command("give me a print out of")
@hook.command
def gis(inp, api_key=None):
""".gis <term> -- finds an image using google images (safesearch off)"""
parsed = api_get(inp, api_key, is_image=True, num=10)
if "items" not in parsed:
return "no images found"
return random.choice(parsed["items"])["link"]
@hook.api_key("google")
@hook.command("g")
@hook.command
def google(inp, api_key=None):
""".g/.google <query> -- returns first google search result"""
parsed = api_get(inp, api_key)
if "items" not in parsed:
return "no results found"
out = '{link} -- \x02{title}\x02: "{snippet}"'.format(**parsed["items"][0])
out = " ".join(out.split())
if len(out) > 300:
out = out[: out.rfind(" ")] + '..."'
return out

# File: plugins/util/timesince.py (rmmh/skybot, Unlicense)

# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
def timesince(d, now=None):
"""
Takes two datetime objects and returns the time between d and now
as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
then "0 minutes" is returned.
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to two adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
chunks = (
(60 * 60 * 24 * 365, ("year", "years")),
(60 * 60 * 24 * 30, ("month", "months")),
(60 * 60 * 24 * 7, ("week", "weeks")),
(60 * 60 * 24, ("day", "days")),
(60 * 60, ("hour", "hours")),
(60, ("minute", "minutes")),
)
# Convert int or float (unix epoch) to datetime.datetime for comparison
if isinstance(d, int) or isinstance(d, float):
d = datetime.datetime.fromtimestamp(d)
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
d = datetime.datetime(d.year, d.month, d.day)
if now and not isinstance(now, datetime.datetime):
now = datetime.datetime(now.year, now.month, now.day)
if not now:
now = datetime.datetime.now()
# ignore microsecond part of 'd' since we removed it from 'now'
delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return "0 " + "minutes"
for i, (seconds, name) in enumerate(chunks):
count = since // seconds
if count != 0:
break
if count == 1:
s = "%(number)d %(type)s" % {"number": count, "type": name[0]}
else:
s = "%(number)d %(type)s" % {"number": count, "type": name[1]}
if i + 1 < len(chunks):
# Now get the second item
seconds2, name2 = chunks[i + 1]
count2 = (since - (seconds * count)) // seconds2
if count2 != 0:
if count2 == 1:
s += ", %d %s" % (count2, name2[0])
else:
s += ", %d %s" % (count2, name2[1])
return s
def timeuntil(d, now=None):
"""
Like timesince, but returns a string measuring the time until
the given time.
"""
if not now:
now = datetime.datetime.now()
return timesince(now, d)
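# Illustrative results (a hedged sketch; exact wording depends on the delta):
#   timesince(datetime.datetime(2020, 1, 1), datetime.datetime(2021, 3, 5))
#   -> "1 year, 2 months"
#   timeuntil(datetime.datetime.now() + datetime.timedelta(minutes=90))
#   -> "1 hour, 30 minutes"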
| unlicense | aa53f92f5fdd6e27a86d9046dc52bf9f | 39.578431 | 80 | 0.650882 | 3.930674 | false | false | false | false |
rmmh/skybot | plugins/mtg.py | 3 | 2470 | from __future__ import print_function
from builtins import range
from util import hook, http
import random
def card_search(name):
matching_cards = http.get_json(
"https://api.magicthegathering.io/v1/cards", name=name
)
for card in matching_cards["cards"]:
if card["name"].lower() == name.lower():
return card
return random.choice(matching_cards["cards"])
@hook.command
def mtg(inp, say=None):
""".mtg <name> - Searches for Magic the Gathering card given <name>"""
try:
card = card_search(inp)
except IndexError:
return "Card not found."
symbols = {
"{0}": "0",
"{1}": "1",
"{2}": "2",
"{3}": "3",
"{4}": "4",
"{5}": "5",
"{6}": "6",
"{7}": "7",
"{8}": "8",
"{9}": "9",
"{10}": "10",
"{11}": "11",
"{12}": "12",
"{13}": "13",
"{14}": "14",
"{15}": "15",
"{16}": "16",
"{17}": "17",
"{18}": "18",
"{19}": "19",
"{20}": "20",
"{T}": "\u27F3",
"{S}": "\u2744",
"{Q}": "\u21BA",
"{C}": "\u27E1",
"{W}": "W",
"{U}": "U",
"{B}": "B",
"{R}": "R",
"{G}": "G",
"{W/P}": "\u03D5",
"{U/P}": "\u03D5",
"{B/P}": "\u03D5",
"{R/P}": "\u03D5",
"{G/P}": "\u03D5",
"{X}": "X",
"\n": " ",
}
results = {
"name": card["name"],
"type": card["type"],
"cost": card.get("manaCost", ""),
"text": card.get("text", ""),
"power": card.get("power"),
"toughness": card.get("toughness"),
"loyalty": card.get("loyalty"),
"multiverseid": card.get("multiverseid"),
}
for fragment, rep in symbols.items():
results["text"] = results["text"].replace(fragment, rep)
results["cost"] = results["cost"].replace(fragment, rep)
template = ["{name} -"]
template.append("{type}")
template.append("- {cost} |")
if results["loyalty"]:
template.append("{loyalty} Loyalty |")
if results["power"]:
template.append("{power}/{toughness} |")
template.append(
"{text} | http://gatherer.wizards.com/Pages/Card/Details.aspx?multiverseid={multiverseid}"
)
return " ".join(template).format(**results)
if __name__ == "__main__":
print(card_search("Black Lotus"))
print(mtg("Black Lotus"))
| unlicense | 32c6db1674583320bea728226b9561ab | 25 | 98 | 0.448178 | 3.130545 | false | false | false | false |
pytube/pytube | tests/test_captions.py | 1 | 5759 | import os
import pytest
from unittest import mock
from unittest.mock import MagicMock, mock_open, patch
from pytube import Caption, CaptionQuery, captions
def test_float_to_srt_time_format():
caption1 = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en", "vssId": ".en"}
)
assert caption1.float_to_srt_time_format(3.89) == "00:00:03,890"
def test_caption_query_sequence():
caption1 = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en", "vssId": ".en"}
)
caption2 = Caption(
{"url": "url2", "name": {"simpleText": "name2"}, "languageCode": "fr", "vssId": ".fr"}
)
caption_query = CaptionQuery(captions=[caption1, caption2])
assert len(caption_query) == 2
assert caption_query["en"] == caption1
assert caption_query["fr"] == caption2
with pytest.raises(KeyError):
assert caption_query["nada"] is not None
def test_caption_query_get_by_language_code_when_exists():
caption1 = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en", "vssId": ".en"}
)
caption2 = Caption(
{"url": "url2", "name": {"simpleText": "name2"}, "languageCode": "fr", "vssId": ".fr"}
)
caption_query = CaptionQuery(captions=[caption1, caption2])
assert caption_query["en"] == caption1
def test_caption_query_get_by_language_code_when_not_exists():
caption1 = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en", "vssId": ".en"}
)
caption2 = Caption(
{"url": "url2", "name": {"simpleText": "name2"}, "languageCode": "fr", "vssId": ".fr"}
)
caption_query = CaptionQuery(captions=[caption1, caption2])
with pytest.raises(KeyError):
assert caption_query["hello"] is not None
@mock.patch("pytube.captions.Caption.generate_srt_captions")
def test_download(srt):
open_mock = mock_open()
with patch("builtins.open", open_mock):
srt.return_value = ""
caption = Caption(
{
"url": "url1",
"name": {"simpleText": "name1"},
"languageCode": "en",
"vssId": ".en"
}
)
caption.download("title")
assert (
open_mock.call_args_list[0][0][0].split(os.path.sep)[-1] == "title (en).srt"
)
@mock.patch("pytube.captions.Caption.generate_srt_captions")
def test_download_with_prefix(srt):
open_mock = mock_open()
with patch("builtins.open", open_mock):
srt.return_value = ""
caption = Caption(
{
"url": "url1",
"name": {"simpleText": "name1"},
"languageCode": "en",
"vssId": ".en"
}
)
caption.download("title", filename_prefix="1 ")
assert (
open_mock.call_args_list[0][0][0].split(os.path.sep)[-1]
== "1 title (en).srt"
)
@mock.patch("pytube.captions.Caption.generate_srt_captions")
def test_download_with_output_path(srt):
open_mock = mock_open()
captions.target_directory = MagicMock(return_value="/target")
with patch("builtins.open", open_mock):
srt.return_value = ""
caption = Caption(
{
"url": "url1",
"name": {"simpleText": "name1"},
"languageCode": "en",
"vssId": ".en"
}
)
file_path = caption.download("title", output_path="blah")
assert file_path == os.path.join("/target","title (en).srt")
captions.target_directory.assert_called_with("blah")
@mock.patch("pytube.captions.Caption.xml_captions")
def test_download_xml_and_trim_extension(xml):
open_mock = mock_open()
with patch("builtins.open", open_mock):
xml.return_value = ""
caption = Caption(
{
"url": "url1",
"name": {"simpleText": "name1"},
"languageCode": "en",
"vssId": ".en"
}
)
caption.download("title.xml", srt=False)
assert (
open_mock.call_args_list[0][0][0].split(os.path.sep)[-1] == "title (en).xml"
)
def test_repr():
caption = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en", "vssId": ".en"}
)
assert str(caption) == '<Caption lang="name1" code="en">'
caption_query = CaptionQuery(captions=[caption])
assert repr(caption_query) == '{\'en\': <Caption lang="name1" code="en">}'
@mock.patch("pytube.request.get")
def test_xml_captions(request_get):
request_get.return_value = "test"
caption = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en", "vssId": ".en"}
)
assert caption.xml_captions == "test"
@mock.patch("pytube.captions.request")
def test_generate_srt_captions(request):
request.get.return_value = (
'<?xml version="1.0" encoding="utf-8" ?><transcript><text start="6.5" dur="1.7">['
'Herb, Software Engineer]\n本影片包含隱藏式字幕。</text><text start="8.3" dur="2.7">'
"如要啓動字幕,請按一下這裡的圖示。</text></transcript>"
)
caption = Caption(
{"url": "url1", "name": {"simpleText": "name1"}, "languageCode": "en", "vssId": ".en"}
)
assert caption.generate_srt_captions() == (
"1\n"
"00:00:06,500 --> 00:00:08,200\n"
"[Herb, Software Engineer] 本影片包含隱藏式字幕。\n"
"\n"
"2\n"
"00:00:08,300 --> 00:00:11,000\n"
"如要啓動字幕,請按一下這裡的圖示。"
)
| unlicense | d56550fb09306d9c0db927fc41901910 | 32.414201 | 94 | 0.553568 | 3.114727 | false | true | false | false |
pytube/pytube | pytube/cipher.py | 1 | 22529 | """
This module contains all logic necessary to decipher the signature.
YouTube's strategy to restrict downloading videos is to send a ciphered version
of the signature to the client, along with the decryption algorithm obfuscated
in JavaScript. For the clients to play the videos, JavaScript must take the
ciphered version, cycle it through a series of "transform functions," and then
sign the media URL with the output.
This module is responsible for (1) finding and extracting those "transform
functions", (2) mapping them to Python equivalents, and (3) using them to
decode the ciphered signature.
"""
import logging
import re
from itertools import chain
from typing import Any, Callable, Dict, List, Optional, Tuple
from pytube.exceptions import ExtractError, RegexMatchError
from pytube.helpers import cache, regex_search
from pytube.parser import find_object_from_startpoint, throttling_array_split
logger = logging.getLogger(__name__)
class Cipher:
def __init__(self, js: str):
self.transform_plan: List[str] = get_transform_plan(js)
var_regex = re.compile(r"^\w+\W")
var_match = var_regex.search(self.transform_plan[0])
if not var_match:
raise RegexMatchError(
caller="__init__", pattern=var_regex.pattern
)
var = var_match.group(0)[:-1]
self.transform_map = get_transform_map(js, var)
self.js_func_patterns = [
r"\w+\.(\w+)\(\w,(\d+)\)",
r"\w+\[(\"\w+\")\]\(\w,(\d+)\)"
]
self.throttling_plan = get_throttling_plan(js)
self.throttling_array = get_throttling_function_array(js)
self.calculated_n = None
def calculate_n(self, initial_n: list):
"""Converts n to the correct value to prevent throttling."""
if self.calculated_n:
return self.calculated_n
# First, update all instances of 'b' with the list(initial_n)
for i in range(len(self.throttling_array)):
if self.throttling_array[i] == 'b':
self.throttling_array[i] = initial_n
for step in self.throttling_plan:
curr_func = self.throttling_array[int(step[0])]
if not callable(curr_func):
logger.debug(f'{curr_func} is not callable.')
logger.debug(f'Throttling array:\n{self.throttling_array}\n')
raise ExtractError(f'{curr_func} is not callable.')
first_arg = self.throttling_array[int(step[1])]
if len(step) == 2:
curr_func(first_arg)
elif len(step) == 3:
second_arg = self.throttling_array[int(step[2])]
curr_func(first_arg, second_arg)
self.calculated_n = ''.join(initial_n)
return self.calculated_n
def get_signature(self, ciphered_signature: str) -> str:
"""Decipher the signature.
Taking the ciphered signature, applies the transform functions.
:param str ciphered_signature:
The ciphered signature sent in the ``player_config``.
:rtype: str
:returns:
Decrypted signature required to download the media content.
"""
signature = list(ciphered_signature)
for js_func in self.transform_plan:
name, argument = self.parse_function(js_func) # type: ignore
signature = self.transform_map[name](signature, argument)
logger.debug(
"applied transform function\n"
"output: %s\n"
"js_function: %s\n"
"argument: %d\n"
"function: %s",
"".join(signature),
name,
argument,
self.transform_map[name],
)
return "".join(signature)
@cache
def parse_function(self, js_func: str) -> Tuple[str, int]:
"""Parse the Javascript transform function.
Break a JavaScript transform function down into a two element ``tuple``
containing the function name and some integer-based argument.
:param str js_func:
The JavaScript version of the transform function.
:rtype: tuple
:returns:
two element tuple containing the function name and an argument.
**Example**:
parse_function('DE.AJ(a,15)')
('AJ', 15)
"""
logger.debug("parsing transform function")
for pattern in self.js_func_patterns:
regex = re.compile(pattern)
parse_match = regex.search(js_func)
if parse_match:
fn_name, fn_arg = parse_match.groups()
return fn_name, int(fn_arg)
raise RegexMatchError(
caller="parse_function", pattern="js_func_patterns"
)
def get_initial_function_name(js: str) -> str:
"""Extract the name of the function responsible for computing the signature.
:param str js:
The contents of the base.js asset file.
:rtype: str
:returns:
Function name from regex match
"""
function_patterns = [
r"\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', # noqa: E501
r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)', # noqa: E501
r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
r"\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(",
r"yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
r"\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(", # noqa: E501
]
logger.debug("finding initial function name")
for pattern in function_patterns:
regex = re.compile(pattern)
function_match = regex.search(js)
if function_match:
logger.debug("finished regex search, matched: %s", pattern)
return function_match.group(1)
raise RegexMatchError(
caller="get_initial_function_name", pattern="multiple"
)
def get_transform_plan(js: str) -> List[str]:
"""Extract the "transform plan".
The "transform plan" is the functions that the ciphered signature is
cycled through to obtain the actual signature.
:param str js:
The contents of the base.js asset file.
**Example**:
['DE.AJ(a,15)',
'DE.VR(a,3)',
'DE.AJ(a,51)',
'DE.VR(a,3)',
'DE.kT(a,51)',
'DE.kT(a,8)',
'DE.VR(a,3)',
'DE.kT(a,21)']
"""
name = re.escape(get_initial_function_name(js))
pattern = r"%s=function\(\w\){[a-z=\.\(\"\)]*;(.*);(?:.+)}" % name
logger.debug("getting transform plan")
return regex_search(pattern, js, group=1).split(";")
def get_transform_object(js: str, var: str) -> List[str]:
"""Extract the "transform object".
The "transform object" contains the function definitions referenced in the
"transform plan". The ``var`` argument is the obfuscated variable name
which contains these functions, for example, given the function call
``DE.AJ(a,15)`` returned by the transform plan, "DE" would be the var.
:param str js:
The contents of the base.js asset file.
:param str var:
The obfuscated variable name that stores an object with all functions
that descrambles the signature.
**Example**:
>>> get_transform_object(js, 'DE')
['AJ:function(a){a.reverse()}',
'VR:function(a,b){a.splice(0,b)}',
'kT:function(a,b){var c=a[0];a[0]=a[b%a.length];a[b]=c}']
"""
pattern = r"var %s={(.*?)};" % re.escape(var)
logger.debug("getting transform object")
regex = re.compile(pattern, flags=re.DOTALL)
transform_match = regex.search(js)
if not transform_match:
raise RegexMatchError(caller="get_transform_object", pattern=pattern)
return transform_match.group(1).replace("\n", " ").split(", ")
def get_transform_map(js: str, var: str) -> Dict:
"""Build a transform function lookup.
Build a lookup table of obfuscated JavaScript function names to the
Python equivalents.
:param str js:
The contents of the base.js asset file.
:param str var:
The obfuscated variable name that stores an object with all functions
that descrambles the signature.
"""
transform_object = get_transform_object(js, var)
mapper = {}
for obj in transform_object:
# AJ:function(a){a.reverse()} => AJ, function(a){a.reverse()}
name, function = obj.split(":", 1)
fn = map_functions(function)
mapper[name] = fn
return mapper
def get_throttling_function_name(js: str) -> str:
"""Extract the name of the function that computes the throttling parameter.
:param str js:
The contents of the base.js asset file.
:rtype: str
:returns:
The name of the function used to compute the throttling parameter.
"""
function_patterns = [
# https://github.com/ytdl-org/youtube-dl/issues/29326#issuecomment-865985377
# https://github.com/yt-dlp/yt-dlp/commit/48416bc4a8f1d5ff07d5977659cb8ece7640dcd8
# var Bpa = [iha];
# ...
# a.C && (b = a.get("n")) && (b = Bpa[0](b), a.set("n", b),
# Bpa.length || iha("")) }};
# In the above case, `iha` is the relevant function name
r'a\.[a-zA-Z]\s*&&\s*\([a-z]\s*=\s*a\.get\("n"\)\)\s*&&\s*'
r'\([a-z]\s*=\s*([a-zA-Z0-9$]+)(\[\d+\])?\([a-z]\)',
]
logger.debug('Finding throttling function name')
for pattern in function_patterns:
regex = re.compile(pattern)
function_match = regex.search(js)
if function_match:
logger.debug("finished regex search, matched: %s", pattern)
if len(function_match.groups()) == 1:
return function_match.group(1)
idx = function_match.group(2)
if idx:
idx = idx.strip("[]")
array = re.search(
r'var {nfunc}\s*=\s*(\[.+?\]);'.format(
nfunc=re.escape(function_match.group(1))),
js
)
if array:
array = array.group(1).strip("[]").split(",")
array = [x.strip() for x in array]
return array[int(idx)]
raise RegexMatchError(
caller="get_throttling_function_name", pattern="multiple"
)
def get_throttling_function_code(js: str) -> str:
"""Extract the raw code for the throttling function.
:param str js:
The contents of the base.js asset file.
:rtype: str
:returns:
        The raw JavaScript code of the function that computes the throttling parameter.
"""
# Begin by extracting the correct function name
name = re.escape(get_throttling_function_name(js))
# Identify where the function is defined
pattern_start = r"%s=function\(\w\)" % name
regex = re.compile(pattern_start)
match = regex.search(js)
# Extract the code within curly braces for the function itself, and merge any split lines
code_lines_list = find_object_from_startpoint(js, match.span()[1]).split('\n')
joined_lines = "".join(code_lines_list)
# Prepend function definition (e.g. `Dea=function(a)`)
return match.group(0) + joined_lines
def get_throttling_function_array(js: str) -> List[Any]:
"""Extract the "c" array.
:param str js:
The contents of the base.js asset file.
:returns:
The array of various integers, arrays, and functions.
"""
raw_code = get_throttling_function_code(js)
array_start = r",c=\["
array_regex = re.compile(array_start)
match = array_regex.search(raw_code)
array_raw = find_object_from_startpoint(raw_code, match.span()[1] - 1)
str_array = throttling_array_split(array_raw)
converted_array = []
for el in str_array:
try:
converted_array.append(int(el))
continue
except ValueError:
# Not an integer value.
pass
if el == 'null':
converted_array.append(None)
continue
if el.startswith('"') and el.endswith('"'):
# Convert e.g. '"abcdef"' to string without quotation marks, 'abcdef'
converted_array.append(el[1:-1])
continue
if el.startswith('function'):
mapper = (
(r"{for\(\w=\(\w%\w\.length\+\w\.length\)%\w\.length;\w--;\)\w\.unshift\(\w.pop\(\)\)}", throttling_unshift), # noqa:E501
(r"{\w\.reverse\(\)}", throttling_reverse),
(r"{\w\.push\(\w\)}", throttling_push),
(r";var\s\w=\w\[0\];\w\[0\]=\w\[\w\];\w\[\w\]=\w}", throttling_swap),
(r"case\s\d+", throttling_cipher_function),
(r"\w\.splice\(0,1,\w\.splice\(\w,1,\w\[0\]\)\[0\]\)", throttling_nested_splice), # noqa:E501
(r";\w\.splice\(\w,1\)}", js_splice),
(r"\w\.splice\(-\w\)\.reverse\(\)\.forEach\(function\(\w\){\w\.unshift\(\w\)}\)", throttling_prepend), # noqa:E501
(r"for\(var \w=\w\.length;\w;\)\w\.push\(\w\.splice\(--\w,1\)\[0\]\)}", throttling_reverse), # noqa:E501
)
found = False
for pattern, fn in mapper:
if re.search(pattern, el):
converted_array.append(fn)
found = True
if found:
continue
converted_array.append(el)
# Replace null elements with array itself
for i in range(len(converted_array)):
if converted_array[i] is None:
converted_array[i] = converted_array
return converted_array
def get_throttling_plan(js: str):
"""Extract the "throttling plan".
The "throttling plan" is a list of tuples used for calling functions
in the c array. The first element of the tuple is the index of the
function to call, and any remaining elements of the tuple are arguments
to pass to that function.
:param str js:
The contents of the base.js asset file.
:returns:
        The list of transform steps; each step is a tuple of indices into the "c" array.
"""
raw_code = get_throttling_function_code(js)
transform_start = r"try{"
plan_regex = re.compile(transform_start)
match = plan_regex.search(raw_code)
transform_plan_raw = find_object_from_startpoint(raw_code, match.span()[1] - 1)
# Steps are either c[x](c[y]) or c[x](c[y],c[z])
step_start = r"c\[(\d+)\]\(c\[(\d+)\](,c(\[(\d+)\]))?\)"
step_regex = re.compile(step_start)
matches = step_regex.findall(transform_plan_raw)
transform_steps = []
for match in matches:
if match[4] != '':
transform_steps.append((match[0],match[1],match[4]))
else:
transform_steps.append((match[0],match[1]))
return transform_steps
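# Illustrative shape of the returned plan (hedged; the indices are invented):
#   [("40", "14"), ("16", "39", "48"), ...]
# meaning: call c[40](c[14]), then c[16](c[39], c[48]), and so on.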
def reverse(arr: List, _: Optional[Any]):
"""Reverse elements in a list.
This function is equivalent to:
.. code-block:: javascript
function(a, b) { a.reverse() }
This method takes an unused ``b`` variable as their transform functions
universally sent two arguments.
**Example**:
>>> reverse([1, 2, 3, 4])
[4, 3, 2, 1]
"""
return arr[::-1]
def splice(arr: List, b: int):
"""Add/remove items to/from a list.
This function is equivalent to:
.. code-block:: javascript
function(a, b) { a.splice(0, b) }
**Example**:
>>> splice([1, 2, 3, 4], 2)
[1, 2]
"""
return arr[b:]
def swap(arr: List, b: int):
"""Swap positions at b modulus the list length.
This function is equivalent to:
.. code-block:: javascript
function(a, b) { var c=a[0];a[0]=a[b%a.length];a[b]=c }
**Example**:
>>> swap([1, 2, 3, 4], 2)
[3, 2, 1, 4]
"""
r = b % len(arr)
return list(chain([arr[r]], arr[1:r], [arr[0]], arr[r + 1 :]))
def throttling_reverse(arr: list):
"""Reverses the input list.
Needs to do an in-place reversal so that the passed list gets changed.
To accomplish this, we create a reversed copy, and then change each
    individual element.
"""
reverse_copy = arr.copy()[::-1]
for i in range(len(reverse_copy)):
arr[i] = reverse_copy[i]
def throttling_push(d: list, e: Any):
"""Pushes an element onto a list."""
d.append(e)
def throttling_mod_func(d: list, e: int):
"""Perform the modular function from the throttling array functions.
In the javascript, the modular operation is as follows:
e = (e % d.length + d.length) % d.length
We simply translate this to python here.
"""
return (e % len(d) + len(d)) % len(d)
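# For example, using Python's modulo semantics for negative numbers:
#   throttling_mod_func([1, 2, 3, 4], -5) == 3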
def throttling_unshift(d: list, e: int):
"""Rotates the elements of the list to the right.
In the javascript, the operation is as follows:
for(e=(e%d.length+d.length)%d.length;e--;)d.unshift(d.pop())
"""
e = throttling_mod_func(d, e)
new_arr = d[-e:] + d[:-e]
d.clear()
for el in new_arr:
d.append(el)
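# For example, rotating right by one:
#   d = [1, 2, 3, 4]; throttling_unshift(d, 1)  # d becomes [4, 1, 2, 3]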
def throttling_cipher_function(d: list, e: str):
"""This ciphers d with e to generate a new list.
In the javascript, the operation is as follows:
var h = [A-Za-z0-9-_], f = 96; // simplified from switch-case loop
d.forEach(
function(l,m,n){
this.push(
n[m]=h[
(h.indexOf(l)-h.indexOf(this[m])+m-32+f--)%h.length
]
)
},
e.split("")
)
"""
h = list('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_')
f = 96
# by naming it "this" we can more closely reflect the js
this = list(e)
# This is so we don't run into weirdness with enumerate while
# we change the input list
copied_list = d.copy()
for m, l in enumerate(copied_list):
bracket_val = (h.index(l) - h.index(this[m]) + m - 32 + f) % len(h)
this.append(
h[bracket_val]
)
d[m] = h[bracket_val]
f -= 1
def throttling_nested_splice(d: list, e: int):
"""Nested splice function in throttling js.
In the javascript, the operation is as follows:
function(d,e){
e=(e%d.length+d.length)%d.length;
d.splice(
0,
1,
d.splice(
e,
1,
d[0]
)[0]
)
}
    While testing, all this seemed to do was swap element 0 and element e,
    but the actual process is preserved in case there is an edge
    case that was not considered.
"""
e = throttling_mod_func(d, e)
inner_splice = js_splice(
d,
e,
1,
d[0]
)
js_splice(
d,
0,
1,
inner_splice[0]
)
def throttling_prepend(d: list, e: int):
"""
In the javascript, the operation is as follows:
function(d,e){
e=(e%d.length+d.length)%d.length;
d.splice(-e).reverse().forEach(
function(f){
d.unshift(f)
}
)
}
Effectively, this moves the last e elements of d to the beginning.
"""
start_len = len(d)
# First, calculate e
e = throttling_mod_func(d, e)
# Then do the prepending
new_arr = d[-e:] + d[:-e]
# And update the input list
d.clear()
for el in new_arr:
d.append(el)
end_len = len(d)
assert start_len == end_len
def throttling_swap(d: list, e: int):
"""Swap positions of the 0'th and e'th elements in-place."""
e = throttling_mod_func(d, e)
f = d[0]
d[0] = d[e]
d[e] = f
def js_splice(arr: list, start: int, delete_count=None, *items):
"""Implementation of javascript's splice function.
:param list arr:
Array to splice
:param int start:
Index at which to start changing the array
:param int delete_count:
Number of elements to delete from the array
:param *items:
Items to add to the array
Reference: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/splice # noqa:E501
"""
# Special conditions for start value
try:
if start > len(arr):
start = len(arr)
        # If start is negative, count backwards from end (clamped at zero,
        # matching javascript's Array.prototype.splice behaviour)
        if start < 0:
            start = max(len(arr) + start, 0)
except TypeError:
# Non-integer start values are treated as 0 in js
start = 0
# Special condition when delete_count is greater than remaining elements
if not delete_count or delete_count >= len(arr) - start:
delete_count = len(arr) - start # noqa: N806
deleted_elements = arr[start:start + delete_count]
# Splice appropriately.
new_arr = arr[:start] + list(items) + arr[start + delete_count:]
# Replace contents of input array
arr.clear()
for el in new_arr:
arr.append(el)
return deleted_elements
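# For example, mirroring [1, 2, 3, 4].splice(1, 2, 9) in javascript:
#   a = [1, 2, 3, 4]
#   js_splice(a, 1, 2, 9)  # returns [2, 3]; a is now [1, 9, 4]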
def map_functions(js_func: str) -> Callable:
"""For a given JavaScript transform function, return the Python equivalent.
:param str js_func:
The JavaScript version of the transform function.
"""
mapper = (
# function(a){a.reverse()}
(r"{\w\.reverse\(\)}", reverse),
# function(a,b){a.splice(0,b)}
(r"{\w\.splice\(0,\w\)}", splice),
# function(a,b){var c=a[0];a[0]=a[b%a.length];a[b]=c}
(r"{var\s\w=\w\[0\];\w\[0\]=\w\[\w\%\w.length\];\w\[\w\]=\w}", swap),
# function(a,b){var c=a[0];a[0]=a[b%a.length];a[b%a.length]=c}
(
r"{var\s\w=\w\[0\];\w\[0\]=\w\[\w\%\w.length\];\w\[\w\%\w.length\]=\w}",
swap,
),
)
for pattern, fn in mapper:
if re.search(pattern, js_func):
return fn
raise RegexMatchError(caller="map_functions", pattern="multiple")
| unlicense | 9409df7792d2d6d6536ad2fa31022a4e | 31.322812 | 154 | 0.566337 | 3.357526 | false | false | false | false |
pytube/pytube | pytube/request.py | 1 | 8512 | """Implements a simple wrapper around urlopen."""
import http.client
import json
import logging
import re
import socket
from functools import lru_cache
from urllib import parse
from urllib.error import URLError
from urllib.request import Request, urlopen
from pytube.exceptions import RegexMatchError, MaxRetriesExceeded
from pytube.helpers import regex_search
logger = logging.getLogger(__name__)
default_range_size = 9437184 # 9MB
def _execute_request(
url,
method=None,
headers=None,
data=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT
):
base_headers = {"User-Agent": "Mozilla/5.0", "accept-language": "en-US,en"}
if headers:
base_headers.update(headers)
if data:
# encode data for request
if not isinstance(data, bytes):
data = bytes(json.dumps(data), encoding="utf-8")
if url.lower().startswith("http"):
request = Request(url, headers=base_headers, method=method, data=data)
else:
raise ValueError("Invalid URL")
return urlopen(request, timeout=timeout) # nosec
def get(url, extra_headers=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""Send an http GET request.
:param str url:
The URL to perform the GET request for.
:param dict extra_headers:
Extra headers to add to the request
:rtype: str
:returns:
UTF-8 encoded string of response
"""
if extra_headers is None:
extra_headers = {}
response = _execute_request(url, headers=extra_headers, timeout=timeout)
return response.read().decode("utf-8")
def post(url, extra_headers=None, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
"""Send an http POST request.
:param str url:
The URL to perform the POST request for.
:param dict extra_headers:
Extra headers to add to the request
:param dict data:
The data to send on the POST request
:rtype: str
:returns:
UTF-8 encoded string of response
"""
# could technically be implemented in get,
# but to avoid confusion implemented like this
if extra_headers is None:
extra_headers = {}
if data is None:
data = {}
# required because the youtube servers are strict on content type
# raises HTTPError [400]: Bad Request otherwise
extra_headers.update({"Content-Type": "application/json"})
response = _execute_request(
url,
headers=extra_headers,
data=data,
timeout=timeout
)
return response.read().decode("utf-8")
def seq_stream(
url,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
max_retries=0
):
"""Read the response in sequence.
:param str url: The URL to perform the GET request for.
:rtype: Iterable[bytes]
"""
# YouTube expects a request sequence number as part of the parameters.
split_url = parse.urlsplit(url)
base_url = '%s://%s/%s?' % (split_url.scheme, split_url.netloc, split_url.path)
querys = dict(parse.parse_qsl(split_url.query))
# The 0th sequential request provides the file headers, which tell us
# information about how the file is segmented.
querys['sq'] = 0
url = base_url + parse.urlencode(querys)
segment_data = b''
for chunk in stream(url, timeout=timeout, max_retries=max_retries):
yield chunk
segment_data += chunk
# We can then parse the header to find the number of segments
stream_info = segment_data.split(b'\r\n')
segment_count_pattern = re.compile(b'Segment-Count: (\\d+)')
for line in stream_info:
match = segment_count_pattern.search(line)
if match:
segment_count = int(match.group(1).decode('utf-8'))
# We request these segments sequentially to build the file.
seq_num = 1
while seq_num <= segment_count:
# Create sequential request URL
querys['sq'] = seq_num
url = base_url + parse.urlencode(querys)
yield from stream(url, timeout=timeout, max_retries=max_retries)
seq_num += 1
return # pylint: disable=R1711
def stream(
url,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
max_retries=0
):
"""Read the response in chunks.
:param str url: The URL to perform the GET request for.
:rtype: Iterable[bytes]
"""
file_size: int = default_range_size # fake filesize to start
downloaded = 0
while downloaded < file_size:
stop_pos = min(downloaded + default_range_size, file_size) - 1
range_header = f"bytes={downloaded}-{stop_pos}"
tries = 0
# Attempt to make the request multiple times as necessary.
while True:
# If the max retries is exceeded, raise an exception
if tries >= 1 + max_retries:
raise MaxRetriesExceeded()
# Try to execute the request, ignoring socket timeouts
try:
response = _execute_request(
url,
method="GET",
headers={"Range": range_header},
timeout=timeout
)
except URLError as e:
# We only want to skip over timeout errors, and
# raise any other URLError exceptions
if isinstance(e.reason, socket.timeout):
pass
else:
raise
except http.client.IncompleteRead:
# Allow retries on IncompleteRead errors for unreliable connections
pass
else:
# On a successful request, break from loop
break
tries += 1
if file_size == default_range_size:
try:
content_range = response.info()["Content-Range"]
file_size = int(content_range.split("/")[1])
except (KeyError, IndexError, ValueError) as e:
logger.error(e)
while True:
chunk = response.read()
if not chunk:
break
downloaded += len(chunk)
yield chunk
return # pylint: disable=R1711
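# Illustrative download loop (a hedged sketch; ``url`` and the file name are
# placeholders):
#   with open("video.mp4", "wb") as fh:
#       for chunk in stream(url):
#           fh.write(chunk)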
@lru_cache()
def filesize(url):
"""Fetch size in bytes of file at given URL
:param str url: The URL to get the size of
:returns: int: size in bytes of remote file
"""
return int(head(url)["content-length"])
@lru_cache()
def seq_filesize(url):
"""Fetch size in bytes of file at given URL from sequential requests
:param str url: The URL to get the size of
:returns: int: size in bytes of remote file
"""
total_filesize = 0
# YouTube expects a request sequence number as part of the parameters.
split_url = parse.urlsplit(url)
base_url = '%s://%s/%s?' % (split_url.scheme, split_url.netloc, split_url.path)
querys = dict(parse.parse_qsl(split_url.query))
# The 0th sequential request provides the file headers, which tell us
# information about how the file is segmented.
querys['sq'] = 0
url = base_url + parse.urlencode(querys)
response = _execute_request(
url, method="GET"
)
response_value = response.read()
# The file header must be added to the total filesize
total_filesize += len(response_value)
# We can then parse the header to find the number of segments
segment_count = 0
stream_info = response_value.split(b'\r\n')
segment_regex = b'Segment-Count: (\\d+)'
for line in stream_info:
# One of the lines should contain the segment count, but we don't know
# which, so we need to iterate through the lines to find it
try:
segment_count = int(regex_search(segment_regex, line, 1))
except RegexMatchError:
pass
if segment_count == 0:
raise RegexMatchError('seq_filesize', segment_regex)
# We make HEAD requests to the segments sequentially to find the total filesize.
seq_num = 1
while seq_num <= segment_count:
# Create sequential request URL
querys['sq'] = seq_num
url = base_url + parse.urlencode(querys)
total_filesize += int(head(url)['content-length'])
seq_num += 1
return total_filesize
def head(url):
"""Fetch headers returned http GET request.
:param str url:
The URL to perform the GET request for.
:rtype: dict
:returns:
dictionary of lowercase headers
"""
response_headers = _execute_request(url, method="HEAD").info()
return {k.lower(): v for k, v in response_headers.items()}
| unlicense | 7717abb37be2a302ac4afc9880d28652 | 31.120755 | 85 | 0.620301 | 4.112077 | false | false | false | false |
mozilla-iam/cis | python-modules/cis_crypto/cis_crypto/operation.py | 1 | 5491 | import json
import logging
import os
import yaml
from jose import jwk
from jose import jws
from jose.exceptions import JWSError
from cis_crypto import secret
from cis_crypto import common
logger = logging.getLogger(__name__)
# Note:
# These attrs on sign/verify could be refactored to use object inheritance. Leaving as is for now for readability.
class Sign(object):
def __init__(self):
self.config = common.get_config()
self.key_name = self.config("signing_key_name", namespace="cis", default="file")
self._jwk = None
self.secret_manager = self.config("secret_manager", namespace="cis", default="file")
self.payload = None
def load(self, data):
"""Loads a payload to the object and ensures that the thing is serializable."""
try:
data = yaml.safe_load(data)
except yaml.scanner.ScannerError:
logger.debug("This file is likely not YAML. Attempting JSON load.")
except AttributeError:
logger.debug("This file is likely not YAML. Attempting JSON load.")
if isinstance(data, str):
data = json.loads(data)
else:
pass
self.payload = data
return self.payload
def jws(self, keyname=None):
"""Assumes you loaded a payload. Returns a jws."""
# Override key name
if keyname is not None:
self.key_name = keyname
key_jwk = self._get_key()
sig = jws.sign(self.payload, key_jwk.to_dict(), algorithm="RS256")
return sig
def _get_key(self):
if self._jwk is None:
manager = secret.Manager(provider_type=self.secret_manager)
self._jwk = manager.get_key(key_name=self.key_name)
return self._jwk
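# Illustrative signing flow (a hedged sketch; the payload literal is an
# assumption):
#   signer = Sign()
#   signer.load('{"user": "jdoe"}')
#   token = signer.jws()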
class Verify(object):
def __init__(self):
self.config = common.get_config()
# Provide file or URL as opts.
self.well_known_mode = self.config("well_known_mode", namespace="cis", default="file")
self.public_key_name = None # Optional for use with file based well known mode
self.jws_signature = None
self.well_known = None # Well known JSON data
def load(self, jws_signature):
"""Takes data in the form of a dict() and a JWS sig."""
# Store the original form in the jws_signature attribute
self.jws_signature = jws_signature
def _get_public_key(self, keyname=None):
"""Returns a jwk construct for the public key and mode specified."""
if self.well_known_mode == "file":
key_dir = self.config(
"secret_manager_file_path",
namespace="cis",
default=("{}/.mozilla-iam/keys/".format(os.path.expanduser("~"))),
)
key_name = self.config("public_key_name", namespace="cis", default="access-file-key")
file_name = "{}".format(key_name)
fh = open((os.path.join(key_dir, file_name)), "rb")
key_content = fh.read()
key_construct = jwk.construct(key_content, "RS256")
return [key_construct.to_dict()]
elif self.well_known_mode == "http" or self.well_known_mode == "https":
logger.debug("Well known mode engaged. Reducing key structure.", extra={"well_known": self.well_known})
return self._reduce_keys(keyname)
def _reduce_keys(self, keyname):
access_file_keys = self.well_known["access_file"]["jwks"]["keys"]
publishers_supported = self.well_known["api"]["publishers_jwks"]
keys = []
if "access-file-key" in self.config("public_key_name", namespace="cis"):
logger.debug("This is an access file verification.")
return access_file_keys
else:
# If not an access key verification this will attempt to verify against any listed publisher.
keys = publishers_supported[keyname]["keys"]
logger.debug("Publisher based verification, will use {} public keys for verification.".format(keys))
return keys
def jws(self, keyname=None):
"""Assumes you loaded a payload. Return the same jws or raise a custom exception."""
key_material = self._get_public_key(keyname)
logger.debug(
"The key material for the payload was loaded for: {}".format(keyname), extra={"key_material": key_material}
)
if isinstance(key_material, list):
logger.debug("Multiple keys returned. Attempting match.")
for key in key_material:
try:
key.pop("x5t", None)
key.pop("x5c", None)
except AttributeError:
logger.warn("x5t and x5c attrs do not exist in key material.")
logger.debug("Attempting to match against: {}".format(key))
try:
sig = jws.verify(self.jws_signature, key, algorithms="RS256", verify=True)
logger.debug(
"Matched a verified signature for: {}".format(key), extra={"signature": self.jws_signature}
)
return sig
except JWSError as e:
logger.error(
"The signature was not valid for the payload.", extra={"signature": self.jws_signature}
)
logger.error(e)
raise JWSError("The signature could not be verified for any trusted key", key_material)
| mpl-2.0 | 41f5b14b1044c337dd5dde3addfa3cd4 | 40.285714 | 119 | 0.590056 | 4.082528 | false | true | false | false |
mozilla-iam/cis | python-modules/cis_logger/cis_logger/__init__.py | 1 | 1579 | import logging.handlers
from pythonjsonlogger import jsonlogger
import datetime
class JsonFormatter(jsonlogger.JsonFormatter, object):
def __init__(
self,
fmt="%(asctime) %(name) %(processName) %(filename) \
%(funcName) %(levelname) %(lineno) %(module) %(threadName) %(message)",
datefmt="%Y-%m-%dT%H:%M:%SZ%z",
style="%",
extra={},
*args,
**kwargs
):
self._extra = extra
jsonlogger.JsonFormatter.__init__(self, fmt=fmt, datefmt=datefmt, *args, **kwargs)
def process_log_record(self, log_record):
if "asctime" in log_record:
log_record["timestamp"] = log_record["asctime"]
else:
log_record["timestamp"] = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ%z")
if self._extra is not None:
for key, value in self._extra.items():
log_record[key] = value
return super(JsonFormatter, self).process_log_record(log_record)
class SysLogJsonHandler(logging.handlers.SysLogHandler, object):
def __init__(
self,
address=("localhost", logging.handlers.SYSLOG_UDP_PORT),
facility=logging.handlers.SysLogHandler.LOG_USER,
socktype=None,
prefix="",
):
super(SysLogJsonHandler, self).__init__(address, facility, socktype)
self._prefix = prefix
if self._prefix != "":
self._prefix = prefix + ": "
def format(self, record):
return self._prefix + super(SysLogJsonHandler, self).format(record)
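# Illustrative wiring (a hedged sketch; the prefix and extra fields are
# assumptions):
#   handler = SysLogJsonHandler(prefix="cis")
#   handler.setFormatter(JsonFormatter(extra={"app": "cis"}))
#   logging.getLogger().addHandler(handler)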
| mpl-2.0 | d294b79b643857c42fe48606e33c9387 | 33.326087 | 118 | 0.59658 | 3.879607 | false | false | false | false |
ibm-watson-iot/iot-python | test/test_device_command.py | 2 | 1862 | # *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import testUtils
import wiotp.sdk
import pytest
class FakePahoMessageCommand:
topic = "iot-2/cmd/commandid/fmt/json"
payload = b'{"a":4}'
class FakeFakePahoMessageCommand:
topic = "hi"
payload = b'{"a":4}'
class TestDeviceCommand(testUtils.AbstractTest):
def testCommand(self):
pahoMessage = FakePahoMessageCommand()
messageEncoderModules = {"json": wiotp.sdk.JsonCodec()}
command = wiotp.sdk.device.Command(pahoMessage, messageEncoderModules)
assert command.format == "json"
assert command.commandId == "commandid"
assert "a" in command.data
assert command.data["a"] == 4
def testCommandMissingCodec(self):
with pytest.raises(wiotp.sdk.MissingMessageDecoderException) as e:
pahoMessage = FakePahoMessageCommand()
messageEncoderModules = {"fidaa": wiotp.sdk.JsonCodec()}
command = wiotp.sdk.device.Command(pahoMessage, messageEncoderModules)
assert e.value.format == "json"
def testInvalidCommandTopic(self):
with pytest.raises(wiotp.sdk.InvalidEventException) as e:
pahoMessage = FakeFakePahoMessageCommand()
messageEncoderModules = {"b": wiotp.sdk.JsonCodec()}
command = wiotp.sdk.device.Command(pahoMessage, messageEncoderModules)
assert e.value.reason == "Received command on invalid topic: hi"
| epl-1.0 | 4b63f4c3515c7ba1cf2ce07c25229730 | 37.791667 | 82 | 0.639635 | 4.039046 | false | true | false | false |
ibm-watson-iot/iot-python | src/wiotp/sdk/api/registry/devices.py | 2 | 15894 | # *****************************************************************************
# Copyright (c) 2018 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import iso8601
from datetime import datetime
import json
from collections import defaultdict
from wiotp.sdk.api.common import IterableList
from wiotp.sdk.exceptions import ApiException
from wiotp.sdk.api.registry.diag import DeviceLogs, DeviceErrorCodes
class LogEntry(defaultdict):
def __init__(self, **kwargs):
if not set(["message", "timestamp"]).issubset(kwargs):
raise Exception("message and timestamp are required properties for a LogEntry")
kwargs["timestamp"] = iso8601.parse_date(kwargs["timestamp"])
dict.__init__(self, **kwargs)
@property
def message(self):
return self["message"]
@property
def timestamp(self):
return self["timestamp"]
class DeviceUid(defaultdict):
def __init__(self, **kwargs):
if not set(["deviceId", "typeId"]).issubset(kwargs):
raise Exception("typeId and deviceId are required properties to uniquely identify a device")
dict.__init__(self, **kwargs)
@property
def typeId(self):
return self["typeId"]
@property
def deviceId(self):
return self["deviceId"]
def __str__(self):
return self["typeId"] + ":" + self["deviceId"]
def __repr__(self):
return json.dumps(self, sort_keys=True, indent=2)
class DeviceCreateRequest(defaultdict):
def __init__(self, typeId, deviceId, authToken=None, deviceInfo=None, location=None, metadata=None):
dict.__init__(
self,
typeId=typeId,
deviceId=deviceId,
authToken=authToken,
deviceInfo=deviceInfo,
location=location,
metadata=metadata,
)
@property
def typeId(self):
return self["typeId"]
@property
def deviceId(self):
return self["deviceId"]
@property
def authToken(self):
return self["authToken"]
@property
def deviceInfo(self):
return DeviceInfo(**self["deviceInfo"])
@property
def location(self):
return self["location"]
@property
def metadata(self):
return self["metadata"]
class DeviceLocation(defaultdict):
def __init__(self, **kwargs):
if not set(["latitude", "longitude"]).issubset(kwargs):
raise Exception("Data passed to Device is not correct: %s" % (json.dumps(kwargs, sort_keys=True)))
if "measuredDateTime" in kwargs and not isinstance(kwargs["measuredDateTime"], datetime):
kwargs["measuredDateTime"] = iso8601.parse_date(kwargs["measuredDateTime"])
if "updatedDateTime" in kwargs and not isinstance(kwargs["updatedDateTime"], datetime):
kwargs["updatedDateTime"] = iso8601.parse_date(kwargs["updatedDateTime"])
dict.__init__(self, **kwargs)
@property
def latitude(self):
return self["latitude"]
@property
def longitude(self):
return self["longitude"]
@property
def measuredDateTime(self):
return self.get("measuredDateTime", None)
@property
def updatedDateTime(self):
return self.get("updatedDateTime", None)
class DeviceCreateResponse(defaultdict):
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
@property
def typeId(self):
return self["typeId"]
@property
def deviceId(self):
return self["deviceId"]
@property
def success(self):
return self.get("success", None)
@property
def authToken(self):
return self["authToken"]
class DeviceInfo(defaultdict):
def __init__(
self,
description=None,
deviceClass=None,
fwVersion=None,
hwVersion=None,
manufacturer=None,
model=None,
serialNumber=None,
descriptiveLocation=None,
):
dict.__init__(
self,
description=description,
deviceClass=deviceClass,
fwVersion=fwVersion,
hwVersion=hwVersion,
manufacturer=manufacturer,
model=model,
serialNumber=serialNumber,
descriptiveLocation=descriptiveLocation,
)
@property
def description(self):
return self["description"]
@property
def deviceClass(self):
return self["deviceClass"]
@property
def fwVersion(self):
return self["fwVersion"]
@property
def hwVersion(self):
return self["hwVersion"]
@property
def manufacturer(self):
return self["manufacturer"]
@property
def model(self):
return self["model"]
@property
def serialNumber(self):
return self["serialNumber"]
@property
def descriptiveLocation(self):
return self["descriptiveLocation"]
class Device(defaultdict):
def __init__(self, apiClient, **kwargs):
self._apiClient = apiClient
if not set(["clientId", "deviceId", "typeId"]).issubset(kwargs):
raise Exception("Data passed to Device is not correct: %s" % (json.dumps(kwargs, sort_keys=True)))
self.diagLogs = DeviceLogs(self._apiClient, kwargs["typeId"], kwargs["deviceId"])
self.diagErrorCodes = DeviceErrorCodes(self._apiClient, kwargs["typeId"], kwargs["deviceId"])
dict.__init__(self, **kwargs)
# {u'clientId': u'xxxxxxxxx',
# u'deviceId': u'xxxxxxx',
# u'deviceInfo': {u'description': u'None (xxxxxxxx)',
# u'deviceClass': u'None',
# u'fwVersion': u'xxxxx',
# u'hwVersion': u'xxxxx',
# u'manufacturer': u'xxxx.',
# u'model': u'xxxx',
# u'serialNumber': u'xxxxxxxxx'},
# u'metadata': {},
# u'refs': {u'diag': {u'errorCodes': u'/api/v0002/device/types/xxx/devices/xxxx/diag/errorCodes',
# u'logs': u'/api/v0002/device/types/xxx/devices/xxxx/diag/logs'},
# u'location': u'/api/v0002/device/types/xxxx/devices/xxxx/location',
# u'mgmt': u'/api/v0002/device/types/xx/devices/xxxx/mgmt'},
# u'registration': {u'auth': {u'id': u'xxxxxx',
# u'type': u'person'},
# u'date': u'2015-09-18T06:44:02.000Z'},
# u'status': {u'alert': {u'enabled': False,
# u'timestamp': u'2016-01-21T02:25:55.543Z'}},
# u'typeId': u'vm'}
@property
def clientId(self):
return self["clientId"]
@property
def deviceId(self):
return self["deviceId"]
@property
def authToken(self):
if "authToken" in self:
return self["authToken"]
else:
return None
@property
def metadata(self):
if "metadata" in self:
return self["metadata"]
else:
return None
@property
def total_rows(self):
return self["total_rows"]
@property
def deviceInfo(self):
# Unpack the deviceInfo dictionary into keyword arguments so that we
# can return a DeviceInfo object instead of a plain dictionary
return DeviceInfo(**self["deviceInfo"])
@property
def typeId(self):
return self["typeId"]
def __str__(self):
return "[%s] %s" % (self.clientId, self.deviceInfo.description or "<No description>")
def __repr__(self):
return json.dumps(self, sort_keys=True, indent=2)
def json(self):
return dict(self)
# Extended properties
def getMgmt(self):
r = self._apiClient.get("api/v0002/device/types/%s/devices/%s/mgmt" % (self.typeId, self.deviceId))
if r.status_code == 200:
return r.json()
if r.status_code == 404:
            # It's perfectly valid for a device to not have management information set; if this is the case, return None
return None
else:
raise ApiException(r)
def getLocation(self):
r = self._apiClient.get("api/v0002/device/types/%s/devices/%s/location" % (self.typeId, self.deviceId))
if r.status_code == 200:
return DeviceLocation(**r.json())
if r.status_code == 404:
            # It's perfectly valid for a device to not have a location set; if this is the case, return None
return None
else:
raise ApiException(r)
def setLocation(self, value):
r = self._apiClient.put("api/v0002/device/types/%s/devices/%s/location" % (self.typeId, self.deviceId), value)
if r.status_code == 200:
return DeviceLocation(**r.json())
else:
raise ApiException(r)
def getConnectionLogs(self):
r = self._apiClient.get(
"api/v0002/logs/connection", parameters={"typeId": self.typeId, "deviceId": self.deviceId}
)
if r.status_code == 200:
responseList = []
for entry in r.json():
responseList.append(LogEntry(**entry))
return responseList
else:
raise ApiException(r)
class IterableDeviceList(IterableList):
def __init__(self, apiClient, typeId=None):
if typeId is None:
super(IterableDeviceList, self).__init__(apiClient, Device, "api/v0002/bulk/devices", "typeId,deviceId")
else:
super(IterableDeviceList, self).__init__(
apiClient, Device, "api/v0002/device/types/%s/devices/" % (typeId), "deviceId"
)
class Devices(defaultdict):
"""
Use the global unique identifier of a device, it's `clientId` to address devices.
# Delete
```python
del devices["d:orgId:typeId:deviceId"]
```
# Get
Use the global unique identifier of a device, it's `clientId`.
```python
device = devices["d:orgId:typeId:deviceId"]
print(device.clientId)
    print(device)
    ```
# Is a device registered?
```python
if "d:orgId:typeId:deviceId" in devices:
print("The device exists")
```
# Iterate through all registered devices
```python
for device in devices:
print(device)
```
"""
# https://docs.python.org/2/library/collections.html#defaultdict-objects
def __init__(self, apiClient, typeId=None):
self._apiClient = apiClient
self.typeId = typeId
def __contains__(self, key):
"""
Does a device exist?
"""
if self.typeId is None:
(classIdentifier, orgId, typeId, deviceId) = key.split(":")
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (typeId, deviceId)
else:
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (self.typeId, key)
r = self._apiClient.get(deviceUrl)
if r.status_code == 200:
return True
elif r.status_code == 404:
return False
else:
raise ApiException(r)
def __getitem__(self, key):
"""
Get a device from the registry
"""
if self.typeId is None:
(classIdentifier, orgId, typeId, deviceId) = key.split(":")
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (typeId, deviceId)
else:
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (self.typeId, key)
r = self._apiClient.get(deviceUrl)
if r.status_code == 200:
return Device(apiClient=self._apiClient, **r.json())
elif r.status_code == 404:
self.__missing__(key)
else:
raise ApiException(r)
def __setitem__(self, key, value):
"""
Register a new device - not currently supported via this interface, use: `registry.devices.create()`
"""
raise Exception("Unable to register or update a device via this interface at the moment.")
def __delitem__(self, key):
"""
Delete a device
"""
if self.typeId is None:
(classIdentifier, orgId, typeId, deviceId) = key.split(":")
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (typeId, deviceId)
else:
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (self.typeId, key)
r = self._apiClient.delete(deviceUrl)
if r.status_code == 404:
self.__missing__(key)
elif r.status_code != 204:
raise ApiException(r)
def __missing__(self, key):
"""
Device does not exist
"""
raise KeyError("Device %s does not exist" % (key))
def __iter__(self, *args, **kwargs):
"""
Iterate through all devices
"""
return IterableDeviceList(self._apiClient, self.typeId)
@property
def total_rows(self):
"""
Returns total devices
"""
return self["total_rows"]
def create(self, devices):
"""
Register one or more new devices, each request can contain a maximum of 512KB.
The response body will contain the generated authentication tokens for all devices.
You must make sure to record these tokens when processing the response.
        We are not able to retrieve lost authentication tokens.
        It accepts a list of devices (each a dictionary), or a single device.
If you provide a list as the parameter it will return a list in response
If you provide a singular device it will return a singular response
"""
if not isinstance(devices, list):
listOfDevices = [devices]
returnAsAList = False
else:
listOfDevices = devices
returnAsAList = True
r = self._apiClient.post("api/v0002/bulk/devices/add", listOfDevices)
if r.status_code in [201, 202]:
if returnAsAList:
responseList = []
for entry in r.json():
responseList.append(DeviceCreateResponse(**entry))
return responseList
else:
return DeviceCreateResponse(**r.json()[0])
else:
raise ApiException(r)
def update(self, deviceUid, metadata=None, deviceInfo=None, status=None):
"""
Update an existing device
"""
if not isinstance(deviceUid, DeviceUid) and isinstance(deviceUid, dict):
deviceUid = DeviceUid(**deviceUid)
deviceUrl = "api/v0002/device/types/%s/devices/%s" % (deviceUid.typeId, deviceUid.deviceId)
data = {"status": status, "deviceInfo": deviceInfo, "metadata": metadata}
r = self._apiClient.put(deviceUrl, data)
if r.status_code == 200:
return Device(apiClient=self._apiClient, **r.json())
else:
raise ApiException(r)
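    # Illustrative update (a hedged sketch; the type and device ids are
    # placeholders):
    #   registry.devices.update(
    #       {"typeId": "myType", "deviceId": "myDevice"},
    #       metadata={"colour": "red"},
    #   )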
def delete(self, devices):
"""
        Delete one or more devices, each request can contain a maximum of 512KB.
        It accepts a list of devices (each a dictionary).
        In case of failure it throws APIException.
"""
if not isinstance(devices, list):
listOfDevices = [devices]
else:
listOfDevices = devices
r = self._apiClient.post("api/v0002/bulk/devices/remove", listOfDevices)
if r.status_code in [200, 202]:
return r.json()
else:
raise ApiException(r)
| epl-1.0 | 2ee6e0ceefe91a835b0e38b3a5447d3d | 29.624277 | 118 | 0.577702 | 4.125097 | false | false | false | false |
ibm-watson-iot/iot-python | src/wiotp/sdk/api/state/state.py | 2 | 2752 | # *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from collections import defaultdict
import iso8601
from wiotp.sdk.exceptions import ApiException
from wiotp.sdk.api.common import IterableList
from wiotp.sdk.api.common import RestApiDict
from wiotp.sdk.api.common import RestApiItemBase
from wiotp.sdk.api.common import RestApiDictReadOnly
# See docs @ https://orgid.internetofthings.ibmcloud.com/docs/v0002-beta/State-mgr-beta.html
class State(defaultdict):
def __init__(self, apiClient, url, **kwargs):
self._apiClient = apiClient
self._url = url
dict.__init__(self, **kwargs)
@property
def state(self):
return self["state"]
@property
def timestamp(self):
return iso8601.parse_date(self["timestamp"])
@property
def updated(self):
return iso8601.parse_date(self["updated"])
def __callPatchOperation__(self, body):
r = self._apiClient.patch(self._url, body)
if r.status_code == 200:
return r.json()
else:
raise Exception("Unexpected response from API (%s) = %s %s" % (self._url, r.status_code, r.text))
def reset(self):
return self.__callPatchOperation__({"operation": "reset-state"})
class States(RestApiDictReadOnly):
def __init__(self, apiClient, typeId, instanceId):
url = "api/v0002/device/types/%s/devices/%s/state" % (typeId, instanceId)
super(States, self).__init__(apiClient, State, None, url)
    # This method overrides the base class method so it can pass the state URL to
    # the constructed State; without this, we couldn't invoke the reset-state API call.
def __getitem__(self, key):
url = self._singleItemUrl % (key)
r = self._apiClient.get(url)
if r.status_code == 200:
return self._castToClass(apiClient=self._apiClient, url=url, **r.json())
if r.status_code == 404:
self.__missing__(key)
else:
raise ApiException(r)
    # Override the standard iterator: there is no API to get all state by iterating over logical interfaces (LIs).
def __iter__(self, *args, **kwargs):
raise Exception("Unable to iterate through device state. Retrieve it for a specific LI.")
def find(self, query_params={}):
raise Exception("Unable to find device state. Retrieve it for a specific LI.")
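# Hedged usage sketch (editor's addition, not part of the original module).
# `apiClient` is assumed to be a configured wiotp.sdk API client; the type,
# device, and logical-interface identifiers are illustrative.
def _example_reset_device_state(apiClient):
    states = States(apiClient, "myDeviceType", "myDeviceId")
    state = states["myLogicalInterfaceId"]  # one GET per logical interface
    print(state.state, state.timestamp, state.updated)
    state.reset()  # issues PATCH {"operation": "reset-state"} against the state URL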
| epl-1.0 | 58605d9eeb71bac265a21eb036021baf | 36.69863 | 109 | 0.628634 | 3.97114 | false | false | false | false |
ibm-watson-iot/iot-python | src/wiotp/sdk/api/usage/__init__.py | 2 | 2241 | # *****************************************************************************
# Copyright (c) 2018 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from datetime import datetime
from collections import defaultdict
from wiotp.sdk.exceptions import ApiException
class DataTransferSummary(defaultdict):
def __init__(self, **kwargs):
daysAsObj = []
if "days" in kwargs and kwargs["days"] is not None:
for day in kwargs["days"]:
daysAsObj.append(DayDataTransfer(**day))
del kwargs["days"]
dict.__init__(self, days=daysAsObj, **kwargs)
@property
def start(self):
return datetime.strptime(self["start"], "%Y-%m-%d").date()
@property
def end(self):
return datetime.strptime(self["end"], "%Y-%m-%d").date()
@property
def average(self):
return self["average"]
@property
def total(self):
return self["total"]
@property
def days(self):
return self["days"]
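# Illustrative example (editor's addition) of the JSON shape that
# DataTransferSummary wraps; the values are made up:
#
#     {"start": "2020-01-01", "end": "2020-01-31",
#      "average": 1024, "total": 31744,
#      "days": [{"date": "2020-01-01", "total": 512}, ...]}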
class DayDataTransfer(defaultdict):
def __init__(self, **kwargs):
dict.__init__(self, **kwargs)
@property
def date(self):
return datetime.strptime(self["date"], "%Y-%m-%d").date()
@property
def total(self):
return self["total"]
class Usage:
def __init__(self, apiClient):
self._apiClient = apiClient
def dataTransfer(self, start, end, detail=False):
"""
        Retrieve a summary of the data transfer (data traffic) for the organization
        between the given start and end dates.
        In case of failure it raises ApiException.
"""
r = self._apiClient.get(
"api/v0002/usage/data-traffic?start=%s&end=%s&detail=%s"
% (start.strftime("%Y-%m-%d"), end.strftime("%Y-%m-%d"), detail)
)
if r.status_code == 200:
return DataTransferSummary(**r.json())
else:
raise ApiException(r)
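# Hedged usage sketch (editor's addition, not part of the original module).
# `apiClient` is assumed to be a configured wiotp.sdk API client; the date
# range is illustrative.
def _example_data_transfer_report(apiClient):
    from datetime import date
    usage = Usage(apiClient)
    summary = usage.dataTransfer(date(2020, 1, 1), date(2020, 1, 31), detail=True)
    print("total=%s average=%s" % (summary.total, summary.average))
    for day in summary.days:
        print(day.date, day.total)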
| epl-1.0 | a70bd166a2e5bef336742ee9905bf3e0 | 28.486842 | 113 | 0.57162 | 4.18097 | false | false | false | false |
mbj4668/pyang | pyang/repository.py | 1 | 5853 | """A repository for searching and holding loaded pyang modules"""
import os
import sys
import io
from . import util
from . import syntax
class Repository(object):
"""Abstract base class that represents a module repository"""
def get_modules_and_revisions(self, ctx):
"""Return a list of all modules and their revisons
Returns a tuple (`modulename`, `revision`, `handle`), where
`handle' is used in the call to get_module_from_handle() to
retrieve the module.
"""
def get_module_from_handle(self, handle):
"""Return the raw module text from the repository
Returns (`ref`, `in_format`, `text`) if found, or None if not found.
`ref` is a string which is used to identify the source of
the text for the user. used in error messages
`in_format` is one of 'yang' or 'yin' or None.
`text` is the raw text data
Raises `ReadError`
"""
class ReadError(Exception):
"""Signals that an error occured during module retrieval"""
class FileRepository(Repository):
def __init__(self, path="", use_env=True, no_path_recurse=False,
verbose=False):
"""Create a Repository which searches the filesystem for modules
`path` is a `os.pathsep`-separated string of directories
"""
Repository.__init__(self)
self.dirs = []
self.no_path_recurse = no_path_recurse
self.modules = None
self.verbose = verbose
for directory in path.split(os.pathsep):
self._add_directory(directory)
while use_env:
use_env = False
modpath = os.getenv('YANG_MODPATH')
if modpath is not None:
for directory in modpath.split(os.pathsep):
self._add_directory(directory)
home = os.getenv('HOME')
if home is not None:
self._add_directory(os.path.join(home, 'yang', 'modules'))
inst = os.getenv('YANG_INSTALL')
if inst is not None:
self._add_directory(os.path.join(inst, 'yang', 'modules'))
break # skip search if install location is indicated
default_install = os.path.join(
sys.prefix, 'share', 'yang', 'modules')
if os.path.exists(default_install):
self._add_directory(default_install)
break # end search if default location exists
# for some systems, sys.prefix returns `/usr`
# but the real location is `/usr/local`
# if the package is installed with pip
# this information can be easily retrieved
import pkgutil
if not pkgutil.find_loader('pip'):
break # abort search if pip is not installed
# hack below to handle pip 10 internals
# if someone knows pip and how to fix this, it would be great!
location = None
try:
import pip.locations as locations
location = locations.distutils_scheme('pyang')
            except Exception:
try:
import pip._internal.locations as locations
location = locations.distutils_scheme('pyang')
                except Exception:
pass
if location is not None:
self._add_directory(
os.path.join(location['data'], 'share', 'yang', 'modules'))
if verbose:
sys.stderr.write('# module search path: %s\n'
% os.pathsep.join(self.dirs))
def _add_directory(self, directory):
if (not directory
or directory in self.dirs
or not os.path.isdir(directory)):
return False
self.dirs.append(directory)
return True
def _setup(self, ctx):
# check all dirs for yang and yin files
self.modules = []
def add_files_from_dir(d):
try:
files = os.listdir(d)
except OSError:
files = []
for fname in files:
absfilename = os.path.join(d, fname)
if os.path.isfile(absfilename):
m = syntax.re_filename.search(fname)
if m is not None:
name, rev, in_format = m.groups()
if not os.access(absfilename, os.R_OK):
continue
if absfilename.startswith("./"):
absfilename = absfilename[2:]
handle = in_format, absfilename
self.modules.append((name, rev, handle))
elif (not self.no_path_recurse
and d != '.' and os.path.isdir(absfilename)):
add_files_from_dir(absfilename)
for d in self.dirs:
add_files_from_dir(d)
def get_modules_and_revisions(self, ctx):
if self.modules is None:
self._setup(ctx)
return self.modules
def get_module_from_handle(self, handle):
in_format, absfilename = handle
fd = None
try:
fd = io.open(absfilename, "r", encoding="utf-8")
text = fd.read()
if self.verbose:
util.report_file_read(absfilename)
except IOError as ex:
raise self.ReadError("%s: %s" % (absfilename, ex))
except UnicodeDecodeError as ex:
s = str(ex).replace('utf-8', 'utf8')
raise self.ReadError("%s: unicode error: %s" % (absfilename, s))
finally:
if fd is not None:
fd.close()
if in_format is None:
in_format = util.guess_format(text)
return absfilename, in_format, text
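# Hedged usage sketch (editor's addition, not part of the original module).
# For FileRepository the ctx argument is not actually used when listing
# modules, so None is passed here; the search path is illustrative.
def _example_list_modules():
    repo = FileRepository(path="modules" + os.pathsep + ".", verbose=False)
    for name, rev, handle in repo.get_modules_and_revisions(None):
        ref, in_format, text = repo.get_module_from_handle(handle)
        print(name, rev, in_format, len(text))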
| isc | 17e8f7f53c9b7c93601f109e9dc07617 | 35.12963 | 79 | 0.537502 | 4.491942 | false | false | false | false |
mbj4668/pyang | test/test_issues/test_i225/test_prefix_deviation.py | 1 | 3107 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=redefined-outer-name
"""
tests for PYANG data files
"""
import os
import sys
# hack to handle pip 10 internals
try:
import pip.locations as locations
except ImportError:
import pip._internal.locations as locations
from pyang.context import Context
from pyang.repository import FileRepository
EXISTING_MODULE = 'ietf-yang-types'
DEFAULT_OPTIONS = {
'format': 'yang',
'verbose': True,
'list_errors': True,
'print_error_code': True,
'yang_remove_unused_imports': True,
'yang_canonical': True,
'trim_yin': False,
'keep_comments': True,
'features': [],
'deviations': [],
'path': []
}
"""Default options for pyang command line"""
class objectify(object):
"""Utility for providing object access syntax (.attr) to dicts"""
def __init__(self, *args, **kwargs):
for entry in args:
self.__dict__.update(entry)
self.__dict__.update(kwargs)
def __getattr__(self, _):
return None
def __setattr__(self, attr, value):
self.__dict__[attr] = value
def create_context(path='.', *options, **kwargs):
"""Generates a pyang context
Arguments:
path (str): location of YANG modules.
*options: list of dicts, with options to be passed to context.
**kwargs: similar to ``options`` but have a higher precedence.
Returns:
pyang.Context: Context object for ``pyang`` usage
"""
opts = objectify(DEFAULT_OPTIONS, *options, **kwargs)
repo = FileRepository(path, no_path_recurse=opts.no_path_recurse)
ctx = Context(repo)
ctx.opts = opts
return ctx
def test_can_find_modules_with_pip_install():
"""
context should find the default installed modules even when pyang
is installed using pip
"""
# remove obfuscation from env vars
if os.environ.get('YANG_INSTALL'):
del os.environ['YANG_INSTALL']
if os.environ.get('YANG_MODPATH'):
del os.environ['YANG_MODPATH']
ctx = create_context()
module = ctx.search_module(None, EXISTING_MODULE)
assert module is not None
def test_can_find_modules_when_prefix_differ(monkeypatch):
"""
context should find the default installed modules, without the help
    of environment variables, even if the pip install location
differs from ``sys.prefix``
"""
    # Store the pip install scheme up front: monkeypatching sys.prefix below
    # would otherwise change what distutils_scheme returns.
try:
scheme = locations.distutils_scheme('pyang')
monkeypatch.setattr(
locations, 'distutils_scheme', lambda *_: scheme)
    except Exception:
print("cannot get scheme from pip, skipping")
return
# simulate #225 description
monkeypatch.setattr(sys, 'prefix', '/usr')
# remove obfuscation from env vars
if os.environ.get('YANG_INSTALL'):
del os.environ['YANG_INSTALL']
if os.environ.get('YANG_MODPATH'):
del os.environ['YANG_MODPATH']
ctx = create_context()
module = ctx.search_module(None, EXISTING_MODULE)
assert module is not None
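# Editor's note (hedged): these tests are written for pytest; something like
#
#     pytest test/test_issues/test_i225/test_prefix_deviation.py -v
#
# should run them, assuming pyang and pip are importable in the environment.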
| isc | 48b0babf28f81699b5b6a833358dc176 | 24.891667 | 71 | 0.641455 | 3.86924 | false | false | false | false |
mbj4668/pyang | pyang/plugins/omni.py | 1 | 11901 |
import optparse
from pyang import plugin
paths_in_module = []
leafrefs = []
key = ''
class_keywords = ["container", "list", "case", "choice", "augment"]
servicepoints = ["servicepoint", "productpoint"]
classnamecolor = " {0.113725, 0.352941, 0.670588}"
mandatoryconfig = " {0.600000, 0.152941, 0.152941}"
optionalconfig = " {0.129412, 0.501961, 0.254902}"
notconfig = " {0.549020, 0.486275, 0.133333}"
# Which line style to use for containment; OmniGraffle defaults to a Bezier curve, so override it.
containsline = " tail type: \"FilledDiamond\", head type: \"None\", line type: \"Straight\" "
leafrefline = " line type: \"Straight\", head type: \"FilledArrow\" "
def pyang_plugin_init():
plugin.register_plugin(OmniPlugin())
class OmniPlugin(plugin.PyangPlugin):
def add_output_format(self, fmts):
self.multiple_modules = True
fmts['omni'] = self
def add_opts(self, optparser):
optlist = [
optparse.make_option("--omni-path",
dest="omni_tree_path",
help="Subtree to print"),
]
g = optparser.add_option_group("OmniGraffle output specific options")
g.add_options(optlist)
def setup_fmt(self, ctx):
ctx.implicit_errors = False
def emit(self, ctx, modules, fd):
if ctx.opts.omni_tree_path is not None:
path = ctx.opts.omni_tree_path.split('/')
if path[0] == '':
path = path[1:]
else:
path = None
print_omni_header(modules, fd, path, ctx)
emit_modules(modules, fd, path, ctx)
post_process(fd, ctx)
print_omni_footer(modules, fd, path, ctx)
def print_omni_header(modules, fd, path, ctx):
# Build doc name from module names
name = ''
for m in modules:
name += m.arg
name = name[:32]
fd.write("""
tell application id "com.omnigroup.OmniGraffle6"
activate
make new document with properties {name:\"%s\"}
set bounds of window 1 to {50, 50, 1200, 800}
tell first canvas of document \"%s\"
set canvasSize to {600, 600}
set name to \"YANG Model\"
set adjusts pages to true
make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {32.000000, 20.000000}, text: {size: 8, alignment: center, font: "HelveticaNeue", text: "leafref"}, origin: {2403.202333, 169.219094}}
make new line at end of graphics with properties {point list: {{2513.245592418806, 185.5962102698529}, {2373.745592418806, 185.3149602698529}}, draws shadow: false, head type: "FilledArrow"}
make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {105.000000, 20.000000}, text: {size: 8, alignment: center, font: "HelveticaNeue", text: "Schema tree, containment"}, origin: {2397.741930, 138.863190}}
make new line at end of graphics with properties {point list: {{2374.993645107464, 154.4881903780727}, {2514.493645107464, 154.4881903780727}}, draws shadow: false, tail type: "FilledDiamond"}
make new shape at end of graphics with properties {autosizing: vertically only, size: {139.500000, 14.000000}, text: {alignment: center, font: "Helvetica-Bold", text: "Legend"}, text placement: top, origin: {2366.929155, 43.937008}, vertical padding: 0}
make new shape at end of graphics with properties {autosizing: vertically only, size: {139.500000, 56.000000}, text: {{color: {0.600000, 0.152941, 0.152941}, text: "Mandatory config
"}, {color: {0.129412, 0.501961, 0.254902}, text: "Optional config
"}, {color: {0.129412, 0.501961, 0.254902}, text: "Key leaf", underlined: true}, {color: {0.129412, 0.501961, 0.254902}, text: "
"}, {color: {0.549020, 0.486275, 0.133333}, text: "Not config"}}, text placement: top, origin: {2366.929155, 57.937008}, vertical padding: 0}
assemble graphics -2 through -1 table shape { 2, 1 }
assemble graphics -5 through -1
""" %(name, name))
def post_process(fd, ctx):
for s in leafrefs:
        # Don't try to connect to a class that was not given as input to pyang.
if s.strip().split(" to ")[1].split(" with ")[0] in paths_in_module:
fd.write(s)
def print_omni_footer(modules, fd, path, ctx):
fd.write("""
layout
end tell
end tell
""")
def print_module_info(module, fd, ctx):
title = module.arg
print_text(title, fd, ctx)
def emit_modules(modules, fd, path, ctx):
for module in modules:
print_module_info(module, fd, ctx)
chs = [ch for ch in module.i_children]
if path is not None and len(path) > 0:
chs = [ch for ch in chs
if ch.arg == path[0]]
path = path[1:]
# TEST
for ch in chs:
print_node(module, ch, module, fd, path, ctx, 'true')
for augment in module.search('augment'):
print_node(module, augment, module, fd, path, ctx, 'true')
def iterate_children(parent, s, module, fd, path, ctx):
if hasattr(s, 'i_children'):
for ch in s.i_children:
print_node(s, ch, module, fd, path, ctx)
def print_class_header(s, fd, ctx, root='false'):
global servicepoints
service = ""
for sub in s.substmts:
if sub.keyword[1] in servicepoints:
service = "SERVICE\n"
fd.write("make new shape at end of graphics with properties {autosizing: full, size: {187.500000, 14.000000}, text: {{alignment: center, font: \"Helvetica-Bold\", text: \"%s \"}, {alignment: center, color:%s, font: \"Helvetica-Bold\", text: \"%s \"}}, text placement: top, origin: {150.000000, 11.500000}, vertical padding: 0}\n" %(service + s.keyword, classnamecolor, s.arg))
def print_class_stuff(s, fd, ctx):
number = print_attributes(s, fd, ctx)
#print_actions(s,fd, ctx)
close_class(number, s, fd, ctx)
print_associations(s,fd, ctx)
def print_attributes(s,fd, ctx):
global key
if s.keyword == 'list':
keystring = s.search_one('key')
if keystring is not None:
key = keystring.arg.split(" ")
else:
key = ''
if hasattr(s, 'i_children'):
found_attrs = False
found_actions = False
index = False
# Search attrs
for ch in s.i_children:
index = False
if ch.keyword in ["leaf", "leaf-list"]:
if not found_attrs:
# first attr in attr section
fd.write("make new shape at end of graphics with properties {autosizing:full, size:{187.5, 28.0}, text:{")
found_attrs = True
else:
# comma before new textitem
fd.write(", ")
if ch.keyword == "leaf-list":
append = "[]"
else:
append = ""
if ch.arg in key:
index = True
print_leaf(ch, append, index, fd, ctx)
if found_attrs:
# close attr section
fd.write("}, text placement:top, origin:{150.0, 25.5}, vertical padding:0}\n")
# Search actions
for ch in s.i_children:
if ch.keyword == ('tailf-common', 'action'):
if not found_actions:
fd.write("make new shape at end of graphics with properties {autosizing:full, size:{187.5, 28.0}, text:{text:\"")
found_actions = True
print_action(ch, fd, ctx)
if found_actions:
fd.write("\"}, text placement:top, origin:{150.0, 25.5}, vertical padding:0}\n")
# return number of sections in class
return (found_attrs + found_actions) + 1
def close_class(number, s, fd, ctx):
fd.write("local %s\n" % fullpath(s))
fd.write("set %s to assemble ( graphics -%s through -1 ) table shape {%s, 1}\n"
% (fullpath(s), number, number))
def print_node(parent, s, module, fd, path, ctx, root='false'):
# We have a class
if s.keyword in class_keywords:
print_class_header(s, fd, ctx, root)
paths_in_module.append(fullpath(s))
print_class_stuff(s, fd, ctx)
# Do not try to create relationship to module
if parent != module:
presence = s.search_one("presence")
if presence is not None:
print_aggregation(parent, s, fd, "0", "1", ctx)
else:
print_aggregation(parent, s, fd, "1", "1", ctx)
iterate_children(parent, s, module, fd, path, ctx)
def print_associations(s, fd, ctx):
# find leafrefs and identityrefs
if hasattr(s, 'i_children'):
for ch in s.i_children:
if hasattr(ch, 'i_leafref_ptr') and (ch.i_leafref_ptr is not None):
to = ch.i_leafref_ptr[0]
print_association(s, to.parent, ch, to, "leafref", fd, ctx)
def print_aggregation(parent, this, fd, lower, upper, ctx):
fd.write("connect %s to %s with properties {%s} \n" %(fullpath(parent),fullpath(this), containsline))
def print_rpc(rpc, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s\' " %(fullpath(rpc), rpc.arg))
def print_action(action, fd, ctx, root='false'):
fd.write("%s()\n" %action.arg)
def print_notification(notification, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s\' " %(fullpath(notification), notification.arg))
def print_inout(parent, s, fd, ctx, root='false'):
fd.write("<UML:Class xmi.id = \'%s\' name = \'%s-%s\' " %(fullpath(s), parent.arg, s.keyword))
def print_leaf(leaf, append, index, fd, ctx):
if leaf.i_config:
c = '(rw)'
color = optionalconfig
else:
c = '(ro)'
color = notconfig
m = leaf.search_one('mandatory')
if m is None or m.arg == 'false':
mand = '?'
else:
mand = ''
color = mandatoryconfig
if not index:
fd.write("{font: \"Helvetica-Oblique\", color: %s, text: \"%s%s%s %s %s\n\"}"
% (color, leaf.arg, append, mand, c, get_typename(leaf)))
else:
fd.write("{font: \"Helvetica-Oblique\", color: %s, underlined: true, text: \"%s%s%s %s %s\n\"}"
% (color, leaf.arg, append, mand, c, get_typename(leaf)))
def print_association(fromclass, toclass, fromleaf, toleaf, association, fd, ctx):
leafrefs.append("connect " + (fullpath(fromclass)) + " to " + fullpath(toclass) + " with properties {" + leafrefline + "}\n", )
def print_text(t, fd, ctx):
fd.write("make new shape at end of graphics with properties {fill: no fill, draws stroke: false, draws shadow: false, autosizing: full, size: {57.000000, 30.000000}, text: {size: 16, alignment: center, font: \"HelveticaNeue\", text: \"%s\"}, origin: {100, 4.500000}}\n" %t)
def get_typename(s):
t = s.search_one('type')
if t is not None:
s = t.arg
# if t.arg == 'enumeration':
# s = s + ' : {'
# for enums in t.substmts[:10]:
# s = s + enums.arg + ','
# if len(t.substmts) > 3:
# s = s + "..."
# s = s + '}'
# elif t.arg == 'leafref':
# s = s + ' : '
# p = t.search_one('path')
# if p is not None:
# s = s + p.arg
return s
def fullpath(stmt):
pathsep = "_"
path = stmt.arg
# for augment paths we need to remove initial /
if path.startswith("/"):
path = path[1:]
else:
if stmt.keyword == 'case':
path = path + '-case'
elif stmt.keyword == 'grouping':
path = path + '-grouping'
while stmt.parent is not None:
stmt = stmt.parent
if stmt.arg is not None:
path = stmt.arg + pathsep + path
path = path.replace('-', '_')
path = path.replace(':', '_')
path = path.replace('/', '_')
return path
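# Hedged usage sketch (editor's addition): the plugin registers the "omni"
# output format and the --omni-path option above, and the emitted text is an
# AppleScript targeting OmniGraffle, so a plausible invocation is:
#
#     pyang -f omni --omni-path /interfaces ietf-interfaces.yang > model.applescript
#     osascript model.applescript
#
# The module name and subtree path are illustrative.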
| isc | be68baeefe1ad69a4479aae3892de1f6 | 36.780952 | 380 | 0.580035 | 3.356176 | false | false | false | false |