id (int64, 0–6k) | code (stringlengths, 4k–8k) | code_compressed (null)
---|---|---
100 | #/*##########################################################################
# Copyright (C) 2004-2014 V.A. Sole, European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF by the Software group.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#############################################################################*/
"""This plugin allows to configure and execute a batch fitting for all
spectra in the stack.
The user can select the fit function and a background function from a
selection of functions, and must provide the initial estimation for
the iterative fit.
The fit result is saved to file, at the end. A 2D map is created for each
fitted parameter, and saved in EDF and ASCII formats."""
__author__ = "V.A. Sole - ESRF Data Analysis"
__contact__ = "sole@esrf.fr"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
import logging
_logger = logging.getLogger(__name__)
try:
from PyMca5 import StackPluginBase
from PyMca5.PyMcaGui import StackSimpleFitWindow
from PyMca5.PyMcaGui import PyMca_Icons
except ImportError:
_logger.warning("FitStackPlugin importing from somewhere else")
class FitStackPlugin(StackPluginBase.StackPluginBase):
def __init__(self, stackWindow, **kw):
if _logger.getEffectiveLevel() == logging.DEBUG:
StackPluginBase.pluginBaseLogger.setLevel(logging.DEBUG)
StackPluginBase.StackPluginBase.__init__(self, stackWindow, **kw)
self.methodDict = {}
function = self.METHOD_NAME
info = "Fit stack with user defined functions"
icon = PyMca_Icons.fit
self.methodDict["Fit Stack"] =[function,
info,
icon]
self.__methodKeys = ["Fit Stack"]
self.simpleFitWindow = None
def stackUpdated(self):
if self.simpleFitWindow is None:
return
self.__updateOwnData()
def selectionMaskUpdated(self):
if self.simpleFitWindow is None:
return
self.simpleFitWindow.setMask(self.getStackSelectionMask())
def stackClosed(self):
if self.simpleFitWindow is not None:
self.simpleFitWindow.close()
#Methods implemented by the plugin
def getMethods(self):
return self.__methodKeys
def getMethodToolTip(self, name):
return self.methodDict[name][1]
def getMethodPixmap(self, name):
return self.methodDict[name][2]
def applyMethod(self, name):
return self.methodDict[name][0]()
def __updateOwnData(self):
activeCurve = self.getActiveCurve()
if activeCurve is None:
return
#this can be problematic if a fit is going on...
x, spectrum, legend, info = activeCurve
xlabel = info['xlabel']
ylabel = info['ylabel']
xmin, xmax = self.getGraphXLimits()
ymin, ymax = self.getGraphYLimits()
mcaIndex = self.getStackInfo()['McaIndex']
self.simpleFitWindow.setSpectrum(x,
spectrum,
xmin=xmin,
xmax=xmax)
self.simpleFitWindow.setData(x,
self.getStackData(),
data_index=mcaIndex,
mask=self.getStackSelectionMask())
def METHOD_NAME(self):
if self.simpleFitWindow is None:
self.simpleFitWindow = StackSimpleFitWindow.StackSimpleFitWindow()
self.__updateOwnData()
self.simpleFitWindow.show()
MENU_TEXT = "Stack Simple Fitting"
def getStackPluginInstance(stackWindow, **kw):
ob = FitStackPlugin(stackWindow)
return ob | null |
101 | ################################################################################
# Creme is a free/open-source Customer Relationship Management software
# Copyright (C) 2009-2023 Hybird
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from creme.creme_core.models import CREME_REPLACE_NULL, CremeEntity
from creme.creme_core.models.manager import CremeEntityManager
from ..constants import (
CREATION_LABELS,
NARROW,
REL_OBJ_ACTIVITY_SUBJECT,
REL_OBJ_LINKED_2_ACTIVITY,
REL_OBJ_PART_2_ACTIVITY,
)
from . import other_models
from .calendar import Calendar
class ActivityManager(CremeEntityManager):
linked_rtype_ids = (
REL_OBJ_PART_2_ACTIVITY,
REL_OBJ_ACTIVITY_SUBJECT,
REL_OBJ_LINKED_2_ACTIVITY,
)
def _linked(self, entity):
return self.filter(
is_deleted=False,
relations__object_entity=entity,
relations__type__in=self.linked_rtype_ids,
).distinct()
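# distinct() is required because an entity linked through several relation
# types would otherwise appear once per matching Relation row.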
def _linked_to_organisation(self, orga):
return self.filter(
is_deleted=False,
relations__object_entity__in=[
orga,
*orga.get_managers().values_list('id', flat=True),
*orga.get_employees().values_list('id', flat=True),
],
relations__type__in=self.linked_rtype_ids,
).distinct()
def future_linked(self, entity, today):
# TODO: end greater than today or floating type equal to floating
return self._linked(entity).filter(end__gt=today).order_by('start')
def past_linked(self, entity, today):
return self._linked(entity).filter(end__lte=today)
def future_linked_to_organisation(self, orga, today):
return self._linked_to_organisation(orga).filter(end__gt=today).order_by('start')
def METHOD_NAME(self, orga, today):
return self._linked_to_organisation(orga).filter(end__lte=today)
class AbstractActivity(CremeEntity):
"""Activity : task, meeting, phone call, unavailability ..."""
title = models.CharField(_('Title'), max_length=100)
start = models.DateTimeField(_('Start'), blank=True, null=True)
end = models.DateTimeField(_('End'), blank=True, null=True)
minutes = models.TextField(_('Minutes'), blank=True)
place = models.CharField(
_('Activity place'), max_length=500, blank=True
).set_tags(optional=True)
duration = models.PositiveIntegerField(
verbose_name=_('Duration (in hour)'),
blank=True, null=True,
help_text=_('It is only informative and is not used to compute the end time.'),
)
type = models.ForeignKey(
other_models.ActivityType, verbose_name=_('Activity type'), on_delete=models.PROTECT,
)
sub_type = models.ForeignKey(
other_models.ActivitySubType, verbose_name=_('Activity sub-type'),
on_delete=models.PROTECT,
)
status = models.ForeignKey(
other_models.Status, verbose_name=_('Status'),
blank=True, null=True, on_delete=CREME_REPLACE_NULL,
)
calendars = models.ManyToManyField(Calendar, verbose_name=_('Calendars'), editable=False)
is_all_day = models.BooleanField(_('All day?'), default=False)
busy = models.BooleanField(_('Busy?'), default=False)
# TODO: use choices ?
floating_type = models.PositiveIntegerField(
_('Floating type'), default=NARROW, editable=False,
).set_tags(viewable=False)
objects = ActivityManager()
creation_label = _('Create an activity')
save_label = _('Save the activity')
class Meta:
abstract = True
app_label = 'activities'
verbose_name = _('Activity')
verbose_name_plural = _('Activities')
ordering = ('-start',)
@classmethod
def get_creation_title(cls, type_id):
return CREATION_LABELS.get(type_id, cls.creation_label)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('activities__view_activity', args=(self.id,))
@staticmethod
def get_create_absolute_url():
return reverse('activities__create_activity')
def get_edit_absolute_url(self):
return reverse('activities__edit_activity', args=(self.id,))
@staticmethod
def get_lv_absolute_url():
return reverse('activities__list_activities')
def get_participant_relations(self):
return self.get_relations(REL_OBJ_PART_2_ACTIVITY, real_obj_entities=True)
def get_subject_relations(self, real_entities=True):
"""Get the list of models.Relation instances which link the Activity
with its subjects.
@param real_entities: Retrieve (efficiently) the real entities which are related.
"""
return self.get_relations(REL_OBJ_ACTIVITY_SUBJECT, real_obj_entities=real_entities)
def get_linkedto_relations(self):
return self.get_relations(REL_OBJ_LINKED_2_ACTIVITY, real_obj_entities=True)
def handle_all_day(self):
if self.is_all_day:
self.start = self.start.replace(hour=0, minute=0)
self.end = self.end.replace(hour=23, minute=59)
def _pre_save_clone(self, source):
# TODO: Make this explicit in the description? Move the activity to another time-slot?
if source.busy:
self.busy = False
def _copy_relations(self, source, allowed_internal=()):
super()._copy_relations(
source,
allowed_internal=[*allowed_internal, REL_OBJ_PART_2_ACTIVITY],
)
def _pre_delete(self):
for relation in self.relations.filter(type=REL_OBJ_PART_2_ACTIVITY):
relation._delete_without_transaction()
class Activity(AbstractActivity):
class Meta(AbstractActivity.Meta):
swappable = 'ACTIVITIES_ACTIVITY_MODEL' | null |
102 | #!/usr/bin/env python
# CCP4InterRadiationDamageDetector.py
# Copyright (C) 2006 CCLRC, Graeme Winter
#
# This code is distributed under the BSD license, a copy of which is
# included in the root directory of this package.
#
# 15th January 2007
#
# A detector for radiation damage between wavelengths - in particular for
# detecting radiation damage and eliminating wavelengths at the end of
# the scaling process.
#
# Will be used in:
#
# CCP4 Scaler, XDS Scaler.
#
# Uses:
#
# Scaleit.py
#
from __future__ import annotations
import os
from xia2.lib.bits import auto_logfiler
from xia2.Wrappers.CCP4.Scaleit import Scaleit
# Operation:
#
# This will work by comparing the relative scale and b factors between
# the data sets - presuming that they are input in a reasonable approximation
# of collection order - and deciding when radiation damage has become a problem.
# Exactly what to do about this - that is the decision to be made by
# the calling routine.
#
# This will return a list of wavelengths which are "ok" and a list of ones
# which are "damaged".
class CCP4InterRadiationDamageDetector:
"""A class to detect radiation damage."""
def __init__(self):
self._working_directory = os.getcwd()
self._hklin = None
self._hklout = None
self._anomalous = False
def set_hklin(self, hklin):
self._hklin = hklin
def set_anomalous(self, anomalous):
self._anomalous = anomalous
def get_hklin(self):
return self._hklin
def check_hklin(self):
if self._hklin is None:
raise RuntimeError("hklin not defined")
if not os.path.exists(self._hklin):
raise RuntimeError("hklin %s does not exist" % self._hklin)
def METHOD_NAME(self, hklout):
self._hklout = hklout
def get_hklout(self):
return self._hklout
def check_hklout(self):
if self._hklout is None:
raise RuntimeError("hklout not defined")
# check that these are different files!
if self._hklout == self._hklin:
raise RuntimeError("hklout and hklin are the same file")
def set_working_directory(self, working_directory):
self._working_directory = working_directory
def get_working_directory(self):
return self._working_directory
def detect(self):
"""Detect radiation damage between wavelengths / datasets in a
reflection file. Will assume that the input is in order of data
collection. Will further assume that this is for MAD phasing."""
self.check_hklin()
self.check_hklout()
# check that hklin is an mtz file.
scaleit = Scaleit()
scaleit.set_working_directory(self.get_working_directory())
auto_logfiler(scaleit)
if self._anomalous:
scaleit.set_anomalous(True)
scaleit.set_hklin(self.get_hklin())
scaleit.METHOD_NAME(self.get_hklout())
try:
scaleit.scaleit()
except RuntimeError:
return ()
statistics = scaleit.get_statistics()
wavelengths = statistics["mapping"]
b_factors = statistics["b_factor"]
derivatives = sorted(wavelengths.keys())
status = []
for j in derivatives:
name = b_factors[j]["dname"]
b = b_factors[j]["b"]
r = b_factors[j]["r"]
# this is arbitrary!
if r > 0.50:
misindexed = ", misindexed"
else:
misindexed = ""
if b < -3:
status.append((name, f"{b:5.1f} {r:4.2f} (damaged{misindexed})"))
else:
status.append((name, f"{b:5.1f} {r:4.2f} (ok{misindexed})"))
return status
if __name__ == "__main__":
c = CCP4InterRadiationDamageDetector()
hklin = os.path.join(
os.environ["X2TD_ROOT"],
"Test",
"UnitTest",
"Wrappers",
"Scaleit",
"TS03_INTER_RD.mtz",
)
c.set_hklin(hklin)
c.METHOD_NAME("junk.mtz")
status = c.detect()
for s in status:
print("%s %s" % s) | null |
103 | import pytest
from pybind11_tests import ConstructorStats
from pybind11_tests import operators as m
def test_operator_overloading():
v1 = m.Vector2(1, 2)
v2 = m.Vector(3, -1)
v3 = m.Vector2(1, 2) # Same value as v1, but different instance.
assert v1 is not v3
assert str(v1) == "[1.000000, 2.000000]"
assert str(v2) == "[3.000000, -1.000000]"
assert str(-v2) == "[-3.000000, 1.000000]"
assert str(v1 + v2) == "[4.000000, 1.000000]"
assert str(v1 - v2) == "[-2.000000, 3.000000]"
assert str(v1 - 8) == "[-7.000000, -6.000000]"
assert str(v1 + 8) == "[9.000000, 10.000000]"
assert str(v1 * 8) == "[8.000000, 16.000000]"
assert str(v1 / 8) == "[0.125000, 0.250000]"
assert str(8 - v1) == "[7.000000, 6.000000]"
assert str(8 + v1) == "[9.000000, 10.000000]"
assert str(8 * v1) == "[8.000000, 16.000000]"
assert str(8 / v1) == "[8.000000, 4.000000]"
assert str(v1 * v2) == "[3.000000, -2.000000]"
assert str(v2 / v1) == "[3.000000, -0.500000]"
assert v1 == v3
assert v1 != v2
assert hash(v1) == 4
# TODO(eric.cousineau): Make this work.
# assert abs(v1) == "abs(Vector2)"
v1 += 2 * v2
assert str(v1) == "[7.000000, 0.000000]"
v1 -= v2
assert str(v1) == "[4.000000, 1.000000]"
v1 *= 2
assert str(v1) == "[8.000000, 2.000000]"
v1 /= 16
assert str(v1) == "[0.500000, 0.125000]"
v1 *= v2
assert str(v1) == "[1.500000, -0.125000]"
v2 /= v1
assert str(v2) == "[2.000000, 8.000000]"
cstats = ConstructorStats.get(m.Vector2)
assert cstats.alive() == 3
del v1
assert cstats.alive() == 2
del v2
assert cstats.alive() == 1
del v3
assert cstats.alive() == 0
assert cstats.values() == [
"[1.000000, 2.000000]",
"[3.000000, -1.000000]",
"[1.000000, 2.000000]",
"[-3.000000, 1.000000]",
"[4.000000, 1.000000]",
"[-2.000000, 3.000000]",
"[-7.000000, -6.000000]",
"[9.000000, 10.000000]",
"[8.000000, 16.000000]",
"[0.125000, 0.250000]",
"[7.000000, 6.000000]",
"[9.000000, 10.000000]",
"[8.000000, 16.000000]",
"[8.000000, 4.000000]",
"[3.000000, -2.000000]",
"[3.000000, -0.500000]",
"[6.000000, -2.000000]",
]
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
assert cstats.move_constructions >= 10
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
def test_operators_notimplemented():
"""#393: need to return NotSupported to ensure correct arithmetic operator behavior"""
c1, c2 = m.C1(), m.C2()
assert c1 + c1 == 11
assert c2 + c2 == 22
assert c2 + c1 == 21
assert c1 + c2 == 12
def METHOD_NAME():
"""#328: first member in a class can't be used in operators"""
a = m.NestA()
b = m.NestB()
c = m.NestC()
a += 10
assert m.get_NestA(a) == 13
b.a += 100
assert m.get_NestA(b.a) == 103
c.b.a += 1000
assert m.get_NestA(c.b.a) == 1003
b -= 1
assert m.get_NestB(b) == 3
c.b -= 3
assert m.get_NestB(c.b) == 1
c *= 7
assert m.get_NestC(c) == 35
abase = a.as_base()
assert abase.value == -2
a.as_base().value += 44
assert abase.value == 42
assert c.b.a.as_base().value == -2
c.b.a.as_base().value += 44
assert c.b.a.as_base().value == 42
del c
pytest.gc_collect()
del a # Shouldn't delete while abase is still alive
pytest.gc_collect()
assert abase.value == 42
del abase, b
pytest.gc_collect()
def test_overriding_eq_reset_hash():
assert m.Comparable(15) is not m.Comparable(15)
assert m.Comparable(15) == m.Comparable(15)
with pytest.raises(TypeError) as excinfo:
hash(m.Comparable(15))
assert str(excinfo.value).startswith("unhashable type:")
for hashable in (m.Hashable, m.Hashable2):
assert hashable(15) is not hashable(15)
assert hashable(15) == hashable(15)
assert hash(hashable(15)) == 15
assert hash(hashable(15)) == hash(hashable(15))
def test_return_set_of_unhashable():
with pytest.raises(TypeError) as excinfo:
m.get_unhashable_HashMe_set()
assert str(excinfo.value.__cause__).startswith("unhashable type:") | null |
104 | #!/usr/bin/env python3
import argparse
import logging
import os
import re
import sys
# logging.basicConfig(level="INFO")
_logger = logging.getLogger(__name__)
def METHOD_NAME():
desc = """Process C/C++ files for '#if 0' and '#if 1' blocks"""
epilog = """This tool is for removing dead code from C/C++ files.
It searches for '#if 0' and '#if 1' preprocessor directives (along
with their '#else' clauses), and removes them along with any code
which they disable. All other '#if', '#ifdef' and '#ifndef'
directives are left untouched.
"""
parser = argparse.ArgumentParser(description=desc, epilog=epilog)
parser.add_argument("--input-file-name", help="Path to the input file",
required=True)
parser.add_argument("--output-file-name", help="Path to the output file",
required=True)
return parser
# -----------------------------------------------------
patterns = {
"if0": re.compile(r"^\s*#if\s+0\s+"),
"if1": re.compile(r"^\s*#if\s+1\s+"),
"ifother": re.compile(r"^\s*#if\s+[^ 0-1]\w*"),
"ifdef": re.compile(r"^\s*#ifdef\s+[^ 0-1]\w*"),
"ifndef": re.compile(r"^\s*#ifndef\s+[^ 0-1]\w*"),
"else": re.compile(r"^\s*#else\s+"),
"endif": re.compile(r"^\s*#endif"),
}
_START_KEYS = [ "if0", "if1", "ifother", "ifdef", "ifndef" ]
_START_PRESERVE_KEYS = [ "ifother", "ifdef", "ifndef" ]
_ELSE_KEY = "else"
_END_KEY = "endif"
_PLAIN_KEY = "plain"
def discover_line_type(line):
result = _PLAIN_KEY
match_names = []
_logger.debug("Processing: %s", line)
for name, pattern in patterns.items():
_logger.debug("Considering %s", name)
if pattern.match(line):
_logger.debug("Matched %s", name)
match_names.append(name)
assert len(match_names) < 2
if len(match_names) == 1:
result = match_names[0]
return result
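# Illustrative classifications (derived from the patterns above):
#   discover_line_type("#if 0\n")      -> "if0"
#   discover_line_type("#ifdef FOO\n") -> "ifdef"
#   discover_line_type("x = 1;\n")     -> "plain"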
_PRESERVE_KEY = "preserve"
_ACTIVE_KEY = "active"
def check_active(if_stack):
if not if_stack:
return True
checks = [x[_ACTIVE_KEY] for x in if_stack]
return all(checks)
def create_if_stack_item(line_type):
"""Creates a dictionary for the if_stack
The dictionary has two keys. One indicates whether the
current #if block is active or not (i.e. whether the
contents should go into the output). For our purposes,
the only "inactive" start is "#if 0".
The second key indicates whether or not the preprocessor
directives should be preserved. We are only eliminating
"#if 0" and "#if 1" blocks. Any "#else" and "#endif"
statements belonging to other "#if" or "#ifdef" statements
must be preserved.
"""
assert line_type in _START_KEYS
result = dict()
result[_ACTIVE_KEY] = line_type != "if0"
result[_PRESERVE_KEY] = line_type in _START_PRESERVE_KEYS
return result
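# e.g. (illustrative) create_if_stack_item("if0") returns
#   {"active": False, "preserve": False}
# while create_if_stack_item("ifdef") returns
#   {"active": True, "preserve": True}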
def process_lines(lines):
_logger.info("Processing lines")
result_lines = []
# A stack of the various #if statements currently relevant
if_stack = []
for l in lines:
pop_if_stack = False
on_preproc_element = True
line_type = discover_line_type(l)
# See if the line is a preprocessor directive we care about
if line_type in _START_KEYS:
# We're starting a new block so push the stack
if_stack.append(create_if_stack_item(line_type))
elif line_type == _ELSE_KEY:
# An '#else' will apply to the last #if
last_element = if_stack[-1]
if not last_element[_PRESERVE_KEY]:
# We only flip whether we are active or not
# for "#if 0" and "#if 1" blocks
last_element[_ACTIVE_KEY] = not last_element[_ACTIVE_KEY]
if_stack[-1] = last_element
elif line_type == _END_KEY:
# Flag that we've ended a block
# Have to use this because we might need to output the "#endif"
pop_if_stack = True
elif line_type == _PLAIN_KEY:
on_preproc_element = False
else:
raise ValueError("Unrecognised line_type: {0}".format(line_type))
append_current_line = False
if check_active(if_stack):
if on_preproc_element:
if if_stack[-1][_PRESERVE_KEY]:
append_current_line = True
else:
append_current_line = True
if append_current_line:
result_lines.append(l)
# Remove the last element in the stack if we just exited a block
if pop_if_stack:
if_stack.pop()
# If the file is well-formed and we processed it correctly then
# We should not be inside any preprocessor directives
assert len(if_stack) == 0
_logger.info("Line processing complete");
return result_lines
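# End-to-end sketch (illustrative): process_lines(["#if 0\n", "dead();\n",
# "#else\n", "live();\n", "#endif\n"]) yields ["live();\n"], while an
# "#ifdef FOO" ... "#endif" block is emitted unchanged.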
def process_file(input_file_name, output_file_name):
_logger.info("Reading file {}".format(input_file_name))
text_lines = []
with open(input_file_name, 'r') as input_file:
text_lines = input_file.readlines()
result_lines = process_lines(text_lines)
_logger.info("Writing file {}".format(output_file_name))
with open(output_file_name, 'w') as output_file:
output_file.writelines(result_lines)
# -------------------------------------------------------
def main(argv):
parser = METHOD_NAME()
args = parser.parse_args(argv)
process_file(args.input_file_name, args.output_file_name)
if __name__ == "__main__":
main(sys.argv[1:]) | null |
105 | import argparse
import json
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def init_flags():
global FLAGS
parser = argparse.ArgumentParser()
parser.add_argument(
"--datadir",
default="/tmp/MNIST_data",
)
parser.add_argument("--rundir", default="/tmp/MNIST_train")
parser.add_argument("--batch_size", type=int, default=100)
parser.add_argument("--epochs", type=int, default=10)
parser.add_argument("--lr", type=float, default=1e-4)
parser.add_argument("--prepare", dest='just_data', action="store_true")
parser.add_argument("--test", action="store_true")
FLAGS, _ = parser.parse_known_args()
def init_data():
global mnist
mnist = input_data.read_data_sets(FLAGS.datadir, one_hot=True)
def init_train():
init_model()
init_train_op()
init_eval_op()
init_summaries()
init_collections()
METHOD_NAME()
def init_model():
global x, y
# Input layer
x = tf.placeholder(tf.float32, [None, 784])
# First convolutional layer
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1, 28, 28, 1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# Second convolutional layer
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# First fully connected layer
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Dropout
keep_prob = tf.placeholder_with_default(1.0, [])
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Output layer
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
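# Shape walk-through (for reference): 28x28x1 input -> conv+pool -> 14x14x32
# -> conv+pool -> 7x7x64 -> flatten to 3136 -> fc 1024 -> 10 logits.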
def weight_variable(shape):
return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME")
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
def init_train_op():
global y_, loss, train_op
y_ = tf.placeholder(tf.float32, [None, 10])
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
train_op = tf.train.AdamOptimizer(FLAGS.lr).minimize(loss)
def init_eval_op():
global accuracy
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def init_summaries():
init_inputs_summary()
init_op_summaries()
init_summary_writers()
def init_inputs_summary():
tf.summary.image("inputs", tf.reshape(x, [-1, 28, 28, 1]), 10)
def init_op_summaries():
tf.summary.scalar("loss", loss)
tf.summary.scalar("accuracy", accuracy)
def init_summary_writers():
global summaries, train_writer, validate_writer
summaries = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(
FLAGS.rundir + "/train", tf.get_default_graph()
)
validate_writer = tf.summary.FileWriter(FLAGS.rundir + "/validate")
def init_collections():
tf.add_to_collection("inputs", json.dumps({"image": x.name}))
tf.add_to_collection("outputs", json.dumps({"prediction": y.name}))
tf.add_to_collection("x", x.name)
tf.add_to_collection("y_", y_.name)
tf.add_to_collection("accuracy", accuracy.name)
def METHOD_NAME():
global sess
sess = tf.Session()
sess.run(tf.global_variables_initializer())
def train():
steps = (mnist.train.num_examples // FLAGS.batch_size) * FLAGS.epochs
for step in range(steps):
images, labels = mnist.train.next_batch(FLAGS.batch_size)
batch = {x: images, y_: labels}
sess.run(train_op, batch)
maybe_log_accuracy(step, batch)
maybe_save_model(step)
save_model()
def maybe_log_accuracy(step, last_training_batch):
if step % 20 == 0:
evaluate(step, last_training_batch, train_writer, "training")
validate_data = {x: mnist.validation.images, y_: mnist.validation.labels}
evaluate(step, validate_data, validate_writer, "validate")
def evaluate(step, data, writer, name):
accuracy_val, summary = sess.run([accuracy, summaries], data)
writer.add_summary(summary, step)
writer.flush()
print("Step %i: %s=%f" % (step, name, accuracy_val))
def maybe_save_model(step):
epoch_step = mnist.train.num_examples // FLAGS.batch_size
if step != 0 and step % epoch_step == 0:
save_model()
def save_model():
print("Saving trained model")
tf.gfile.MakeDirs(FLAGS.rundir + "/model")
tf.train.Saver().save(sess, FLAGS.rundir + "/model/export")
def init_test():
METHOD_NAME()
init_exported_collections()
init_test_writer()
def init_exported_collections():
global x, y_, accuracy
saver = tf.train.import_meta_graph(FLAGS.rundir + "/model/export.meta")
saver.restore(sess, FLAGS.rundir + "/model/export")
x = sess.graph.get_tensor_by_name(tf.get_collection("x")[0])
y_ = sess.graph.get_tensor_by_name(tf.get_collection("y_")[0])
accuracy = sess.graph.get_tensor_by_name(tf.get_collection("accuracy")[0])
def init_test_writer():
global summaries, writer
summaries = tf.summary.merge_all()
writer = tf.summary.FileWriter(FLAGS.rundir)
def test():
data = {x: mnist.test.images, y_: mnist.test.labels}
test_accuracy, summary = sess.run([accuracy, summaries], data)
writer.add_summary(summary)
writer.flush()
print("Test accuracy=%f" % test_accuracy)
if __name__ == "__main__":
init_flags()
init_data()
if FLAGS.just_data:
pass
elif FLAGS.test:
init_test()
test()
else:
init_train()
train() | null |
106 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdts.endpoint import endpoint_data
class ConfigureSubscriptionInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dts', '2020-01-01', 'ConfigureSubscriptionInstance','dts')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_SourceEndpointInstanceID(self): # String
return self.get_query_params().get('SourceEndpoint.InstanceID')
def set_SourceEndpointInstanceID(self, SourceEndpointInstanceID): # String
self.add_query_param('SourceEndpoint.InstanceID', SourceEndpointInstanceID)
def get_SourceEndpointOracleSID(self): # String
return self.get_query_params().get('SourceEndpoint.OracleSID')
def set_SourceEndpointOracleSID(self, SourceEndpointOracleSID): # String
self.add_query_param('SourceEndpoint.OracleSID', SourceEndpointOracleSID)
def get_SourceEndpointIP(self): # String
return self.get_query_params().get('SourceEndpoint.IP')
def set_SourceEndpointIP(self, SourceEndpointIP): # String
self.add_query_param('SourceEndpoint.IP', SourceEndpointIP)
def get_SubscriptionDataTypeDML(self): # Boolean
return self.get_query_params().get('SubscriptionDataType.DML')
def set_SubscriptionDataTypeDML(self, SubscriptionDataTypeDML): # Boolean
self.add_query_param('SubscriptionDataType.DML', SubscriptionDataTypeDML)
def get_SourceEndpointInstanceType(self): # String
return self.get_query_params().get('SourceEndpoint.InstanceType')
def set_SourceEndpointInstanceType(self, SourceEndpointInstanceType): # String
self.add_query_param('SourceEndpoint.InstanceType', SourceEndpointInstanceType)
def get_AccountId(self): # String
return self.get_query_params().get('AccountId')
def set_AccountId(self, AccountId): # String
self.add_query_param('AccountId', AccountId)
def get_SubscriptionObject(self): # String
return self.get_body_params().get('SubscriptionObject')
def set_SubscriptionObject(self, SubscriptionObject): # String
self.add_body_params('SubscriptionObject', SubscriptionObject)
def get_SubscriptionInstanceVSwitchId(self): # String
return self.get_query_params().get('SubscriptionInstance.VSwitchId')
def set_SubscriptionInstanceVSwitchId(self, SubscriptionInstanceVSwitchId): # String
self.add_query_param('SubscriptionInstance.VSwitchId', SubscriptionInstanceVSwitchId)
def get_SourceEndpointUserName(self): # String
return self.get_query_params().get('SourceEndpoint.UserName')
def set_SourceEndpointUserName(self, SourceEndpointUserName): # String
self.add_query_param('SourceEndpoint.UserName', SourceEndpointUserName)
def get_SourceEndpointDatabaseName(self): # String
return self.get_query_params().get('SourceEndpoint.DatabaseName')
def set_SourceEndpointDatabaseName(self, SourceEndpointDatabaseName): # String
self.add_query_param('SourceEndpoint.DatabaseName', SourceEndpointDatabaseName)
def get_SourceEndpointPort(self): # String
return self.get_query_params().get('SourceEndpoint.Port')
def set_SourceEndpointPort(self, SourceEndpointPort): # String
self.add_query_param('SourceEndpoint.Port', SourceEndpointPort)
def get_SourceEndpointOwnerID(self): # String
return self.get_query_params().get('SourceEndpoint.OwnerID')
def set_SourceEndpointOwnerID(self, SourceEndpointOwnerID): # String
self.add_query_param('SourceEndpoint.OwnerID', SourceEndpointOwnerID)
def get_SubscriptionInstanceVPCId(self): # String
return self.get_query_params().get('SubscriptionInstance.VPCId')
def METHOD_NAME(self, SubscriptionInstanceVPCId): # String
self.add_query_param('SubscriptionInstance.VPCId', SubscriptionInstanceVPCId)
def get_SubscriptionInstanceNetworkType(self): # String
return self.get_query_params().get('SubscriptionInstanceNetworkType')
def set_SubscriptionInstanceNetworkType(self, SubscriptionInstanceNetworkType): # String
self.add_query_param('SubscriptionInstanceNetworkType', SubscriptionInstanceNetworkType)
def get_SubscriptionInstanceId(self): # String
return self.get_query_params().get('SubscriptionInstanceId')
def set_SubscriptionInstanceId(self, SubscriptionInstanceId): # String
self.add_query_param('SubscriptionInstanceId', SubscriptionInstanceId)
def get_SourceEndpointRole(self): # String
return self.get_query_params().get('SourceEndpoint.Role')
def set_SourceEndpointRole(self, SourceEndpointRole): # String
self.add_query_param('SourceEndpoint.Role', SourceEndpointRole)
def get_OwnerId(self): # String
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # String
self.add_query_param('OwnerId', OwnerId)
def get_SubscriptionDataTypeDDL(self): # Boolean
return self.get_query_params().get('SubscriptionDataType.DDL')
def set_SubscriptionDataTypeDDL(self, SubscriptionDataTypeDDL): # Boolean
self.add_query_param('SubscriptionDataType.DDL', SubscriptionDataTypeDDL)
def get_SourceEndpointPassword(self): # String
return self.get_query_params().get('SourceEndpoint.Password')
def set_SourceEndpointPassword(self, SourceEndpointPassword): # String
self.add_query_param('SourceEndpoint.Password', SourceEndpointPassword)
def get_SubscriptionInstanceName(self): # String
return self.get_query_params().get('SubscriptionInstanceName')
def set_SubscriptionInstanceName(self, SubscriptionInstanceName): # String
self.add_query_param('SubscriptionInstanceName', SubscriptionInstanceName) | null |
107 | ################################################################################
# Creme is a free/open-source Customer Relationship Management software
# Copyright (C) 2009-2023 Hybird
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
import logging
from django.conf import settings
from django.db import models
from django.db.transaction import atomic
from django.db.utils import IntegrityError
from django.urls import reverse
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _
from django.utils.translation import pgettext_lazy
from creme.creme_core.models import CremeEntity, CremeModel
from creme.creme_core.models.fields import UnsafeHTMLField
from .. import utils
from ..constants import SUBJECT_LENGTH
from .signature import EmailSignature
logger = logging.getLogger(__name__)
ID_LENGTH = 32
class _Email(CremeModel):
class Status(models.IntegerChoices):
SENT = 1, pgettext_lazy('emails', 'Sent'),
NOT_SENT = 2, pgettext_lazy('emails', 'Not sent'),
SENDING_ERROR = 3, _('Sending error'),
SYNCHRONIZED = 4, pgettext_lazy('emails', 'Synchronized'),
reads = models.PositiveIntegerField(
_('Number of reads'), null=True, default=0, editable=False,
)
status = models.PositiveSmallIntegerField(
_('Status'), editable=False,
choices=Status.choices, default=Status.NOT_SENT,
)
sender = models.CharField(_('Sender'), max_length=100)
subject = models.CharField(_('Subject'), max_length=SUBJECT_LENGTH, blank=True)
recipient = models.CharField(_('Recipient'), max_length=100)
body = models.TextField(_('Body'))
sending_date = models.DateTimeField(_('Sending date'), null=True, editable=False)
reception_date = models.DateTimeField(_('Reception date'), null=True, editable=False)
class Meta:
abstract = True
app_label = 'emails'
def __str__(self):
return (
f'Mail<from: {self.sender}> '
f'<to: {self.recipient}> '
f'<sent: {self.sending_date}> '
f'<id: {self.id}>'
)
@property
def sent(self):
return self.status == self.Status.SENT
@property
def synchronised(self):
return self.status == self.Status.SYNCHRONIZED
class EntityEmailSender(utils.EMailSender):
def get_subject(self, mail):
return mail.subject
class AbstractEntityEmail(_Email, CremeEntity):
identifier = models.CharField(
_('Email ID'), unique=True, max_length=ID_LENGTH, editable=False,
default=utils.generate_id, # TODO: lambda for this
)
body_html = UnsafeHTMLField(_('Body (HTML)'))
signature = models.ForeignKey(
EmailSignature, verbose_name=_('Signature'),
blank=True, null=True, on_delete=models.SET_NULL,
) # TODO: merge with body ????
attachments = models.ManyToManyField(
settings.DOCUMENTS_DOCUMENT_MODEL, verbose_name=_('Attachments'), blank=True,
)
creation_label = _('Create an email')
save_label = _('Save the email')
sending_label = _('Send the email')
email_sender_cls = EntityEmailSender
class Meta:
abstract = True
app_label = 'emails'
verbose_name = pgettext_lazy('emails', 'Email')
verbose_name_plural = pgettext_lazy('emails', 'Emails')
ordering = ('-sending_date',)
def genid_n_save(self):
for __ in range(10000): # NB: avoid infinite loop
self.identifier = utils.generate_id()
try:
with atomic():
self.save(force_insert=True)
except IntegrityError: # A mail with this id already exists
logger.debug('Mail id already exists: %s', self.identifier)
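# Reset the PK before retrying so the next save(force_insert=True)
# performs a fresh INSERT (assumed defensive reset).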
self.pk = None
else:
return
def __str__(self):
return gettext('Email <from: {sender}> <to: {to}> <status: {status}>').format(
sender=self.sender,
to=self.recipient,
status=self.get_status_display(),
)
def get_absolute_url(self):
return reverse('emails__view_email', args=(self.pk,))
@staticmethod
def get_clone_absolute_url():
return '' # Cannot be cloned
@staticmethod
def get_lv_absolute_url():
return reverse('emails__list_emails')
# TODO: in a manager ?
@classmethod
def METHOD_NAME(cls, sender, recipient, subject, user, body,
body_html='', signature=None, attachments=None):
email = cls(
sender=sender,
recipient=recipient,
subject=subject,
body=body,
body_html=body_html,
signature=signature,
user=user,
)
email.genid_n_save()
if attachments:
email.attachments.set(attachments)
email.send()
return email
def _pre_save_clone(self, source):
self.genid_n_save()
def restore(self):
CremeEntity.restore(self)
# TODO: in a signal handler instead ?
# (we need a restore signal, or an official "backup" feature -- see HistoryLine)
if self.status in (self.Status.NOT_SENT, self.Status.SENDING_ERROR):
# TODO: regroup the 'refresh' message, to avoid flooding the job manager
from ..creme_jobs import entity_emails_send_type
entity_emails_send_type.refresh_job()
def send(self):
sender = self.email_sender_cls(
body=self.body,
body_html=self.body_html,
signature=self.signature,
attachments=self.attachments.all(),
)
if sender.send(self):
logger.debug('Mail sent to %s', self.recipient)
class EntityEmail(AbstractEntityEmail):
class Meta(AbstractEntityEmail.Meta):
swappable = 'EMAILS_EMAIL_MODEL' | null |
108 | """Tests for anomaly detection with OTX CLI."""
# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import os
import pytest
from otx.cli.registry import Registry
from tests.test_suite.e2e_test_system import e2e_pytest_component
from tests.test_suite.run_test_command import (
nncf_eval_openvino_testing,
nncf_eval_testing,
nncf_export_testing,
nncf_optimize_testing,
nncf_validate_fq_testing,
otx_demo_deployment_testing,
otx_demo_openvino_testing,
otx_demo_testing,
otx_deploy_openvino_testing,
otx_eval_deployment_testing,
otx_eval_openvino_testing,
otx_eval_testing,
otx_export_testing,
otx_train_testing,
ptq_eval_testing,
ptq_optimize_testing,
ptq_validate_fq_testing,
)
args = {
"--train-data-roots": "tests/assets/anomaly/hazelnut/train",
"--val-data-roots": "tests/assets/anomaly/hazelnut/test",
"--test-data-roots": "tests/assets/anomaly/hazelnut/test",
"--input": "tests/assets/anomaly/hazelnut/test/colour",
"train_params": [],
}
otx_dir = os.getcwd()
templates = Registry("src/otx/algorithms").filter(task_type="ANOMALY_DETECTION").templates
templates_ids = [template.model_template_id for template in templates]
class TestToolsAnomalyDetection:
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_train(self, template, tmp_dir_path):
otx_train_testing(template, tmp_dir_path, otx_dir, args, deterministic=True)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_export(self, template, tmp_dir_path):
otx_export_testing(template, tmp_dir_path, check_ir_meta=True)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_eval(self, template, tmp_dir_path):
otx_eval_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_eval_openvino(self, template, tmp_dir_path):
otx_eval_openvino_testing(template, tmp_dir_path, otx_dir, args, threshold=0.2)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_demo(self, template, tmp_dir_path):
otx_demo_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_demo_openvino(self, template, tmp_dir_path):
otx_demo_openvino_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_deploy_openvino(self, template, tmp_dir_path):
otx_deploy_openvino_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_eval_deployment(self, template, tmp_dir_path):
otx_eval_deployment_testing(template, tmp_dir_path, otx_dir, args, threshold=0.0)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def METHOD_NAME(self, template, tmp_dir_path):
otx_demo_deployment_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_nncf_optimize(self, template, tmp_dir_path):
if template.entrypoints.nncf is None:
pytest.skip("nncf entrypoint is none")
nncf_optimize_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_nncf_export(self, template, tmp_dir_path):
if template.entrypoints.nncf is None:
pytest.skip("nncf entrypoint is none")
nncf_export_testing(template, tmp_dir_path)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_nncf_validate_fq(self, template, tmp_dir_path):
if template.entrypoints.nncf is None:
pytest.skip("nncf entrypoint is none")
nncf_validate_fq_testing(template, tmp_dir_path, otx_dir, "anomaly", type(self).__name__)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_nncf_eval(self, template, tmp_dir_path):
if template.entrypoints.nncf is None:
pytest.skip("nncf entrypoint is none")
nncf_eval_testing(template, tmp_dir_path, otx_dir, args, threshold=0.01)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_nncf_eval_openvino(self, template, tmp_dir_path):
if template.entrypoints.nncf is None:
pytest.skip("nncf entrypoint is none")
nncf_eval_openvino_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ptq_optimize(self, template, tmp_dir_path):
ptq_optimize_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ptq_validate_fq(self, template, tmp_dir_path):
ptq_validate_fq_testing(template, tmp_dir_path, otx_dir, "anomaly", type(self).__name__)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ptq_eval(self, template, tmp_dir_path):
ptq_eval_testing(template, tmp_dir_path, otx_dir, args) | null |
109 | from __future__ import annotations
import os
import subprocess
import sys
import time
import fsspec
import pytest
from fsspec.core import open_files
from packaging.version import parse as parse_version
import dask.bag as db
from dask.utils import tmpdir
files = ["a", "b"]
requests = pytest.importorskip("requests")
errs: tuple[type[Exception], ...] = (
requests.exceptions.RequestException,
FileNotFoundError,
)
if parse_version(fsspec.__version__) > parse_version("0.7.4"):
aiohttp = pytest.importorskip("aiohttp")
errs = errs + (aiohttp.client_exceptions.ClientResponseError,)
@pytest.fixture(scope="module")
def dir_server():
with tmpdir() as d:
for fn in files:
with open(os.path.join(d, fn), "wb") as f:
f.write(b"a" * 10000)
cmd = [sys.executable, "-m", "http.server", "8999"]
p = subprocess.Popen(cmd, cwd=d)
timeout = 10
while True:
try:
requests.get("http://localhost:8999")
break
except requests.exceptions.ConnectionError as e:
time.sleep(0.1)
timeout -= 0.1
if timeout < 0:
raise RuntimeError("Server did not appear") from e
yield d
p.terminate()
def test_simple(dir_server):
root = "http://localhost:8999/"
fn = files[0]
f = open_files(root + fn)[0]
with f as f:
data = f.read()
with open(os.path.join(dir_server, fn), "rb") as expected:
assert data == expected.read()
def METHOD_NAME(dir_server):
root = "http://localhost:8999/"
fn = files[0]
f = open_files(root + fn)[0]
with open(os.path.join(dir_server, fn), "rb") as expected:
expected = expected.read()
with f as f:
data = f.read(2)
assert data == expected[:2]
assert f.loc == 2
f.seek(0)
data = f.read(3)
assert data == expected[:3]
f.seek(1, 1)
assert f.loc == 4
def test_fetch_range_with_headers(dir_server):
# https://github.com/dask/dask/issues/4479
root = "http://localhost:8999/"
fn = files[0]
headers = {"Date": "Wed, 21 Oct 2015 07:28:00 GMT"}
f = open_files(root + fn, headers=headers)[0]
with f as f:
data = f.read(length=1) + f.read(length=-1)
with open(os.path.join(dir_server, fn), "rb") as expected:
assert data == expected.read()
@pytest.mark.parametrize("block_size", [None, 99999])
def test_ops(dir_server, block_size):
root = "http://localhost:8999/"
fn = files[0]
f = open_files(root + fn)[0]
with open(os.path.join(dir_server, fn), "rb") as expected:
expected = expected.read()
with f as f:
# these pass because the default block size is large enough for the reads
assert f.read(10) == expected[:10]
f.seek(0)
assert f.read(10) == expected[:10]
assert f.read(10) == expected[10:20]
f.seek(-10, 2)
assert f.read() == expected[-10:]
def test_ops_blocksize(dir_server):
root = "http://localhost:8999/"
fn = files[0]
f = open_files(root + fn, block_size=2)[0]
with open(os.path.join(dir_server, fn), "rb") as expected:
expected = expected.read()
with f as f:
# it's OK to read the whole file
assert f.read() == expected
# and now the file magically has a size
assert f.size == len(expected)
# note that if we reuse f from above, because it is tokenized, we get
# the same open file - where is this cached?
fn = files[1]
f = open_files(root + fn, block_size=2)[0]
with f as f:
if parse_version(fsspec.__version__) < parse_version("2021.11.1"):
# fails because we want only 12 bytes
with pytest.raises(ValueError):
assert f.read(10) == expected[:10]
else:
# fixed in https://github.com/fsspec/filesystem_spec/pull/830
assert f.read(10) == expected[:10]
def test_errors(dir_server):
f = open_files("http://localhost:8999/doesnotexist")[0]
with pytest.raises(errs):
with f as f:
f.read()
f = open_files("http://nohost/")[0]
expected = FileNotFoundError
with pytest.raises(expected):
with f as f:
f.read()
root = "http://localhost:8999/"
fn = files[0]
f = open_files(root + fn, mode="wb")[0]
with pytest.raises(NotImplementedError):
with f:
pass
f = open_files(root + fn)[0]
with f as f:
with pytest.raises(ValueError):
f.seek(-1)
def test_files(dir_server):
root = "http://localhost:8999/"
fs = open_files([root + f for f in files])
for f, f2 in zip(fs, files):
with f as f:
with open(os.path.join(dir_server, f2), "rb") as expected:
assert f.read() == expected.read()
def test_open_glob(dir_server):
root = "http://localhost:8999/"
fs = open_files(root + "/*")
assert fs[0].path == "http://localhost:8999/a"
assert fs[1].path == "http://localhost:8999/b"
@pytest.mark.network
@pytest.mark.parametrize("engine", ["pyarrow", "fastparquet"])
def test_parquet(engine):
pytest.importorskip("requests", minversion="2.21.0")
dd = pytest.importorskip("dask.dataframe")
pytest.importorskip(engine)
df = dd.read_parquet(
[
"https://github.com/Parquet/parquet-compatibility/raw/"
"master/parquet-testdata/impala/1.1.1-NONE/"
"nation.impala.parquet"
],
engine=engine,
).compute()
assert df.n_nationkey.tolist() == list(range(25))
assert df.columns.tolist() == ["n_nationkey", "n_name", "n_regionkey", "n_comment"]
@pytest.mark.flaky(
reruns=10, reruns_delay=5, reason="https://github.com/dask/dask/issues/3696"
)
@pytest.mark.network
def test_bag():
# This test pulls from different hosts
urls = [
"https://raw.githubusercontent.com/weierophinney/pastebin/"
"master/public/js-src/dojox/data/tests/stores/patterns.csv",
"https://en.wikipedia.org",
]
b = db.read_text(urls)
assert b.npartitions == 2
b.compute()
@pytest.mark.network
def test_read_csv():
dd = pytest.importorskip("dask.dataframe")
url = (
"https://raw.githubusercontent.com/weierophinney/pastebin/"
"master/public/js-src/dojox/data/tests/stores/patterns.csv"
)
b = dd.read_csv(url)
b.compute() | null |
110 | # Copyright 2022 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import webdataset as wds
from neu.comm import CommunicatorWrapper
from nnabla.logger import logger
from nnabla.utils.data_iterator import data_iterator
from nnabla.utils.data_source import DataSource
from nnabla_diffusion.config import DatasetConfig
from .common import resize_center_crop, resize_random_crop
class WebDatasetDataSourceLocal(DataSource):
'''
DataSource for the webdataset format.
Assumes all tar files are already downloaded to the same server and accessible as local files.
'''
def __init__(self,
tar_files,
conf: DatasetConfig,
rng=None):
super(WebDatasetDataSourceLocal, self).__init__(
shuffle=conf.shuffle_dataset, rng=rng)
shuffle_size = 1
if conf.shuffle_dataset:
shuffle_size = 10000
self.dataset = iter(wds.DataPipeline(
wds.ResampledShards(tar_files),
wds.tarfile_to_samples(),
wds.shuffle(shuffle_size),
wds.decode("rgb"),
wds.to_tuple("jpg", "json", "npz")
))
self._size = -1
self._variables = ("image", "caption", "t5_emb")
self.im_size = conf.image_size
self.fix_aspect_ratio = conf.fix_aspect_ratio
self.random_crop = conf.random_crop
self.channel_last = conf.channel_last
self.max_text_length = conf.max_text_length
def _get_data(self, position):
# Note that position is not used in this data source
data = next(self.dataset)
image = data[0] # numpy whose value is between [0, 1], channel last
cap = data[1]["caption"] # str
emb = data[2]["t5_emb"] # (length, 1024)
# rescale pixel intensity to [-1, 1]
image = 2 * image - 1
# resize image to align config
if self.random_crop:
image = resize_random_crop(
image, size=self.im_size[0], channel_first=False)
else:
image = resize_center_crop(
image, size=self.im_size[0], channel_first=False)
# padding text sequence
# Truncate emb if it's longer than max length.
emb = emb[:self.max_text_length]
# padding to max length
emb = np.pad(emb, ((0, self.max_text_length - emb.shape[0]), (0, 0)))
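# e.g. (illustrative) an embedding of shape (77, 1024) with
# max_text_length=256 becomes (256, 1024), zero-padded at the end.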
if not self.channel_last:
# channel last -> first
image = np.transpose(image, (2, 0, 1))
emb = np.transpose(emb, (1, 0))
return (image, cap, emb)
TAR_FILES = {
"400m": "{00000..41407}.tar",
}
def METHOD_NAME(conf: DatasetConfig, comm: CommunicatorWrapper):
# set worker info to avoid loading pytorch in webdataset
os.environ["RANK"] = str(comm.rank)
os.environ["WORLD_SIZE"] = str(comm.n_procs)
os.environ["WORKER"] = str(comm.rank)
os.environ["NUM_WORKERS"] = str(comm.n_procs)
# create datasource
# tar_files = os.path.join(conf.dataset_root_dir, TAR_FILES["400m"])
tar_list_path = os.path.join(conf.data_dir, "tarlist.txt")
with open(tar_list_path, "r") as f:
tar_files = [x.strip() for x in f.readlines()]
logger.info(
f"[LAION400mDataIterator] {len(tar_files)} tarfiles are found.")
ds = WebDatasetDataSourceLocal(tar_files, conf)
return data_iterator(ds,
conf.batch_size,
with_memory_cache=False,
use_thread=True,
with_file_cache=False) | null |
111 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateHybridMonitorTaskRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cms', '2019-01-01', 'CreateHybridMonitorTask','cms')
self.set_method('POST')
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_TaskName(self): # String
return self.get_query_params().get('TaskName')
def set_TaskName(self, TaskName): # String
self.add_query_param('TaskName', TaskName)
def get_CollectInterval(self): # String
return self.get_query_params().get('CollectInterval')
def set_CollectInterval(self, CollectInterval): # String
self.add_query_param('CollectInterval', CollectInterval)
def get_TargetUserId(self): # String
return self.get_query_params().get('TargetUserId')
def set_TargetUserId(self, TargetUserId): # String
self.add_query_param('TargetUserId', TargetUserId)
def get_CollectTargetType(self): # String
return self.get_query_params().get('CollectTargetType')
def set_CollectTargetType(self, CollectTargetType): # String
self.add_query_param('CollectTargetType', CollectTargetType)
def get_AttachLabelss(self): # RepeatList
return self.get_query_params().get('AttachLabels')
def set_AttachLabelss(self, AttachLabels): # RepeatList
for depth1 in range(len(AttachLabels)):
if AttachLabels[depth1].get('Name') is not None:
self.add_query_param('AttachLabels.' + str(depth1 + 1) + '.Name', AttachLabels[depth1].get('Name'))
if AttachLabels[depth1].get('Value') is not None:
self.add_query_param('AttachLabels.' + str(depth1 + 1) + '.Value', AttachLabels[depth1].get('Value'))
def get_TaskType(self): # String
return self.get_query_params().get('TaskType')
def set_TaskType(self, TaskType): # String
self.add_query_param('TaskType', TaskType)
def get_GroupId(self): # String
return self.get_query_params().get('GroupId')
def set_GroupId(self, GroupId): # String
self.add_query_param('GroupId', GroupId)
def get_TargetUserIdList(self): # String
return self.get_query_params().get('TargetUserIdList')
def set_TargetUserIdList(self, TargetUserIdList): # String
self.add_query_param('TargetUserIdList', TargetUserIdList)
def get_YARMConfig(self): # String
return self.get_query_params().get('YARMConfig')
def set_YARMConfig(self, YARMConfig): # String
self.add_query_param('YARMConfig', YARMConfig)
def METHOD_NAME(self): # String
return self.get_query_params().get('Namespace')
def set_Namespace(self, Namespace): # String
self.add_query_param('Namespace', Namespace)
def get_SLSProcessConfig(self): # Struct
return self.get_query_params().get('SLSProcessConfig')
def set_SLSProcessConfig(self, SLSProcessConfig): # Struct
if SLSProcessConfig.get('Filter') is not None:
if SLSProcessConfig.get('Filter').get('Filters') is not None:
for index1, value1 in enumerate(SLSProcessConfig.get('Filter').get('Filters')):
if value1.get('SLSKeyName') is not None:
self.add_query_param('SLSProcessConfig.Filter.Filters.' + str(index1 + 1) + '.SLSKeyName', value1.get('SLSKeyName'))
if value1.get('Value') is not None:
self.add_query_param('SLSProcessConfig.Filter.Filters.' + str(index1 + 1) + '.Value', value1.get('Value'))
if value1.get('Operator') is not None:
self.add_query_param('SLSProcessConfig.Filter.Filters.' + str(index1 + 1) + '.Operator', value1.get('Operator'))
if SLSProcessConfig.get('Filter').get('Relation') is not None:
self.add_query_param('SLSProcessConfig.Filter.Relation', SLSProcessConfig.get('Filter').get('Relation'))
if SLSProcessConfig.get('Express') is not None:
for index1, value1 in enumerate(SLSProcessConfig.get('Express')):
if value1.get('Alias') is not None:
self.add_query_param('SLSProcessConfig.Express.' + str(index1 + 1) + '.Alias', value1.get('Alias'))
if value1.get('Express') is not None:
self.add_query_param('SLSProcessConfig.Express.' + str(index1 + 1) + '.Express', value1.get('Express'))
if SLSProcessConfig.get('GroupBy') is not None:
for index1, value1 in enumerate(SLSProcessConfig.get('GroupBy')):
if value1.get('SLSKeyName') is not None:
self.add_query_param('SLSProcessConfig.GroupBy.' + str(index1 + 1) + '.SLSKeyName', value1.get('SLSKeyName'))
if value1.get('Alias') is not None:
self.add_query_param('SLSProcessConfig.GroupBy.' + str(index1 + 1) + '.Alias', value1.get('Alias'))
if SLSProcessConfig.get('Statistics') is not None:
for index1, value1 in enumerate(SLSProcessConfig.get('Statistics')):
if value1.get('SLSKeyName') is not None:
self.add_query_param('SLSProcessConfig.Statistics.' + str(index1 + 1) + '.SLSKeyName', value1.get('SLSKeyName'))
if value1.get('Function') is not None:
self.add_query_param('SLSProcessConfig.Statistics.' + str(index1 + 1) + '.Function', value1.get('Function'))
if value1.get('Alias') is not None:
self.add_query_param('SLSProcessConfig.Statistics.' + str(index1 + 1) + '.Alias', value1.get('Alias'))
if value1.get('Parameter2') is not None:
self.add_query_param('SLSProcessConfig.Statistics.' + str(index1 + 1) + '.Parameter2', value1.get('Parameter2'))
if value1.get('Parameter1') is not None:
self.add_query_param('SLSProcessConfig.Statistics.' + str(index1 + 1) + '.Parameter1', value1.get('Parameter1')) | null |
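
The setters above flatten repeated and structured parameters into the dotted, 1-indexed query keys the CMS API expects. A minimal illustrative sketch of that expansion (all values are placeholders):

    request = CreateHybridMonitorTaskRequest()
    request.set_TaskName('demo-task')
    request.set_AttachLabelss([{'Name': 'env', 'Value': 'prod'}])
    # -> adds 'AttachLabels.1.Name' = 'env' and 'AttachLabels.1.Value' = 'prod'
    request.set_SLSProcessConfig({
        'Statistics': [{'SLSKeyName': 'latency', 'Function': 'avg', 'Alias': 'lat'}],
    })
    # -> adds 'SLSProcessConfig.Statistics.1.SLSKeyName' = 'latency', etc.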
112 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvs.endpoint import endpoint_data
class ModifyTemplateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'vs', '2018-12-12', 'ModifyTemplate')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_HlsTs(self):
return self.get_query_params().get('HlsTs')
def set_HlsTs(self,HlsTs):
self.add_query_param('HlsTs',HlsTs)
def get_OssEndpoint(self):
return self.get_query_params().get('OssEndpoint')
def set_OssEndpoint(self,OssEndpoint):
self.add_query_param('OssEndpoint',OssEndpoint)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_OssFilePrefix(self):
return self.get_query_params().get('OssFilePrefix')
def METHOD_NAME(self,OssFilePrefix):
self.add_query_param('OssFilePrefix',OssFilePrefix)
def get_JpgOverwrite(self):
return self.get_query_params().get('JpgOverwrite')
def set_JpgOverwrite(self,JpgOverwrite):
self.add_query_param('JpgOverwrite',JpgOverwrite)
def get_StartTime(self):
return self.get_query_params().get('StartTime')
def set_StartTime(self,StartTime):
self.add_query_param('StartTime',StartTime)
def get_JpgOnDemand(self):
return self.get_query_params().get('JpgOnDemand')
def set_JpgOnDemand(self,JpgOnDemand):
self.add_query_param('JpgOnDemand',JpgOnDemand)
def get_Id(self):
return self.get_query_params().get('Id')
def set_Id(self,Id):
self.add_query_param('Id',Id)
def get_Retention(self):
return self.get_query_params().get('Retention')
def set_Retention(self,Retention):
self.add_query_param('Retention',Retention)
def get_HlsM3u8(self):
return self.get_query_params().get('HlsM3u8')
def set_HlsM3u8(self,HlsM3u8):
self.add_query_param('HlsM3u8',HlsM3u8)
def get_OssBucket(self):
return self.get_query_params().get('OssBucket')
def set_OssBucket(self,OssBucket):
self.add_query_param('OssBucket',OssBucket)
def get_TransConfigsJSON(self):
return self.get_query_params().get('TransConfigsJSON')
def set_TransConfigsJSON(self,TransConfigsJSON):
self.add_query_param('TransConfigsJSON',TransConfigsJSON)
def get_EndTime(self):
return self.get_query_params().get('EndTime')
def set_EndTime(self,EndTime):
self.add_query_param('EndTime',EndTime)
def get_Trigger(self):
return self.get_query_params().get('Trigger')
def set_Trigger(self,Trigger):
self.add_query_param('Trigger',Trigger)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_JpgSequence(self):
return self.get_query_params().get('JpgSequence')
def set_JpgSequence(self,JpgSequence):
self.add_query_param('JpgSequence',JpgSequence)
def get_Mp4(self):
return self.get_query_params().get('Mp4')
def set_Mp4(self,Mp4):
self.add_query_param('Mp4',Mp4)
def get_Flv(self):
return self.get_query_params().get('Flv')
def set_Flv(self,Flv):
self.add_query_param('Flv',Flv)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_Callback(self):
return self.get_query_params().get('Callback')
def set_Callback(self,Callback):
self.add_query_param('Callback',Callback)
def get_Interval(self):
return self.get_query_params().get('Interval')
def set_Interval(self,Interval):
self.add_query_param('Interval',Interval)
def get_FileFormat(self):
return self.get_query_params().get('FileFormat')
def set_FileFormat(self,FileFormat):
self.add_query_param('FileFormat',FileFormat)
def get_Region(self):
return self.get_query_params().get('Region')
def set_Region(self,Region):
		self.add_query_param('Region',Region) | null
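
Requests like the one above are not sent by themselves; in the Alibaba Cloud Python SDK they are handed to an AcsClient. A minimal sketch, assuming valid credentials (all values below are placeholders):

    from aliyunsdkcore.client import AcsClient

    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = ModifyTemplateRequest()
    request.set_Id('<template-id>')
    request.set_Name('updated-template')
    response = client.do_action_with_exception(request)  # raw JSON bytes
    print(response)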
113 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkretailcloud.endpoint import endpoint_data
class CreateAppRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'retailcloud', '2018-03-13', 'CreateApp')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_BizTitle(self): # String
return self.get_body_params().get('BizTitle')
def set_BizTitle(self, BizTitle): # String
self.add_body_params('BizTitle', BizTitle)
def get_OperatingSystem(self): # String
return self.get_body_params().get('OperatingSystem')
def set_OperatingSystem(self, OperatingSystem): # String
self.add_body_params('OperatingSystem', OperatingSystem)
def get_Description(self): # String
return self.get_body_params().get('Description')
def set_Description(self, Description): # String
self.add_body_params('Description', Description)
def get_Language(self): # String
return self.get_body_params().get('Language')
def set_Language(self, Language): # String
self.add_body_params('Language', Language)
def get_Title(self): # String
return self.get_body_params().get('Title')
def METHOD_NAME(self, Title): # String
self.add_body_params('Title', Title)
def get_GroupName(self): # String
return self.get_body_params().get('GroupName')
def set_GroupName(self, GroupName): # String
self.add_body_params('GroupName', GroupName)
def get_MiddleWareIdLists(self): # RepeatList
return self.get_body_params().get('MiddleWareIdList')
def set_MiddleWareIdLists(self, MiddleWareIdList): # RepeatList
for depth1 in range(len(MiddleWareIdList)):
self.add_body_params('MiddleWareIdList.' + str(depth1 + 1), MiddleWareIdList[depth1])
def get_StateType(self): # Integer
return self.get_body_params().get('StateType')
def set_StateType(self, StateType): # Integer
self.add_body_params('StateType', StateType)
def get_ServiceType(self): # String
return self.get_body_params().get('ServiceType')
def set_ServiceType(self, ServiceType): # String
self.add_body_params('ServiceType', ServiceType)
def get_UserRoless(self): # RepeatList
return self.get_body_params().get('UserRoles')
def set_UserRoless(self, UserRoles): # RepeatList
for depth1 in range(len(UserRoles)):
if UserRoles[depth1].get('RoleName') is not None:
self.add_body_params('UserRoles.' + str(depth1 + 1) + '.RoleName', UserRoles[depth1].get('RoleName'))
if UserRoles[depth1].get('UserType') is not None:
self.add_body_params('UserRoles.' + str(depth1 + 1) + '.UserType', UserRoles[depth1].get('UserType'))
if UserRoles[depth1].get('UserId') is not None:
self.add_body_params('UserRoles.' + str(depth1 + 1) + '.UserId', UserRoles[depth1].get('UserId'))
def get_BizCode(self): # String
return self.get_body_params().get('BizCode')
def set_BizCode(self, BizCode): # String
self.add_body_params('BizCode', BizCode)
def get_Namespace(self): # String
return self.get_body_params().get('Namespace')
def set_Namespace(self, Namespace): # String
self.add_body_params('Namespace', Namespace) | null |
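
Unlike the query-parameter requests above, CreateAppRequest serializes its fields as POST body parameters; repeated scalars and repeated structs flatten the same way. A short illustrative sketch (values are placeholders):

    request = CreateAppRequest()
    request.set_BizTitle('Demo App')
    request.set_Language('JAVA')
    request.set_MiddleWareIdLists(['mw-1', 'mw-2'])
    # -> body params 'MiddleWareIdList.1' = 'mw-1', 'MiddleWareIdList.2' = 'mw-2'
    request.set_UserRoless([{'RoleName': 'admin', 'UserType': 'aliyun', 'UserId': '123'}])
    # -> 'UserRoles.1.RoleName', 'UserRoles.1.UserType', 'UserRoles.1.UserId'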
114 | # coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Version utils."""
from __future__ import annotations
import enum
import re
from typing import List, Tuple, Union
from etils import epath
_VERSION_TMPL = r"^(?P<major>{v})" r"\.(?P<minor>{v})" r"\.(?P<patch>{v})$"
_VERSION_WILDCARD_REG = re.compile(_VERSION_TMPL.format(v=r"\d+|\*"))
_VERSION_RESOLVED_REG = re.compile(_VERSION_TMPL.format(v=r"\d+"))
class Experiment(enum.Enum):
"""Experiments which can be enabled/disabled on a per version basis.
Experiments are designed to gradually apply changes to datasets while
maintaining backward compatibility with previous versions. All experiments
should eventually be deleted, once used by all versions of all datasets.
Eg:
class Experiment(enum.Enum):
EXP_A = enum.auto() # Short description of experiment.
class MyBuilder(...):
VERSION = tfds.core.Version('1.2.3', experiments={
tfds.core.Experiment.EXP_A: True,
})
"""
# A Dummy experiment, which should NOT be used, except for testing.
DUMMY = 1
class Version:
"""Dataset version MAJOR.MINOR.PATCH."""
_DEFAULT_EXPERIMENTS = {
Experiment.DUMMY: False,
}
def __init__(
self,
version: Union[Version, str],
experiments=None,
tfds_version_to_prepare=None,
):
"""Version init.
Args:
version: string. Eg: "1.2.3".
experiments: dict of experiments. See Experiment.
tfds_version_to_prepare: string, defaults to None. If set, indicates that
current version of TFDS cannot be used to `download_and_prepare` the
dataset, but that TFDS at version {tfds_version_to_prepare} should be
used instead.
"""
if isinstance(version, Version):
version_str = str(version)
experiments = experiments or version._experiments
tfds_version_to_prepare = (
tfds_version_to_prepare or version.tfds_version_to_prepare
)
else:
version_str = version
self._experiments = self._DEFAULT_EXPERIMENTS.copy()
self.tfds_version_to_prepare = tfds_version_to_prepare
if experiments:
if isinstance(experiments, str):
raise ValueError(
f"Invalid Version('{version}', '{experiments}'). Description is "
"deprecated. RELEASE_NOTES should be used instead."
)
self._experiments.update(experiments)
self.major, self.minor, self.patch = _str_to_version(version_str)
def implements(self, experiment):
"""Returns True if version implements given experiment."""
return self._experiments[experiment]
def __str__(self):
return "{}.{}.{}".format(*self.tuple)
def __repr__(self) -> str:
return f"{type(self).__name__}('{str(self)}')"
@property
def tuple(self):
return self.major, self.minor, self.patch
def METHOD_NAME(self, other):
if isinstance(other, str):
return Version(other)
elif isinstance(other, Version):
return other
raise AssertionError(
"{} (type {}) cannot be compared to version.".format(other, type(other))
)
def __eq__(self, other):
other = self.METHOD_NAME(other)
return self.tuple == other.tuple
def __ne__(self, other):
other = self.METHOD_NAME(other)
return self.tuple != other.tuple
def __lt__(self, other):
other = self.METHOD_NAME(other)
return self.tuple < other.tuple
def __le__(self, other):
other = self.METHOD_NAME(other)
return self.tuple <= other.tuple
def __gt__(self, other):
other = self.METHOD_NAME(other)
return self.tuple > other.tuple
def __ge__(self, other):
other = self.METHOD_NAME(other)
return self.tuple >= other.tuple
def __hash__(self) -> int:
return hash(self.tuple)
def match(self, other_version) -> bool:
"""Returns True if other_version matches.
Args:
other_version: string, of the form "x[.y[.x]]" where {x,y,z} can be a
number or a wildcard.
"""
major, minor, patch = _str_to_version(other_version, allow_wildcard=True)
return (
major in [self.major, "*"]
and minor in [self.minor, "*"]
and patch in [self.patch, "*"]
)
@classmethod
def is_valid(cls, version: str) -> bool:
"""Returns True if the version can be parsed."""
try:
return cls(version) and True
except ValueError: # Invalid version (ex: incomplete data dir)
return False
def _str_to_version(
version_str: str, allow_wildcard=False
) -> Tuple[Union[int, str], Union[int, str], Union[int, str]]:
"""Return the tuple (major, minor, patch) version extracted from the str."""
if not isinstance(version_str, str):
raise TypeError(
"Can only convert strings to versions. "
f"Got: {type(version_str)} with value {version_str}."
)
reg = _VERSION_WILDCARD_REG if allow_wildcard else _VERSION_RESOLVED_REG
res = reg.match(version_str)
if not res:
msg = "Invalid version '{}'. Format should be x.y.z".format(version_str)
if allow_wildcard:
msg += " with {x,y,z} being digits or wildcard."
else:
msg += " with {x,y,z} being digits."
raise ValueError(msg)
return tuple(
v if v == "*" else int(v) # pylint:disable=g-complex-comprehension
for v in [res.group("major"), res.group("minor"), res.group("patch")]
)
def list_all_versions(root_dir: epath.PathLike) -> List[Version]:
"""Lists all dataset versions present on disk, sorted."""
root_dir = epath.Path(root_dir)
versions = []
try:
for path in root_dir.iterdir():
if Version.is_valid(path.name) and path.is_dir():
versions.append(Version(path.name))
except OSError:
return versions
return sorted(versions) | null |
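
A short illustrative session with the Version class above (behavior follows directly from the definitions):

    v = Version('1.2.3')
    assert v == '1.2.3'                 # comparisons accept strings
    assert v < Version('1.10.0')        # numeric, not lexicographic: 10 > 2
    assert v.match('1.*.*')             # per-component wildcard matching
    assert str(v) == '1.2.3' and v.tuple == (1, 2, 3)
    assert not Version.is_valid('1.2')  # format must be x.y.z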
115 | #!/usr/bin/env python3
# SPDX-FileCopyrightText: 2009 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
"""
Project:
glideinWMS
Description:
unit tests for glideinwms/lib/condorExe.py
Author:
tiradani <tiradani>
"""
import os
import sys
import unittest
import xmlrunner
from glideinwms.lib import condorExe
from glideinwms.lib.condorExe import exe_cmd, exe_cmd_sbin, ExeError, iexe_cmd
class TestCondorExe(unittest.TestCase):
def setUp(self):
# set the paths to the worker script directory for the purposes of our
# unittests
condorExe.condor_bin_path = os.path.join(sys.path[0], "worker_scripts")
condorExe.condor_sbin_path = condorExe.condor_bin_path
self.normal_exit_scripts = ["write_exit_0.sh", "write_exit_0.py"]
self.abnormal_exit_scripts = ["write_exit_1.sh", "write_exit_1.py"]
# exe_cmd and exe_cmd_sbin expect args but the worker scripts don't
# nor do they care, so just add some dummy args to complete the calls
self.dummy_args = "blah"
def test_iexe_cmd(self):
"""
Test the iexe_cmd function for errors. There are two sets of worker
functions that will be executed. The first set writes 20k lines to
stdout and exits normally (exit code: 0). The second also writes 20k
lines to stdout, but these exit abnormally (exit code: 1). Both sets
of scripts consist of one written in python and one written in shell
script (bash).
The original code for iexe_cmd would block if the buffer was filled and
EOF wasn't in the buffer. The code was re-written to handle that use
case, but still blocked because the write side could still fill the
buffer without appending EOF. There are two solutions. One, give the
buffer read command a ridiculously small buffer size (but even that
        isn't a guarantee since the read side doesn't know what the buffer size
should be), or two, make the read buffers non-blocking.
Option two was selected. This unittest tests both the blocking
condition and error handling in the function.
"""
# Execution should proceed normally and exit with no exceptions.
try:
for script in self.normal_exit_scripts:
cmd = os.path.join(condorExe.condor_bin_path, script)
output = iexe_cmd(cmd)
except Exception as e:
self.fail("Exception Occurred: %s" % str(e))
# Execution should exit with an exception. If no exception, then fail
for script in self.abnormal_exit_scripts:
cmd = os.path.join(condorExe.condor_bin_path, script)
self.assertRaises(ExeError, iexe_cmd, cmd)
def test_exe_cmd(self):
"""
exe_cmd is a wrapper for iexe_cmd. See test_iexe_cmd docstring for
full details.
"""
# Execution should proceed normally and exit with no exceptions.
try:
for script in self.normal_exit_scripts:
output = exe_cmd(script, self.dummy_args)
except Exception as e:
self.fail("Exception Occurred: %s" % str(e))
# Execution should exit with an exception. If no exception, then fail
for script in self.abnormal_exit_scripts:
self.assertRaises(ExeError, exe_cmd, script, self.dummy_args)
def METHOD_NAME(self):
"""
exe_cmd_sbin is a wrapper for iexe_cmd. See test_iexe_cmd docstring
for full details.
"""
# Execution should proceed normally and exit with no exceptions.
try:
for script in self.normal_exit_scripts:
output = exe_cmd_sbin(script, self.dummy_args)
except Exception as e:
self.fail("Exception Occurred: %s" % str(e))
# Execution should exit with an exception. If no exception, then fail
for script in self.abnormal_exit_scripts:
self.assertRaises(ExeError, exe_cmd_sbin, script, self.dummy_args)
if __name__ == "__main__":
unittest.main(testRunner=xmlrunner.XMLTestRunner(output="unittests-reports")) | null |
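
The test_iexe_cmd docstring above describes the fix as making the read side of the pipe non-blocking. A generic, self-contained sketch of that technique with fcntl and select; this is an illustration, not the actual glideinwms implementation:

    import fcntl
    import os
    import select
    import subprocess

    def read_nonblocking(cmd):
        """Run cmd and drain stdout without blocking on a full pipe buffer."""
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
        fd = proc.stdout.fileno()
        # Switch the read end of the pipe to non-blocking mode.
        fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        chunks = []
        while True:
            ready, _, _ = select.select([fd], [], [], 1.0)
            if ready:
                data = os.read(fd, 4096)   # returns b'' at EOF
                if not data:
                    break
                chunks.append(data)
            elif proc.poll() is not None:  # process exited and pipe is drained
                break
        return b''.join(chunks), proc.wait()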
116 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
from typing import Iterator
import torch
from torch import Tensor, autograd, nn
from torch.nn.modules.loss import _Loss
from torch.optim.optimizer import Optimizer
from archai.common import ml_utils
from archai.common.config import Config
from archai.common.utils import zip_eq
from archai.supergraph.nas.model import Model
def _flatten_concate(xs):
"""
flatten all tensor from [d1,d2,...dn] to [d]
and then concat all [d_1] to [d_1+d_2+d_3+...]
:param xs:
:return:
"""
return torch.cat([x.view(-1) for x in xs])
def METHOD_NAME(model:Model)->Iterator[nn.Parameter]:
return model.all_owned().param_by_kind('alphas')
def _get_loss(model:Model, lossfn, x, y):
logits, *_ = model(x) # might also return aux tower logits
return lossfn(logits, y)
class BilevelOptimizer:
def __init__(self, conf_alpha_optim:Config, w_momentum: float, w_decay: float,
model: Model, lossfn: _Loss) -> None:
self._w_momentum = w_momentum # momentum for w
self._w_weight_decay = w_decay # weight decay for w
self._lossfn = lossfn
self._model = model # main model with respect to w and alpha
self._alphas = list(METHOD_NAME(self._model))
# this is the optimizer to optimize alphas parameter
self._alpha_optim = ml_utils.create_optimizer(conf_alpha_optim, self._alphas)
def state_dict(self)->dict:
return {
'alpha_optim': self._alpha_optim.state_dict()
}
def load_state_dict(self, state_dict)->None:
self._alpha_optim.load_state_dict(state_dict['alpha_optim'])
def _unrolled_model(self, x, y, lr: float, main_optim: Optimizer)->Model:
# TODO: should this loss be stored for later use?
loss = _get_loss(self._model, self._lossfn, x, y)
params = _flatten_concate(self._model.parameters()).detach()
try:
moment = _flatten_concate(main_optim.state[v]['momentum_buffer'] for v in self._model.parameters())
moment.mul_(self._w_momentum)
        except KeyError:  # no momentum buffer yet (e.g. before the first step)
moment = torch.zeros_like(params)
# flatten all gradients
grads = _flatten_concate(autograd.grad(loss, self._model.parameters())).data
# indeed, here we implement a simple SGD with momentum and weight decay
# theta = theta - eta * (moment + weight decay + dtheta)
        params = params.sub(moment + grads + self._w_weight_decay*params, alpha=lr)
# construct a new model
return self._params2model(params)
def _params2model(self, params)->Model:
"""
construct a new model with initialized weight from params
        it uses .state_dict() and load_state_dict() instead of
.parameters() + fill_()
:params: flatten weights, need to reshape to original shape
:return:
"""
params_d, offset = {}, 0
for k, v in self._model.named_parameters():
v_length = v.numel()
# restore params[] value to original shape
params_d[k] = params[offset: offset + v_length].view(v.size())
offset += v_length
assert offset == len(params)
model_new = copy.deepcopy(self._model)
model_dict = self._model.state_dict()
model_dict.update(params_d)
model_new.load_state_dict(model_dict)
return model_new.cuda()
def step(self, x_train: Tensor, y_train: Tensor, x_valid: Tensor, y_valid: Tensor,
main_optim: Optimizer) -> None:
        # TODO: unlike darts paper, we get lr from optimizer instead of scheduler
lr = main_optim.param_groups[0]['lr']
self._alpha_optim.zero_grad()
# compute the gradient and write it into tensor.grad
# instead of generated by loss.backward()
self._backward_bilevel(x_train, y_train, x_valid, y_valid,
lr, main_optim)
# at this point we should have model with updated gradients for w and alpha
self._alpha_optim.step()
def _backward_bilevel(self, x_train, y_train, x_valid, y_valid, lr, main_optim):
""" Compute unrolled loss and backward its gradients """
# update vmodel with w', but leave alphas as-is
# w' = w - lr * grad
unrolled_model = self._unrolled_model(x_train, y_train, lr, main_optim)
# compute loss on validation set for model with w'
# wrt alphas. The autograd.grad is used instead of backward()
# to avoid having to loop through params
vloss = _get_loss(unrolled_model, self._lossfn, x_valid, y_valid)
vloss.backward()
dalpha = [v.grad for v in METHOD_NAME(unrolled_model)]
dparams = [v.grad.data for v in unrolled_model.parameters()]
hessian = self._hessian_vector_product(dparams, x_train, y_train)
# dalpha we have is from the unrolled model so we need to
# transfer those grades back to our main model
# update final gradient = dalpha - xi*hessian
# TODO: currently alphas lr is same as w lr
with torch.no_grad():
for alpha, da, h in zip_eq(self._alphas, dalpha, hessian):
alpha.grad = da - lr*h
# now that model has both w and alpha grads,
# we can run main_optim.step() to update the param values
def _hessian_vector_product(self, dw, x, y, epsilon_unit=1e-2):
"""
Implements equation 8
dw = dw` {L_val(w`, alpha)}
w+ = w + eps * dw
w- = w - eps * dw
hessian = (dalpha {L_trn(w+, alpha)} -dalpha {L_trn(w-, alpha)})/(2*eps)
eps = 0.01 / ||dw||
"""
"""scale epsilon with grad magnitude. The dw
is a multiplier on RHS of eq 8. So this scalling is essential
in making sure that finite differences approximation is not way off
Below, we flatten each w, concate all and then take norm"""
# TODO: is cat along dim 0 correct?
dw_norm = torch.cat([w.view(-1) for w in dw]).norm()
epsilon = epsilon_unit / dw_norm
# w+ = w + epsilon * grad(w')
with torch.no_grad():
for p, v in zip_eq(self._model.parameters(), dw):
p += epsilon * v
# Now that we have model with w+, we need to compute grads wrt alphas
# This loss needs to be on train set, not validation set
loss = _get_loss(self._model, self._lossfn, x, y)
dalpha_plus = autograd.grad(
loss, self._alphas) # dalpha{L_trn(w+)}
# get model with w- and then compute grads wrt alphas
# w- = w - eps*dw`
with torch.no_grad():
for p, v in zip_eq(self._model.parameters(), dw):
                # we had already added dw above so subtracting twice gives w-
p -= 2. * epsilon * v
# similarly get dalpha_minus
loss = _get_loss(self._model, self._lossfn, x, y)
dalpha_minus = autograd.grad(loss, self._alphas)
# reset back params to original values by adding dw
with torch.no_grad():
for p, v in zip_eq(self._model.parameters(), dw):
p += epsilon * v
# apply eq 8, final difference to compute hessian
h = [(p - m) / (2. * epsilon)
for p, m in zip_eq(dalpha_plus, dalpha_minus)]
return h | null |
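
A toy, self-contained check of the finite-difference trick from _hessian_vector_product (eq 8): approximate the mixed second derivative d/dalpha [ dL/dw . v ] by evaluating dL/dalpha at w + eps*v and w - eps*v. The loss below is made up purely so the result can be verified analytically:

    import torch

    torch.manual_seed(0)
    w = torch.randn(5, requires_grad=True)
    alpha = torch.randn(5, requires_grad=True)
    v = torch.randn(5)

    def loss(w_, a_):
        return ((w_ * a_) ** 2).sum()

    eps = 1e-2 / v.norm()                     # scale eps by ||v||, as in the code
    with torch.no_grad():
        w += eps * v                          # w+
    g_plus = torch.autograd.grad(loss(w, alpha), alpha)[0]
    with torch.no_grad():
        w -= 2.0 * eps * v                    # w-
    g_minus = torch.autograd.grad(loss(w, alpha), alpha)[0]
    with torch.no_grad():
        w += eps * v                          # restore original w
    hvp = (g_plus - g_minus) / (2.0 * eps)
    exact = 4.0 * w.detach() * alpha.detach() * v  # analytic HVP for this toy loss
    print(torch.allclose(hvp, exact, atol=1e-3))   # True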
117 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class AddCustomLiveStreamTranscodeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'AddCustomLiveStreamTranscode','live')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResWithSource(self): # String
return self.get_query_params().get('ResWithSource')
def set_ResWithSource(self, ResWithSource): # String
self.add_query_param('ResWithSource', ResWithSource)
def get_Gop(self): # String
return self.get_query_params().get('Gop')
def set_Gop(self, Gop): # String
self.add_query_param('Gop', Gop)
def get_AudioCodec(self): # String
return self.get_query_params().get('AudioCodec')
def set_AudioCodec(self, AudioCodec): # String
self.add_query_param('AudioCodec', AudioCodec)
def get_KmsUID(self): # String
return self.get_query_params().get('KmsUID')
def set_KmsUID(self, KmsUID): # String
self.add_query_param('KmsUID', KmsUID)
def get_Height(self): # Integer
return self.get_query_params().get('Height')
def set_Height(self, Height): # Integer
self.add_query_param('Height', Height)
def get_App(self): # String
return self.get_query_params().get('App')
def set_App(self, App): # String
self.add_query_param('App', App)
def get_Profile(self): # Integer
return self.get_query_params().get('Profile')
def set_Profile(self, Profile): # Integer
self.add_query_param('Profile', Profile)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_ExtWithSource(self): # String
return self.get_query_params().get('ExtWithSource')
def set_ExtWithSource(self, ExtWithSource): # String
self.add_query_param('ExtWithSource', ExtWithSource)
def METHOD_NAME(self): # String
return self.get_query_params().get('BitrateWithSource')
def set_BitrateWithSource(self, BitrateWithSource): # String
self.add_query_param('BitrateWithSource', BitrateWithSource)
def get_Domain(self): # String
return self.get_query_params().get('Domain')
def set_Domain(self, Domain): # String
self.add_query_param('Domain', Domain)
def get_Template(self): # String
return self.get_query_params().get('Template')
def set_Template(self, Template): # String
self.add_query_param('Template', Template)
def get_Lazy(self): # String
return self.get_query_params().get('Lazy')
def set_Lazy(self, Lazy): # String
self.add_query_param('Lazy', Lazy)
def get_KmsKeyExpireInterval(self): # String
return self.get_query_params().get('KmsKeyExpireInterval')
def set_KmsKeyExpireInterval(self, KmsKeyExpireInterval): # String
self.add_query_param('KmsKeyExpireInterval', KmsKeyExpireInterval)
def get_TemplateType(self): # String
return self.get_query_params().get('TemplateType')
def set_TemplateType(self, TemplateType): # String
self.add_query_param('TemplateType', TemplateType)
def get_AudioProfile(self): # String
return self.get_query_params().get('AudioProfile')
def set_AudioProfile(self, AudioProfile): # String
self.add_query_param('AudioProfile', AudioProfile)
def get_EncryptParameters(self): # String
return self.get_query_params().get('EncryptParameters')
def set_EncryptParameters(self, EncryptParameters): # String
self.add_query_param('EncryptParameters', EncryptParameters)
def get_AudioChannelNum(self): # Integer
return self.get_query_params().get('AudioChannelNum')
def set_AudioChannelNum(self, AudioChannelNum): # Integer
self.add_query_param('AudioChannelNum', AudioChannelNum)
def get_FPS(self): # Integer
return self.get_query_params().get('FPS')
def set_FPS(self, FPS): # Integer
self.add_query_param('FPS', FPS)
def get_AudioRate(self): # Integer
return self.get_query_params().get('AudioRate')
def set_AudioRate(self, AudioRate): # Integer
self.add_query_param('AudioRate', AudioRate)
def get_FpsWithSource(self): # String
return self.get_query_params().get('FpsWithSource')
def set_FpsWithSource(self, FpsWithSource): # String
self.add_query_param('FpsWithSource', FpsWithSource)
def get_AudioBitrate(self): # Integer
return self.get_query_params().get('AudioBitrate')
def set_AudioBitrate(self, AudioBitrate): # Integer
self.add_query_param('AudioBitrate', AudioBitrate)
def get_Width(self): # Integer
return self.get_query_params().get('Width')
def set_Width(self, Width): # Integer
self.add_query_param('Width', Width)
def get_VideoBitrate(self): # Integer
return self.get_query_params().get('VideoBitrate')
def set_VideoBitrate(self, VideoBitrate): # Integer
self.add_query_param('VideoBitrate', VideoBitrate)
def get_KmsKeyID(self): # String
return self.get_query_params().get('KmsKeyID')
def set_KmsKeyID(self, KmsKeyID): # String
self.add_query_param('KmsKeyID', KmsKeyID) | null |
118 | import warnings
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Optional, Sequence, Union
import numpy as np
from lhotse.features.base import FeatureExtractor, register_extractor
from lhotse.utils import Seconds, compute_num_frames, is_module_available
@dataclass
class OpenSmileConfig:
"""
OpenSmile configs are stored in separated txt files in its specific format.
You can specify predefined config by setting ``feature_set`` and ``feature_level``
class attributes with:
(1) ``FeatureSet`` and ``FeatureLevel`` classes predefined in
https://github.com/audeering/opensmile-python/blob/master/opensmile/core/define.py
OR
    (2) strings referring to enum members.
    In opensmile-python you can also create your own config file and pass its path and
    corresponding feature level as documented here:
https://audeering.github.io/opensmile-python/usage.html#custom-config.
For now custom configs are not supported in this extractor.
"""
feature_set: Union[str, Any] = "ComParE_2016" # default feature set or
# string with set name
feature_level: Union[str, Any] = "lld" # default feature level or level name
options: Optional[dict] = None # dictionary with optional script parameters
loglevel: int = 2 # log level (0-5), the higher the number the more log
# messages are given
logfile: Optional[str] = None # if not ``None`` log messages will be
# stored to this file
sampling_rate: Optional[int] = None # If ``None`` it will call ``process_func``
# with the actual sampling rate of the signal.
channels: Union[int, Sequence[int]] = 0
mixdown: bool = False # apply mono mix-down on selection
resample: bool = False # if ``True`` enforces given sampling rate by resampling
num_workers: Optional[int] = 1 # number of parallel jobs or 1 for sequential
# processing. If ``None`` will be set to the number of processors
verbose: bool = False # show debug messages
def METHOD_NAME(self) -> Dict[str, Any]:
return asdict(self)
@staticmethod
def from_dict(data: Dict[str, Any]) -> "OpenSmileConfig":
return OpenSmileConfig(**data)
@staticmethod
def featuresets_names():
"""
Returns list of strings with names of pretrained FeatureSets available in opensmile.
"""
assert is_module_available(
"opensmile"
), 'To use opensmile extractors, please "pip install opensmile" first.'
import opensmile
return list(opensmile.FeatureSet.__members__)
@register_extractor
class OpenSmileExtractor(FeatureExtractor):
"""Wrapper for extraction of features implemented in OpenSmile."""
name = "opensmile-extractor"
config_type = OpenSmileConfig
def __init__(self, config: Optional[Any] = None):
super().__init__(config=config)
assert is_module_available(
"opensmile"
), 'To use opensmile extractors, please "pip install opensmile" first.'
import opensmile
if isinstance(self.config.feature_set, str):
self.feature_set = opensmile.FeatureSet[self.config.feature_set]
else:
self.feature_set = self.config.feature_set
self.feature_level = opensmile.FeatureLevel(self.config.feature_level)
self.smileExtractor = opensmile.Smile(
feature_set=self.feature_set,
feature_level=self.feature_level,
sampling_rate=self.config.sampling_rate,
options=self.config.options,
loglevel=self.config.loglevel,
logfile=self.config.logfile,
channels=self.config.channels,
mixdown=self.config.mixdown,
resample=self.config.resample,
num_workers=self.config.num_workers,
verbose=self.config.verbose,
)
@property
def feature_names(self) -> List[str]:
return self.smileExtractor.feature_names
def is_lld_or_lld_de(self) -> bool:
from opensmile import FeatureLevel
return (
self.feature_level is FeatureLevel.LowLevelDescriptors
or self.feature_level is FeatureLevel.LowLevelDescriptors_Deltas
)
@property
def frame_shift(self) -> Seconds:
import opensmile
if (
self.is_lld_or_lld_de()
and self.feature_set in opensmile.FeatureSet.__members__.values()
):
            # For all default opensmile configs the frame shift equals 10 ms
return 0.01
else:
raise NotImplementedError(
f"frame_shift is not defined for Functionals feature level or for non default feature set. Defined featureset: {self.config.feature_set}"
)
def feature_dim(self, sampling_rate: int) -> int:
return len(self.feature_names)
def extract(self, samples: np.ndarray, sampling_rate: int) -> np.ndarray:
if (
self.config.sampling_rate is not None
and self.config.sampling_rate != sampling_rate
):
raise ValueError(
f"Given sampling rate ({sampling_rate}) mismatched with the value set in OpenSmileConfig ({self.config.sampling_rate})."
)
import opensmile
feats = self.smileExtractor.process_signal(
samples, sampling_rate=sampling_rate
).to_numpy()
if self.is_lld_or_lld_de():
feats = self._pad_frames(samples, feats, sampling_rate)
return feats.copy()
def _pad_frames(
self, samples: np.ndarray, feats: np.ndarray, sampling_rate: int
) -> np.ndarray:
"""Adds last diff frames to the end of feats matrix to fit lhotse.utils.compute_num_frames."""
duration = np.shape(samples)[1] / sampling_rate
diff = (
compute_num_frames(duration, self.frame_shift, sampling_rate)
- np.shape(feats)[0]
)
if abs(diff) >= 6:
warnings.warn(f"Unusual difference in number of frames: {diff}")
if diff > 0:
feats = np.append(feats, feats[-diff:, :], axis=0)
        elif diff < 0:
            feats = feats[:diff, :]  # trim the extra trailing frames
return feats | null |
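
A minimal usage sketch for the extractor above, assuming opensmile is installed; the 1-second mono 16 kHz signal is synthetic:

    import numpy as np

    config = OpenSmileConfig(feature_set='eGeMAPSv02', feature_level='lld')
    extractor = OpenSmileExtractor(config)
    samples = np.random.randn(1, 16000).astype(np.float32)  # (channels, time)
    feats = extractor.extract(samples, sampling_rate=16000)
    # ~100 frames x len(extractor.feature_names) with the default 10 ms shift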
119 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class CreateRouterInterfaceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'CreateRouterInterface','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_AccessPointId(self): # String
return self.get_query_params().get('AccessPointId')
def set_AccessPointId(self, AccessPointId): # String
self.add_query_param('AccessPointId', AccessPointId)
def get_OppositeAccessPointId(self): # String
return self.get_query_params().get('OppositeAccessPointId')
def set_OppositeAccessPointId(self, OppositeAccessPointId): # String
self.add_query_param('OppositeAccessPointId', OppositeAccessPointId)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_Spec(self): # String
return self.get_query_params().get('Spec')
def set_Spec(self, Spec): # String
self.add_query_param('Spec', Spec)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_Period(self): # Integer
return self.get_query_params().get('Period')
def set_Period(self, Period): # Integer
self.add_query_param('Period', Period)
def get_OppositeRegionId(self): # String
return self.get_query_params().get('OppositeRegionId')
def set_OppositeRegionId(self, OppositeRegionId): # String
self.add_query_param('OppositeRegionId', OppositeRegionId)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_OppositeInterfaceOwnerId(self): # String
return self.get_query_params().get('OppositeInterfaceOwnerId')
def set_OppositeInterfaceOwnerId(self, OppositeInterfaceOwnerId): # String
self.add_query_param('OppositeInterfaceOwnerId', OppositeInterfaceOwnerId)
def get_Tagss(self): # RepeatList
return self.get_query_params().get('Tags')
def set_Tagss(self, Tags): # RepeatList
for depth1 in range(len(Tags)):
if Tags[depth1].get('Value') is not None:
self.add_query_param('Tags.' + str(depth1 + 1) + '.Value', Tags[depth1].get('Value'))
if Tags[depth1].get('Key') is not None:
self.add_query_param('Tags.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key'))
def get_AutoRenew(self): # Boolean
return self.get_query_params().get('AutoRenew')
def set_AutoRenew(self, AutoRenew): # Boolean
self.add_query_param('AutoRenew', AutoRenew)
def get_OppositeRouterType(self): # String
return self.get_query_params().get('OppositeRouterType')
def set_OppositeRouterType(self, OppositeRouterType): # String
self.add_query_param('OppositeRouterType', OppositeRouterType)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_PricingCycle(self): # String
return self.get_query_params().get('PricingCycle')
def set_PricingCycle(self, PricingCycle): # String
self.add_query_param('PricingCycle', PricingCycle)
def get_OppositeRouterId(self): # String
return self.get_query_params().get('OppositeRouterId')
def set_OppositeRouterId(self, OppositeRouterId): # String
self.add_query_param('OppositeRouterId', OppositeRouterId)
def get_Role(self): # String
return self.get_query_params().get('Role')
def set_Role(self, Role): # String
self.add_query_param('Role', Role)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_HealthCheckTargetIp(self): # String
return self.get_query_params().get('HealthCheckTargetIp')
def set_HealthCheckTargetIp(self, HealthCheckTargetIp): # String
self.add_query_param('HealthCheckTargetIp', HealthCheckTargetIp)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_FastLinkMode(self): # Boolean
return self.get_query_params().get('FastLinkMode')
def set_FastLinkMode(self, FastLinkMode): # Boolean
self.add_query_param('FastLinkMode', FastLinkMode)
def get_OppositeInterfaceId(self): # String
return self.get_query_params().get('OppositeInterfaceId')
def set_OppositeInterfaceId(self, OppositeInterfaceId): # String
self.add_query_param('OppositeInterfaceId', OppositeInterfaceId)
def get_InstanceChargeType(self): # String
return self.get_query_params().get('InstanceChargeType')
def set_InstanceChargeType(self, InstanceChargeType): # String
self.add_query_param('InstanceChargeType', InstanceChargeType)
def get_AutoPay(self): # Boolean
return self.get_query_params().get('AutoPay')
def set_AutoPay(self, AutoPay): # Boolean
self.add_query_param('AutoPay', AutoPay)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def METHOD_NAME(self): # String
return self.get_query_params().get('RouterType')
def set_RouterType(self, RouterType): # String
self.add_query_param('RouterType', RouterType)
def get_HealthCheckSourceIp(self): # String
return self.get_query_params().get('HealthCheckSourceIp')
def set_HealthCheckSourceIp(self, HealthCheckSourceIp): # String
self.add_query_param('HealthCheckSourceIp', HealthCheckSourceIp)
def get_RouterId(self): # String
return self.get_query_params().get('RouterId')
def set_RouterId(self, RouterId): # String
self.add_query_param('RouterId', RouterId) | null |
120 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class CreateOTADynamicUpgradeJobRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2018-01-20', 'CreateOTADynamicUpgradeJob')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_DynamicMode(self):
return self.get_query_params().get('DynamicMode')
def set_DynamicMode(self,DynamicMode):
self.add_query_param('DynamicMode',DynamicMode)
def get_MultiModuleMode(self):
return self.get_query_params().get('MultiModuleMode')
def set_MultiModuleMode(self,MultiModuleMode):
self.add_query_param('MultiModuleMode',MultiModuleMode)
def get_RetryCount(self):
return self.get_query_params().get('RetryCount')
def set_RetryCount(self,RetryCount):
self.add_query_param('RetryCount',RetryCount)
def get_TimeoutInMinutes(self):
return self.get_query_params().get('TimeoutInMinutes')
def set_TimeoutInMinutes(self,TimeoutInMinutes):
self.add_query_param('TimeoutInMinutes',TimeoutInMinutes)
def get_NeedConfirm(self):
return self.get_query_params().get('NeedConfirm')
def set_NeedConfirm(self,NeedConfirm):
self.add_query_param('NeedConfirm',NeedConfirm)
def get_GroupType(self):
return self.get_query_params().get('GroupType')
def set_GroupType(self,GroupType):
self.add_query_param('GroupType',GroupType)
def get_NeedPush(self):
return self.get_query_params().get('NeedPush')
def set_NeedPush(self,NeedPush):
self.add_query_param('NeedPush',NeedPush)
def get_IotInstanceId(self):
return self.get_query_params().get('IotInstanceId')
def set_IotInstanceId(self,IotInstanceId):
self.add_query_param('IotInstanceId',IotInstanceId)
def get_DownloadProtocol(self):
return self.get_query_params().get('DownloadProtocol')
def set_DownloadProtocol(self,DownloadProtocol):
self.add_query_param('DownloadProtocol',DownloadProtocol)
def get_Tags(self):
return self.get_query_params().get('Tag')
def METHOD_NAME(self, Tags):
for depth1 in range(len(Tags)):
if Tags[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tags[depth1].get('Value'))
if Tags[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key'))
def get_GroupId(self):
return self.get_query_params().get('GroupId')
def set_GroupId(self,GroupId):
self.add_query_param('GroupId',GroupId)
def get_FirmwareId(self):
return self.get_query_params().get('FirmwareId')
def set_FirmwareId(self,FirmwareId):
self.add_query_param('FirmwareId',FirmwareId)
def get_ProductKey(self):
return self.get_query_params().get('ProductKey')
def set_ProductKey(self,ProductKey):
self.add_query_param('ProductKey',ProductKey)
def get_RetryInterval(self):
return self.get_query_params().get('RetryInterval')
def set_RetryInterval(self,RetryInterval):
self.add_query_param('RetryInterval',RetryInterval)
def get_SrcVersions(self):
return self.get_query_params().get('SrcVersion')
def set_SrcVersions(self, SrcVersions):
for depth1 in range(len(SrcVersions)):
if SrcVersions[depth1] is not None:
self.add_query_param('SrcVersion.' + str(depth1 + 1) , SrcVersions[depth1])
def get_OverwriteMode(self):
return self.get_query_params().get('OverwriteMode')
def set_OverwriteMode(self,OverwriteMode):
self.add_query_param('OverwriteMode',OverwriteMode)
def get_MaximumPerMinute(self):
return self.get_query_params().get('MaximumPerMinute')
def set_MaximumPerMinute(self,MaximumPerMinute):
		self.add_query_param('MaximumPerMinute',MaximumPerMinute) | null
121 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksmartag.endpoint import endpoint_data
class CreateSmartAccessGatewayRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Smartag', '2018-03-13', 'CreateSmartAccessGateway','smartag')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ReceiverTown(self): # String
return self.get_query_params().get('ReceiverTown')
def set_ReceiverTown(self, ReceiverTown): # String
self.add_query_param('ReceiverTown', ReceiverTown)
def get_ReceiverDistrict(self): # String
return self.get_query_params().get('ReceiverDistrict')
def METHOD_NAME(self, ReceiverDistrict): # String
self.add_query_param('ReceiverDistrict', ReceiverDistrict)
def get_BuyerMessage(self): # String
return self.get_query_params().get('BuyerMessage')
def set_BuyerMessage(self, BuyerMessage): # String
self.add_query_param('BuyerMessage', BuyerMessage)
def get_ReceiverState(self): # String
return self.get_query_params().get('ReceiverState')
def set_ReceiverState(self, ReceiverState): # String
self.add_query_param('ReceiverState', ReceiverState)
def get_Period(self): # Integer
return self.get_query_params().get('Period')
def set_Period(self, Period): # Integer
self.add_query_param('Period', Period)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_ReceiverPhone(self): # String
return self.get_query_params().get('ReceiverPhone')
def set_ReceiverPhone(self, ReceiverPhone): # String
self.add_query_param('ReceiverPhone', ReceiverPhone)
def get_HaType(self): # String
return self.get_query_params().get('HaType')
def set_HaType(self, HaType): # String
self.add_query_param('HaType', HaType)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_ReceiverCountry(self): # String
return self.get_query_params().get('ReceiverCountry')
def set_ReceiverCountry(self, ReceiverCountry): # String
self.add_query_param('ReceiverCountry', ReceiverCountry)
def get_MaxBandWidth(self): # Integer
return self.get_query_params().get('MaxBandWidth')
def set_MaxBandWidth(self, MaxBandWidth): # Integer
self.add_query_param('MaxBandWidth', MaxBandWidth)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_ReceiverAddress(self): # String
return self.get_query_params().get('ReceiverAddress')
def set_ReceiverAddress(self, ReceiverAddress): # String
self.add_query_param('ReceiverAddress', ReceiverAddress)
def get_HardWareSpec(self): # String
return self.get_query_params().get('HardWareSpec')
def set_HardWareSpec(self, HardWareSpec): # String
self.add_query_param('HardWareSpec', HardWareSpec)
def get_ReceiverEmail(self): # String
return self.get_query_params().get('ReceiverEmail')
def set_ReceiverEmail(self, ReceiverEmail): # String
self.add_query_param('ReceiverEmail', ReceiverEmail)
def get_ReceiverCity(self): # String
return self.get_query_params().get('ReceiverCity')
def set_ReceiverCity(self, ReceiverCity): # String
self.add_query_param('ReceiverCity', ReceiverCity)
def get_AutoPay(self): # Boolean
return self.get_query_params().get('AutoPay')
def set_AutoPay(self, AutoPay): # Boolean
self.add_query_param('AutoPay', AutoPay)
def get_CPEVersion(self): # String
return self.get_query_params().get('CPEVersion')
def set_CPEVersion(self, CPEVersion): # String
self.add_query_param('CPEVersion', CPEVersion)
def get_ReceiverMobile(self): # String
return self.get_query_params().get('ReceiverMobile')
def set_ReceiverMobile(self, ReceiverMobile): # String
self.add_query_param('ReceiverMobile', ReceiverMobile)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_ReceiverName(self): # String
return self.get_query_params().get('ReceiverName')
def set_ReceiverName(self, ReceiverName): # String
self.add_query_param('ReceiverName', ReceiverName)
def get_AlreadyHaveSag(self): # Boolean
return self.get_query_params().get('AlreadyHaveSag')
def set_AlreadyHaveSag(self, AlreadyHaveSag): # Boolean
self.add_query_param('AlreadyHaveSag', AlreadyHaveSag)
def get_ChargeType(self): # String
return self.get_query_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
self.add_query_param('ChargeType', ChargeType)
def get_ReceiverZip(self): # String
return self.get_query_params().get('ReceiverZip')
def set_ReceiverZip(self, ReceiverZip): # String
self.add_query_param('ReceiverZip', ReceiverZip) | null |
122 | #!/usr/bin/env python3
###############################################################################
# Copyright 2020 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
This module provides the preprocessing function of vehicle calibration data
"""
import os
import re
import shutil
import time
from absl import app
from absl import flags
from absl import logging
from datetime import datetime
from cyber.python.cyber_py3 import cyber
from modules.dreamview.proto import preprocess_table_pb2
from modules.tools.vehicle_calibration.sanity_check import sanity_check
flags.DEFINE_string('vehicle_type', '', 'The vehicle type to be calibrated')
flags.DEFINE_string('data_path', '/apollo/output', 'Default output data path')
flags.DEFINE_string('calibration_data_path',
'/apollo/modules/calibration/data',
'Default vehicle configuration file directory')
flags.DEFINE_string('config_file_name', 'vehicle_param.pb.txt',
'Default vehicle configuration file name')
flags.DEFINE_string('record_root_path', '/apollo/data/bag',
'Default record root path')
flags.DEFINE_integer(
'record_num', 1, 'The number of record folders '
'required for this calibration task')
FLAGS = flags.FLAGS
def main(argv):
cyber.init("Preprocessor")
preprocessor = Preprocessor()
task_dir = preprocessor.create_tree()
preprocessor.sanity_check_path(task_dir)
cyber.shutdown()
class Preprocessor(object):
def __init__(self):
self.record_num = FLAGS.record_num
self.vehicle_type = self.folder_case(FLAGS.vehicle_type)
self.config_file = self.METHOD_NAME()
self.node = cyber.Node("vehicle_calibration_preprocessor")
self.writer = self.node.create_writer("/apollo/dreamview/progress",
preprocess_table_pb2.Progress,
10)
self.progress = preprocess_table_pb2.Progress()
self.progress.percentage = 0.0
self.progress.log_string = "Press the button to start preprocessing"
    @staticmethod
    def folder_case(name):
        """Convert a string from title case to folder case"""
        return "_".join(name.lower().split(" "))
def create_if_not_exists(self, path):
"""Create dir if path does not exists"""
try:
if not os.path.exists(path):
os.makedirs(path)
                self.log_and_publish(f'Successfully created {path}')
except OSError:
self.log_and_publish(f'Failed to create: {path}', 'error')
return path
def METHOD_NAME(self):
"""Get the configuration file of the specified vehicle type"""
return os.path.join(FLAGS.calibration_data_path, self.vehicle_type,
FLAGS.config_file_name)
def get_records_info(self):
"""Get records required for calibration"""
res = []
        for dir_name in os.listdir(FLAGS.record_root_path):
            match = re.match(r'(^\d{4}-\d{2}-\d{2})-(\d{2}-\d{2}-\d{2}_s$)',
                             dir_name)
if match is not None:
record_info = {}
record_info['rel_path'] = match.group()
record_info['abs_path'] = os.path.join(FLAGS.record_root_path,
match.group())
record_info['prefix'] = match.group(1)
res.append(record_info)
if len(res) < self.record_num:
self.log_and_publish(
f'The number of records in {FLAGS.record_root_path} '
f'is less than {self.record_num}', 'error')
res = sorted(res, key=lambda record: record['rel_path'],
reverse=True)[:self.record_num]
return res
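    # Illustrative example (directory name is made up): a folder named
    # "2020-06-01-12-00-00_s" under FLAGS.record_root_path matches the
    # pattern above and yields:
    #   {'rel_path': '2020-06-01-12-00-00_s',
    #    'abs_path': '/apollo/data/bag/2020-06-01-12-00-00_s',
    #    'prefix': '2020-06-01'}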
    def log_and_publish(self,
                        message,
                        logging_level="info",
                        status=preprocess_table_pb2.Status.UNKNOWN):
        """Publish the message by cyber writer"""
        if logging_level == 'info':
            logging.info(message)
        elif logging_level == 'warn':
            logging.warning(message)
        elif logging_level == 'error':
            logging.error(message)
        elif logging_level == 'fatal':
            logging.fatal(message)
        else:
            logging.info(message)
        self.progress.log_string = message
        self.progress.status = status
        self.writer.write(self.progress)
time.sleep(0.5)
def create_tree(self):
"""Create file tree according to a specific order"""
task_dir = self.create_if_not_exists(
os.path.join(FLAGS.data_path,
'task' + datetime.now().strftime("-%Y-%m-%d-%H-%M")))
vehicle_dir = self.create_if_not_exists(
os.path.join(task_dir, self.vehicle_type))
records_dir = self.create_if_not_exists(
os.path.join(vehicle_dir, "Records"))
shutil.copy(self.config_file, vehicle_dir)
records_info = self.get_records_info()
finished_records = 0
self.progress.log_string = 'Start preprocessing...'
        for record in records_info:
            sub_dir = self.create_if_not_exists(
                os.path.join(records_dir, record['prefix']))
            shutil.copytree(record['abs_path'],
                            os.path.join(sub_dir, record['rel_path']))
finished_records += 1
self.progress.percentage = (
finished_records / self.record_num) * 80.0
self.writer.write(self.progress)
self.log_and_publish(
f'The file tree has been successfully created at {task_dir}.')
return task_dir
def sanity_check_path(self, path):
"""Sanity check wrapper"""
result, log_str = sanity_check(path)
if result is True:
self.progress.percentage = 100.0
self.progress.status = preprocess_table_pb2.Status.SUCCESS
else:
self.progress.status = preprocess_table_pb2.Status.FAIL
self.progress.log_string = log_str
self.writer.write(self.progress)
time.sleep(0.5)
if __name__ == "__main__":
app.run(main) | null |
123 | """
This module defines options which should be available on all commands, such as the
-v, --verbose option.
To use it:
- Add the `@add_common_options()` decorator after all the `click.option()` calls of the
command function.
- Add a `**kwargs: Any` argument to the command function.
The `kwargs` argument is required because, due to the way click works,
`add_common_options()` adds an argument for each option it defines.
"""
from pathlib import Path
from typing import Any, Callable, Optional, TypeVar, cast
import click
from ggshield.cmd.utils.debug_logs import setup_debug_logs
from ggshield.core.config.user_config import UserConfig
AnyFunction = Callable[..., Any]
# The argument of a Click option callback function
ArgT = TypeVar("ArgT")
# A Click option callback function
ClickCallback = Callable[
[click.Context, click.Parameter, Optional[ArgT]], Optional[ArgT]
]
def get_config_from_context(ctx: click.Context) -> UserConfig:
"""Returns the UserConfig object stored in Click context"""
return cast(UserConfig, ctx.obj["config"].user_config)
def create_ctx_callback(name: str) -> ClickCallback:
"""Helper function to define a Click option callback for simple cases where we only
have to set a value on Click context object if the option is defined.
"""
def callback(
ctx: click.Context, param: click.Parameter, value: Optional[ArgT]
) -> Optional[ArgT]:
if value is not None:
ctx.obj[name] = value
return value
return callback
def create_config_callback(*option_names: str) -> ClickCallback:
"""Helper function to define a Click option callback for simple cases where we only
have to set a configuration attribute if the option is defined.
to reach UserConfig.foo, set option_names to ["foo"]
    to reach UserConfig.secret.bar, set option_names to ["secret", "bar"]
"""
def callback(
ctx: click.Context, param: click.Parameter, value: Optional[ArgT]
) -> Optional[ArgT]:
if value is not None:
obj = get_config_from_context(ctx)
for name in option_names[:-1]:
obj = getattr(obj, name)
setattr(obj, option_names[-1], value)
return value
return callback
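# A minimal sketch (hypothetical option, not part of the real command set):
# create_config_callback("secret", "show_secrets") would write the flag's
# value to UserConfig.secret.show_secrets, assuming that attribute exists.
_example_show_secrets_option = click.option(
    "--show-secrets",
    is_flag=True,
    default=None,
    help="Show secrets in plain text instead of obfuscating them.",
    callback=create_config_callback("secret", "show_secrets"),
)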
_verbose_option = click.option(
"-v",
"--verbose",
is_flag=True,
default=None,
help="Verbose display mode.",
callback=create_config_callback("verbose"),
)
def debug_callback(
ctx: click.Context, param: click.Parameter, value: Optional[bool]
) -> Optional[bool]:
if value is not None:
setup_debug_logs(filename=None)
return value
# The --debug option is marked as "is_eager" so that we can setup logs as soon as
# possible. If we don't then log commands for the creation of the Config instance
# are ignored.
_debug_option = click.option(
"--debug",
is_flag=True,
default=None,
is_eager=True,
help="Send log output to stderr. Equivalent to `--log-file -`.",
callback=debug_callback,
)
def METHOD_NAME(
ctx: click.Context, param: click.Parameter, value: Optional[str]
) -> Optional[str]:
if value is not None:
setup_debug_logs(filename=None if value == "-" else value)
return value
# The --log-file option is marked as "is_eager" so that we can setup logs as soon as
# possible. If we don't then log commands for the creation of the Config instance
# are ignored.
_log_file_option = click.option(
"--log-file",
metavar="FILE",
is_eager=True,
help="Send log output to FILE. Use '-' to redirect to stderr.",
envvar="GITGUARDIAN_LOG_FILE",
callback=METHOD_NAME,
)
_allow_self_signed_option = click.option(
"--allow-self-signed",
is_flag=True,
default=None,
help="Ignore ssl verification.",
callback=create_config_callback("allow_self_signed"),
)
_check_for_updates = click.option(
"--check-for-updates/--no-check-for-updates",
is_flag=True,
default=None,
help="After executing commands, check if a new version of ggshield is available.",
callback=create_ctx_callback("check_for_updates"),
)
exit_zero_option = click.option(
"--exit-zero",
is_flag=True,
default=None,
envvar="GITGUARDIAN_EXIT_ZERO",
help="Always return a 0 (non-error) status code, even if incidents are found."
"The env var GITGUARDIAN_EXIT_ZERO can also be used to set this option.",
callback=create_config_callback("exit_zero"),
)
minimum_severity_option = click.option(
"--minimum-severity",
"minimum_severity",
type=click.Choice(("LOW", "MEDIUM", "HIGH", "CRITICAL")),
help="Minimum severity of the policies.",
)
ignore_path_option = click.option(
"--ignore-path",
"--ipa",
"ignore_paths",
default=None,
multiple=True,
help="Do not scan paths that match the specified glob-like patterns.",
)
def add_common_options() -> Callable[[AnyFunction], AnyFunction]:
def decorator(cmd: AnyFunction) -> AnyFunction:
_verbose_option(cmd)
_debug_option(cmd)
_log_file_option(cmd)
_allow_self_signed_option(cmd)
_check_for_updates(cmd)
return cmd
return decorator
json_option = click.option(
"--json",
"json_output",
is_flag=True,
default=None,
help="Use JSON output.",
callback=create_ctx_callback("use_json"),
)
def use_json(ctx: click.Context) -> bool:
"""Tells whether --json has been set"""
return bool(ctx.obj.get("use_json", False))
directory_argument = click.argument(
"directory",
type=click.Path(exists=True, readable=True, path_type=Path, file_okay=False),
required=False,
# using a default value here makes the deprecated `iac scan` fail
)
all_option = click.option(
"--all",
"scan_all",
is_flag=True,
default=False,
help="Reports all vulnerabilities in the final state.",
)
reference_option = click.option(
"--ref",
required=True,
type=click.STRING,
help="A git reference.",
)
staged_option = click.option(
"--staged",
is_flag=True,
help="Whether staged changes should be included into the scan.",
) | null |
124 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdknlb.endpoint import endpoint_data
class CreateLoadBalancerRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Nlb', '2022-04-30', 'CreateLoadBalancer','nlb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClientToken(self): # String
return self.get_body_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_body_params('ClientToken', ClientToken)
def get_ModificationProtectionConfig(self): # Struct
return self.get_body_params().get('ModificationProtectionConfig')
def set_ModificationProtectionConfig(self, ModificationProtectionConfig): # Struct
if ModificationProtectionConfig.get('Status') is not None:
self.add_body_params('ModificationProtectionConfig.Status', ModificationProtectionConfig.get('Status'))
if ModificationProtectionConfig.get('Reason') is not None:
self.add_body_params('ModificationProtectionConfig.Reason', ModificationProtectionConfig.get('Reason'))
def get_LoadBalancerBillingConfig(self): # Struct
return self.get_body_params().get('LoadBalancerBillingConfig')
def METHOD_NAME(self, LoadBalancerBillingConfig): # Struct
if LoadBalancerBillingConfig.get('PayType') is not None:
self.add_body_params('LoadBalancerBillingConfig.PayType', LoadBalancerBillingConfig.get('PayType'))
def get_DeletionProtectionConfig(self): # Struct
return self.get_body_params().get('DeletionProtectionConfig')
def set_DeletionProtectionConfig(self, DeletionProtectionConfig): # Struct
if DeletionProtectionConfig.get('Enabled') is not None:
self.add_body_params('DeletionProtectionConfig.Enabled', DeletionProtectionConfig.get('Enabled'))
if DeletionProtectionConfig.get('Reason') is not None:
self.add_body_params('DeletionProtectionConfig.Reason', DeletionProtectionConfig.get('Reason'))
def get_AddressIpVersion(self): # String
return self.get_body_params().get('AddressIpVersion')
def set_AddressIpVersion(self, AddressIpVersion): # String
self.add_body_params('AddressIpVersion', AddressIpVersion)
def get_ResourceGroupId(self): # String
return self.get_body_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_body_params('ResourceGroupId', ResourceGroupId)
def get_LoadBalancerName(self): # String
return self.get_body_params().get('LoadBalancerName')
def set_LoadBalancerName(self, LoadBalancerName): # String
self.add_body_params('LoadBalancerName', LoadBalancerName)
def get_AddressType(self): # String
return self.get_body_params().get('AddressType')
def set_AddressType(self, AddressType): # String
self.add_body_params('AddressType', AddressType)
def get_Tags(self): # RepeatList
return self.get_body_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_body_params('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_body_params('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_BandwidthPackageId(self): # String
return self.get_body_params().get('BandwidthPackageId')
def set_BandwidthPackageId(self, BandwidthPackageId): # String
self.add_body_params('BandwidthPackageId', BandwidthPackageId)
def get_DryRun(self): # Boolean
return self.get_body_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_body_params('DryRun', DryRun)
def get_ZoneMappings(self): # Array
return self.get_body_params().get('ZoneMappings')
def set_ZoneMappings(self, ZoneMappings): # Array
for index1, value1 in enumerate(ZoneMappings):
if value1.get('VSwitchId') is not None:
self.add_body_params('ZoneMappings.' + str(index1 + 1) + '.VSwitchId', value1.get('VSwitchId'))
if value1.get('ZoneId') is not None:
self.add_body_params('ZoneMappings.' + str(index1 + 1) + '.ZoneId', value1.get('ZoneId'))
if value1.get('PrivateIPv4Address') is not None:
self.add_body_params('ZoneMappings.' + str(index1 + 1) + '.PrivateIPv4Address', value1.get('PrivateIPv4Address'))
if value1.get('AllocationId') is not None:
self.add_body_params('ZoneMappings.' + str(index1 + 1) + '.AllocationId', value1.get('AllocationId'))
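	# Illustrative sketch (values are made up): set_ZoneMappings flattens
	# each dict into 1-indexed body parameters, so the call
	#   request.set_ZoneMappings([{'VSwitchId': 'vsw-x', 'ZoneId': 'cn-hangzhou-a'}])
	# adds 'ZoneMappings.1.VSwitchId' and 'ZoneMappings.1.ZoneId'.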
def get_LoadBalancerType(self): # String
return self.get_body_params().get('LoadBalancerType')
def set_LoadBalancerType(self, LoadBalancerType): # String
self.add_body_params('LoadBalancerType', LoadBalancerType)
def get_VpcId(self): # String
return self.get_body_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_body_params('VpcId', VpcId) | null |
125 | # OPENCORE - ADD
from shared.database.task.job.job import Job
from shared.database.project import Project
from shared.regular import regular_log
from shared.shared_logger import get_shared_logger
from shared.feature_flags.feature_checker import FeatureChecker
from shared.settings import settings
from shared.database.source_control.working_dir import WorkingDirFileLink
from shared.database.annotation.instance import Instance
from shared.database.event.event import Event
import datetime
logger = get_shared_logger()
def METHOD_NAME(export, session):
"""
Returns a string with the final filename for an export.
:param export:
:return:
"""
    # TODO (low priority) switch to building a list and joining it with "".join();
    # it's a bit faster and, more importantly, easier to read / check.
filename = f"_diffgram_annotations__source_{str(export.source)}_"
if export.source == "task":
filename += str(export.task.id)
if export.source == "job":
job = Job.get_by_id(
session=session,
job_id=export.job_id)
if job:
filename += str(job.name)
if export.source == "directory":
filename += str(export.working_dir.nickname)
# Always add timestamps to avoid duplicate names.
filename += f"_datetime_{datetime.datetime.utcnow().isoformat()}"
filename = filename.replace(":", "-")
return filename
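# Illustrative example (job name and timestamp are made up): for a "job"
# export of a job named "cars", the function returns something like
# "_diffgram_annotations__source_job_cars_datetime_2021-01-01T00-00-00".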
def has_project_permissions_for_export(export, project_string_id, session):
log = regular_log.default()
project = Project.get(session, project_string_id)
# Theory is that if a user has access to project
# They have access to download from project
if export.project_id != project.id:
log['error']['project_permissions'] = 'Permission error, invalid project export match'
return log
def is_export_completed(export):
log = regular_log.default()
    # Context: this is exposed in the SDK, and
    # it fails there if the export is not ready
# note it's "complete" and not "success"
if export.status != "complete":
log['error']['export'] = "Export not ready yet."
log['export'] = export.serialize()
return log
return log
def check_export_permissions_and_status(export, project_string_id, session):
project_perms = has_project_permissions_for_export(export, project_string_id, session)
if regular_log.log_has_error(project_perms):
return project_perms
export_completed_result = is_export_completed(export)
if regular_log.log_has_error(export_completed_result):
return export_completed_result
return regular_log.default()
def check_export_billing(
session,
project,
directory,
member,
log):
"""
"""
logger.info('Checking Limits for Plan')
if settings.ALLOW_STRIPE_BILLING is False:
return log
checker = FeatureChecker(
session = session,
user = member.user,
project = project
)
max_allowed_instances = checker.get_limit_from_plan('MAX_INSTANCES_PER_EXPORT')
if max_allowed_instances is None:
return log
# Careful if it's a large project,
    # and there are no other areas / no billing ID, it can hang here in a funny way.
    # Put a limit of 200 as a temp measure for this.
# Free case, could error or success
file_list = WorkingDirFileLink.file_list(
session = session,
working_dir_id = directory.id,
type = "image",
exclude_removed = True,
limit = 200
)
new_instance_count = 0
for file in file_list:
new_instance_count += Instance.list(
session = session,
file_id = file.id,
exclude_removed = True,
return_kind = "count")
logger.info(f"Checking limits for export with {new_instance_count} instances")
if max_allowed_instances:
if new_instance_count > max_allowed_instances:
message = 'Free Tier Limit Reached - Max Instances Allowed: {}. But Export has {} instances'.format(
max_allowed_instances,
new_instance_count
)
log['error']['over_free_plan_limit'] = True
log['error']['active_instances'] = new_instance_count
log['error']['free_tier_limit'] = message
Event.new(
kind = "export_generation_free_account_over_limit",
session = session,
member = member,
success = False,
project_id = project.id
)
return log
    return log
126 | # Copyright 2020 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from intern.service.service import Service
from enum import IntEnum
import numpy as np
class VoxelUnits(IntEnum):
"""Enum with valid VoxelUnits
"""
nm = 1
um = 1000
mm = 1000000
cm = 10000000
nanometers = 1
micrometers = 1000
millimeters = 1000000
centimeters = 10000000
class MeshService(Service):
""" Partial implementation of intern.service.service.Service for the Meshing' services.
"""
def __init__(self):
"""Constructor
"""
Service.__init__(self)
def set_auth(self):
""" No auth for Meshing
"""
self._auth = None
def create(self, volume,
x_range, y_range, z_range, time_range=None,
id_list=[], voxel_unit=VoxelUnits.nm,
voxel_size=[4,4,40], simp_fact=0, max_simplification_error=60,
normals=False, **kwargs):
"""Generate a mesh of the specified IDs
Args:
volume ([array]): Numpy array volume.
resolution (int): 0 indicates native resolution.
x_range (list[int]): x range such as [10, 20] which means x>=10 and x<20.
y_range (list[int]): y range such as [10, 20] which means y>=10 and y<20.
z_range (list[int]): z range such as [10, 20] which means z>=10 and z<20.
time_range (optional [list[int]]): time range such as [30, 40] which means t>=30 and t<40.
id_list (optional [list]): list of object ids to filter the volume by.
voxel_unit (optional VoxelUnit): voxel unit of measurement to derive conversion factor.
voxel_size (optional [list]): list in form [x,y,z] of voxel size. Defaults to 4x4x40nm
simp_fact (optional int): mesh simplification factor, reduces triangles by given factor
max_simplification_error (optional int): Max tolerable error in physical distance
normals (optional bool): if true will calculate normals
Returns:
(): Return type depends on volume service's implementation.
Raises:
RuntimeError when given invalid resource.
Other exceptions may be raised depending on the volume service's implementation.
"""
from zmesh import Mesher
if np.unique(volume).shape[0] == 1:
raise ValueError("The volume provided only has one unique ID (0). ID 0 is considered background.")
conv_factor = self._get_conversion_factor(voxel_unit)
# Get voxel_sizes
x_voxel_size = float(voxel_size[0]) * conv_factor
y_voxel_size = float(voxel_size[1]) * conv_factor
z_voxel_size = float(voxel_size[2]) * conv_factor
# Mesh
mesher = Mesher((x_voxel_size,y_voxel_size,z_voxel_size))
mesher.mesh(volume)
# If the list is empty then just default to all ID's found in the volume
if (id_list == []):
id_list = mesher.ids()
# Run the mesher on all specified ID's
for oid in id_list:
mesh = mesher.get_mesh(
oid,
normals=normals,
simplification_factor=simp_fact,
max_simplification_error= max_simplification_error,
)
mesh.vertices += [x_range[0]*conv_factor, y_range[0]*conv_factor, z_range[0]*conv_factor]
return Mesh([volume, mesh])
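    # Hedged usage sketch (shapes and IDs are illustrative): mesh two
    # labeled objects from a small uint32 volume at 4x4x40nm voxels.
    #
    #   service = MeshService()
    #   labels = np.zeros((64, 64, 16), dtype=np.uint32)
    #   labels[8:24, 8:24, 4:8] = 1
    #   labels[32:48, 32:48, 8:12] = 2
    #   mesh = service.create(labels, x_range=[0, 64], y_range=[0, 64],
    #                         z_range=[0, 16], id_list=[1, 2],
    #                         voxel_unit=VoxelUnits.nm, voxel_size=[4, 4, 40])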
def _get_conversion_factor(self, voxel_unit):
"""
Validate the voxel unit type and derive conversion factor from it if valid
Arguments:
voxel_unit (VoxelUnits): 'nanometers', 'millimeters', <etc>
Returns:
int: conversion factor to use by meshing service
Raises:
ValueError
"""
if not isinstance(voxel_unit, VoxelUnits):
raise ValueError("{} is not a valid voxel unit".format(voxel_unit))
else:
return voxel_unit.value
class Mesh:
def __init__(self, data):
"""Constructor.
Args:
data (tuple[raw_volume, mesh]): tuple containing the raw data and the mesh data
"""
self._raw_vol = data[0]
self._mesh = data[1]
def ng_mesh(self):
"""Convert mesh to precompute format for Neuroglancer visualization
Args:
mesh: mesh to convert.
Returns:
(): Returns mesh precompute format
"""
return self._mesh.to_precomputed()
def METHOD_NAME(self):
"""Convert mesh to obj
Args:
mesh: mesh to convert.
Returns:
(): Returns mesh obj format
"""
return self._mesh.to_obj() | null |
127 | import aiohttp
import asyncio
import logging
import time
import ujson
from typing import (
Any,
Dict,
Optional,
)
from hummingbot.connector.exchange.ndax.ndax_auth import NdaxAuth
from hummingbot.connector.exchange.ndax import ndax_constants as CONSTANTS, ndax_utils
from hummingbot.connector.exchange.ndax.ndax_websocket_adaptor import NdaxWebSocketAdaptor
from hummingbot.core.api_throttler.async_throttler import AsyncThrottler
from hummingbot.core.data_type.user_stream_tracker_data_source import UserStreamTrackerDataSource
from hummingbot.logger import HummingbotLogger
class NdaxAPIUserStreamDataSource(UserStreamTrackerDataSource):
_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._logger is None:
cls._logger = logging.getLogger(__name__)
return cls._logger
def __init__(self, throttler: AsyncThrottler, auth_assistant: NdaxAuth, shared_client: Optional[aiohttp.ClientSession] = None, domain: Optional[str] = None):
super().__init__()
self._shared_client = shared_client or self._get_session_instance()
self._ws_adaptor = None
self._auth_assistant: NdaxAuth = auth_assistant
self._last_recv_time: float = 0
self._account_id: Optional[int] = None
self._oms_id: Optional[int] = None
self._domain = domain
self._throttler = throttler
@property
def last_recv_time(self) -> float:
return self._last_recv_time
@classmethod
def _get_session_instance(cls) -> aiohttp.ClientSession:
session = aiohttp.ClientSession()
return session
async def _init_websocket_connection(self) -> NdaxWebSocketAdaptor:
"""
Initialize WebSocket client for UserStreamDataSource
"""
try:
if self._ws_adaptor is None:
ws = await self._shared_client.ws_connect(ndax_utils.wss_url(self._domain))
self._ws_adaptor = NdaxWebSocketAdaptor(throttler=self._throttler, websocket=ws)
return self._ws_adaptor
except asyncio.CancelledError:
raise
except Exception as ex:
self.logger().network(f"Unexpected error occurred during {CONSTANTS.EXCHANGE_NAME} WebSocket Connection "
f"({ex})")
raise
async def _authenticate(self, ws: NdaxWebSocketAdaptor):
"""
Authenticates user to websocket
"""
try:
auth_payload: Dict[str, Any] = self._auth_assistant.get_ws_auth_payload()
async with self._throttler.execute_task(CONSTANTS.AUTHENTICATE_USER_ENDPOINT_NAME):
await ws.send_request(CONSTANTS.AUTHENTICATE_USER_ENDPOINT_NAME, auth_payload)
auth_resp = await ws.receive()
auth_payload: Dict[str, Any] = ws.payload_from_raw_message(auth_resp.data)
if not auth_payload["Authenticated"]:
self.logger().error(f"Response: {auth_payload}",
exc_info=True)
raise Exception("Could not authenticate websocket connection with NDAX")
auth_user = auth_payload.get("User")
self._account_id = auth_user.get("AccountId")
self._oms_id = auth_user.get("OMSId")
except asyncio.CancelledError:
raise
except Exception as ex:
self.logger().error(f"Error occurred when authenticating to user stream ({ex})",
exc_info=True)
raise
async def METHOD_NAME(self, ws: NdaxWebSocketAdaptor):
"""
Subscribes to User Account Events
"""
payload = {"AccountId": self._account_id,
"OMSId": self._oms_id}
try:
async with self._throttler.execute_task(CONSTANTS.SUBSCRIBE_ACCOUNT_EVENTS_ENDPOINT_NAME):
await ws.send_request(CONSTANTS.SUBSCRIBE_ACCOUNT_EVENTS_ENDPOINT_NAME, payload)
except asyncio.CancelledError:
raise
except Exception as ex:
self.logger().error(f"Error occurred subscribing to {CONSTANTS.EXCHANGE_NAME} private channels ({ex})",
exc_info=True)
raise
async def listen_for_user_stream(self, output: asyncio.Queue):
"""
*required
Subscribe to user stream via web socket, and keep the connection open for incoming messages
:param output: an async queue where the incoming messages are stored
"""
while True:
try:
ws: NdaxWebSocketAdaptor = await self._init_websocket_connection()
self.logger().info("Authenticating to User Stream...")
await self._authenticate(ws)
self.logger().info("Successfully authenticated to User Stream.")
await self.METHOD_NAME(ws)
self.logger().info("Successfully subscribed to user events.")
async for msg in ws.iter_messages():
self._last_recv_time = int(time.time())
output.put_nowait(ujson.loads(msg))
except asyncio.CancelledError:
raise
except Exception as ex:
self.logger().error(
f"Unexpected error with NDAX WebSocket connection. Retrying in 30 seconds. ({ex})",
exc_info=True
)
if self._ws_adaptor is not None:
await self._ws_adaptor.close()
self._ws_adaptor = None
await asyncio.sleep(30.0) | null |
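# Hedged usage sketch (constructor arguments are illustrative, and the
# throttler's rate-limit list is an assumption): the data source is normally
# driven by an asyncio task that drains user events into a queue.
#
#   throttler = AsyncThrottler(CONSTANTS.RATE_LIMITS)
#   auth = NdaxAuth(uid="1", api_key="...", secret_key="...", account_name="main")
#   data_source = NdaxAPIUserStreamDataSource(throttler, auth)
#   queue: asyncio.Queue = asyncio.Queue()
#   task = asyncio.get_event_loop().create_task(data_source.listen_for_user_stream(queue))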
128 | import sys
from json import loads
from typing import Iterator
from galaxy.datatypes.tabular import Tabular
from galaxy.model import DatasetInstance
class BaseDataProvider:
"""
Base class for data providers. Data providers both:
- read and package data from datasets
- write subsets of data to new datasets
"""
original_dataset: DatasetInstance
def __init__(
self,
converted_dataset=None,
original_dataset=None,
dependencies=None,
error_max_vals="Only the first %i values are returned.",
):
"""Create basic data provider."""
self.converted_dataset = converted_dataset
self.original_dataset = original_dataset
self.dependencies = dependencies
self.error_max_vals = error_max_vals
def has_data(self, **kwargs):
"""
Returns true if dataset has data in the specified genome window, false
otherwise.
"""
raise Exception("Unimplemented Function")
def get_iterator(self, data_file, chrom, start, end, **kwargs) -> Iterator[str]:
"""
Returns an iterator that provides data in the region chrom:start-end
"""
raise Exception("Unimplemented Function")
def METHOD_NAME(self, iterator, start_val=0, max_vals=None, **kwargs):
"""
Process data from an iterator to a format that can be provided to client.
"""
raise Exception("Unimplemented Function")
def get_data(self, chrom, start, end, start_val=0, max_vals=sys.maxsize, **kwargs):
"""
Returns data as specified by kwargs. start_val is the first element to
return and max_vals indicates the number of values to return.
Return value must be a dictionary with the following attributes:
dataset_type, data
"""
iterator = self.get_iterator(chrom, start, end)
return self.METHOD_NAME(iterator, start_val, max_vals, **kwargs)
def write_data_to_file(self, filename, **kwargs):
"""
Write data in region defined by chrom, start, and end to a file.
"""
raise Exception("Unimplemented Function")
class ColumnDataProvider(BaseDataProvider):
"""Data provider for columnar data"""
MAX_LINES_RETURNED = 30000
def __init__(self, original_dataset, max_lines_returned=MAX_LINES_RETURNED):
# Compatibility check.
if not isinstance(original_dataset.datatype, Tabular):
raise Exception("Data provider can only be used with tabular data")
# Attribute init.
self.original_dataset = original_dataset
# allow throttling
self.max_lines_returned = max_lines_returned
def get_data(self, columns=None, start_val=0, max_vals=None, skip_comments=True, **kwargs):
"""
Returns data from specified columns in dataset. Format is list of lists
where each list is a line of data.
"""
if not columns:
raise TypeError("parameter required: columns")
# TODO: validate kwargs
try:
max_vals = int(max_vals)
max_vals = min([max_vals, self.max_lines_returned])
except (ValueError, TypeError):
max_vals = self.max_lines_returned
try:
start_val = int(start_val)
start_val = max([start_val, 0])
except (ValueError, TypeError):
start_val = 0
# skip comment lines (if any/avail)
# pre: should have original_dataset and
if (
skip_comments
and self.original_dataset.metadata.comment_lines
and start_val < self.original_dataset.metadata.comment_lines
):
start_val = int(self.original_dataset.metadata.comment_lines)
# columns is an array of ints for now (should handle column names later)
columns = loads(columns)
for column in columns:
assert (column < self.original_dataset.metadata.columns) and (
column >= 0
), "column index (%d) must be positive and less" % (column) + " than the number of columns: %d" % (
self.original_dataset.metadata.columns
)
# set up the response, column lists
response = {}
response["data"] = data = [[] for column in columns]
response["meta"] = meta = [{"min": None, "max": None, "count": 0, "sum": 0} for column in columns]
column_types = [self.original_dataset.metadata.column_types[column] for column in columns]
# function for casting by column_types
def cast_val(val, type):
"""Cast value based on type. Return None if can't be cast"""
if type == "int":
try:
val = int(val)
except ValueError:
return None
elif type == "float":
try:
val = float(val)
except ValueError:
return None
return val
returning_data = False
f = open(self.original_dataset.file_name)
# TODO: add f.seek if given fptr in kwargs
for count, line in enumerate(f):
# check line v. desired start, end
if count < start_val:
continue
if (count - start_val) >= max_vals:
break
returning_data = True
fields = line.split()
fields_len = len(fields)
            # NOTE: this will return None/null for aberrant column values (including bad indices)
for index, column in enumerate(columns):
column_val = None
column_type = column_types[index]
if column < fields_len:
column_val = cast_val(fields[column], column_type)
if column_val is not None:
# if numeric, maintain min, max, sum
if column_type == "float" or column_type == "int":
if (meta[index]["min"] is None) or (column_val < meta[index]["min"]):
meta[index]["min"] = column_val
if (meta[index]["max"] is None) or (column_val > meta[index]["max"]):
meta[index]["max"] = column_val
meta[index]["sum"] += column_val
# maintain a count - for other stats
meta[index]["count"] += 1
data[index].append(column_val)
response["endpoint"] = dict(last_line=(count - 1), file_ptr=f.tell())
f.close()
if not returning_data:
return None
for index, meta in enumerate(response["meta"]):
column_type = column_types[index]
count = meta["count"]
if (column_type == "float" or column_type == "int") and count:
meta["mean"] = float(meta["sum"]) / count
sorted_data = sorted(response["data"][index])
                # integer division keeps the index valid in Python 3
                middle_index = count // 2
                if count % 2 == 0:
                    meta["median"] = (sorted_data[middle_index - 1] + sorted_data[middle_index]) / 2.0
                else:
                    meta["median"] = sorted_data[middle_index]
# ugh ... metadata_data_lines is not a reliable source; hafta have an EOF
return response | null |
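# Illustrative sketch (the tabular dataset object is assumed): requesting two
# columns returns parallel value lists plus per-column summary stats.
#
#   provider = ColumnDataProvider(original_dataset=tabular_dataset)
#   response = provider.get_data(columns="[0, 2]", max_vals=100)
#   response["data"]  # -> [[col0 values...], [col2 values...]]
#   response["meta"]  # -> [{"min": ..., "max": ..., "count": ..., "sum": ...}, ...]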
129 | #!/usr/bin/env python3
import cv2
import numpy as np
import depthai as dai
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-res", "--resolution", type=str, default="720",
help="Sets the resolution on mono cameras. Options: 800 | 720 | 400")
parser.add_argument("-md", "--mesh_dir", type=str, default=None,
help="Output directory for mesh files. If not specified mesh files won't be saved")
parser.add_argument("-lm", "--load_mesh", default=False, action="store_true",
help="Read camera intrinsics, generate mesh files and load them into the stereo node.")
args = parser.parse_args()
meshDirectory = args.mesh_dir # Output dir for mesh files
generateMesh = args.load_mesh # Load mesh files
RES_MAP = {
'800': {'w': 1280, 'h': 800, 'res': dai.MonoCameraProperties.SensorResolution.THE_800_P },
'720': {'w': 1280, 'h': 720, 'res': dai.MonoCameraProperties.SensorResolution.THE_720_P },
'400': {'w': 640, 'h': 400, 'res': dai.MonoCameraProperties.SensorResolution.THE_400_P }
}
if args.resolution not in RES_MAP:
exit("Unsupported resolution!")
resolution = RES_MAP[args.resolution]
def getMesh(calibData):
M1 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_B, resolution['w'], resolution['h']))
d1 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_B))
R1 = np.array(calibData.getStereoLeftRectificationRotation())
M2 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_C, resolution['w'], resolution['h']))
d2 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_C))
R2 = np.array(calibData.getStereoRightRectificationRotation())
mapXL, mapYL = cv2.initUndistortRectifyMap(M1, d1, R1, M2, (resolution['w'], resolution['h']), cv2.CV_32FC1)
mapXR, mapYR = cv2.initUndistortRectifyMap(M2, d2, R2, M2, (resolution['w'], resolution['h']), cv2.CV_32FC1)
meshCellSize = 16
meshLeft = []
meshRight = []
for y in range(mapXL.shape[0] + 1):
if y % meshCellSize == 0:
rowLeft = []
rowRight = []
for x in range(mapXL.shape[1] + 1):
if x % meshCellSize == 0:
if y == mapXL.shape[0] and x == mapXL.shape[1]:
rowLeft.append(mapYL[y - 1, x - 1])
rowLeft.append(mapXL[y - 1, x - 1])
rowRight.append(mapYR[y - 1, x - 1])
rowRight.append(mapXR[y - 1, x - 1])
elif y == mapXL.shape[0]:
rowLeft.append(mapYL[y - 1, x])
rowLeft.append(mapXL[y - 1, x])
rowRight.append(mapYR[y - 1, x])
rowRight.append(mapXR[y - 1, x])
elif x == mapXL.shape[1]:
rowLeft.append(mapYL[y, x - 1])
rowLeft.append(mapXL[y, x - 1])
rowRight.append(mapYR[y, x - 1])
rowRight.append(mapXR[y, x - 1])
else:
rowLeft.append(mapYL[y, x])
rowLeft.append(mapXL[y, x])
rowRight.append(mapYR[y, x])
rowRight.append(mapXR[y, x])
if (mapXL.shape[1] % meshCellSize) % 2 != 0:
rowLeft.append(0)
rowLeft.append(0)
rowRight.append(0)
rowRight.append(0)
meshLeft.append(rowLeft)
meshRight.append(rowRight)
meshLeft = np.array(meshLeft)
meshRight = np.array(meshRight)
return meshLeft, meshRight
def METHOD_NAME(meshLeft, meshRight, outputPath):
print("Saving mesh to:", outputPath)
meshLeft.tofile(outputPath + "/left_mesh.calib")
meshRight.tofile(outputPath + "/right_mesh.calib")
def create_pipeline(device: dai.Device) -> dai.Pipeline:
calibData = device.readCalibration()
print("Creating Stereo Depth pipeline")
pipeline = dai.Pipeline()
camLeft = pipeline.create(dai.node.MonoCamera)
camLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
camRight = pipeline.create(dai.node.MonoCamera)
camRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
xoutRight = pipeline.create(dai.node.XLinkOut)
xoutRight.setStreamName("right")
camRight.out.link(xoutRight.input)
for monoCam in (camLeft, camRight): # Common config
monoCam.setResolution(resolution['res'])
# monoCam.setFps(20.0)
stereo = pipeline.create(dai.node.StereoDepth)
camLeft.out.link(stereo.left)
camRight.out.link(stereo.right)
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
stereo.setRectifyEdgeFillColor(0) # Black, to better see the cutout
stereo.setLeftRightCheck(True)
stereo.setExtendedDisparity(True)
xoutDisparity = pipeline.create(dai.node.XLinkOut)
xoutDisparity.setStreamName("disparity")
stereo.disparity.link(xoutDisparity.input)
xoutRectifRight = pipeline.create(dai.node.XLinkOut)
xoutRectifRight.setStreamName("rectifiedRight")
stereo.rectifiedRight.link(xoutRectifRight.input)
# Create custom meshes from calibration data. Here you could also
# load your own mesh files, or generate them in any other way.
leftMesh, rightMesh = getMesh(calibData)
if generateMesh:
meshLeft = list(leftMesh.tobytes())
meshRight = list(rightMesh.tobytes())
# Load mesh data to the StereoDepth node
stereo.loadMeshData(meshLeft, meshRight)
if meshDirectory is not None:
METHOD_NAME(leftMesh, rightMesh, meshDirectory)
return pipeline
with dai.Device() as device:
device.startPipeline(create_pipeline(device))
# Create a receive queue for each stream
qList = [device.getOutputQueue(stream, 8, blocking=False) for stream in ['right', 'rectifiedRight', 'disparity']]
while True:
for q in qList:
name = q.getName()
frame = q.get().getCvFrame()
cv2.imshow(name, frame)
if cv2.waitKey(1) == ord("q"):
break | null |
130 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import threading
import json
from aliyunsdkcore.acs_exception.exceptions import ServerException
from aliyunsdkcore.endpoint.endpoint_resolver_base import EndpointResolverBase
from aliyunsdkcore.endpoint.location.DescribeEndpointsRequest import DescribeEndpointsRequest
DEFAULT_LOCATION_SERVICE_ENDPOINT = "location-readonly.aliyuncs.com"
class LocationServiceEndpointResolver(EndpointResolverBase):
def __init__(self, client):
EndpointResolverBase.__init__(self)
self._location_service_endpoint = DEFAULT_LOCATION_SERVICE_ENDPOINT
self._client = client
self._invalid_product_codes = set()
self._invalid_region_ids = set()
self._valid_product_codes = set()
self._valid_region_ids = set()
def set_location_service_endpoint(self, endpoint):
self._location_service_endpoint = endpoint
def resolve(self, request):
if not request.location_service_code:
return None
if request.product_code_lower in self._invalid_product_codes:
return None
if request.region_id in self._invalid_region_ids:
return None
key = self.get_endpoint_key_from_request(request)
if key in self.endpoints_data:
            # The endpoint can be None when the last fetch failed
return self.endpoints_data[key]
lock = threading.Lock()
with lock:
return self._get_endpoint_from_location_service(key, request)
def _get_endpoint_from_location_service(self, key, request):
        # in case another thread has already fetched the endpoint
if key in self.endpoints_data:
return self.endpoints_data.get(key)
self._call_location_service(key, request)
return self.endpoints_data.get(key)
def _call_location_service(self, key, raw_request):
request = DescribeEndpointsRequest()
request.set_protocol_type("https")
request.set_accept_format("json")
request.set_Id(raw_request.region_id)
request.set_ServiceCode(raw_request.location_service_code)
request.set_Type(raw_request.endpoint_type)
request.endpoint = self._location_service_endpoint
try:
response = self._client.do_action_with_exception(request)
except ServerException as e:
if "InvalidRegionId" == e.get_error_code() and \
"The specified region does not exist." == e.get_error_msg():
                # No such region
self._invalid_region_ids.add(raw_request.region_id)
self.put_endpoint_entry(key, None)
return
elif "Illegal Parameter" == e.get_error_code() and \
"Please check the parameters" == e.get_error_msg():
# No such product
self._invalid_product_codes.add(raw_request.product_code_lower)
self.put_endpoint_entry(key, None)
return
else:
raise e
        # As long as the code gets here,
        # the product code and the region id are valid;
        # the endpoint may still not be found
self._valid_product_codes.add(raw_request.product_code_lower)
self._valid_region_ids.add(raw_request.region_id)
found_flag = False
body = json.loads(response.decode('utf-8'))
for item in body["Endpoints"]["Endpoint"]:
# Location return data has a typo: SerivceCode
# We still try to expect ServiceCode in case this typo would be fixed in the future
service_code = item.get("ServiceCode") or item.get("SerivceCode")
if service_code and item.get("Type") == raw_request.endpoint_type:
found_flag = True
self.put_endpoint_entry(key, item.get("Endpoint"))
break
if not found_flag:
self.put_endpoint_entry(key, None)
def is_product_code_valid(self, request):
if request.location_service_code:
return request.product_code_lower not in self._invalid_product_codes
return False
def is_region_id_valid(self, request):
return self.verify_region_id(request.region_id.lower())
def get_endpoint_key_from_request(self, request):
return self.METHOD_NAME(
request.product_code, request.location_service_code,
request.region_id, request.endpoint_type
)
def METHOD_NAME(self,
product_code,
location_service_code,
region_id, endpoint_type):
return ".".join([
product_code.lower(),
location_service_code,
region_id.lower(),
endpoint_type
]) | null |
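    # Illustrative example (made-up values): the key joins the four fields
    # with dots, e.g. METHOD_NAME('Ecs', 'ecs', 'cn-hangzhou', 'openAPI')
    # returns 'ecs.ecs.cn-hangzhou.openAPI' (product code and region id
    # are lower-cased).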
131 | #
# junitxml: extensions to Python unittest to get output junitxml
# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
#
# Copying permitted under the LGPL-3 licence, included with this library.
"""unittest compatible JUnit XML output."""
import datetime
import re
import time
import unittest
# same format as sys.version_info: "A tuple containing the five components of
# the version number: major, minor, micro, releaselevel, and serial. All
# values except releaselevel are integers; the release level is 'alpha',
# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
# releaselevel of 'dev' for unreleased under-development code.
#
# If the releaselevel is 'alpha' then the major/minor/micro components are not
# established at this point, and setup.py will use a version of next-$(revno).
# If the releaselevel is 'final', then the tarball will be major.minor.micro.
# Otherwise it is major.minor.micro~$(revno).
__version__ = (0, 7, 0, 'alpha', 0)
def test_suite():
import junitxml.tests
return junitxml.tests.test_suite()
class LocalTimezone(datetime.tzinfo):
def __init__(self):
self._offset = None
# It seems that the minimal possible implementation is to just return all
# None for every function, but then it breaks...
def utcoffset(self, dt):
if self._offset is None:
t = 1260423030 # arbitrary, but doesn't handle dst very well
dt = datetime.datetime
self._offset = (dt.fromtimestamp(t) - dt.utcfromtimestamp(t))
return self._offset
def dst(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return None
def _error_name(eclass):
module = eclass.__module__
if module not in ("__main__", "builtins", "exceptions"):
return ".".join([module, eclass.__name__])
return eclass.__name__
_non_cdata = "[\0-\b\x0B-\x1F\uD800-\uDFFF\uFFFE\uFFFF]+"
if "\\u" in _non_cdata:
_non_cdata = _non_cdata.decode("unicode-escape")
def _strip_invalid_chars(s, _sub=re.compile(_non_cdata, re.UNICODE).sub):
if not isinstance(s, unicode):
try:
s = s.decode("utf-8")
except UnicodeDecodeError:
s = s.decode("ascii", "replace")
return _sub("", s).encode("utf-8")
else:
def _strip_invalid_chars(s, _sub=re.compile(_non_cdata, re.UNICODE).sub):
return _sub("", s)
def _escape_content(s):
    return (_strip_invalid_chars(s)
        .replace("&", "&amp;")
        .replace("<", "&lt;")
        .replace("]]>", "]]&gt;"))
def _escape_attr(s):
    return (_strip_invalid_chars(s)
        .replace("&", "&amp;")
        .replace("<", "&lt;")
        .replace("]]>", "]]&gt;")
        .replace('"', "&quot;")
        .replace("\t", "&#x9;")
        .replace("\n", "&#xA;"))
class JUnitXmlResult(unittest.TestResult):
"""A TestResult which outputs JUnit compatible XML."""
def __init__(self, stream):
"""Create a JUnitXmlResult.
:param stream: A stream to write results to. Note that due to the
            nature of JUnit XML output, nothing will be written to the stream
until stopTestRun() is called.
"""
self.__super = super(JUnitXmlResult, self)
self.__super.__init__()
# GZ 2010-09-03: We have a problem if passed a text stream in Python 3
# as really we want to write raw UTF-8 to ensure that
# the encoding is not mangled later
self._stream = stream
self._results = []
self._set_time = None
self._test_start = None
self._run_start = None
self._tz_info = None
def startTestRun(self):
"""Start a test run."""
self._run_start = self._now()
def _get_tzinfo(self):
if self._tz_info is None:
self._tz_info = LocalTimezone()
return self._tz_info
def _now(self):
if self._set_time is not None:
return self._set_time
else:
return datetime.datetime.now(self._get_tzinfo())
def time(self, a_datetime):
self._set_time = a_datetime
if (self._run_start is not None and
self._run_start > a_datetime):
self._run_start = a_datetime
def startTest(self, test):
self.__super.startTest(test)
self._test_start = self._now()
def _duration(self, from_datetime):
try:
delta = self._now() - from_datetime
except TypeError:
n = self._now()
delta = datetime.timedelta(-1)
seconds = delta.days * 3600*24 + delta.seconds
return seconds + 0.000001 * delta.microseconds
def _test_case_string(self, test):
duration = self._duration(self._test_start)
test_id = test.id()
# Split on the last dot not inside a parameter
class_end = test_id.rfind(".", 0, test_id.find("("))
if class_end == -1:
classname, name = "", test_id
else:
classname, name = test_id[:class_end], test_id[class_end+1:]
self._results.append('<testcase classname="%s" name="%s" '
'time="%0.3f"' % (_escape_attr(classname), _escape_attr(name), duration))
def stopTestRun(self):
"""Stop a test run.
This allows JUnitXmlResult to output the XML representation of the test
run.
"""
duration = self._duration(self._run_start)
self._stream.write('<testsuite errors="%d" failures="%d" name="" '
'tests="%d" time="%0.3f">\n' % (len(self.errors),
len(self.failures) + len(getattr(self, "unexpectedSuccesses", ())),
self.testsRun, duration))
self._stream.write(''.join(self._results))
self._stream.write('</testsuite>\n')
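        # Illustrative output sketch (counts and names are made up): a run
        # with one passing and one failing test produces XML along these lines:
        #
        #   <testsuite errors="0" failures="1" name="" tests="2" time="0.004">
        #   <testcase classname="pkg.TestFoo" name="test_ok" time="0.001"/>
        #   <testcase classname="pkg.TestFoo" name="test_bad" time="0.002">
        #   <failure type="AssertionError">...</failure>
        #   </testcase>
        #   </testsuite>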
def METHOD_NAME(self, test, error):
self.__super.METHOD_NAME(test, error)
self._test_case_string(test)
self._results.append('>\n')
self._results.append('<error type="%s">%s</error>\n</testcase>\n' % (
_escape_attr(_error_name(error[0])),
_escape_content(self._exc_info_to_string(error, test))))
def addFailure(self, test, error):
self.__super.addFailure(test, error)
self._test_case_string(test)
self._results.append('>\n')
self._results.append('<failure type="%s">%s</failure>\n</testcase>\n' %
(_escape_attr(_error_name(error[0])),
_escape_content(self._exc_info_to_string(error, test))))
def addSuccess(self, test):
self.__super.addSuccess(test)
self._test_case_string(test)
self._results.append('/>\n')
def addSkip(self, test, reason):
try:
self.__super.addSkip(test, reason)
except AttributeError:
# Python < 2.7|3.1
pass
self._test_case_string(test)
self._results.append('>\n')
self._results.append('<skip>%s</skip>\n</testcase>\n'% _escape_attr(reason))
def addUnexpectedSuccess(self, test):
try:
self.__super.addUnexpectedSuccess(test)
except AttributeError:
# Python < 2.7|3.1
pass
self._test_case_string(test)
self._results.append('>\n')
self._results.append('<failure type="unittest.case._UnexpectedSuccess"/>\n</testcase>\n')
def addExpectedFailure(self, test, error):
try:
self.__super.addExpectedFailure(test, error)
except AttributeError:
# Python < 2.7|3.1
pass
self._test_case_string(test)
self._results.append('/>\n')
| null |
132 | import pytest
from api.base.settings.defaults import API_BASE
from osf.migrations import ensure_invisible_and_inactive_schema
from osf.models import RegistrationSchema
from osf_tests.factories import (
AuthUserFactory,
)
pytestmark = pytest.mark.django_db
SCHEMA_VERSION = 2
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.fixture()
def METHOD_NAME():
return RegistrationSchema.objects.filter(
name='OSF Preregistration',
schema_version=SCHEMA_VERSION
).first()
@pytest.fixture(autouse=True)
def invisible_and_inactive_schema():
return ensure_invisible_and_inactive_schema()
class TestDeprecatedMetaSchemaDetail:
def test_deprecated_metaschemas_routes(self, app, user, METHOD_NAME):
# test base /metaschemas/ GET with min version
url = '/{}metaschemas/?version=2.7'.format(API_BASE)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
# test GET with higher version
url = '/{}metaschemas/?version=2.8'.format(API_BASE)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'This route has been deprecated. It was last available in version 2.7'
# test /metaschemas/registrations/
url = '/{}metaschemas/registrations/{}/?version=2.8'.format(API_BASE, METHOD_NAME._id)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
# test /metaschemas/registrations/ deprecated version
url = '/{}metaschemas/registrations/{}/?version=2.9'.format(API_BASE, METHOD_NAME._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'This route has been deprecated. It was last available in version 2.8'
@pytest.mark.django_db
class TestRegistrationSchemaDetail:
def test_schemas_detail_visibility(self, app, user, METHOD_NAME):
# test_pass_authenticated_user_can_retrieve_schema
url = '/{}schemas/registrations/{}/'.format(API_BASE, METHOD_NAME._id)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
data = res.json['data']['attributes']
assert data['name'] == 'OSF Preregistration'
assert data['schema_version'] == 2
assert res.json['data']['id'] == METHOD_NAME._id
# test_pass_unauthenticated_user_can_view_schemas
res = app.get(url)
assert res.status_code == 200
# test_inactive_metaschema_returned
inactive_schema = RegistrationSchema.objects.get(
name='Election Research Preacceptance Competition', active=False)
url = '/{}schemas/registrations/{}/'.format(API_BASE, inactive_schema._id)
res = app.get(url)
assert res.status_code == 200
assert res.json['data']['attributes']['name'] == 'Election Research Preacceptance Competition'
assert res.json['data']['attributes']['active'] is False
# test_invalid_metaschema_not_found
url = '/{}schemas/registrations/garbage/'.format(API_BASE)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 404
def test_registration_schema_schema_blocks(self, app, user, METHOD_NAME):
# test_authenticated_user_can_retrieve_schema_schema_blocks
url = '/{}schemas/registrations/{}/schema_blocks/'.format(API_BASE, METHOD_NAME._id)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
# test_unauthenticated_user_can_retrieve_schema_schema_blocks
url = '/{}schemas/registrations/{}/schema_blocks/'.format(API_BASE, METHOD_NAME._id)
res = app.get(url)
assert res.status_code == 200
# test_schema_blocks_detail
schema_block_id = METHOD_NAME.schema_blocks.first()._id
url = '/{}schemas/registrations/{}/schema_blocks/{}/'.format(API_BASE, METHOD_NAME._id, schema_block_id)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['id'] == schema_block_id | null |
133 | # Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Materializer for Pillow Image objects."""
import os
import tempfile
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Tuple, Type
from PIL import Image
from zenml.enums import ArtifactType, VisualizationType
from zenml.io import fileio
from zenml.logger import get_logger
from zenml.materializers.base_materializer import BaseMaterializer
from zenml.utils import io_utils
if TYPE_CHECKING:
from zenml.metadata.metadata_types import MetadataType
logger = get_logger(__name__)
DEFAULT_IMAGE_FILENAME = "image_file"
DEFAULT_IMAGE_EXTENSION = "PNG"
class PillowImageMaterializer(BaseMaterializer):
"""Materializer for Image.Image objects.
This materializer takes a PIL image object and returns a PIL image object.
It handles all the source image formats supported by PIL as listed here:
https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html.
"""
ASSOCIATED_TYPES: ClassVar[Tuple[Type[Any], ...]] = (Image.Image,)
ASSOCIATED_ARTIFACT_TYPE: ClassVar[ArtifactType] = ArtifactType.DATA
def load(self, data_type: Type[Image.Image]) -> Image.Image:
"""Read from artifact store.
Args:
data_type: An Image.Image type.
Returns:
An Image.Image object.
"""
files = io_utils.find_files(self.uri, f"{DEFAULT_IMAGE_FILENAME}.*")
filepath = [file for file in files if not fileio.isdir(file)][0]
# create a temporary folder
temp_dir = tempfile.TemporaryDirectory(prefix="zenml-temp-")
temp_file = os.path.join(
temp_dir.name,
f"{DEFAULT_IMAGE_FILENAME}{os.path.splitext(filepath)[1]}",
)
# copy from artifact store to temporary file
fileio.copy(filepath, temp_file)
return Image.open(temp_file)
def save(self, image: Image.Image) -> None:
"""Write to artifact store.
Args:
image: An Image.Image object.
"""
temp_dir = tempfile.TemporaryDirectory(prefix="zenml-temp-")
file_extension = image.format or DEFAULT_IMAGE_EXTENSION
full_filename = f"{DEFAULT_IMAGE_FILENAME}.{file_extension}"
temp_image_path = os.path.join(temp_dir.name, full_filename)
# save the image in a temporary directory
image.save(temp_image_path)
# copy the saved image to the artifact store
artifact_store_path = os.path.join(self.uri, full_filename)
io_utils.copy(temp_image_path, artifact_store_path, overwrite=True) # type: ignore[attr-defined]
temp_dir.cleanup()
def METHOD_NAME(
self, image: Image.Image
) -> Dict[str, VisualizationType]:
"""Finds and saves the given image as a visualization.
Args:
image: The image to save as a visualization.
Returns:
A dictionary of visualization URIs and their types.
"""
file_extension = image.format or DEFAULT_IMAGE_EXTENSION
full_filename = f"{DEFAULT_IMAGE_FILENAME}.{file_extension}"
artifact_store_path = os.path.join(self.uri, full_filename)
return {artifact_store_path: VisualizationType.IMAGE}
def extract_metadata(
self, image: Image.Image
) -> Dict[str, "MetadataType"]:
"""Extract metadata from the given `Image` object.
Args:
image: The `Image` object to extract metadata from.
Returns:
The extracted metadata as a dictionary.
"""
metadata = {
"width": image.width,
"height": image.height,
"mode": str(image.mode),
}
if hasattr(image, "filename"):
metadata["original_filename"] = str(image.filename)
return metadata # type: ignore[return-value] | null |
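# Hedged usage sketch: in a ZenML pipeline the materializer runs
# automatically; its pieces can also be exercised directly (the uri and
# constructor signature below are assumptions).
#
#   materializer = PillowImageMaterializer(uri="s3://bucket/artifacts/img")
#   img = Image.new("RGB", (64, 64))
#   materializer.save(img)
#   materializer.extract_metadata(img)  # {'width': 64, 'height': 64, 'mode': 'RGB'}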
134 | # This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
import unittest as ut
from h5py import h5p, h5f, version
from .common import TestCase
class TestLibver(TestCase):
"""
Feature: Setting/getting lib ver bounds
"""
def test_libver(self):
""" Test libver bounds set/get """
plist = h5p.create(h5p.FILE_ACCESS)
plist.set_libver_bounds(h5f.LIBVER_EARLIEST, h5f.LIBVER_LATEST)
self.assertEqual((h5f.LIBVER_EARLIEST, h5f.LIBVER_LATEST),
plist.get_libver_bounds())
@ut.skipIf(version.hdf5_version_tuple < (1, 10, 2),
'Requires HDF5 1.10.2 or later')
def test_libver_v18(self):
""" Test libver bounds set/get for H5F_LIBVER_V18"""
plist = h5p.create(h5p.FILE_ACCESS)
plist.set_libver_bounds(h5f.LIBVER_EARLIEST, h5f.LIBVER_V18)
self.assertEqual((h5f.LIBVER_EARLIEST, h5f.LIBVER_V18),
plist.get_libver_bounds())
@ut.skipIf(version.hdf5_version_tuple < (1, 10, 2),
'Requires HDF5 1.10.2 or later')
def test_libver_v110(self):
""" Test libver bounds set/get for H5F_LIBVER_V110"""
plist = h5p.create(h5p.FILE_ACCESS)
plist.set_libver_bounds(h5f.LIBVER_V18, h5f.LIBVER_V110)
self.assertEqual((h5f.LIBVER_V18, h5f.LIBVER_V110),
plist.get_libver_bounds())
@ut.skipIf(version.hdf5_version_tuple < (1, 11, 4),
'Requires HDF5 1.11.4 or later')
def test_libver_v112(self):
""" Test libver bounds set/get for H5F_LIBVER_V112"""
plist = h5p.create(h5p.FILE_ACCESS)
plist.set_libver_bounds(h5f.LIBVER_V18, h5f.LIBVER_V112)
self.assertEqual((h5f.LIBVER_V18, h5f.LIBVER_V112),
plist.get_libver_bounds())
class TestDA(TestCase):
'''
Feature: setting/getting chunk cache size on a dataset access property list
'''
def test_chunk_cache(self):
'''test get/set chunk cache '''
dalist = h5p.create(h5p.DATASET_ACCESS)
nslots = 10000 # 40kb hash table
nbytes = 1000000 # 1MB cache size
w0 = .5 # even blend of eviction strategy
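        # nslots sizes the chunk hash table, nbytes caps the raw-data cache,
        # and w0 (between 0 and 1) biases eviction toward chunks that have
        # been fully read or written.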
dalist.set_chunk_cache(nslots, nbytes, w0)
self.assertEqual((nslots, nbytes, w0),
dalist.get_chunk_cache())
@ut.skipIf(version.hdf5_version_tuple < (1, 8, 17),
'Requires HDF5 1.8.17 or later')
def test_efile_prefix(self):
'''test get/set efile prefix '''
dalist = h5p.create(h5p.DATASET_ACCESS)
self.assertEqual(dalist.get_efile_prefix().decode(), '')
efile_prefix = "path/to/external/dataset"
dalist.set_efile_prefix(efile_prefix.encode('utf-8'))
self.assertEqual(dalist.get_efile_prefix().decode(),
efile_prefix)
efile_prefix = "${ORIGIN}"
dalist.set_efile_prefix(efile_prefix.encode('utf-8'))
self.assertEqual(dalist.get_efile_prefix().decode(),
efile_prefix)
@ut.skipIf(version.hdf5_version_tuple < (1, 10, 2),
'Requires HDF5 1.10.2 or later')
def test_virtual_prefix(self):
'''test get/set virtual prefix '''
dalist = h5p.create(h5p.DATASET_ACCESS)
self.assertEqual(dalist.get_virtual_prefix().decode(), '')
virtual_prefix = "path/to/virtual/dataset"
dalist.set_virtual_prefix(virtual_prefix.encode('utf-8'))
self.assertEqual(dalist.get_virtual_prefix().decode(),
virtual_prefix)
class TestFA(TestCase):
'''
Feature: setting/getting mdc config on a file access property list
'''
def test_mdc_config(self):
'''test get/set mdc config '''
falist = h5p.create(h5p.FILE_ACCESS)
config = falist.get_mdc_config()
falist.set_mdc_config(config)
def test_set_alignment(self):
        '''test get/set alignment '''
        falist = h5p.create(h5p.FILE_ACCESS)
        threshold = 10 * 1024 # threshold of 10 KiB
        alignment = 1024 * 1024 # alignment of 1 MiB
falist.set_alignment(threshold, alignment)
self.assertEqual((threshold, alignment),
falist.get_alignment())
@ut.skipUnless(
version.hdf5_version_tuple >= (1, 12, 1) or
(version.hdf5_version_tuple[:2] == (1, 10) and version.hdf5_version_tuple[2] >= 7),
'Requires HDF5 1.12.1 or later or 1.10.x >= 1.10.7')
def test_set_file_locking(self):
'''test get/set file locking'''
falist = h5p.create(h5p.FILE_ACCESS)
use_file_locking = False
ignore_when_disabled = False
falist.set_file_locking(use_file_locking, ignore_when_disabled)
self.assertEqual((use_file_locking, ignore_when_disabled),
falist.get_file_locking())
class TestPL(TestCase):
def test_obj_track_times(self):
"""
        tests the object track times set/get
"""
# test for groups
gcid = h5p.create(h5p.GROUP_CREATE)
gcid.set_obj_track_times(False)
self.assertEqual(False, gcid.get_obj_track_times())
gcid.set_obj_track_times(True)
self.assertEqual(True, gcid.get_obj_track_times())
# test for datasets
dcid = h5p.create(h5p.DATASET_CREATE)
dcid.set_obj_track_times(False)
self.assertEqual(False, dcid.get_obj_track_times())
dcid.set_obj_track_times(True)
self.assertEqual(True, dcid.get_obj_track_times())
# test for generic objects
ocid = h5p.create(h5p.OBJECT_CREATE)
ocid.set_obj_track_times(False)
self.assertEqual(False, ocid.get_obj_track_times())
ocid.set_obj_track_times(True)
self.assertEqual(True, ocid.get_obj_track_times())
def test_link_creation_tracking(self):
"""
tests the link creation order set/get
"""
gcid = h5p.create(h5p.GROUP_CREATE)
gcid.set_link_creation_order(0)
self.assertEqual(0, gcid.get_link_creation_order())
flags = h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED
gcid.set_link_creation_order(flags)
self.assertEqual(flags, gcid.get_link_creation_order())
# test for file creation
fcpl = h5p.create(h5p.FILE_CREATE)
fcpl.set_link_creation_order(flags)
self.assertEqual(flags, fcpl.get_link_creation_order())
def METHOD_NAME(self):
"""
test the attribute phase change
"""
cid = h5p.create(h5p.OBJECT_CREATE)
# test default value
ret = cid.get_attr_phase_change()
self.assertEqual((8,6), ret)
        # max_compact must be < 65536 (64k)
with self.assertRaises(ValueError):
cid.set_attr_phase_change(65536, 6)
# Using dense attributes storage to avoid 64kb size limitation
# for a single attribute in compact attribute storage.
cid.set_attr_phase_change(0, 0)
self.assertEqual((0,0), cid.get_attr_phase_change()) | null |
135 | import os, json
from opendm import log
from opendm.pseudogeo import get_pseudogeo_utm, get_pseudogeo_scale
from opendm.location import transformer
from pyproj import CRS
from osgeo import gdal
import numpy as np
import cv2
def get_rotation_matrix(rotation):
"""Get rotation as a 3x3 matrix."""
return cv2.Rodrigues(rotation)[0]
def matrix_to_rotation(rotation_matrix):
R = np.array(rotation_matrix, dtype=float)
# if not np.isclose(np.linalg.det(R), 1):
# raise ValueError("Determinant != 1")
# if not np.allclose(np.linalg.inv(R), R.T):
# raise ValueError("Not orthogonal")
return cv2.Rodrigues(R)[0].ravel()
def METHOD_NAME(shot):
"""The origin of the pose in world coordinates."""
return -get_rotation_matrix(np.array(shot['rotation'])).T.dot(np.array(shot['translation']))
def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset=None, pseudo_geotiff=None, a_matrix=None):
"""
Extract shots from OpenSfM's reconstruction.json
"""
pseudo_geocoords = None
if pseudo_geotiff is not None and os.path.exists(pseudo_geotiff):
# pseudogeo transform
utm_srs = get_pseudogeo_utm()
# the pseudo-georeferencing CRS UL corner is at 0,0
# but our shot coordinates aren't, so we need to offset them
raster = gdal.Open(pseudo_geotiff)
ulx, xres, _, uly, _, yres = raster.GetGeoTransform()
lrx = ulx + (raster.RasterXSize * xres)
lry = uly + (raster.RasterYSize * yres)
pseudo_geocoords = np.array([[1.0 / get_pseudogeo_scale() ** 2, 0, 0, ulx + lrx / 2.0],
[0, 1.0 / get_pseudogeo_scale() ** 2, 0, uly + lry / 2.0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
raster = None
pseudo = True
    # Couldn't get an SRS?
if utm_srs is None:
return None
crstrans = transformer(CRS.from_proj4(utm_srs), CRS.from_epsg("4326"))
if os.path.exists(reconstruction_file):
with open(reconstruction_file, 'r') as fin:
reconstructions = json.loads(fin.read())
feats = []
added_shots = {}
for recon in reconstructions:
cameras = recon.get('cameras', {})
for filename in recon.get('shots', {}):
shot = recon['shots'][filename]
cam = shot.get('camera')
                if (cam not in cameras) or (filename in added_shots):
continue
cam = cameras[cam]
if pseudo_geocoords is not None:
Rs, T = pseudo_geocoords[:3, :3], pseudo_geocoords[:3, 3]
Rs1 = np.linalg.inv(Rs)
origin = METHOD_NAME(shot)
# Translation
utm_coords = np.dot(Rs, origin) + T
trans_coords = crstrans.TransformPoint(utm_coords[0], utm_coords[1], utm_coords[2])
# Rotation
rotation_matrix = get_rotation_matrix(np.array(shot['rotation']))
rotation = matrix_to_rotation(np.dot(rotation_matrix, Rs1))
translation = origin
else:
rotation = shot['rotation']
# Just add UTM offset
origin = METHOD_NAME(shot)
utm_coords = [origin[0] + utm_offset[0],
origin[1] + utm_offset[1],
origin[2]]
if a_matrix is not None:
rotation = list(np.array(rotation).dot(a_matrix[:3,:3]))
utm_coords = list(a_matrix.dot(np.hstack((np.array(utm_coords), 1)))[:-1])
translation = utm_coords
trans_coords = crstrans.TransformPoint(utm_coords[0], utm_coords[1], utm_coords[2])
feats.append({
'type': 'Feature',
'properties': {
'filename': filename,
'focal': cam.get('focal', cam.get('focal_x')), # Focal ratio = focal length (mm) / max(sensor_width, sensor_height) (mm)
'width': cam.get('width', 0),
'height': cam.get('height', 0),
'capture_time': shot.get('capture_time', 0),
'translation': list(translation),
'rotation': list(rotation)
},
'geometry':{
'type': 'Point',
'coordinates': list(trans_coords)
}
})
added_shots[filename] = True
return {
'type': 'FeatureCollection',
'features': feats
}
else:
raise RuntimeError("%s does not exist." % reconstruction_file)
def merge_geojson_shots(geojson_shots_files, output_geojson_file):
result = {}
added_files = {}
for shot_file in geojson_shots_files:
with open(shot_file, "r") as f:
shots = json.loads(f.read())
if len(result) == 0:
for feat in shots.get('features', []):
added_files[feat['properties']['filename']] = True
# Use first file as base
result = shots
else:
# Append features if filename not already added
for feat in shots.get('features', []):
if not feat['properties']['filename'] in added_files:
result['features'].append(feat)
with open(output_geojson_file, "w") as f:
f.write(json.dumps(result)) | null |
136 | # Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from logging import getLogger
from conda.common.path import (
get_major_minor_version,
missing_pyc_files,
url_to_path,
win_path_backout,
)
log = getLogger(__name__)
def METHOD_NAME():
assert url_to_path("file:///etc/fstab") == "/etc/fstab"
assert url_to_path("file://localhost/etc/fstab") == "/etc/fstab"
assert url_to_path("file://127.0.0.1/etc/fstab") == "/etc/fstab"
assert url_to_path("file://::1/etc/fstab") == "/etc/fstab"
def test_url_to_path_windows_local():
assert url_to_path("file:///c|/WINDOWS/notepad.exe") == "c:/WINDOWS/notepad.exe"
assert url_to_path("file:///C:/WINDOWS/notepad.exe") == "C:/WINDOWS/notepad.exe"
assert (
url_to_path("file://localhost/C|/WINDOWS/notepad.exe")
== "C:/WINDOWS/notepad.exe"
)
assert (
url_to_path("file://localhost/c:/WINDOWS/notepad.exe")
== "c:/WINDOWS/notepad.exe"
)
assert url_to_path("C:\\Windows\\notepad.exe") == "C:\\Windows\\notepad.exe"
assert (
url_to_path("file:///C:/Program%20Files/Internet%20Explorer/iexplore.exe")
== "C:/Program Files/Internet Explorer/iexplore.exe"
)
assert (
url_to_path("C:\\Program Files\\Internet Explorer\\iexplore.exe")
== "C:\\Program Files\\Internet Explorer\\iexplore.exe"
)
def test_url_to_path_windows_unc():
assert (
url_to_path("file://windowshost/windowshare/path")
== "//windowshost/windowshare/path"
)
assert (
url_to_path("\\\\windowshost\\windowshare\\path")
== "\\\\windowshost\\windowshare\\path"
)
assert (
url_to_path("file://windowshost\\windowshare\\path")
== "//windowshost\\windowshare\\path"
)
assert (
url_to_path("file://\\\\machine\\shared_folder\\path\\conda")
== "\\\\machine\\shared_folder\\path\\conda"
)
def test_win_path_backout():
assert (
win_path_backout("file://\\\\machine\\shared_folder\\path\\conda")
== "file://machine/shared_folder/path/conda"
)
assert (
win_path_backout("file://\\\\machine\\shared\\ folder\\path\\conda")
== "file://machine/shared\\ folder/path/conda"
)
FILES = (
"bin/flask",
"lib/python2.7/site-packages/Flask-0.11.1-py2.7.egg-info/PKG-INFO",
"lib/python2.7/site-packages/Flask-0.11.1-py2.7.egg-info/SOURCES.txt",
"lib/python2.7/site-packages/Flask-0.11.1-py2.7.egg-info/dependency_links.txt",
"lib/python2.7/site-packages/Flask-0.11.1-py2.7.egg-info/entry_points.txt",
"lib/python2.7/site-packages/Flask-0.11.1-py2.7.egg-info/not-zip-safe",
"lib/python2.7/site-packages/Flask-0.11.1-py2.7.egg-info/requires.txt",
"lib/python2.7/site-packages/Flask-0.11.1-py2.7.egg-info/top_level.txt",
"lib/python2.7/site-packages/flask/__init__.py",
"lib/python2.7/site-packages/flask/__main__.py",
"lib/python2.7/site-packages/flask/_compat.py",
"lib/python2.7/site-packages/flask/app.py",
"lib/python2.7/site-packages/flask/blueprints.py",
"lib/python2.7/site-packages/flask/cli.py",
"lib/python2.7/site-packages/flask/config.py",
"lib/python2.7/site-packages/flask/ctx.py",
"lib/python2.7/site-packages/flask/debughelpers.py",
"lib/python2.7/site-packages/flask/ext/__init__.py",
)
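# missing_pyc_files pairs each .py file with the compiled path it would need:
# "27" expects a flat module.pyc next to the source, while "34"/"35" expect
# __pycache__/module.cpython-XY.pyc, as the assertions below show.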
def test_missing_pyc_files_27():
missing = missing_pyc_files("27", FILES)
assert len(missing) == 10
assert tuple(m[1] for m in missing) == (
"lib/python2.7/site-packages/flask/__init__.pyc",
"lib/python2.7/site-packages/flask/__main__.pyc",
"lib/python2.7/site-packages/flask/_compat.pyc",
"lib/python2.7/site-packages/flask/app.pyc",
"lib/python2.7/site-packages/flask/blueprints.pyc",
"lib/python2.7/site-packages/flask/cli.pyc",
"lib/python2.7/site-packages/flask/config.pyc",
"lib/python2.7/site-packages/flask/ctx.pyc",
"lib/python2.7/site-packages/flask/debughelpers.pyc",
"lib/python2.7/site-packages/flask/ext/__init__.pyc",
)
def test_missing_pyc_files_34():
missing = missing_pyc_files("34", FILES)
assert len(missing) == 10
assert tuple(m[1] for m in missing) == (
"lib/python2.7/site-packages/flask/__pycache__/__init__.cpython-34.pyc",
"lib/python2.7/site-packages/flask/__pycache__/__main__.cpython-34.pyc",
"lib/python2.7/site-packages/flask/__pycache__/_compat.cpython-34.pyc",
"lib/python2.7/site-packages/flask/__pycache__/app.cpython-34.pyc",
"lib/python2.7/site-packages/flask/__pycache__/blueprints.cpython-34.pyc",
"lib/python2.7/site-packages/flask/__pycache__/cli.cpython-34.pyc",
"lib/python2.7/site-packages/flask/__pycache__/config.cpython-34.pyc",
"lib/python2.7/site-packages/flask/__pycache__/ctx.cpython-34.pyc",
"lib/python2.7/site-packages/flask/__pycache__/debughelpers.cpython-34.pyc",
"lib/python2.7/site-packages/flask/ext/__pycache__/__init__.cpython-34.pyc",
)
def test_missing_pyc_files_35():
missing = missing_pyc_files("35", FILES)
assert len(missing) == 10
assert tuple(m[1] for m in missing) == (
"lib/python2.7/site-packages/flask/__pycache__/__init__.cpython-35.pyc",
"lib/python2.7/site-packages/flask/__pycache__/__main__.cpython-35.pyc",
"lib/python2.7/site-packages/flask/__pycache__/_compat.cpython-35.pyc",
"lib/python2.7/site-packages/flask/__pycache__/app.cpython-35.pyc",
"lib/python2.7/site-packages/flask/__pycache__/blueprints.cpython-35.pyc",
"lib/python2.7/site-packages/flask/__pycache__/cli.cpython-35.pyc",
"lib/python2.7/site-packages/flask/__pycache__/config.cpython-35.pyc",
"lib/python2.7/site-packages/flask/__pycache__/ctx.cpython-35.pyc",
"lib/python2.7/site-packages/flask/__pycache__/debughelpers.cpython-35.pyc",
"lib/python2.7/site-packages/flask/ext/__pycache__/__init__.cpython-35.pyc",
)
def test_get_major_minor_version_no_dot():
assert get_major_minor_version("3.5.2") == "3.5"
assert get_major_minor_version("27") == "2.7"
assert get_major_minor_version("bin/python2.7") == "2.7"
assert get_major_minor_version("lib/python34/site-packages/") == "3.4"
assert get_major_minor_version("python3") is None
assert get_major_minor_version("3.10.0") == "3.10"
assert get_major_minor_version("310") == "3.10"
assert get_major_minor_version("bin/python3.10") == "3.10"
assert get_major_minor_version("lib/python310/site-packages/") == "3.10"
assert get_major_minor_version("python3") is None
assert get_major_minor_version("3.5.2", False) == "35"
assert get_major_minor_version("27", False) == "27"
assert get_major_minor_version("bin/python2.7", False) == "27"
assert get_major_minor_version("lib/python34/site-packages/", False) == "34"
assert get_major_minor_version("python3", False) is None
assert get_major_minor_version("3.10.0", False) == "310"
assert get_major_minor_version("310", False) == "310"
assert get_major_minor_version("bin/python3.10", False) == "310"
assert get_major_minor_version("lib/python310/site-packages/", False) == "310"
assert get_major_minor_version("python3", False) is None | null |
137 | import pytest
from eth_utils.toolz import (
compose,
curry,
)
from hexbytes import (
HexBytes,
)
AMBIGUOUS_CONTRACT_ABI = [
{
"constant": False,
"inputs": [{"name": "input", "type": "uint256"}],
"name": "blockHashAmphithyronVersify",
"outputs": [{"name": "", "type": "uint256"}],
"payable": False,
"stateMutability": "nonpayable",
"type": "function",
},
{
"constant": False,
"inputs": [
{"name": "input", "type": "uint256"},
{"name": "uselessFlag", "type": "bool"},
],
"name": "identity",
"outputs": [{"name": "", "type": "uint256"}],
"payable": False,
"stateMutability": "nonpayable",
"type": "function",
},
{
"constant": False,
"inputs": [
{"name": "input", "type": "int256"},
{"name": "uselessFlag", "type": "bool"},
],
"name": "identity",
"outputs": [{"name": "", "type": "int256"}],
"payable": False,
"stateMutability": "nonpayable",
"type": "function",
},
]
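# The two `identity` entries differ only in input type (uint256 vs int256),
# which is what makes lookups by name alone ambiguous in the tests below.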
@pytest.fixture
def string_contract(w3, string_contract_factory, address_conversion_func):
deploy_txn = string_contract_factory.constructor("Caqalai").transact()
deploy_receipt = w3.eth.wait_for_transaction_receipt(deploy_txn)
assert deploy_receipt is not None
contract_address = address_conversion_func(deploy_receipt["contractAddress"])
contract = string_contract_factory(address=contract_address)
assert contract.address == contract_address
assert len(w3.eth.get_code(contract.address)) > 0
return contract
map_repr = compose(list, curry(map, repr))
@pytest.mark.parametrize(
"method,args,repr_func,expected",
(
(
"all_functions",
(),
map_repr,
[
"<Function blockHashAmphithyronVersify(uint256)>",
"<Function identity(uint256,bool)>",
"<Function identity(int256,bool)>",
],
),
(
"get_function_by_signature",
("identity(uint256,bool)",),
repr,
"<Function identity(uint256,bool)>",
),
(
"find_functions_by_name",
("identity",),
map_repr,
["<Function identity(uint256,bool)>", "<Function identity(int256,bool)>"],
),
(
"get_function_by_name",
("blockHashAmphithyronVersify",),
repr,
"<Function blockHashAmphithyronVersify(uint256)>",
),
(
"get_function_by_selector",
(b"\x00\x00\x00\x00",),
repr,
"<Function blockHashAmphithyronVersify(uint256)>",
),
(
"get_function_by_selector",
(0x00000000,),
repr,
"<Function blockHashAmphithyronVersify(uint256)>",
),
(
"get_function_by_selector",
("0x00000000",),
repr,
"<Function blockHashAmphithyronVersify(uint256)>",
),
(
"find_functions_by_args",
(1, True),
map_repr,
["<Function identity(uint256,bool)>", "<Function identity(int256,bool)>"],
),
(
"get_function_by_args",
(1,),
repr,
"<Function blockHashAmphithyronVersify(uint256)>",
),
),
)
def METHOD_NAME(w3, method, args, repr_func, expected):
contract = w3.eth.contract(abi=AMBIGUOUS_CONTRACT_ABI)
function = getattr(contract, method)(*args)
assert repr_func(function) == expected
@pytest.mark.parametrize(
"method,args,expected_message,expected_error",
(
(
"get_function_by_signature",
("identity(uint256, bool)",),
r"Function signature should not contain any spaces.*",
ValueError,
),
(
"get_function_by_name",
("identity",),
r"Found multiple functions with matching name*",
ValueError,
),
(
"get_function_by_name",
("undefined_function",),
r"Could not find any function with matching name",
ValueError,
),
(
"get_function_by_selector",
(b"\x00" * (4 + 1),),
f"expected value of size 4 bytes. Got: {(4 + 1)} bytes",
ValueError,
),
(
"get_function_by_args",
(1, True),
r"Found multiple functions with matching args*",
ValueError,
),
(
"get_function_by_args",
(1,) * 50,
r"Could not find any function with matching args",
ValueError,
),
),
)
def test_functions_error_messages(w3, method, args, expected_message, expected_error):
contract = w3.eth.contract(abi=AMBIGUOUS_CONTRACT_ABI)
with pytest.raises(expected_error, match=expected_message):
getattr(contract, method)(*args)
def test_contract_function_methods(string_contract):
set_value_func = string_contract.get_function_by_signature("setValue(string)")
get_value_func = string_contract.get_function_by_signature("getValue()")
assert isinstance(set_value_func("Hello").transact(), HexBytes)
assert get_value_func().call() == "Hello"
assert isinstance(set_value_func("Hello World").estimate_gas(), int)
assert isinstance(set_value_func("Hello World").build_transaction(), dict)
def test_diff_between_fn_and_fn_called(string_contract):
get_value_func = string_contract.get_function_by_signature("getValue()")
get_value_func_called = get_value_func()
assert get_value_func is not get_value_func_called
assert repr(get_value_func) == "<Function getValue()>"
assert repr(get_value_func_called) == "<Function getValue() bound to ()>" | null |
138 | '''
These functions are rather specific to Amod's deeplearning code
and should probably be either moved or completely deleted. It might
make sense to develop some more universal normalization utility functions.
'''
import numpy as np
def piecewise_linear_normalize(in_img_data, ref_img_data):
'''Function to piecewise linearly scale image intensities to training data landmarks.'''
    from sklearn import mixture
in_img_flat = np.ravel(in_img_data, 'C')
in_img_fg = in_img_flat[in_img_flat > 0].reshape(-1, 1)
clf_in = mixture.GaussianMixture(n_components=3, covariance_type='full')
clf_in.fit(in_img_fg)
ref_img_flat = np.ravel(ref_img_data, 'C')
ref_img_fg = ref_img_flat[ref_img_flat > 0].reshape(-1, 1)
clf_ref = mixture.GaussianMixture(n_components=3, covariance_type='full')
clf_ref.fit(ref_img_fg)
in_landmarks = np.asarray(sorted(clf_in.means_.squeeze()))
in_wm_std = np.sqrt(clf_in.covariances_[np.argmax(clf_in.means_)])
in_wm_threshold = in_landmarks[2] + 2*in_wm_std[0]
in_landmarks = np.append(np.asarray([0]), np.append(in_landmarks, in_wm_threshold))
ref_landmarks = np.asanyarray(sorted(clf_ref.means_.squeeze()))
    ref_wm_std = np.sqrt(clf_ref.covariances_[np.argmax(clf_ref.means_)])
ref_wm_threshold = 255
ref_landmarks = np.append(np.asarray([0]), np.append(ref_landmarks, ref_wm_threshold))
print(ref_landmarks)
print(in_landmarks)
out_img_data = np.zeros(in_img_data.shape)
# map intensities using these landmarks
for i in range(len(in_landmarks)-1):
m = (ref_landmarks[i+1] - ref_landmarks[i])/(in_landmarks[i+1] - in_landmarks[i])
c = (in_landmarks[i+1]*ref_landmarks[i] - in_landmarks[i]*ref_landmarks[i+1])/(in_landmarks[i+1] - in_landmarks[i])
out_img_data[(in_img_data > in_landmarks[i]) & (in_img_data <= in_landmarks[i+1])] = \
m * in_img_data[(in_img_data > in_landmarks[i]) & (in_img_data <= in_landmarks[i+1])] + c
out_img_data[(in_img_data > in_landmarks[-1])] = 255
return out_img_data
def wm_peak_normalize(in_img_data):
'''Function to scale image intensities by setting wm peak to 200.'''
    from sklearn import mixture
in_img_flat = np.ravel(in_img_data, 'C')
in_img_fg = in_img_flat[in_img_flat > 0].reshape(-1, 1)
# clf = mixture.GMM(n_components=3, covariance_type='full')
clf = mixture.GaussianMixture(n_components=3, covariance_type='full', n_init=5)
clf.fit(in_img_fg)
# max of means is the wm centroid for t1w images
wm_peak_intensity = clf.means_.max()
wm_scaling = 200.0 / wm_peak_intensity
print(wm_peak_intensity)
out_img_data = in_img_data * wm_scaling
return out_img_data
def robust_normalize(in_img_data):
in_img_flat = np.ravel(in_img_data, 'C')
in_img_fg = in_img_flat[in_img_flat > 0].reshape(-1, 1)
    p01 = np.percentile(in_img_fg, q=1)
    p99 = np.percentile(in_img_fg, q=99)
    scaling = 255.0 / (p99 - p01) # set p99 to 255
    in_img_data[(in_img_data < p01) & (in_img_data > 0)] = p01
print(scaling)
out_img_data = (in_img_data) * scaling
return out_img_data
def wm_peak_normalize_t2w(in_img_data):
"""Function to scale image intensities by setting wm peak to 200"""
    from sklearn import mixture
in_img_flat = np.ravel(in_img_data, 'C')
in_img_fg = in_img_flat[in_img_flat > 0].reshape(-1, 1)
    p90 = np.percentile(in_img_fg, q=90)
    p10 = np.percentile(in_img_fg, q=10)
    in_img_fg = in_img_fg[(in_img_fg < p90) & (in_img_fg > p10)]
in_img_fg = in_img_fg.reshape(-1,1)
# clf = mixture.GMM(n_components=3, covariance_type='full')
clf = mixture.GaussianMixture(n_components=2, covariance_type='full')
clf.fit(in_img_fg)
print('GMM centroids are:')
print(sorted(clf.means_))
wm_peak_intensity = sorted(clf.means_)[0]
# h, bin_edges = np.histogram(in_img_fg, 500)
# max_bin = np.argmax(h)
# mode_h = max_bin * (bin_edges[1] - bin_edges[0])
# max of means is the wm centroid for t1w images
# wm_peak_intensity = mode_h
wm_scaling = 0.3 / wm_peak_intensity
print(wm_peak_intensity)
out_img_data = in_img_data * wm_scaling
return out_img_data
def max_normalize(in_img_data):
in_img_flat = np.ravel(in_img_data, 'C')
in_img_fg = in_img_flat[in_img_flat > 0].reshape(-1, 1)
    p01 = np.percentile(in_img_fg, q=1)
    p99 = np.percentile(in_img_fg, q=99)
    scaling = 255.0 / (p99 - p01) # set p99 to 255
in_img_data[in_img_data < p01] = p01
print(scaling)
out_img_data = (in_img_data) * scaling
return out_img_data
def METHOD_NAME(in_img_data, ref_img_data):
# in_img_data = wm_peak_normalize(in_img_data)
# ref_img_data = wm_peak_normalize(ref_img_data)
in_img_data_flat = in_img_data.flatten()
in_img_fg = in_img_data_flat[in_img_data_flat > 0] # foreground is > 0
ref_img_data_flat = ref_img_data.flatten()
ref_img_fg = ref_img_data_flat[ref_img_data_flat > 0] # foreground is > 0
    bins_in = np.linspace(0, 1, 255)
    bins_ref = np.linspace(0, 1, 255)
hist_in = np.histogram(in_img_fg, bins=bins_in, range=(bins_in.min(), bins_in.max()))
n_in = hist_in[0]
bins_in = hist_in[1]
hist_ref = np.histogram(ref_img_fg, bins=bins_ref, range=(bins_ref.min(), bins_ref.max()))
n_ref = hist_ref[0]
bins_ref = hist_ref[1]
cdf_in_img = np.float64(np.cumsum(n_in))
cdf_in_img = np.divide(cdf_in_img, cdf_in_img[-1])
cdf_ref_img = np.float64(np.cumsum(n_ref))
cdf_ref_img = np.divide(cdf_ref_img, cdf_ref_img[-1])
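    # Histogram matching: np.interp finds, for each input-bin CDF value, the
    # reference intensity whose CDF matches; pixels are then remapped bin by bin.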
interp_ref_values = np.interp(cdf_in_img, cdf_ref_img, bins_ref[1:])
bins_in_z = np.append(0, bins_in)
out_img_data = np.copy(in_img_data)
for i in range(1, len(bins_in)):
out_img_data[(in_img_data > bins_in_z[i - 1]) & (in_img_data <= bins_in_z[i])] = interp_ref_values[i - 1]
return out_img_data, bins_in_z, interp_ref_values | null |
139 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkalidns.endpoint import endpoint_data
class AddGtmAddressPoolRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Alidns', '2015-01-09', 'AddGtmAddressPool','alidns')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_MonitorExtendInfo(self): # String
return self.get_query_params().get('MonitorExtendInfo')
def set_MonitorExtendInfo(self, MonitorExtendInfo): # String
self.add_query_param('MonitorExtendInfo', MonitorExtendInfo)
def get_Type(self): # String
return self.get_query_params().get('Type')
def set_Type(self, Type): # String
self.add_query_param('Type', Type)
def get_Timeout(self): # Integer
return self.get_query_params().get('Timeout')
def set_Timeout(self, Timeout): # Integer
self.add_query_param('Timeout', Timeout)
def get_MinAvailableAddrNum(self): # Integer
return self.get_query_params().get('MinAvailableAddrNum')
def set_MinAvailableAddrNum(self, MinAvailableAddrNum): # Integer
self.add_query_param('MinAvailableAddrNum', MinAvailableAddrNum)
def get_EvaluationCount(self): # Integer
return self.get_query_params().get('EvaluationCount')
def set_EvaluationCount(self, EvaluationCount): # Integer
self.add_query_param('EvaluationCount', EvaluationCount)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_Addrs(self): # RepeatList
return self.get_query_params().get('Addr')
def set_Addrs(self, Addr): # RepeatList
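        # Each Addr item is a dict with the keys handled below, e.g.
        # (hypothetical values): {"Mode": "SMART", "LbaWeight": 1, "Value": "1.1.1.1"}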
for depth1 in range(len(Addr)):
if Addr[depth1].get('Mode') is not None:
self.add_query_param('Addr.' + str(depth1 + 1) + '.Mode', Addr[depth1].get('Mode'))
if Addr[depth1].get('LbaWeight') is not None:
self.add_query_param('Addr.' + str(depth1 + 1) + '.LbaWeight', Addr[depth1].get('LbaWeight'))
if Addr[depth1].get('Value') is not None:
self.add_query_param('Addr.' + str(depth1 + 1) + '.Value', Addr[depth1].get('Value'))
def get_MonitorStatus(self): # String
return self.get_query_params().get('MonitorStatus')
def set_MonitorStatus(self, MonitorStatus): # String
self.add_query_param('MonitorStatus', MonitorStatus)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def METHOD_NAME(self): # String
return self.get_query_params().get('ProtocolType')
def set_ProtocolType(self, ProtocolType): # String
self.add_query_param('ProtocolType', ProtocolType)
def get_Interval(self): # Integer
return self.get_query_params().get('Interval')
def set_Interval(self, Interval): # Integer
self.add_query_param('Interval', Interval)
def get_IspCityNodes(self): # RepeatList
return self.get_query_params().get('IspCityNode')
def set_IspCityNodes(self, IspCityNode): # RepeatList
for depth1 in range(len(IspCityNode)):
if IspCityNode[depth1].get('CityCode') is not None:
self.add_query_param('IspCityNode.' + str(depth1 + 1) + '.CityCode', IspCityNode[depth1].get('CityCode'))
if IspCityNode[depth1].get('IspCode') is not None:
self.add_query_param('IspCityNode.' + str(depth1 + 1) + '.IspCode', IspCityNode[depth1].get('IspCode')) | null |
140 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkbssopenapi.endpoint import endpoint_data
class DescribeInstanceAmortizedCostByAmortizationPeriodDateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'BssOpenApi', '2017-12-14', 'DescribeInstanceAmortizedCostByAmortizationPeriodDate','bssopenapi')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ProductCode(self): # String
return self.get_body_params().get('ProductCode')
def set_ProductCode(self, ProductCode): # String
self.add_body_params('ProductCode', ProductCode)
def get_AmortizationDateStart(self): # String
return self.get_body_params().get('AmortizationDateStart')
def set_AmortizationDateStart(self, AmortizationDateStart): # String
self.add_body_params('AmortizationDateStart', AmortizationDateStart)
def get_SubscriptionType(self): # String
return self.get_body_params().get('SubscriptionType')
def set_SubscriptionType(self, SubscriptionType): # String
self.add_body_params('SubscriptionType', SubscriptionType)
def get_CostUnitCode(self): # String
return self.get_body_params().get('CostUnitCode')
def set_CostUnitCode(self, CostUnitCode): # String
self.add_body_params('CostUnitCode', CostUnitCode)
def get_NextToken(self): # String
return self.get_body_params().get('NextToken')
def set_NextToken(self, NextToken): # String
self.add_body_params('NextToken', NextToken)
def get_BillUserIdLists(self): # RepeatList
return self.get_body_params().get('BillUserIdList')
def set_BillUserIdLists(self, BillUserIdList): # RepeatList
for depth1 in range(len(BillUserIdList)):
self.add_body_params('BillUserIdList.' + str(depth1 + 1), BillUserIdList[depth1])
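    # RepeatList parameters are flattened into 1-based indexed keys, e.g. a
    # two-item list becomes BillUserIdList.1 and BillUserIdList.2.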
def get_ProductDetail(self): # String
return self.get_body_params().get('ProductDetail')
def set_ProductDetail(self, ProductDetail): # String
self.add_body_params('ProductDetail', ProductDetail)
def get_BillOwnerIdLists(self): # RepeatList
return self.get_body_params().get('BillOwnerIdList')
def set_BillOwnerIdLists(self, BillOwnerIdList): # RepeatList
for depth1 in range(len(BillOwnerIdList)):
self.add_body_params('BillOwnerIdList.' + str(depth1 + 1), BillOwnerIdList[depth1])
def get_BillingCycle(self): # String
return self.get_body_params().get('BillingCycle')
def set_BillingCycle(self, BillingCycle): # String
self.add_body_params('BillingCycle', BillingCycle)
def get_AmortizationDateEnd(self): # String
return self.get_body_params().get('AmortizationDateEnd')
def set_AmortizationDateEnd(self, AmortizationDateEnd): # String
self.add_body_params('AmortizationDateEnd', AmortizationDateEnd)
def get_InstanceIdLists(self): # RepeatList
return self.get_body_params().get('InstanceIdList')
def set_InstanceIdLists(self, InstanceIdList): # RepeatList
for depth1 in range(len(InstanceIdList)):
self.add_body_params('InstanceIdList.' + str(depth1 + 1), InstanceIdList[depth1])
def METHOD_NAME(self): # Integer
return self.get_body_params().get('MaxResults')
def set_MaxResults(self, MaxResults): # Integer
self.add_body_params('MaxResults', MaxResults) | null |
141 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
import json
class CreateAppInstanceGroupRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'appstream-center', '2021-09-01', 'CreateAppInstanceGroup')
self.set_method('POST')
def get_RuntimePolicy(self): # Struct
return self.get_body_params().get('RuntimePolicy')
def set_RuntimePolicy(self, RuntimePolicy): # Struct
self.add_body_params("RuntimePolicy", json.dumps(RuntimePolicy))
def get_BizRegionId(self): # String
return self.get_body_params().get('BizRegionId')
def set_BizRegionId(self, BizRegionId): # String
self.add_body_params('BizRegionId', BizRegionId)
def get_ProductType(self): # String
return self.get_body_params().get('ProductType')
def set_ProductType(self, ProductType): # String
self.add_body_params('ProductType', ProductType)
def get_Network(self): # Struct
return self.get_body_params().get('Network')
def set_Network(self, Network): # Struct
self.add_body_params("Network", json.dumps(Network))
def get_SessionTimeout(self): # Integer
return self.get_body_params().get('SessionTimeout')
def set_SessionTimeout(self, SessionTimeout): # Integer
self.add_body_params('SessionTimeout', SessionTimeout)
def get_ChargeResourceMode(self): # String
return self.get_body_params().get('ChargeResourceMode')
def set_ChargeResourceMode(self, ChargeResourceMode): # String
self.add_body_params('ChargeResourceMode', ChargeResourceMode)
def get_AppCenterImageId(self): # String
return self.get_body_params().get('AppCenterImageId')
def set_AppCenterImageId(self, AppCenterImageId): # String
self.add_body_params('AppCenterImageId', AppCenterImageId)
def get_UserInfo(self): # Struct
return self.get_body_params().get('UserInfo')
def set_UserInfo(self, UserInfo): # Struct
self.add_body_params("UserInfo", json.dumps(UserInfo))
def get_PreOpenAppId(self): # String
return self.get_body_params().get('PreOpenAppId')
def set_PreOpenAppId(self, PreOpenAppId): # String
self.add_body_params('PreOpenAppId', PreOpenAppId)
def get_Period(self): # Integer
return self.get_body_params().get('Period')
def METHOD_NAME(self, Period): # Integer
self.add_body_params('Period', Period)
def get_AutoPay(self): # Boolean
return self.get_body_params().get('AutoPay')
def set_AutoPay(self, AutoPay): # Boolean
self.add_body_params('AutoPay', AutoPay)
def get_NodePool(self): # Struct
return self.get_body_params().get('NodePool')
def set_NodePool(self, NodePool): # Struct
self.add_body_params("NodePool", json.dumps(NodePool))
def get_PromotionId(self): # String
return self.get_body_params().get('PromotionId')
def set_PromotionId(self, PromotionId): # String
self.add_body_params('PromotionId', PromotionId)
def get_Userss(self): # RepeatList
return self.get_body_params().get('Users')
def set_Userss(self, Users): # RepeatList
for depth1 in range(len(Users)):
self.add_body_params('Users.' + str(depth1 + 1), Users[depth1])
def get_AppInstanceGroupName(self): # String
return self.get_body_params().get('AppInstanceGroupName')
def set_AppInstanceGroupName(self, AppInstanceGroupName): # String
self.add_body_params('AppInstanceGroupName', AppInstanceGroupName)
def get_PeriodUnit(self): # String
return self.get_body_params().get('PeriodUnit')
def set_PeriodUnit(self, PeriodUnit): # String
self.add_body_params('PeriodUnit', PeriodUnit)
def get_AutoRenew(self): # Boolean
return self.get_body_params().get('AutoRenew')
def set_AutoRenew(self, AutoRenew): # Boolean
self.add_body_params('AutoRenew', AutoRenew)
def get_ChargeType(self): # String
return self.get_body_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
self.add_body_params('ChargeType', ChargeType) | null |
142 | #
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for traversing and manipulating reference types."""
import re
from typing import Optional
from google.protobuf import descriptor
from google.protobuf import message
from google.fhir.core.utils import annotation_utils
from google.fhir.core.utils import path_utils
from google.fhir.core.utils import proto_utils
_FRAGMENT_REFERENCE_PATTERN = re.compile(r'^#[A-Za-z0-9.-]{1,64}$')
_INTERNAL_REFERENCE_PATTERN = re.compile(
r'^(?P<resource_type>[0-9A-Za-z_]+)/(?P<resource_id>[A-Za-z0-9.-]{1,64})'
r'(?:/_history/(?P<version>[A-Za-z0-9.-]{1,64}))?$')
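# Examples: "Patient/1234" and "Patient/1234/_history/2" match the internal
# reference pattern; "#vs1" matches the fragment pattern.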
def METHOD_NAME(reference: message.Message) -> None:
"""Raises a ValueError if the provided Message is not a FHIR reference."""
if not annotation_utils.is_reference(reference):
raise ValueError(
f'Message {reference.DESCRIPTOR.name} is not a FHIR reference.')
def get_reference_id_field_for_resource(
reference: message.Message,
resource_type: str) -> descriptor.FieldDescriptor:
"""Returns the reference ID field for a provided resource type."""
METHOD_NAME(reference)
field_name = path_utils.camel_case_to_snake_case(resource_type) + '_id'
field = reference.DESCRIPTOR.fields_by_name.get(field_name)
if field is None:
raise ValueError(f'Resource type {resource_type!r} is not valid for a '
f'reference. Field {field_name!r} does not exist.')
return field
def populate_typed_reference_id(reference_id: message.Message, resource_id: str,
version: Optional[str]) -> None:
"""Sets the resource_id and optionally, version, on the reference."""
reference_id_value_field = reference_id.DESCRIPTOR.fields_by_name['value']
proto_utils.set_value_at_field(reference_id, reference_id_value_field,
resource_id)
if version is not None:
history_field = reference_id.DESCRIPTOR.fields_by_name.get('history')
if history_field is None:
raise ValueError('Not a valid ReferenceId message: '
f"{reference_id.DESCRIPTOR.full_name}. Field 'history' "
'does not exist.')
history = proto_utils.set_in_parent_or_add(reference_id, history_field)
history_value_field = history.DESCRIPTOR.fields_by_name['value']
proto_utils.set_value_at_field(history, history_value_field, version)
def split_if_relative_reference(reference: message.Message) -> None:
"""If possible, parses a `Reference` `uri` into more structured fields.
This is only possible for two forms of reference uris:
* Relative references of the form $TYPE/$ID, e.g., "Patient/1234"
In this case, this will be parsed to a proto of the form:
{patient_id: {value: "1234"}}
* Fragments of the form "#$FRAGMENT", e.g., "#vs1". In this case, this would
be parsed into a proto of the form:
{fragment: {value: "vs1"} }
If the reference URI matches one of these schemas, the `uri` field will be
cleared, and the appropriate structured fields set. Otherwise, the reference
will be unchanged.
Args:
reference: The FHIR reference to potentially split.
Raises:
ValueError: If the message is not a valid FHIR Reference proto.
"""
METHOD_NAME(reference)
uri_field = reference.DESCRIPTOR.fields_by_name.get('uri')
if not proto_utils.field_is_set(reference, uri_field): # pytype: disable=wrong-arg-types
return # No URI to split
uri = proto_utils.get_value_at_field(reference, uri_field) # pytype: disable=wrong-arg-types
internal_match = re.fullmatch(_INTERNAL_REFERENCE_PATTERN, uri.value)
if internal_match is not None:
# Note that we make the reference_id off of the reference before adding it,
# since adding the reference_id would destroy the uri field, as they are
# both in the same oneof. This allows us to copy fields from uri to
# reference_id without making an extra copy.
reference_id_field = get_reference_id_field_for_resource(
reference, internal_match.group('resource_type'))
reference_id = proto_utils.create_message_from_descriptor(
reference_id_field.message_type)
populate_typed_reference_id(reference_id,
internal_match.group('resource_id'),
internal_match.group('version'))
proto_utils.copy_common_field(uri, reference_id, 'id')
proto_utils.copy_common_field(uri, reference_id, 'extension')
proto_utils.set_value_at_field(reference, reference_id_field, reference_id)
return
fragment_match = re.fullmatch(_FRAGMENT_REFERENCE_PATTERN, uri.value)
if fragment_match is not None:
# Note that we make the fragment off of the reference before adding it,
# since adding the fragment would destroy the uri field, as they are both in
# the same oneof. This allows us to copy fields from uri to fragment without
# making an extra copy.
fragment_field = reference.DESCRIPTOR.fields_by_name['fragment']
fragment = proto_utils.create_message_from_descriptor(
fragment_field.message_type)
value_field = fragment.DESCRIPTOR.fields_by_name['value']
proto_utils.set_value_at_field(fragment, value_field, uri.value[1:])
proto_utils.copy_common_field(uri, fragment, 'id')
proto_utils.copy_common_field(uri, fragment, 'extension')
proto_utils.set_value_at_field(reference, fragment_field, fragment)
return
def reference_to_string(reference: message.Message) -> str:
"""Returns a reference URI for a typed reference message."""
METHOD_NAME(reference)
# Early-exit if URI or fragment is set
if proto_utils.field_is_set(reference, 'uri'):
uri = proto_utils.get_value_at_field(reference, 'uri')
return uri.value
elif proto_utils.field_is_set(reference, 'fragment'):
fragment = proto_utils.get_value_at_field(reference, 'fragment')
return f'#{fragment.value}'
set_oneof = reference.WhichOneof('reference')
if set_oneof is None:
raise ValueError(f'Reference is not set on: {reference.DESCRIPTOR.name}.')
# Convert to CamelCase
prefix = path_utils.snake_case_to_camel_case(set_oneof, upper=True)
# Cull trailing 'Id'
if prefix.endswith('Id'):
prefix = prefix[:-2]
reference_id = proto_utils.get_value_at_field(reference, set_oneof)
reference_string = f'{prefix}/{reference_id.value}'
if proto_utils.field_is_set(reference_id, 'history'):
reference_string += f'/_history/{reference_id.history.value}'
return reference_string | null |
143 | # -*- coding: utf-8 -*-
from __future__ import print_function
from acq4.util import Qt
import os
import acq4.util.database as database
import acq4.Manager
import acq4.analysis.modules as analysis
import acq4.analysis.AnalysisHost as AnalysisHost
import acq4.analysis.dataModels as models
from pyqtgraph import FileDialog
from six.moves import range
Ui_Form = Qt.importTemplate('.AnalysisTemplate')
class FileAnalysisView(Qt.QWidget):
sigDbChanged = Qt.Signal()
def __init__(self, parent, mod):
Qt.QWidget.__init__(self, parent)
self.ui = Ui_Form()
self.ui.setupUi(self)
self.man = acq4.Manager.getManager()
self.mod = mod
self.dbFile = None
self.db = None
self.mods = []
self.currentModel = None
self.populateModuleList()
self.METHOD_NAME()
stateFile = os.path.join('modules', self.mod.name + '_db_file_list')
files = self.mod.manager.readConfigFile(stateFile).get('db_file_list', [])
self.ui.databaseCombo.addItem('')
for f in files:
self.ui.databaseCombo.addItem(f)
self.ui.openDbBtn.clicked.connect(self.openDbClicked)
self.ui.createDbBtn.clicked.connect(self.createDbClicked)
self.ui.loadModuleBtn.clicked.connect(self.loadModule)
self.ui.refreshDbBtn.clicked.connect(self.refreshDb)
self.ui.dataModelCombo.currentIndexChanged.connect(self.loadModel)
self.ui.analysisModuleList.currentItemChanged.connect(self.showModuleDescription)
self.ui.analysisModuleList.itemDoubleClicked.connect(self.loadModule)
self.ui.databaseCombo.currentIndexChanged.connect(self.dbComboChanged)
def openDbClicked(self):
bd = self.man.getBaseDir()
if bd is None:
bd = ""
else:
bd = bd.name()
self.fileDialog = FileDialog(self, "Select Database File", bd, "SQLite Database (*.sqlite *.sql);;All Files (*.*)")
#self.fileDialog.setFileMode(Qt.QFileDialog.AnyFile)
self.fileDialog.show()
self.fileDialog.fileSelected.connect(self.openDb)
def openDb(self, fileName):
#fn = str(Qt.QFileDialog.getOpenFileName(self, "Select Database File", self.man.getBaseDir().name(), "SQLite Database (*.sqlite)"))
fileName = str(fileName)
if fileName == '':
return
#if not fileName[-7:] == '.sqlite' and '.' not in fileName:
# fileName =+ '.sqlite'
self.ui.databaseCombo.blockSignals(True)
try:
## put fileName at the top of the list, write to disk
files = [self.ui.databaseCombo.itemText(i) for i in range(self.ui.databaseCombo.count())]
files.remove('')
if fileName in files:
files.remove(fileName)
files = [fileName] + files
self.ui.databaseCombo.clear()
self.ui.databaseCombo.addItem('')
for f in files:
self.ui.databaseCombo.addItem(f)
stateFile = os.path.join('modules', self.mod.name + '_db_file_list')
self.mod.manager.writeConfigFile({'db_file_list': files}, stateFile)
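            # The combo doubles as an MRU list: the chosen file is moved to
            # the top and the ordering is persisted via writeConfigFile above.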
self.ui.databaseCombo.setCurrentIndex(1)
finally:
self.ui.databaseCombo.blockSignals(False)
self.dbFile = fileName
self.db = database.AnalysisDatabase(self.dbFile, dataModel=self.currentModel)
self.sigDbChanged.emit()
def dbComboChanged(self):
fn = self.ui.databaseCombo.currentText()
if fn == '':
return
if not os.path.exists(fn):
raise Exception("Database file does not exist: %s" % fn)
self.openDb(fn)
def quit(self):
if self.db is not None:
self.db.close()
def createDbClicked(self):
bd = self.man.getBaseDir()
if bd is None:
raise Exception("Must select a base directory before creating database.")
self.fileDialog = FileDialog(self, "Create Database File", bd.name(), "SQLite Database (*.sqlite *.sql);;All Files (*.*)")
#self.fileDialog.setFileMode(Qt.QFileDialog.AnyFile)
self.fileDialog.setAcceptMode(Qt.QFileDialog.AcceptSave)
self.fileDialog.setOption(Qt.QFileDialog.DontConfirmOverwrite)
self.fileDialog.show()
self.fileDialog.fileSelected.connect(self.createDb)
def createDb(self, fileName):
#fn = str(Qt.QFileDialog.getSaveFileName(self, "Create Database File", self.man.getBaseDir().name(), "SQLite Database (*.sqlite)", None, Qt.QFileDialog.DontConfirmOverwrite))
fileName = str(fileName)
if fileName == '':
return
self.dbFile = fileName
self.db = database.AnalysisDatabase(self.dbFile, dataModel=self.currentModel, baseDir=self.man.getBaseDir())
self.ui.databaseCombo.blockSignals(True)
try:
self.ui.databaseCombo.addItem(fileName)
self.ui.databaseCombo.setCurrentIndex(self.ui.databaseCombo.count()-1)
finally:
self.ui.databaseCombo.blockSignals(False)
self.sigDbChanged.emit()
def refreshDb(self):
if self.db is None:
return
self.db._readTableList()
def addFileClicked(self):
cf = self.mod.selectedFile()
self.db.addDir(cf)
def populateModuleList(self):
for m in analysis.listModules():
self.ui.analysisModuleList.addItem(m)
def loadModule(self):
mod = self.ui.analysisModuleList.currentItem()
if mod is None:
return
modName = str(mod.text())
#if self.ui.analysisCombo.currentIndex() == 0:
#return
#modName = str(self.ui.analysisCombo.currentText())
#self.ui.analysisCombo.setCurrentIndex(0)
mod = AnalysisHost.AnalysisHost(dataManager=self.mod, dataModel=self.currentModel, module=modName)
self.mods.append(mod)
self.man.modules[modName] = mod
def METHOD_NAME(self):
self.ui.dataModelCombo.clear()
self.ui.dataModelCombo.addItem('Load...')
mods = models.listModels()
for m in mods:
self.ui.dataModelCombo.addItem(m)
if len(mods) == 1:
self.ui.dataModelCombo.setCurrentIndex(1)
self.loadModel()
def loadModel(self):
if self.ui.dataModelCombo.currentIndex() == 0:
return
modName = str(self.ui.dataModelCombo.currentText())
self.currentModel = models.loadModel(modName)
acq4.Manager.getManager().dataModel = self.currentModel ## make model globally available
if self.db is not None:
self.db.setDataModel(self.currentModel)
def currentDatabase(self):
return self.db
def currentDataModel(self):
return self.currentModel
def showModuleDescription(self):
mod = self.ui.analysisModuleList.currentItem()
if mod is None:
return
modName = str(mod.text())
cls = analysis.getModuleClass(modName)
doc = cls.__doc__
self.ui.modDescriptionText.setPlainText(doc) | null |
144 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateDataAddressRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'hcs-mgw', '2017-10-24', 'CreateDataAddress')
self.set_method('POST')
def get_InvPath(self):
return self.get_query_params().get('InvPath')
def set_InvPath(self,InvPath):
self.add_query_param('InvPath',InvPath)
def get_ServerEncryption(self):
return self.get_query_params().get('ServerEncryption')
def set_ServerEncryption(self,ServerEncryption):
self.add_query_param('ServerEncryption',ServerEncryption)
def get_AccessMethod(self):
return self.get_query_params().get('AccessMethod')
def set_AccessMethod(self,AccessMethod):
self.add_query_param('AccessMethod',AccessMethod)
def get_InvAccessKeyId(self):
return self.get_query_params().get('InvAccessKeyId')
def set_InvAccessKeyId(self,InvAccessKeyId):
self.add_query_param('InvAccessKeyId',InvAccessKeyId)
def get_AccessKeySecret(self):
return self.get_query_params().get('AccessKeySecret')
def set_AccessKeySecret(self,AccessKeySecret):
self.add_query_param('AccessKeySecret',AccessKeySecret)
def get_ListFilePath(self):
return self.get_query_params().get('ListFilePath')
def set_ListFilePath(self,ListFilePath):
self.add_query_param('ListFilePath',ListFilePath)
def get_InvDomain(self):
return self.get_query_params().get('InvDomain')
def set_InvDomain(self,InvDomain):
self.add_query_param('InvDomain',InvDomain)
def get_AccessKey(self):
return self.get_query_params().get('AccessKey')
def set_AccessKey(self,AccessKey):
self.add_query_param('AccessKey',AccessKey)
def get_AddressType(self):
return self.get_query_params().get('AddressType')
def set_AddressType(self,AddressType):
self.add_query_param('AddressType',AddressType)
def get_EnableAcceleration(self):
return self.get_query_params().get('EnableAcceleration')
def set_EnableAcceleration(self,EnableAcceleration):
self.add_query_param('EnableAcceleration',EnableAcceleration)
def get_Address(self):
return self.get_query_params().get('Address')
def set_Address(self,Address):
self.add_query_param('Address',Address)
def get_AccessVersion(self):
return self.get_query_params().get('AccessVersion')
def set_AccessVersion(self,AccessVersion):
self.add_query_param('AccessVersion',AccessVersion)
def get_AccessProxy(self):
return self.get_query_params().get('AccessProxy')
def set_AccessProxy(self,AccessProxy):
self.add_query_param('AccessProxy',AccessProxy)
def get_VSwitchId(self):
return self.get_query_params().get('VSwitchId')
def METHOD_NAME(self,VSwitchId):
self.add_query_param('VSwitchId',VSwitchId)
def get_AliasName(self):
return self.get_query_params().get('AliasName')
def set_AliasName(self,AliasName):
self.add_query_param('AliasName',AliasName)
def get_VpcId(self):
return self.get_query_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_query_param('VpcId',VpcId)
def get_Domain(self):
return self.get_query_params().get('Domain')
def set_Domain(self,Domain):
self.add_query_param('Domain',Domain)
def get_Appid(self):
return self.get_query_params().get('Appid')
def set_Appid(self,Appid):
self.add_query_param('Appid',Appid)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_InvSecretKey(self):
return self.get_query_params().get('InvSecretKey')
def set_InvSecretKey(self,InvSecretKey):
self.add_query_param('InvSecretKey',InvSecretKey)
def get_MgwRegionId(self):
return self.get_query_params().get('MgwRegionId')
def set_MgwRegionId(self,MgwRegionId):
self.add_query_param('MgwRegionId',MgwRegionId)
def get_SubAddress(self):
return self.get_query_params().get('SubAddress')
def set_SubAddress(self,SubAddress):
        self.add_query_param('SubAddress',SubAddress) | null
145 | # coding: utf-8
"""
Generated by: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator
"""
import unittest
from unittest.mock import patch
import urllib3
import typing_extensions
import unit_test_api
from unit_test_api.paths.request_body_post_allof_with_base_schema_request_body.post import operation as post # noqa: E501
from unit_test_api import schemas, api_client
from unit_test_api.configurations import api_configuration, schema_configuration
from .. import ApiTestMixin
class TestPost(ApiTestMixin, unittest.TestCase):
"""
Post unit test stubs
"""
api_config = api_configuration.ApiConfiguration()
schema_config = schema_configuration.SchemaConfiguration()
used_api_client = api_client.ApiClient(configuration=api_config, schema_configuration=schema_config)
api = post.ApiForPost(api_client=used_api_client) # noqa: E501
response_status = 200
response_body = ''
def METHOD_NAME(self):
content_type = 'application/json'
# valid
with patch.object(urllib3.PoolManager, 'request') as mock_request:
payload = (
{
"foo":
"quux",
"bar":
2,
"baz":
None,
}
)
body = post.request_body.RequestBody.content["application/json"].schema.validate(
payload,
configuration=self.schema_config
)
mock_request.return_value = self.response(
self.json_bytes(self.response_body),
status=self.response_status
)
api_response = self.api.post(
body=body,
)
self.assert_pool_manager_request_called_with(
mock_request,
self.api_config.get_server_url('servers', None) + "/requestBody/postAllofWithBaseSchemaRequestBody",
method='post'.upper(),
body=self.json_bytes(payload),
content_type=content_type,
)
assert isinstance(api_response.response, urllib3.HTTPResponse)
assert isinstance(api_response.body, schemas.Unset)
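# Added note: the payload above satisfies all three constraints exercised by
# these tests -- the base schema ('bar'), the first allOf branch ('foo') and
# the second allOf branch ('baz'); each test below drops one of those keys
# and expects validation to fail.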
def test_mismatch_first_allof_fails(self):
content_type = 'application/json'
# mismatch first allOf
with patch.object(urllib3.PoolManager, 'request') as mock_request:
payload = (
{
"bar":
2,
"baz":
None,
}
)
with self.assertRaises((unit_test_api.ApiValueError, unit_test_api.ApiTypeError)):
body = post.request_body.RequestBody.content["application/json"].schema.validate(
payload,
configuration=self.schema_config
)
self.api.post(body=body)
def test_mismatch_base_schema_fails(self):
content_type = 'application/json'
# mismatch base schema
with patch.object(urllib3.PoolManager, 'request') as mock_request:
payload = (
{
"foo":
"quux",
"baz":
None,
}
)
with self.assertRaises((unit_test_api.ApiValueError, unit_test_api.ApiTypeError)):
body = post.request_body.RequestBody.content["application/json"].schema.validate(
payload,
configuration=self.schema_config
)
self.api.post(body=body)
def test_mismatch_both_fails(self):
content_type = 'application/json'
# mismatch both
with patch.object(urllib3.PoolManager, 'request') as mock_request:
payload = (
{
"bar":
2,
}
)
with self.assertRaises((unit_test_api.ApiValueError, unit_test_api.ApiTypeError)):
body = post.request_body.RequestBody.content["application/json"].schema.validate(
payload,
configuration=self.schema_config
)
self.api.post(body=body)
def test_mismatch_second_allof_fails(self):
content_type = 'application/json'
# mismatch second allOf
with patch.object(urllib3.PoolManager, 'request') as mock_request:
payload = (
{
"foo":
"quux",
"bar":
2,
}
)
with self.assertRaises((unit_test_api.ApiValueError, unit_test_api.ApiTypeError)):
body = post.request_body.RequestBody.content["application/json"].schema.validate(
payload,
configuration=self.schema_config
)
self.api.post(body=body)
if __name__ == '__main__':
unittest.main() | null |
146 | # -*- coding: utf-8 -*-
import re
import hmac
import hashlib
import logging
from django.apps import apps
from nameparser import HumanName
from werkzeug.utils import cached_property
from framework.flask import request
from website import settings
from website.conferences.exceptions import ConferenceError
logger = logging.getLogger(__name__)
SSCORE_MAX_VALUE = 5
DKIM_PASS_VALUES = ['Pass']
SPF_PASS_VALUES = ['Pass', 'Neutral']
ANGLE_BRACKETS_REGEX = re.compile(r'<(.*?)>')
BASE_REGEX = r"""
(?P<test>(test|stage)(\d*)-)?
(?P<meeting>\w*?)
-
(?P<category>{allowed_types})
@osf\.io
"""
class ConferenceMessage(object):
def __init__(self):
self.request = request._get_current_object()
def verify(self):
self.METHOD_NAME()
_ = [self.sender_email, self.route] # noqa -- touch cached properties so invalid senders/routes raise here
def METHOD_NAME(self):
"""Verify that request comes from Mailgun. Based on sample code from
http://documentation.mailgun.com/user_manual.html#webhooks
"""
signature = hmac.new(
key=settings.MAILGUN_API_KEY.encode(),
msg='{}{}'.format(
self.form['timestamp'],
self.form['token'],
).encode(),
digestmod=hashlib.sha256,
).hexdigest()
if signature != self.form['signature']:
raise ConferenceError('Invalid headers on incoming mail')
@cached_property
def is_spam(self):
"""Check SSCORE, DKIM, and SPF headers for spam.
See http://documentation.mailgun.com/user_manual.html#spam-filter for
details.
:return: At least one header indicates spam
"""
try:
# Mailgun only inserts score headers for messages checked for spam.
sscore_header = float(self.form.get('X-Mailgun-Sscore', 0))
except (TypeError, ValueError):
return True
dkim_header = self.form.get('X-Mailgun-Dkim-Check-Result')
spf_header = self.form.get('X-Mailgun-Spf')
return (
(sscore_header and sscore_header > SSCORE_MAX_VALUE) or
(dkim_header and dkim_header not in DKIM_PASS_VALUES) or
(spf_header and spf_header not in SPF_PASS_VALUES)
)
@cached_property
def form(self):
return self.request.form
@cached_property
def raw(self):
return {
'headers': dict(self.request.headers),
'form': self.request.form.to_dict(),
'args': self.request.args.to_dict(),
}
@cached_property
def subject(self):
subject = self.form['subject']
subject = re.sub(r'^re:', '', subject, flags=re.I)
subject = re.sub(r'^fwd:', '', subject, flags=re.I)
return subject.strip()
@cached_property
def recipient(self):
return self.form['recipient']
@cached_property
def text(self):
# Not included if there is no message body
# https://documentation.mailgun.com/user_manual.html#routes
return self.form.get('stripped-text', '')
@cached_property
def sender(self):
return self.form['from']
@cached_property
def sender_name(self):
if '<' in self.sender:
# sender format: "some name" <email@domain.tld>
name = ANGLE_BRACKETS_REGEX.sub('', self.sender)
name = name.strip().replace('"', '')
else:
# sender format: email@domain.tld
name = self.sender
return str(HumanName(name))
@cached_property
def sender_email(self):
match = ANGLE_BRACKETS_REGEX.search(self.sender)
if match:
# sender format: "some name" <email@domain.tld>
return match.groups()[0].lower().strip()
elif '@' in self.sender:
# sender format: email@domain.tld
return self.sender.lower().strip()
raise ConferenceError('Could not extract sender email')
@cached_property
def sender_display(self):
return self.sender_name or self.sender_email.split('@')[0]
@cached_property
def route(self):
match = re.search(re.compile(BASE_REGEX.format(allowed_types=(self.allowed_types or 'poster|talk')), re.IGNORECASE | re.VERBOSE), self.form['recipient'])
if not match:
raise ConferenceError('Invalid recipient: {0}'.format(self.form['recipient']))
data = match.groupdict()
if bool(settings.DEV_MODE) != bool(data['test']):
# NOTE: test.osf.io has DEV_MODE = False
if not data['test'] or (data['test'] and data['test'].rstrip('-') != 'test'):
raise ConferenceError(
'Mismatch between `DEV_MODE` and recipient {0}'.format(
self.form['recipient']
)
)
return data
@cached_property
def conference_name(self):
return self.route['meeting']
@cached_property
def conference_category(self):
return self.route['category']
@cached_property
def attachments(self):
count = self.form.get('attachment-count', 0)
try:
count = int(count)
except (TypeError, ValueError):
count = 0
return list(filter(
lambda value: value is not None,
list(map(
lambda idx: self.request.files.get('attachment-{0}'.format(idx + 1)),
list(range(count)),
)),
))
@property
def allowed_types(self):
Conference = apps.get_model('osf.Conference')
allowed_types = []
for field_names in Conference.objects.values_list('field_names', flat=True):
allowed_types.extend([field_names['submission1'], field_names['submission2']])
regex_types_allowed = '|'.join(set(allowed_types))
return regex_types_allowed | null |
147 | # Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
import numpy as np
import time
from models.model import create_model, load_model
from utils.image import fast_pad, get_affine_transform
from utils.debugger import Debugger
class BaseDetector(object):
def __init__(self, opt):
self.model = create_model(
opt.arch,
opt.heads,
opt.head_conv,
opt.num_layers,
training=False,
channel_last=opt.channel_last,
)
if opt.trained_model_path != '':
extension = os.path.splitext(opt.trained_model_path)[1]
assert (
extension == '.h5' or extension == ".protobuf"
), "incorrect file extension, should be either .h5 or .protobuf"
load_model(self.model, opt.trained_model_path, clear=True)
self.mean = opt.mean
self.std = opt.std
self.max_per_image = 128
self.opt = opt
self.pause = True
def pre_process(self, image, scale, meta=None):
height, width = image.shape[0:2]
new_height = int(height * scale)
new_width = int(width * scale)
if self.opt.fix_res:
inp_height, inp_width = self.opt.input_h, self.opt.input_w
c = np.array([new_width / 2.0, new_height / 2.0], dtype=np.float32)
s = max(height, width) * 1.0
else:
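# Added note: assuming opt.pad has the form 2**k - 1 (e.g. 31 or 127),
# (n | self.opt.pad) + 1 rounds n up to the next multiple of 2**k above n.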
inp_height = (new_height | self.opt.pad) + 1
inp_width = (new_width | self.opt.pad) + 1
c = np.array([new_width // 2, new_height // 2], dtype=np.float32)
s = np.array([inp_width, inp_height], dtype=np.float32)
trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
resized_image = cv2.resize(image, (new_width, new_height))
inp_image = cv2.warpAffine(
resized_image,
trans_input,
(inp_width, inp_height),
flags=cv2.INTER_LINEAR,
)
inp_image = ((inp_image / 255.0 - self.mean) / self.std).astype(
np.float32
)
if self.opt.mixed_precision:
inp_image = fast_pad(inp_image)
if not self.opt.channel_last:
inp_image = inp_image.transpose(2, 0, 1)
images = np.expand_dims(inp_image, axis=0)
meta = {
'c': c,
's': s,
'out_height': inp_height // self.opt.down_ratio,
'out_width': inp_width // self.opt.down_ratio,
}
return images, meta
def process(self, images):
raise NotImplementedError
def post_process(self, dets, meta, scale=1):
raise NotImplementedError
def merge_outputs(self, detections):
"""Merge detection results
Args:
detections (list): Per-scale detection results; merged results are keyed by 1-based class id in the returned dictionary.
Raises:
NotImplementedError: Abstract method.
"""
raise NotImplementedError
def debug(self, debugger, images, dets, output, scale=1):
raise NotImplementedError
def show_results(self, debugger, image, results):
raise NotImplementedError
def METHOD_NAME(self, image_or_path_or_tensor, meta=None):
load_time, pre_time, net_time, dec_time, post_time = 0, 0, 0, 0, 0
merge_time, tot_time = 0, 0
debugger = Debugger(
dataset=self.opt.dataset,
ipynb=(self.opt.debug == 3),
theme=self.opt.debugger_theme,
)
start_time = time.time()
pre_processed = False
if isinstance(image_or_path_or_tensor, np.ndarray):
image = image_or_path_or_tensor
elif isinstance(image_or_path_or_tensor, str):
image = cv2.imread(image_or_path_or_tensor)
else:
image = image_or_path_or_tensor['image'][0].numpy()
pre_processed_images = image_or_path_or_tensor
pre_processed = True
loaded_time = time.time()
load_time += loaded_time - start_time
detections = []
for scale in self.opt.test_scales:
scale_start_time = time.time()
if not pre_processed:
images, meta = self.pre_process(image, scale, meta)
else:
images = pre_processed_images['images'][scale][0]
meta = pre_processed_images['meta'][scale]
meta = {k: v.numpy()[0] for k, v in meta.items()}
pre_process_time = time.time()
pre_time += pre_process_time - scale_start_time
output, dets, forward_time = self.process(images)
net_time += forward_time - pre_process_time
decode_time = time.time()
dec_time += decode_time - forward_time
if self.opt.debug >= 2:
self.debug(debugger, images, dets, output, scale)
dets = self.post_process(dets, meta, scale)
post_process_time = time.time()
post_time += post_process_time - decode_time
detections.append(dets)
results = self.merge_outputs(detections)
end_time = time.time()
merge_time += end_time - post_process_time
tot_time += end_time - start_time
if self.opt.debug >= 1:
self.show_results(debugger, image, results)
return {
'results': results,
'tot': tot_time,
'load': load_time,
'pre': pre_time,
'net': net_time,
'dec': dec_time,
'post': post_time,
'merge': merge_time,
} | null |
148 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class ModifyCasterLayoutRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'ModifyCasterLayout','live')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_BlendLists(self): # RepeatList
return self.get_query_params().get('BlendList')
def set_BlendLists(self, BlendList): # RepeatList
for depth1 in range(len(BlendList)):
self.add_query_param('BlendList.' + str(depth1 + 1), BlendList[depth1])
def get_LayoutId(self): # String
return self.get_query_params().get('LayoutId')
def METHOD_NAME(self, LayoutId): # String
self.add_query_param('LayoutId', LayoutId)
def get_CasterId(self): # String
return self.get_query_params().get('CasterId')
def set_CasterId(self, CasterId): # String
self.add_query_param('CasterId', CasterId)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_AudioLayers(self): # RepeatList
return self.get_query_params().get('AudioLayer')
def set_AudioLayers(self, AudioLayer): # RepeatList
for depth1 in range(len(AudioLayer)):
if AudioLayer[depth1].get('VolumeRate') is not None:
self.add_query_param('AudioLayer.' + str(depth1 + 1) + '.VolumeRate', AudioLayer[depth1].get('VolumeRate'))
if AudioLayer[depth1].get('FixedDelayDuration') is not None:
self.add_query_param('AudioLayer.' + str(depth1 + 1) + '.FixedDelayDuration', AudioLayer[depth1].get('FixedDelayDuration'))
if AudioLayer[depth1].get('ValidChannel') is not None:
self.add_query_param('AudioLayer.' + str(depth1 + 1) + '.ValidChannel', AudioLayer[depth1].get('ValidChannel'))
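# Illustrative example (added): set_AudioLayers([{'VolumeRate': 1.0}]) emits the
# flattened query parameter 'AudioLayer.1.VolumeRate' = 1.0, matching the
# RepeatList convention used throughout these request classes.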
def get_VideoLayers(self): # RepeatList
return self.get_query_params().get('VideoLayer')
def set_VideoLayers(self, VideoLayer): # RepeatList
for depth1 in range(len(VideoLayer)):
if VideoLayer[depth1].get('FixedDelayDuration') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.FixedDelayDuration', VideoLayer[depth1].get('FixedDelayDuration'))
if VideoLayer[depth1].get('FillMode') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.FillMode', VideoLayer[depth1].get('FillMode'))
if VideoLayer[depth1].get('HeightNormalized') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.HeightNormalized', VideoLayer[depth1].get('HeightNormalized'))
if VideoLayer[depth1].get('PositionRefer') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.PositionRefer', VideoLayer[depth1].get('PositionRefer'))
if VideoLayer[depth1].get('PositionNormalized') is not None:
for depth2 in range(len(VideoLayer[depth1].get('PositionNormalized'))):
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.PositionNormalized.' + str(depth2 + 1), VideoLayer[depth1].get('PositionNormalized')[depth2])
if VideoLayer[depth1].get('WidthNormalized') is not None:
self.add_query_param('VideoLayer.' + str(depth1 + 1) + '.WidthNormalized', VideoLayer[depth1].get('WidthNormalized'))
def get_MixLists(self): # RepeatList
return self.get_query_params().get('MixList')
def set_MixLists(self, MixList): # RepeatList
for depth1 in range(len(MixList)):
self.add_query_param('MixList.' + str(depth1 + 1), MixList[depth1]) | null |
149 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcassandra.endpoint import endpoint_data
class CreateClusterRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cassandra', '2019-01-01', 'CreateCluster','Cassandra')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_ClusterName(self):
return self.get_query_params().get('ClusterName')
def set_ClusterName(self,ClusterName):
self.add_query_param('ClusterName',ClusterName)
def get_ResourceGroupId(self):
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self,ResourceGroupId):
self.add_query_param('ResourceGroupId',ResourceGroupId)
def get_Password(self):
return self.get_query_params().get('Password')
def set_Password(self,Password):
self.add_query_param('Password',Password)
def get_InstanceType(self):
return self.get_query_params().get('InstanceType')
def set_InstanceType(self,InstanceType):
self.add_query_param('InstanceType',InstanceType)
def get_AutoRenewPeriod(self):
return self.get_query_params().get('AutoRenewPeriod')
def set_AutoRenewPeriod(self,AutoRenewPeriod):
self.add_query_param('AutoRenewPeriod',AutoRenewPeriod)
def get_Period(self):
return self.get_query_params().get('Period')
def set_Period(self,Period):
self.add_query_param('Period',Period)
def get_MajorVersion(self):
return self.get_query_params().get('MajorVersion')
def set_MajorVersion(self,MajorVersion):
self.add_query_param('MajorVersion',MajorVersion)
def get_DiskSize(self):
return self.get_query_params().get('DiskSize')
def set_DiskSize(self,DiskSize):
self.add_query_param('DiskSize',DiskSize)
def get_DiskType(self):
return self.get_query_params().get('DiskType')
def set_DiskType(self,DiskType):
self.add_query_param('DiskType',DiskType)
def get_VswitchId(self):
return self.get_query_params().get('VswitchId')
def set_VswitchId(self,VswitchId):
self.add_query_param('VswitchId',VswitchId)
def METHOD_NAME(self):
return self.get_query_params().get('PeriodUnit')
def set_PeriodUnit(self,PeriodUnit):
self.add_query_param('PeriodUnit',PeriodUnit)
def get_AutoRenew(self):
return self.get_query_params().get('AutoRenew')
def set_AutoRenew(self,AutoRenew):
self.add_query_param('AutoRenew',AutoRenew)
def get_DataCenterName(self):
return self.get_query_params().get('DataCenterName')
def set_DataCenterName(self,DataCenterName):
self.add_query_param('DataCenterName',DataCenterName)
def get_NodeCount(self):
return self.get_query_params().get('NodeCount')
def set_NodeCount(self,NodeCount):
self.add_query_param('NodeCount',NodeCount)
def get_VpcId(self):
return self.get_query_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_query_param('VpcId',VpcId)
def get_ZoneId(self):
return self.get_query_params().get('ZoneId')
def set_ZoneId(self,ZoneId):
self.add_query_param('ZoneId',ZoneId)
def get_PayType(self):
return self.get_query_params().get('PayType')
def set_PayType(self,PayType):
self.add_query_param('PayType',PayType) | null
150 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkess.endpoint import endpoint_data
class ModifyScalingRuleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ess', '2014-08-28', 'ModifyScalingRule','ess')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_AlarmDimensions(self): # RepeatList
return self.get_query_params().get('AlarmDimension')
def set_AlarmDimensions(self, AlarmDimension): # RepeatList
for depth1 in range(len(AlarmDimension)):
if AlarmDimension[depth1].get('DimensionValue') is not None:
self.add_query_param('AlarmDimension.' + str(depth1 + 1) + '.DimensionValue', AlarmDimension[depth1].get('DimensionValue'))
if AlarmDimension[depth1].get('DimensionKey') is not None:
self.add_query_param('AlarmDimension.' + str(depth1 + 1) + '.DimensionKey', AlarmDimension[depth1].get('DimensionKey'))
def get_StepAdjustments(self): # RepeatList
return self.get_query_params().get('StepAdjustment')
def set_StepAdjustments(self, StepAdjustment): # RepeatList
for depth1 in range(len(StepAdjustment)):
if StepAdjustment[depth1].get('MetricIntervalUpperBound') is not None:
self.add_query_param('StepAdjustment.' + str(depth1 + 1) + '.MetricIntervalUpperBound', StepAdjustment[depth1].get('MetricIntervalUpperBound'))
if StepAdjustment[depth1].get('MetricIntervalLowerBound') is not None:
self.add_query_param('StepAdjustment.' + str(depth1 + 1) + '.MetricIntervalLowerBound', StepAdjustment[depth1].get('MetricIntervalLowerBound'))
if StepAdjustment[depth1].get('ScalingAdjustment') is not None:
self.add_query_param('StepAdjustment.' + str(depth1 + 1) + '.ScalingAdjustment', StepAdjustment[depth1].get('ScalingAdjustment'))
def get_DisableScaleIn(self): # Boolean
return self.get_query_params().get('DisableScaleIn')
def set_DisableScaleIn(self, DisableScaleIn): # Boolean
self.add_query_param('DisableScaleIn', DisableScaleIn)
def get_ScalingRuleId(self): # String
return self.get_query_params().get('ScalingRuleId')
def set_ScalingRuleId(self, ScalingRuleId): # String
self.add_query_param('ScalingRuleId', ScalingRuleId)
def get_InitialMaxSize(self): # Integer
return self.get_query_params().get('InitialMaxSize')
def set_InitialMaxSize(self, InitialMaxSize): # Integer
self.add_query_param('InitialMaxSize', InitialMaxSize)
def get_ScalingRuleName(self): # String
return self.get_query_params().get('ScalingRuleName')
def set_ScalingRuleName(self, ScalingRuleName): # String
self.add_query_param('ScalingRuleName', ScalingRuleName)
def get_Cooldown(self): # Integer
return self.get_query_params().get('Cooldown')
def set_Cooldown(self, Cooldown): # Integer
self.add_query_param('Cooldown', Cooldown)
def get_PredictiveValueBehavior(self): # String
return self.get_query_params().get('PredictiveValueBehavior')
def set_PredictiveValueBehavior(self, PredictiveValueBehavior): # String
self.add_query_param('PredictiveValueBehavior', PredictiveValueBehavior)
def get_ScaleInEvaluationCount(self): # Integer
return self.get_query_params().get('ScaleInEvaluationCount')
def set_ScaleInEvaluationCount(self, ScaleInEvaluationCount): # Integer
self.add_query_param('ScaleInEvaluationCount', ScaleInEvaluationCount)
def get_MetricName(self): # String
return self.get_query_params().get('MetricName')
def set_MetricName(self, MetricName): # String
self.add_query_param('MetricName', MetricName)
def get_PredictiveScalingMode(self): # String
return self.get_query_params().get('PredictiveScalingMode')
def set_PredictiveScalingMode(self, PredictiveScalingMode): # String
self.add_query_param('PredictiveScalingMode', PredictiveScalingMode)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_AdjustmentValue(self): # Integer
return self.get_query_params().get('AdjustmentValue')
def set_AdjustmentValue(self, AdjustmentValue): # Integer
self.add_query_param('AdjustmentValue', AdjustmentValue)
def get_EstimatedInstanceWarmup(self): # Integer
return self.get_query_params().get('EstimatedInstanceWarmup')
def METHOD_NAME(self, EstimatedInstanceWarmup): # Integer
self.add_query_param('EstimatedInstanceWarmup', EstimatedInstanceWarmup)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_PredictiveTaskBufferTime(self): # Integer
return self.get_query_params().get('PredictiveTaskBufferTime')
def set_PredictiveTaskBufferTime(self, PredictiveTaskBufferTime): # Integer
self.add_query_param('PredictiveTaskBufferTime', PredictiveTaskBufferTime)
def get_AdjustmentType(self): # String
return self.get_query_params().get('AdjustmentType')
def set_AdjustmentType(self, AdjustmentType): # String
self.add_query_param('AdjustmentType', AdjustmentType)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_PredictiveValueBuffer(self): # Integer
return self.get_query_params().get('PredictiveValueBuffer')
def set_PredictiveValueBuffer(self, PredictiveValueBuffer): # Integer
self.add_query_param('PredictiveValueBuffer', PredictiveValueBuffer)
def get_ScaleOutEvaluationCount(self): # Integer
return self.get_query_params().get('ScaleOutEvaluationCount')
def set_ScaleOutEvaluationCount(self, ScaleOutEvaluationCount): # Integer
self.add_query_param('ScaleOutEvaluationCount', ScaleOutEvaluationCount)
def get_MinAdjustmentMagnitude(self): # Integer
return self.get_query_params().get('MinAdjustmentMagnitude')
def set_MinAdjustmentMagnitude(self, MinAdjustmentMagnitude): # Integer
self.add_query_param('MinAdjustmentMagnitude', MinAdjustmentMagnitude)
def get_TargetValue(self): # Float
return self.get_query_params().get('TargetValue')
def set_TargetValue(self, TargetValue): # Float
self.add_query_param('TargetValue', TargetValue) | null |
151 | """ handle reading a csv from an external service, defaults are from Goodreads """
import csv
from datetime import timedelta
from typing import Iterable, Optional
from django.utils import timezone
from bookwyrm.models import ImportJob, ImportItem, SiteSettings, User
class Importer:
"""Generic class for csv data import from an outside service"""
service = "Import"
delimiter = ","
encoding = "UTF-8"
# these are from Goodreads
row_mappings_guesses = [
("id", ["id", "book id"]),
("title", ["title"]),
("authors", ["author", "authors", "primary author"]),
("isbn_10", ["isbn10", "isbn", "isbn/uid"]),
("isbn_13", ["isbn13", "isbn", "isbns", "isbn/uid"]),
("shelf", ["shelf", "exclusive shelf", "read status", "bookshelf"]),
("review_name", ["review name"]),
("review_body", ["my review", "review"]),
("rating", ["my rating", "rating", "star rating"]),
("date_added", ["date added", "entry date", "added"]),
("date_started", ["date started", "started"]),
("date_finished", ["date finished", "last date read", "date read", "finished"]),
]
date_fields = ["date_added", "date_started", "date_finished"]
shelf_mapping_guesses = {
"to-read": ["to-read", "want to read"],
"read": ["read", "already read"],
"reading": ["currently-reading", "reading", "currently reading"],
}
# pylint: disable=too-many-locals
def create_job(
self, user: User, csv_file: Iterable[str], include_reviews: bool, privacy: str
) -> ImportJob:
"""check over a csv and creates a database entry for the job"""
csv_reader = csv.DictReader(csv_file, delimiter=self.delimiter)
rows = list(csv_reader)
if len(rows) < 1:
raise ValueError("CSV file is empty")
mappings = (
self.create_row_mappings(list(fieldnames))
if (fieldnames := csv_reader.fieldnames)
else {}
)
job = ImportJob.objects.create(
user=user,
include_reviews=include_reviews,
privacy=privacy,
mappings=mappings,
source=self.service,
)
enforce_limit, allowed_imports = self.get_import_limit(user)
if enforce_limit and allowed_imports <= 0:
job.complete_job()
return job
for index, entry in enumerate(rows):
if enforce_limit and index >= allowed_imports:
break
self.create_item(job, index, entry)
return job
def METHOD_NAME(self, job: ImportJob) -> None:
"""patch up a job that was in the old format"""
items = job.items
first_item = items.first()
if first_item is None:
return
headers = list(first_item.data.keys())
job.mappings = self.create_row_mappings(headers)
job.updated_date = timezone.now()
job.save()
for item in items.all():
normalized = self.normalize_row(item.data, job.mappings)
normalized["shelf"] = self.get_shelf(normalized)
item.normalized_data = normalized
item.save()
def create_row_mappings(self, headers: list[str]) -> dict[str, Optional[str]]:
"""guess what the headers mean"""
mappings = {}
for (key, guesses) in self.row_mappings_guesses:
values = [h for h in headers if h.lower() in guesses]
value = values[0] if len(values) else None
if value:
headers.remove(value)
mappings[key] = value
return mappings
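# Illustrative example (added): for a Goodreads-style header row
# ['Book Id', 'Title', 'Author', 'My Rating'], this returns
# {'id': 'Book Id', 'title': 'Title', 'authors': 'Author',
# 'rating': 'My Rating', ...} with every unmatched key mapped to None.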
def create_item(self, job: ImportJob, index: int, data: dict[str, str]) -> None:
"""creates and saves an import item"""
normalized = self.normalize_row(data, job.mappings)
normalized["shelf"] = self.get_shelf(normalized)
ImportItem(job=job, index=index, data=data, normalized_data=normalized).save()
def get_shelf(self, normalized_row: dict[str, Optional[str]]) -> Optional[str]:
"""determine which shelf to use"""
shelf_name = normalized_row.get("shelf")
if not shelf_name:
return None
shelf_name = shelf_name.lower()
shelf = [
s for (s, gs) in self.shelf_mapping_guesses.items() if shelf_name in gs
]
return shelf[0] if shelf else None
# pylint: disable=no-self-use
def normalize_row(
self, entry: dict[str, str], mappings: dict[str, Optional[str]]
) -> dict[str, Optional[str]]:
"""use the dataclass to create the formatted row of data"""
return {k: entry.get(v) if v else None for k, v in mappings.items()}
# pylint: disable=no-self-use
def get_import_limit(self, user: User) -> tuple[int, int]:
"""check if import limit is set and return how many imports are left"""
site_settings = SiteSettings.objects.get()
import_size_limit = site_settings.import_size_limit
import_limit_reset = site_settings.import_limit_reset
enforce_limit = import_size_limit and import_limit_reset
allowed_imports = 0
if enforce_limit:
time_range = timezone.now() - timedelta(days=import_limit_reset)
import_jobs = ImportJob.objects.filter(
user=user, created_date__gte=time_range
)
# pylint: disable=consider-using-generator
imported_books = sum([job.successful_item_count for job in import_jobs])
allowed_imports = import_size_limit - imported_books
return enforce_limit, allowed_imports
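# Illustrative example (added): with import_size_limit=100,
# import_limit_reset=30 and 60 books imported in the last 30 days, the
# enforce flag is truthy and allowed_imports == 40.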
def create_retry_job(
self, user: User, original_job: ImportJob, items: list[ImportItem]
) -> ImportJob:
"""retry items that didn't import"""
job = ImportJob.objects.create(
user=user,
include_reviews=original_job.include_reviews,
privacy=original_job.privacy,
source=original_job.source,
# TODO: allow users to adjust mappings
mappings=original_job.mappings,
retry=True,
)
enforce_limit, allowed_imports = self.get_import_limit(user)
if enforce_limit and allowed_imports <= 0:
job.complete_job()
return job
for index, item in enumerate(items):
if enforce_limit and index >= allowed_imports:
break
# this will re-normalize the raw data
self.create_item(job, item.index, item.data)
return job | null |
152 | import shutil
import warnings
from contextlib import contextmanager
from pathlib import Path
import pytest
from pharmpy.internals.fs.cwd import chdir
from pharmpy.model import Model
from pharmpy.tools import run_amd
from pharmpy.workflows import default_tool_database
def test_invalid_search_space_raises(tmp_path, testdata):
with chdir(tmp_path):
db, model = _load_model(testdata)
with pytest.raises(
ValueError,
match='Invalid `search_space`, could not be parsed:',
):
run_amd(
model,
results=model.modelfit_results,
search_space='XYZ',
path=db.path,
resume=True,
)
def test_skip_most(tmp_path, testdata):
with chdir(tmp_path):
db, model = _load_model(testdata)
with METHOD_NAME() as record:
res = run_amd(
model,
results=model.modelfit_results,
modeltype='basic_pk',
administration='oral',
order=['iovsearch', 'allometry', 'covariates'],
occasion=None,
path=db.path,
resume=True,
)
_validate_record(
record,
[
'IOVsearch will be skipped because occasion is None',
'Allometry will most likely be skipped',
'COVsearch will most likely be skipped',
'Skipping Allometry',
'Skipping COVsearch',
'AMDResults.summary_models is None',
'AMDResults.summary_individuals_count is None',
],
)
assert len(res.summary_tool) == 1
assert res.summary_models is None
assert res.summary_individuals_count is None
assert res.final_model == 'start'
def test_raise_allometry(tmp_path, testdata):
with chdir(tmp_path):
db, model = _load_model(testdata, with_datainfo=True)
with pytest.raises(
ValueError,
match='Invalid `allometric_variable`',
):
run_amd(
model,
results=model.modelfit_results,
modeltype='basic_pk',
administration='oral',
order=['allometry'],
allometric_variable='SJDLKSDJ',
path=db.path,
resume=True,
)
def test_raise_covsearch(tmp_path, testdata):
with chdir(tmp_path):
db, model = _load_model(testdata, with_datainfo=True)
with pytest.raises(
ValueError,
match='Invalid `search_space` because of invalid covariate .* got `SJDLKSDJ`',
):
run_amd(
model,
results=model.modelfit_results,
search_space='LET(CONTINUOUS, [AGE, SJDLKSDJ]); LET(CATEGORICAL, [SEX])',
modeltype='basic_pk',
administration='oral',
order=['covariates'],
path=db.path,
resume=True,
)
def test_skip_covsearch(tmp_path, testdata):
with chdir(tmp_path):
db, model = _load_model(testdata, with_datainfo=True)
with METHOD_NAME() as record:
res = run_amd(
model,
results=model.modelfit_results,
search_space='LET(CONTINUOUS, []); LET(CATEGORICAL, [])',
modeltype='basic_pk',
administration='oral',
order=['covariates'],
path=db.path,
resume=True,
)
_validate_record(
record,
[
'COVsearch will most likely be skipped',
'Skipping COVsearch',
'AMDResults.summary_models is None',
'AMDResults.summary_individuals_count is None',
],
)
assert len(res.summary_tool) == 1
assert res.summary_models is None
assert res.summary_individuals_count is None
assert res.final_model == 'start'
def test_skip_iovsearch_one_occasion(tmp_path, testdata):
with chdir(tmp_path):
db, model = _load_model(testdata)
with METHOD_NAME() as record:
res = run_amd(
model,
results=model.modelfit_results,
modeltype='basic_pk',
administration='oral',
order=['iovsearch'],
occasion='XAT2',
path=db.path,
resume=True,
)
_validate_record(
record,
[
'Skipping IOVsearch because there are less than two occasion categories',
'AMDResults.summary_models is None',
'AMDResults.summary_individuals_count is None',
],
)
assert len(res.summary_tool) == 1
assert res.summary_models is None
assert res.summary_individuals_count is None
assert res.final_model == 'start'
def test_skip_iovsearch_missing_occasion_raises(tmp_path, testdata):
with chdir(tmp_path):
db, model = _load_model(testdata)
with pytest.raises(
ValueError,
match='Invalid `occasion`',
):
run_amd(
model,
results=model.modelfit_results,
modeltype='basic_pk',
administration='oral',
order=['iovsearch'],
occasion='XYZ',
path=db.path,
resume=True,
)
def _load_model(testdata: Path, with_datainfo: bool = False):
models = testdata / 'nonmem' / 'models'
# NOTE We need to make a local copy and read the model locally to avoid
# reading the .datainfo, which contains allometry information we do not
# always want to pick up.
shutil.copy2(models / 'mox_simulated_normal.csv', '.')
if with_datainfo:
shutil.copy2(models / 'mox_simulated_normal.datainfo', '.')
shutil.copy2(models / 'mox2.mod', '.')
shutil.copy2(models / 'mox2.ext', '.')
shutil.copy2(models / 'mox2.lst', '.')
shutil.copy2(models / 'mox2.phi', '.')
model = Model.parse_model('mox2.mod')
model = model.replace(name='start')
# NOTE Load results directly in DB to skip fitting
db_tool = default_tool_database(toolname='amd', path='amd_dir1')
db_fit = default_tool_database(toolname='modelfit', path=db_tool.path / 'modelfit')
with db_fit.model_database.transaction(model) as txn:
txn.store_model()
txn.store_modelfit_results()
# NOTE These are needed because currently caching of the results cannot
# read from the JSON file created above.
txn.store_local_file(models / 'mox2.ext', 'start.ext')
txn.store_local_file(models / 'mox2.lst', 'start.lst')
txn.store_local_file(models / 'mox2.phi', 'start.phi')
return db_tool, model
@contextmanager
def METHOD_NAME():
with pytest.warns(Warning) as record:
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
module='distributed',
category=UserWarning,
)
warnings.filterwarnings(
"ignore",
module='distributed',
category=ResourceWarning,
)
warnings.filterwarnings(
"ignore",
module='distributed',
category=RuntimeWarning,
)
yield record
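# Added note: pytest.warns records every warning raised inside the block,
# while the nested filters are intended to keep unrelated 'distributed'
# warnings out of the capture checked by _validate_record below.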
def _validate_record(record, expected):
assert len(record) == len(expected)
for warning, match in zip(record, expected):
assert match in str(warning.message) | null |
153 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdts.endpoint import endpoint_data
class DescribeDtsJobsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dts', '2020-01-01', 'DescribeDtsJobs','dts')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_WithoutDbList(self): # Boolean
return self.get_query_params().get('WithoutDbList')
def set_WithoutDbList(self, WithoutDbList): # Boolean
self.add_query_param('WithoutDbList', WithoutDbList)
def get_OrderDirection(self): # String
return self.get_query_params().get('OrderDirection')
def set_OrderDirection(self, OrderDirection): # String
self.add_query_param('OrderDirection', OrderDirection)
def get_DedicatedClusterId(self): # String
return self.get_query_params().get('DedicatedClusterId')
def set_DedicatedClusterId(self, DedicatedClusterId): # String
self.add_query_param('DedicatedClusterId', DedicatedClusterId)
def get_Type(self): # String
return self.get_query_params().get('Type')
def set_Type(self, Type): # String
self.add_query_param('Type', Type)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_OrderColumn(self): # String
return self.get_query_params().get('OrderColumn')
def set_OrderColumn(self, OrderColumn): # String
self.add_query_param('OrderColumn', OrderColumn)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_DtsBisLabel(self): # String
return self.get_query_params().get('DtsBisLabel')
def set_DtsBisLabel(self, DtsBisLabel): # String
self.add_query_param('DtsBisLabel', DtsBisLabel)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_DtsJobId(self): # String
return self.get_query_params().get('DtsJobId')
def set_DtsJobId(self, DtsJobId): # String
self.add_query_param('DtsJobId', DtsJobId)
def get_GroupId(self): # String
return self.get_query_params().get('GroupId')
def set_GroupId(self, GroupId): # String
self.add_query_param('GroupId', GroupId)
def get_Params(self): # String
return self.get_query_params().get('Params')
def set_Params(self, Params): # String
self.add_query_param('Params', Params)
def get_OwnerId(self): # String
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # String
self.add_query_param('OwnerId', OwnerId)
def get_JobType(self): # String
return self.get_query_params().get('JobType')
def set_JobType(self, JobType): # String
self.add_query_param('JobType', JobType)
def get_Tags(self): # String
return self.get_query_params().get('Tags')
def METHOD_NAME(self, Tags): # String
self.add_query_param('Tags', Tags)
def get_Region(self): # String
return self.get_query_params().get('Region')
def set_Region(self, Region): # String
self.add_query_param('Region', Region)
def get_DtsInstanceId(self): # String
return self.get_query_params().get('DtsInstanceId')
def set_DtsInstanceId(self, DtsInstanceId): # String
self.add_query_param('DtsInstanceId', DtsInstanceId)
def get_Status(self): # String
return self.get_query_params().get('Status')
def set_Status(self, Status): # String
self.add_query_param('Status', Status) | null |
154 | """
DataProvider related decorators.
"""
# I'd like to decorate the factory methods that give data_providers by the name they can be accessed from. e.g.:
# @provides( 'id_seq' ) # where 'id_seq' is some 'data_format' string/alias
# def get_id_seq_provider( dataset, **settings ):
# then in some central dispatch (maybe data.Data), have it look up the proper method by the data_format string
# also it would be good to have this decorator maintain a list of available providers (for a datatype)
# i don't particularly want to cut up method names ( get_([\w_]*)_provider )
# adapted from: http://stackoverflow.com
# /questions/14095616/python-can-i-programmatically-decorate-class-methods-from-a-class-instance
import copy
import logging
from functools import wraps
from urllib.parse import unquote
log = logging.getLogger(__name__)
_DATAPROVIDER_CLASS_MAP_KEY = "dataproviders"
_DATAPROVIDER_METHOD_NAME_KEY = "_dataprovider_name"
def METHOD_NAME(cls):
"""
Wraps a class (generally a Datatype), finds methods within that have been
decorated with `@dataprovider` and adds them, by their name, to a map
in the class.
This allows a class to maintain a name -> method map, effectively
'registering' dataprovider factory methods::
@has_dataproviders
class MyDtype( data.Data ):
@dataprovider_factory( 'bler' )
def provide_some_bler( self, dataset, **settings ):
'''blerblerbler'''
dataset_source = providers.DatasetDataProvider( dataset )
# ... chain other, intermediate providers here
return providers.BlerDataProvider( dataset_source, **settings )
# use the base method in data.Data
provider = dataset.datatype.dataprovider( dataset, 'bler',
my_setting='blah', ... )
# OR directly from the map
provider = dataset.datatype.dataproviders[ 'bler' ]( dataset,
my_setting='blah', ... )
"""
# init the class dataproviders map if necessary
if not hasattr(cls, _DATAPROVIDER_CLASS_MAP_KEY):
setattr(cls, _DATAPROVIDER_CLASS_MAP_KEY, {})
else:
# need to deepcopy or subclasses will modify super.dataproviders as well
existing_dataproviders = getattr(cls, _DATAPROVIDER_CLASS_MAP_KEY)
copied_dataproviders = copy.deepcopy(existing_dataproviders)
setattr(cls, _DATAPROVIDER_CLASS_MAP_KEY, copied_dataproviders)
dataproviders = getattr(cls, _DATAPROVIDER_CLASS_MAP_KEY)
# scan for methods with dataprovider names and add them to the map
# note: this has a 'cascading' effect
# where it's possible to override a super's provider with a sub's
for attr_key, attr_value in cls.__dict__.items():
# can't use isinstance( attr_value, MethodType ) bc of wrapping
if (
(callable(attr_value))
and (not attr_key.startswith("__"))
and (getattr(attr_value, _DATAPROVIDER_METHOD_NAME_KEY, None))
):
name = getattr(attr_value, _DATAPROVIDER_METHOD_NAME_KEY)
dataproviders[name] = attr_value
return cls
def dataprovider_factory(name, settings=None):
"""
Wraps a class method and marks it as a dataprovider factory and creates a
function to parse query strings to __init__ arguments as the
`parse_query_string_settings` attribute of the factory function.
An example use of the `parse_query_string_settings`::
kwargs = dataset.datatype.dataproviders[ provider ].parse_query_string_settings( query_kwargs )
return list( dataset.datatype.dataprovider( dataset, provider, **kwargs ) )
:param name: what name/key to register the factory under in `cls.dataproviders`
:type name: any hashable var
:param settings: dictionary containing key/type pairs for parsing query strings
to __init__ arguments
:type settings: dictionary
"""
# TODO:?? use *args for settings allowing multiple dictionaries
# make a function available through the name->provider dispatch to parse query strings
# callable like:
# settings_dict = dataproviders[ provider_name ].parse_query_string_settings( query_kwargs )
# TODO: ugh - overly complicated but the best I could think of
def parse_query_string_settings(query_kwargs):
return _parse_query_string_settings(query_kwargs, settings)
def named_dataprovider_factory(func):
setattr(func, _DATAPROVIDER_METHOD_NAME_KEY, name)
func.parse_query_string_settings = parse_query_string_settings
func.settings = settings
# TODO: I want a way to inherit settings from the previous provider( this_name ) instead of defining over and over
@wraps(func)
def wrapped_dataprovider_factory(self, *args, **kwargs):
return func(self, *args, **kwargs)
return wrapped_dataprovider_factory
return named_dataprovider_factory
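# Illustrative example (added): with settings={'limit': 'int', 'cols': 'list:int'},
# parse_query_string_settings({'limit': '10', 'cols': '1,2,3'}) returns
# {'limit': 10, 'cols': [1, 2, 3]}.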
def _parse_query_string_settings(query_kwargs, settings=None):
"""
Parse the values in `query_kwargs` from strings to the proper types
listed in the same key in `settings`.
"""
# TODO: this was a relatively late addition: review and re-think
def list_from_query_string(s):
# assume csv
return s.split(",")
parsers = {
"int": int,
"float": float,
"bool": bool,
"list:str": lambda s: list_from_query_string(s),
"list:escaped": lambda s: [unquote(e) for e in list_from_query_string(s)],
"list:int": lambda s: [int(i) for i in list_from_query_string(s)],
}
settings = settings or {}
# yay! yet another set of query string parsers! <-- sarcasm
# work through the keys in settings finding matching keys in query_kwargs
# if found in both, get the expected/needed type from settings and store the new parsed value
# if we can't parse it (no parser, bad value), delete the key from query_kwargs so the provider will use the defaults
for key in settings:
if key in query_kwargs:
# TODO: this would be the place to sanitize any strings
query_value = query_kwargs[key]
needed_type = settings[key]
if needed_type != "str":
try:
query_kwargs[key] = parsers[needed_type](query_value)
except (KeyError, ValueError):
del query_kwargs[key]
# TODO:?? do we want to remove query_kwarg entries NOT in settings?
return query_kwargs | null |
155 | import IMP
import IMP.test
import IMP.algebra
from io import BytesIO
import pickle
class Tests(IMP.test.TestCase):
def test_trivial_constructor(self):
"""Test trivial SpherePatch3D constructor"""
p = IMP.algebra.SpherePatch3D()
def METHOD_NAME(self):
"""Check that a patch of a sphere is constructed correctly"""
center = IMP.algebra.Vector3D(0.0, 0.0, 0.0)
radius = 5.0
sph = IMP.algebra.Sphere3D(center, radius)
xy_plane = IMP.algebra.Plane3D(
IMP.algebra.Vector3D(0., 0., 0.), IMP.algebra.Vector3D(0., 0., 1.))
patch = IMP.algebra.SpherePatch3D(sph, xy_plane)
# Not implemented
self.assertRaises(Exception, IMP.algebra.get_area, patch)
g = IMP.algebra.get_sphere_patch_3d_geometry(patch)
self.assertLess(IMP.algebra.get_distance(
patch.get_plane().get_normal(), g.get_plane().get_normal()), 1e-4)
self.assertLess(IMP.algebra.get_distance(
patch.get_sphere().get_center(),
g.get_sphere().get_center()), 1e-4)
bb = IMP.algebra.get_bounding_box(patch)
self.assertLess(IMP.algebra.get_distance(
bb.get_corner(0), IMP.algebra.Vector3D(-5, -5, -5)), 1e-4)
self.assertLess(IMP.algebra.get_distance(
bb.get_corner(1), IMP.algebra.Vector3D(5, 5, 5)), 1e-4)
p = patch.get_boundary_point()
self.assertLess(IMP.algebra.get_distance(
p, IMP.algebra.Vector3D(3.53553, 3.53553, 0)), 1e-3)
sio = BytesIO()
patch.show(sio)
self.assertEqual(sio.getvalue(), b'(0 0 0: 5)(0: 0 0 1)')
self.assertTrue(patch.get_plane().get_is_above(
IMP.algebra.Vector3D(1., 0., 1.)))
self.assertTrue(patch.get_contains(IMP.algebra.Vector3D(0.0, 1.0, 0.4)))
self.assertFalse(patch.get_contains(
IMP.algebra.Vector3D(0.0, 1.0, -0.4)))
def test_get_uniform_surface_cover(self):
"""Check uniform cover on a patch of a sphere"""
center = IMP.algebra.Vector3D(0.0, 0.0, 0.0)
radius = 5.0
sph = IMP.algebra.Sphere3D(center, radius)
xz_plane = IMP.algebra.Plane3D(
IMP.algebra.Vector3D(0., 0., 0.), IMP.algebra.Vector3D(0., 1., 0.))
patch = IMP.algebra.SpherePatch3D(sph, xz_plane)
numpts = 600
points = IMP.algebra.get_uniform_surface_cover(patch, numpts)
# check that the centroid of the sampled points make sense
sampled_centroid = IMP.algebra.Vector3D(0.0, 0.0, 0.0) # accumulator for the sampled mean
expected_sampled_centroid = IMP.algebra.Vector3D(0.0, radius / 2, 0.0)
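# Added note: for a uniform cover of the y >= 0 hemisphere of radius r, the
# surface centroid sits at (0, r/2, 0), so the sampled mean below should
# approach expected_sampled_centroid.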
self.assertEqual(len(points), numpts)
for p in points:
sampled_centroid = sampled_centroid + p
sampled_centroid = sampled_centroid * (1.0 / len(points))
sampled_centroid.show()
self.assertAlmostEqual(
(sampled_centroid - expected_sampled_centroid).get_magnitude(), 0,
delta=4 * radius / numpts ** .5)
def _assert_equal(self, a, b):
spherea = a.get_sphere()
sphereb = b.get_sphere()
self.assertLess(IMP.algebra.get_distance(
spherea.get_center(), sphereb.get_center()), 1e-4)
self.assertAlmostEqual(spherea.get_radius(), sphereb.get_radius(),
delta=1e-4)
planea = a.get_plane()
planeb = b.get_plane()
self.assertLess(IMP.algebra.get_distance(
planea.get_normal(), planeb.get_normal()), 1e-4)
self.assertLess(IMP.algebra.get_distance(
planea.get_point_on_plane(), planeb.get_point_on_plane()), 1e-4)
def test_pickle(self):
"""Test (un-)pickle of SpherePatch3D"""
sph = IMP.algebra.Sphere3D(IMP.algebra.Vector3D(1.0, 2.0, 3.0), 5.0)
plane = IMP.algebra.Plane3D(
IMP.algebra.Vector3D(0., 0., 0.), IMP.algebra.Vector3D(0., 1., 0.))
p1 = IMP.algebra.SpherePatch3D(sph, plane)
sph = IMP.algebra.Sphere3D(IMP.algebra.Vector3D(4.0, 5.0, 6.0), 5.0)
plane = IMP.algebra.Plane3D(
IMP.algebra.Vector3D(0., 0., 0.), IMP.algebra.Vector3D(1., 0., 0.))
p2 = IMP.algebra.SpherePatch3D(sph, plane)
p2.foo = 'bar'
dump = pickle.dumps((p1, p2))
newp1, newp2 = pickle.loads(dump)
self._assert_equal(p1, newp1)
self._assert_equal(p2, newp2)
self.assertEqual(newp2.foo, 'bar')
self.assertRaises(TypeError, p1._set_from_binary, 42)
if __name__ == '__main__':
IMP.test.main() | null |
156 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class CreateNetworkInterfaceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CreateNetworkInterface','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_QueueNumber(self): # Integer
return self.get_query_params().get('QueueNumber')
def set_QueueNumber(self, QueueNumber): # Integer
self.add_query_param('QueueNumber', QueueNumber)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_Ipv4Prefixs(self): # RepeatList
return self.get_query_params().get('Ipv4Prefix')
def set_Ipv4Prefixs(self, Ipv4Prefix): # RepeatList
for depth1 in range(len(Ipv4Prefix)):
self.add_query_param('Ipv4Prefix.' + str(depth1 + 1), Ipv4Prefix[depth1])
def get_SecondaryPrivateIpAddressCount(self): # Integer
return self.get_query_params().get('SecondaryPrivateIpAddressCount')
def set_SecondaryPrivateIpAddressCount(self, SecondaryPrivateIpAddressCount): # Integer
self.add_query_param('SecondaryPrivateIpAddressCount', SecondaryPrivateIpAddressCount)
def get_BusinessType(self): # String
return self.get_query_params().get('BusinessType')
def set_BusinessType(self, BusinessType): # String
self.add_query_param('BusinessType', BusinessType)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
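# Illustrative example (added): set_Tags([{'Key': 'env', 'Value': 'prod'}]) adds
# the flattened parameters 'Tag.1.Key' = 'env' and 'Tag.1.Value' = 'prod'.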
def METHOD_NAME(self): # String
return self.get_query_params().get('NetworkInterfaceName')
def set_NetworkInterfaceName(self, NetworkInterfaceName): # String
self.add_query_param('NetworkInterfaceName', NetworkInterfaceName)
def get_Visible(self): # Boolean
return self.get_query_params().get('Visible')
def set_Visible(self, Visible): # Boolean
self.add_query_param('Visible', Visible)
def get_Ipv6AddressCount(self): # Integer
return self.get_query_params().get('Ipv6AddressCount')
def set_Ipv6AddressCount(self, Ipv6AddressCount): # Integer
self.add_query_param('Ipv6AddressCount', Ipv6AddressCount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_PrivateIpAddresss(self): # RepeatList
return self.get_query_params().get('PrivateIpAddress')
def set_PrivateIpAddresss(self, PrivateIpAddress): # RepeatList
for depth1 in range(len(PrivateIpAddress)):
self.add_query_param('PrivateIpAddress.' + str(depth1 + 1), PrivateIpAddress[depth1])
def get_Ipv6Addresss(self): # RepeatList
return self.get_query_params().get('Ipv6Address')
def set_Ipv6Addresss(self, Ipv6Address): # RepeatList
for depth1 in range(len(Ipv6Address)):
self.add_query_param('Ipv6Address.' + str(depth1 + 1), Ipv6Address[depth1])
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Ipv6Prefixs(self): # RepeatList
return self.get_query_params().get('Ipv6Prefix')
def set_Ipv6Prefixs(self, Ipv6Prefix): # RepeatList
for depth1 in range(len(Ipv6Prefix)):
self.add_query_param('Ipv6Prefix.' + str(depth1 + 1), Ipv6Prefix[depth1])
def get_SecurityGroupId(self): # String
return self.get_query_params().get('SecurityGroupId')
def set_SecurityGroupId(self, SecurityGroupId): # String
self.add_query_param('SecurityGroupId', SecurityGroupId)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_Ipv6PrefixCount(self): # Integer
return self.get_query_params().get('Ipv6PrefixCount')
def set_Ipv6PrefixCount(self, Ipv6PrefixCount): # Integer
self.add_query_param('Ipv6PrefixCount', Ipv6PrefixCount)
def get_InstanceType(self): # String
return self.get_query_params().get('InstanceType')
def set_InstanceType(self, InstanceType): # String
self.add_query_param('InstanceType', InstanceType)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_QueuePairNumber(self): # Integer
return self.get_query_params().get('QueuePairNumber')
def set_QueuePairNumber(self, QueuePairNumber): # Integer
self.add_query_param('QueuePairNumber', QueuePairNumber)
def get_SecurityGroupIdss(self): # RepeatList
return self.get_query_params().get('SecurityGroupIds')
def set_SecurityGroupIdss(self, SecurityGroupIds): # RepeatList
for depth1 in range(len(SecurityGroupIds)):
self.add_query_param('SecurityGroupIds.' + str(depth1 + 1), SecurityGroupIds[depth1])
def get_NetworkInterfaceTrafficMode(self): # String
return self.get_query_params().get('NetworkInterfaceTrafficMode')
def set_NetworkInterfaceTrafficMode(self, NetworkInterfaceTrafficMode): # String
self.add_query_param('NetworkInterfaceTrafficMode', NetworkInterfaceTrafficMode)
def get_Ipv4PrefixCount(self): # Integer
return self.get_query_params().get('Ipv4PrefixCount')
def set_Ipv4PrefixCount(self, Ipv4PrefixCount): # Integer
self.add_query_param('Ipv4PrefixCount', Ipv4PrefixCount)
def get_PrimaryIpAddress(self): # String
return self.get_query_params().get('PrimaryIpAddress')
def set_PrimaryIpAddress(self, PrimaryIpAddress): # String
self.add_query_param('PrimaryIpAddress', PrimaryIpAddress) | null |
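# A hedged usage sketch (illustrative, not part of the generated file above):
# dispatching one of these generated requests through the core AcsClient.
# The credentials, region, and resource ids below are placeholders.
from aliyunsdkcore.client import AcsClient
from aliyunsdkecs.request.v20140526.CreateNetworkInterfaceRequest import CreateNetworkInterfaceRequest

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = CreateNetworkInterfaceRequest()
request.set_VSwitchId('vsw-xxxxxxxx')        # placeholder vSwitch id
request.set_SecurityGroupId('sg-xxxxxxxx')   # placeholder security group id
# RepeatList setters expand into indexed query params: Ipv4Prefix.1, Ipv4Prefix.2, ...
request.set_Ipv4Prefixs(['192.168.0.0/28', '192.168.0.16/28'])
response = client.do_action_with_exception(request)  # raw bytes, JSON by default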
157 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdomain.endpoint import endpoint_data
class SaveSingleTaskForCreatingOrderActivateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'SaveSingleTaskForCreatingOrderActivate')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Country(self): # String
return self.get_query_params().get('Country')
def set_Country(self, Country): # String
self.add_query_param('Country', Country)
def get_SubscriptionDuration(self): # Integer
return self.get_query_params().get('SubscriptionDuration')
def set_SubscriptionDuration(self, SubscriptionDuration): # Integer
self.add_query_param('SubscriptionDuration', SubscriptionDuration)
def get_PermitPremiumActivation(self): # Boolean
return self.get_query_params().get('PermitPremiumActivation')
def set_PermitPremiumActivation(self, PermitPremiumActivation): # Boolean
self.add_query_param('PermitPremiumActivation', PermitPremiumActivation)
def get_City(self): # String
return self.get_query_params().get('City')
def set_City(self, City): # String
self.add_query_param('City', City)
def get_Dns2(self): # String
return self.get_query_params().get('Dns2')
def set_Dns2(self, Dns2): # String
self.add_query_param('Dns2', Dns2)
def get_Dns1(self): # String
return self.get_query_params().get('Dns1')
def set_Dns1(self, Dns1): # String
self.add_query_param('Dns1', Dns1)
def get_RegistrantProfileId(self): # Long
return self.get_query_params().get('RegistrantProfileId')
def set_RegistrantProfileId(self, RegistrantProfileId): # Long
self.add_query_param('RegistrantProfileId', RegistrantProfileId)
def get_CouponNo(self): # String
return self.get_query_params().get('CouponNo')
def set_CouponNo(self, CouponNo): # String
self.add_query_param('CouponNo', CouponNo)
def get_AliyunDns(self): # Boolean
return self.get_query_params().get('AliyunDns')
def set_AliyunDns(self, AliyunDns): # Boolean
self.add_query_param('AliyunDns', AliyunDns)
def get_ZhCity(self): # String
return self.get_query_params().get('ZhCity')
def set_ZhCity(self, ZhCity): # String
self.add_query_param('ZhCity', ZhCity)
def get_TelExt(self): # String
return self.get_query_params().get('TelExt')
def set_TelExt(self, TelExt): # String
self.add_query_param('TelExt', TelExt)
def get_ZhRegistrantName(self): # String
return self.get_query_params().get('ZhRegistrantName')
def set_ZhRegistrantName(self, ZhRegistrantName): # String
self.add_query_param('ZhRegistrantName', ZhRegistrantName)
def get_Province(self): # String
return self.get_query_params().get('Province')
def set_Province(self, Province): # String
self.add_query_param('Province', Province)
def get_PostalCode(self): # String
return self.get_query_params().get('PostalCode')
def set_PostalCode(self, PostalCode): # String
self.add_query_param('PostalCode', PostalCode)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_Email(self): # String
return self.get_query_params().get('Email')
def set_Email(self, Email): # String
self.add_query_param('Email', Email)
def get_ZhRegistrantOrganization(self): # String
return self.get_query_params().get('ZhRegistrantOrganization')
def set_ZhRegistrantOrganization(self, ZhRegistrantOrganization): # String
self.add_query_param('ZhRegistrantOrganization', ZhRegistrantOrganization)
def get_Address(self): # String
return self.get_query_params().get('Address')
def set_Address(self, Address): # String
self.add_query_param('Address', Address)
def get_TelArea(self): # String
return self.get_query_params().get('TelArea')
def set_TelArea(self, TelArea): # String
self.add_query_param('TelArea', TelArea)
def get_DomainName(self): # String
return self.get_query_params().get('DomainName')
def set_DomainName(self, DomainName): # String
self.add_query_param('DomainName', DomainName)
def get_ZhAddress(self): # String
return self.get_query_params().get('ZhAddress')
def set_ZhAddress(self, ZhAddress): # String
self.add_query_param('ZhAddress', ZhAddress)
def get_RegistrantType(self): # String
return self.get_query_params().get('RegistrantType')
def set_RegistrantType(self, RegistrantType): # String
self.add_query_param('RegistrantType', RegistrantType)
def get_Telephone(self): # String
return self.get_query_params().get('Telephone')
def set_Telephone(self, Telephone): # String
self.add_query_param('Telephone', Telephone)
def get_TrademarkDomainActivation(self): # Boolean
return self.get_query_params().get('TrademarkDomainActivation')
def set_TrademarkDomainActivation(self, TrademarkDomainActivation): # Boolean
self.add_query_param('TrademarkDomainActivation', TrademarkDomainActivation)
def get_UseCoupon(self): # Boolean
return self.get_query_params().get('UseCoupon')
def set_UseCoupon(self, UseCoupon): # Boolean
self.add_query_param('UseCoupon', UseCoupon)
def get_ZhProvince(self): # String
return self.get_query_params().get('ZhProvince')
def set_ZhProvince(self, ZhProvince): # String
self.add_query_param('ZhProvince', ZhProvince)
def get_RegistrantOrganization(self): # String
return self.get_query_params().get('RegistrantOrganization')
def set_RegistrantOrganization(self, RegistrantOrganization): # String
self.add_query_param('RegistrantOrganization', RegistrantOrganization)
def get_PromotionNo(self): # String
return self.get_query_params().get('PromotionNo')
def set_PromotionNo(self, PromotionNo): # String
self.add_query_param('PromotionNo', PromotionNo)
def get_EnableDomainProxy(self): # Boolean
return self.get_query_params().get('EnableDomainProxy')
def set_EnableDomainProxy(self, EnableDomainProxy): # Boolean
self.add_query_param('EnableDomainProxy', EnableDomainProxy)
def METHOD_NAME(self): # String
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self, UserClientIp): # String
self.add_query_param('UserClientIp', UserClientIp)
def get_RegistrantName(self): # String
return self.get_query_params().get('RegistrantName')
def set_RegistrantName(self, RegistrantName): # String
self.add_query_param('RegistrantName', RegistrantName)
def get_UsePromotion(self): # Boolean
return self.get_query_params().get('UsePromotion')
def set_UsePromotion(self, UsePromotion): # Boolean
self.add_query_param('UsePromotion', UsePromotion) | null |
158 | import logging
import re
from galaxy.datatypes.data import (
get_file_peek,
Text,
)
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes.protocols import DatasetProtocol
from galaxy.datatypes.sniff import (
build_sniff_from_prefix,
FilePrefix,
get_headers,
)
from galaxy.datatypes.tabular import Tabular
from galaxy.util import nice_size
log = logging.getLogger(__name__)
@build_sniff_from_prefix
class Smat(Text):
file_ext = "smat"
def display_peek(self, dataset: DatasetProtocol) -> str:
try:
return dataset.peek
except Exception:
return f"ESTScan scores matrices ({nice_size(dataset.get_size())})"
def set_peek(self, dataset: DatasetProtocol, **kwd) -> None:
if not dataset.dataset.purged:
dataset.peek = get_file_peek(dataset.file_name)
dataset.blurb = "ESTScan scores matrices"
else:
dataset.peek = "file does not exist"
dataset.blurb = "file purged from disc"
def METHOD_NAME(self, file_prefix: FilePrefix) -> bool:
"""
        The use of ESTScan implies the creation of score matrices which
        reflect the codon preferences in the studied organisms. The
        ESTScan package includes scripts for generating these files. The
        output of these scripts consists of the matrices, one for each
        isochore, which look like this:
FORMAT: hse_4is.conf CODING REGION 6 3 1 s C+G: 0 44
-1 0 2 -2
2 1 -8 0
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test_space.txt')
>>> Smat().sniff(fname)
False
>>> fname = get_test_fname('test_tab.bed')
>>> Smat().sniff(fname)
False
>>> fname = get_test_fname('1.smat')
>>> Smat().sniff(fname)
True
"""
line_no = 0
fh = file_prefix.string_io()
for line in fh:
line_no += 1
if line_no > 10000:
return True
if line_no == 1 and not line.startswith("FORMAT"):
# The first line is always the start of a format section.
return False
if not line.startswith("FORMAT"):
if line.find("\t") >= 0:
# Smat files are not tabular.
return False
items = line.split()
if len(items) != 4:
return False
for item in items:
# Make sure each item is an integer.
if re.match(r"[-+]?\d+$", item) is None:
return False
# Ensure at least a few matching lines are found.
return line_no > 2
class PlantTribesKsComponents(Tabular):
file_ext = "ptkscmp"
MetadataElement(
name="number_comp",
default=0,
desc="Number of significant components in the Ks distribution",
readonly=True,
visible=True,
no_value=0,
)
def display_peek(self, dataset: DatasetProtocol) -> str:
try:
return dataset.peek
except Exception:
return f"Significant components in the Ks distribution ({nice_size(dataset.get_size())})"
def set_meta(self, dataset: DatasetProtocol, overwrite: bool = True, **kwd) -> None:
"""
Set the number of significant components in the Ks distribution.
        The dataset will always be fewer than about 10 lines long.
"""
super().set_meta(dataset, overwrite=overwrite, **kwd)
significant_components = []
with open(dataset.file_name) as fh:
for i, line in enumerate(fh):
if i == 0:
# Skip the first line.
continue
line = line.strip()
items = line.split()
try:
                    # Columns may be separated by tabs or spaces; split() handles both.
significant_components.append(int(items[2]))
except Exception:
continue
if len(significant_components) > 0:
dataset.metadata.number_comp = max(significant_components)
def set_peek(self, dataset: DatasetProtocol, **kwd) -> None:
if not dataset.dataset.purged:
dataset.peek = get_file_peek(dataset.file_name)
if dataset.metadata.number_comp == 1:
dataset.blurb = "1 significant component"
else:
dataset.blurb = f"{dataset.metadata.number_comp} significant components"
else:
dataset.peek = "file does not exist"
dataset.blurb = "file purged from disk"
def sniff(self, filename: str) -> bool:
"""
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname('test_tab.bed')
>>> PlantTribesKsComponents().sniff(fname)
False
>>> fname = get_test_fname('1.ptkscmp')
>>> PlantTribesKsComponents().sniff(fname)
True
"""
try:
line_item_str = get_headers(filename, "\\t", 1)[0][0]
return line_item_str == "species\tn\tnumber_comp\tlnL\tAIC\tBIC\tmean\tvariance\tporportion"
except Exception:
return False | null |
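# Illustration (not from the Galaxy source): the per-line rule the Smat
# sniffer above applies to non-FORMAT lines, shown on literal strings.
import re

def _is_smat_score_line(line: str) -> bool:
    if "\t" in line:           # Smat score lines are never tab-separated
        return False
    items = line.split()
    # Exactly four whitespace-separated integers, e.g. "-1 0 2 -2".
    return len(items) == 4 and all(re.match(r"[-+]?\d+$", item) for item in items)

assert _is_smat_score_line("-1 0 2 -2")
assert not _is_smat_score_line("-1\t0\t2\t-2")   # tabs disqualify the line
assert not _is_smat_score_line("1 2 3")          # wrong column count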
159 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdyplsapi.endpoint import endpoint_data
class BindAxbRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dyplsapi', '2017-05-25', 'BindAxb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_CallDisplayType(self): # Integer
return self.get_query_params().get('CallDisplayType')
def set_CallDisplayType(self, CallDisplayType): # Integer
self.add_query_param('CallDisplayType', CallDisplayType)
def get_CallTimeout(self): # Integer
return self.get_query_params().get('CallTimeout')
def set_CallTimeout(self, CallTimeout): # Integer
self.add_query_param('CallTimeout', CallTimeout)
def get_PhoneNoX(self): # String
return self.get_query_params().get('PhoneNoX')
def set_PhoneNoX(self, PhoneNoX): # String
self.add_query_param('PhoneNoX', PhoneNoX)
def get_RingConfig(self): # String
return self.get_query_params().get('RingConfig')
def set_RingConfig(self, RingConfig): # String
self.add_query_param('RingConfig', RingConfig)
def get_ASRStatus(self): # Boolean
return self.get_query_params().get('ASRStatus')
def set_ASRStatus(self, ASRStatus): # Boolean
self.add_query_param('ASRStatus', ASRStatus)
def get_PhoneNoB(self): # String
return self.get_query_params().get('PhoneNoB')
def set_PhoneNoB(self, PhoneNoB): # String
self.add_query_param('PhoneNoB', PhoneNoB)
def get_PhoneNoA(self): # String
return self.get_query_params().get('PhoneNoA')
def set_PhoneNoA(self, PhoneNoA): # String
self.add_query_param('PhoneNoA', PhoneNoA)
def get_ExpectCity(self): # String
return self.get_query_params().get('ExpectCity')
def set_ExpectCity(self, ExpectCity): # String
self.add_query_param('ExpectCity', ExpectCity)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_DtmfConfig(self): # String
return self.get_query_params().get('DtmfConfig')
def set_DtmfConfig(self, DtmfConfig): # String
self.add_query_param('DtmfConfig', DtmfConfig)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def METHOD_NAME(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_OutOrderId(self): # String
return self.get_query_params().get('OutOrderId')
def set_OutOrderId(self, OutOrderId): # String
self.add_query_param('OutOrderId', OutOrderId)
def get_PoolKey(self): # String
return self.get_query_params().get('PoolKey')
def set_PoolKey(self, PoolKey): # String
self.add_query_param('PoolKey', PoolKey)
def get_Expiration(self): # String
return self.get_query_params().get('Expiration')
def set_Expiration(self, Expiration): # String
self.add_query_param('Expiration', Expiration)
def get_IsRecordingEnabled(self): # Boolean
return self.get_query_params().get('IsRecordingEnabled')
def set_IsRecordingEnabled(self, IsRecordingEnabled): # Boolean
self.add_query_param('IsRecordingEnabled', IsRecordingEnabled)
def get_OutId(self): # String
return self.get_query_params().get('OutId')
def set_OutId(self, OutId): # String
self.add_query_param('OutId', OutId)
def get_ASRModelId(self): # String
return self.get_query_params().get('ASRModelId')
def set_ASRModelId(self, ASRModelId): # String
self.add_query_param('ASRModelId', ASRModelId)
def get_CallRestrict(self): # String
return self.get_query_params().get('CallRestrict')
def set_CallRestrict(self, CallRestrict): # String
self.add_query_param('CallRestrict', CallRestrict) | null |
160 | #!/usr/bin/python3 -u
# Copyright 2021 Memgraph Ltd.
#
# Use of this software is governed by the Business Source License
# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
# License, and you may not use this file except in compliance with the Business Source License.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0, included in the file
# licenses/APL.txt.
import argparse
import atexit
import os
import subprocess
import sys
import tempfile
import time
from typing import List
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
PROJECT_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", "..", ".."))
UNAUTHORIZED_ERROR = r"^You are not authorized to execute this query.*?Please contact your database administrator\."
def wait_for_server(port, delay=0.1):
cmd = ["nc", "-z", "-w", "1", "127.0.0.1", str(port)]
while subprocess.call(cmd) != 0:
time.sleep(0.01)
time.sleep(delay)
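# A sketch of the same readiness poll without shelling out to `nc`, using only
# the standard library (illustrative; the tests below keep using wait_for_server).
import socket
def wait_for_server_socket(port, delay=0.1, host="127.0.0.1"):
    while True:
        try:
            # Succeeds once something is listening on the port.
            with socket.create_connection((host, port), timeout=1.0):
                break
        except OSError:
            time.sleep(0.01)
    time.sleep(delay)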
def execute_tester(
binary, queries, should_fail=False, failure_message="", username="", password="", check_failure=True
):
args = [binary, "--username", username, "--password", password]
if should_fail:
args.append("--should-fail")
if failure_message:
args.extend(["--failure-message", failure_message])
if check_failure:
args.append("--check-failure")
args.extend(queries)
subprocess.run(args).check_returncode()
def execute_filtering(
binary: str, queries: List[str], expected: int, username: str = "", password: str = "", db: str = "memgraph"
) -> None:
args = [binary, "--username", username, "--password", password, "--use-db", db]
args.extend(queries)
args.append(str(expected))
subprocess.run(args).check_returncode()
def execute_test(memgraph_binary: str, tester_binary: str, filtering_binary: str) -> None:
storage_directory = tempfile.TemporaryDirectory()
memgraph_args = [memgraph_binary, "--data-directory", storage_directory.name]
def execute_admin_queries(queries):
return execute_tester(
tester_binary, queries, should_fail=False, check_failure=True, username="admin", password="admin"
)
def execute_user_queries(queries, should_fail=False, failure_message="", check_failure=True):
return execute_tester(tester_binary, queries, should_fail, failure_message, "user", "user", check_failure)
# Start the memgraph binary
memgraph = subprocess.Popen(list(map(str, memgraph_args)))
time.sleep(0.1)
assert memgraph.poll() is None, "Memgraph process died prematurely!"
wait_for_server(7687)
# Register cleanup function
@atexit.register
def cleanup():
if memgraph.poll() is None:
memgraph.terminate()
assert memgraph.wait() == 0, "Memgraph process didn't exit cleanly!"
# Prepare all users
def setup_user():
execute_admin_queries(
[
"CREATE USER admin IDENTIFIED BY 'admin'",
"GRANT ALL PRIVILEGES TO admin",
"CREATE USER user IDENTIFIED BY 'user'",
"GRANT ALL PRIVILEGES TO user",
"GRANT LABELS :label1, :label2, :label3 TO user",
"GRANT EDGE_TYPES :edgeType1, :edgeType2 TO user",
]
)
def METHOD_NAME():
execute_admin_queries(
[
"MERGE (l1:label1 {name: 'test1'})",
"MERGE (l2:label2 {name: 'test2'})",
"MATCH (l1:label1),(l2:label2) WHERE l1.name = 'test1' AND l2.name = 'test2' CREATE (l1)-[r:edgeType1]->(l2)",
"MERGE (l3:label3 {name: 'test3'})",
"MATCH (l1:label1),(l3:label3) WHERE l1.name = 'test1' AND l3.name = 'test3' CREATE (l1)-[r:edgeType2]->(l3)",
"MERGE (mix:label3:label1 {name: 'test4'})",
"MATCH (l1:label1),(mix:label3) WHERE l1.name = 'test1' AND mix.name = 'test4' CREATE (l1)-[r:edgeType2]->(mix)",
]
)
METHOD_NAME() # default db setup
execute_admin_queries(["CREATE DATABASE db1", "USE DATABASE db1"])
METHOD_NAME() # db1 setup
print("\033[1;36m~~ Starting edge filtering test ~~\033[0m")
for db in ["memgraph", "db1"]:
setup_user()
# Run the test with all combinations of permissions
execute_filtering(filtering_binary, ["MATCH (n)-[r]->(m) RETURN n,r,m"], 3, "user", "user", db)
execute_admin_queries(["DENY EDGE_TYPES :edgeType1 TO user"])
execute_filtering(filtering_binary, ["MATCH (n)-[r]->(m) RETURN n,r,m"], 2, "user", "user", db)
execute_admin_queries(["GRANT EDGE_TYPES :edgeType1 TO user", "DENY LABELS :label3 TO user"])
execute_filtering(filtering_binary, ["MATCH (n)-[r]->(m) RETURN n,r,m"], 1, "user", "user", db)
execute_admin_queries(["DENY LABELS :label1 TO user"])
execute_filtering(filtering_binary, ["MATCH (n)-[r]->(m) RETURN n,r,m"], 0, "user", "user", db)
execute_admin_queries(["REVOKE LABELS * FROM user", "REVOKE EDGE_TYPES * FROM user"])
execute_filtering(filtering_binary, ["MATCH (n)-[r]->(m) RETURN n,r,m"], 0, "user", "user", db)
print("\033[1;36m~~ Finished edge filtering test ~~\033[0m\n")
# Shutdown the memgraph binary
memgraph.terminate()
assert memgraph.wait() == 0, "Memgraph process didn't exit cleanly!"
if __name__ == "__main__":
memgraph_binary = os.path.join(PROJECT_DIR, "build", "memgraph")
tester_binary = os.path.join(PROJECT_DIR, "build", "tests", "integration", "lba", "tester")
filtering_binary = os.path.join(PROJECT_DIR, "build", "tests", "integration", "lba", "filtering")
parser = argparse.ArgumentParser()
parser.add_argument("--memgraph", default=memgraph_binary)
parser.add_argument("--tester", default=tester_binary)
parser.add_argument("--filtering", default=filtering_binary)
args = parser.parse_args()
execute_test(args.memgraph, args.tester, args.filtering)
sys.exit(0) | null |
161 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DescribeCapacityReservationsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeCapacityReservations','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_Platform(self): # String
return self.get_query_params().get('Platform')
def set_Platform(self, Platform): # String
self.add_query_param('Platform', Platform)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_NextToken(self): # String
return self.get_query_params().get('NextToken')
def set_NextToken(self, NextToken): # String
self.add_query_param('NextToken', NextToken)
def get_InstanceType(self): # String
return self.get_query_params().get('InstanceType')
def set_InstanceType(self, InstanceType): # String
self.add_query_param('InstanceType', InstanceType)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_InstanceChargeType(self): # String
return self.get_query_params().get('InstanceChargeType')
def set_InstanceChargeType(self, InstanceChargeType): # String
self.add_query_param('InstanceChargeType', InstanceChargeType)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_PrivatePoolOptionsIds(self): # String
return self.get_query_params().get('PrivatePoolOptions.Ids')
def set_PrivatePoolOptionsIds(self, PrivatePoolOptionsIds): # String
self.add_query_param('PrivatePoolOptions.Ids', PrivatePoolOptionsIds)
def get_MaxResults(self): # Integer
return self.get_query_params().get('MaxResults')
def METHOD_NAME(self, MaxResults): # Integer
self.add_query_param('MaxResults', MaxResults)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_Status(self): # String
return self.get_query_params().get('Status')
def set_Status(self, Status): # String
self.add_query_param('Status', Status) | null |
162 | from typing import Optional, Type
from collections import OrderedDict
from AnyQt.QtWidgets import QLineEdit, QSizePolicy
from Orange.data import Table
from Orange.data.sql.backend import Backend
from Orange.data.sql.backend.base import BackendError
from Orange.widgets import gui, report
from Orange.widgets.credentials import CredentialManager
from Orange.widgets.settings import Setting
from Orange.widgets.utils.signals import Output
from Orange.widgets.widget import OWWidget, Msg
class OWBaseSql(OWWidget, openclass=True):
"""Base widget for connecting to a database.
Override `get_backend` when subclassing to get corresponding backend.
"""
class Outputs:
data = Output("Data", Table)
class Error(OWWidget.Error):
connection = Msg("{}")
want_main_area = False
resizing_enabled = False
host = Setting(None) # type: Optional[str]
port = Setting(None) # type: Optional[str]
database = Setting(None) # type: Optional[str]
schema = Setting(None) # type: Optional[str]
username = ""
password = ""
def __init__(self):
super().__init__()
self.backend = None # type: Optional[Backend]
self.data_desc_table = None # type: Optional[Table]
self.database_desc = None # type: Optional[OrderedDict]
self._setup_gui()
self.connect()
def _setup_gui(self):
self.controlArea.setMinimumWidth(360)
vbox = gui.vBox(self.controlArea, "Server")
self.serverbox = gui.vBox(vbox)
self.servertext = QLineEdit(self.serverbox)
self.servertext.setPlaceholderText("Server")
self.servertext.setToolTip("Server")
self.servertext.editingFinished.connect(self._load_credentials)
if self.host:
self.servertext.setText(self.host if not self.port else
"{}:{}".format(self.host, self.port))
self.serverbox.layout().addWidget(self.servertext)
self.databasetext = QLineEdit(self.serverbox)
self.databasetext.setPlaceholderText("Database[/Schema]")
self.databasetext.setToolTip("Database or optionally Database/Schema")
if self.database:
self.databasetext.setText(
self.database if not self.schema else
"{}/{}".format(self.database, self.schema))
self.serverbox.layout().addWidget(self.databasetext)
self.usernametext = QLineEdit(self.serverbox)
self.usernametext.setPlaceholderText("Username")
self.usernametext.setToolTip("Username")
self.serverbox.layout().addWidget(self.usernametext)
self.passwordtext = QLineEdit(self.serverbox)
self.passwordtext.setPlaceholderText("Password")
self.passwordtext.setToolTip("Password")
self.passwordtext.setEchoMode(QLineEdit.Password)
self.serverbox.layout().addWidget(self.passwordtext)
self._load_credentials()
self.connectbutton = gui.button(self.serverbox, self, "Connect",
callback=self.connect)
self.connectbutton.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
def _load_credentials(self):
self._parse_host_port()
cm = self._credential_manager(self.host, self.port)
self.username = cm.username
self.password = cm.password
if self.username:
self.usernametext.setText(self.username)
if self.password:
self.passwordtext.setText(self.password)
def METHOD_NAME(self):
cm = self._credential_manager(self.host, self.port)
cm.username = self.username or ""
cm.password = self.password or ""
@staticmethod
def _credential_manager(host, port):
return CredentialManager("SQL Table: {}:{}".format(host, port))
def _parse_host_port(self):
hostport = self.servertext.text().split(":")
self.host = hostport[0]
self.port = hostport[1] if len(hostport) == 2 else None
def _check_db_settings(self):
self._parse_host_port()
self.database, _, self.schema = self.databasetext.text().partition("/")
self.username = self.usernametext.text() or None
self.password = self.passwordtext.text() or None
def connect(self):
self.clear()
self._check_db_settings()
if not self.host or not self.database:
return
try:
backend = self.get_backend()
if backend is None:
return
self.backend = backend(dict(
host=self.host,
port=self.port,
database=self.database,
user=self.username,
password=self.password
))
self.on_connection_success()
except BackendError as err:
self.on_connection_error(err)
def get_backend(self) -> Type[Backend]:
"""
Derived widgets should override this to get corresponding backend.
Returns
-------
backend: Type[Backend]
"""
raise NotImplementedError
def on_connection_success(self):
self.METHOD_NAME()
self.database_desc = OrderedDict((
("Host", self.host), ("Port", self.port),
("Database", self.database), ("User name", self.username)
))
def on_connection_error(self, err):
error = str(err).split("\n")[0]
self.Error.connection(error)
def open_table(self):
data = self.get_table()
self.data_desc_table = data
self.Outputs.data.send(data)
def get_table(self) -> Table:
"""
Derived widgets should override this to get corresponding table.
Returns
-------
table: Table
"""
raise NotImplementedError
def clear(self):
self.Error.connection.clear()
self.database_desc = None
self.data_desc_table = None
self.Outputs.data.send(None)
def send_report(self):
if not self.database_desc:
self.report_paragraph("No database connection.")
return
self.report_items("Database", self.database_desc)
if self.data_desc_table:
self.report_items("Data",
report.describe_data(self.data_desc_table)) | null |
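# A hedged sketch of the minimal pair of overrides a concrete widget supplies.
# The backend import path is an assumption about the installed Orange backends,
# not something taken from the file above.
class OWDemoSql(OWBaseSql):
    name = "Demo SQL"
    def get_backend(self):
        # Return the Backend *class*; OWBaseSql.connect() instantiates it with
        # the host/port/database/user/password gathered from the line edits.
        from Orange.data.sql.backend.postgres import Psycopg2Backend  # assumed path
        return Psycopg2Backend
    def get_table(self):
        # A real widget would build a SqlTable here; None keeps the sketch inert.
        return None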
163 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class ModifyForwardEntryRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'ModifyForwardEntry','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_ForwardTableId(self): # String
return self.get_query_params().get('ForwardTableId')
def set_ForwardTableId(self, ForwardTableId): # String
self.add_query_param('ForwardTableId', ForwardTableId)
def get_InternalIp(self): # String
return self.get_query_params().get('InternalIp')
def set_InternalIp(self, InternalIp): # String
self.add_query_param('InternalIp', InternalIp)
def get_ForwardEntryId(self): # String
return self.get_query_params().get('ForwardEntryId')
def set_ForwardEntryId(self, ForwardEntryId): # String
self.add_query_param('ForwardEntryId', ForwardEntryId)
def get_ExternalIp(self): # String
return self.get_query_params().get('ExternalIp')
def set_ExternalIp(self, ExternalIp): # String
self.add_query_param('ExternalIp', ExternalIp)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_IpProtocol(self): # String
return self.get_query_params().get('IpProtocol')
def set_IpProtocol(self, IpProtocol): # String
self.add_query_param('IpProtocol', IpProtocol)
def get_ForwardEntryName(self): # String
return self.get_query_params().get('ForwardEntryName')
def set_ForwardEntryName(self, ForwardEntryName): # String
self.add_query_param('ForwardEntryName', ForwardEntryName)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def METHOD_NAME(self): # String
return self.get_query_params().get('InternalPort')
def set_InternalPort(self, InternalPort): # String
self.add_query_param('InternalPort', InternalPort)
def get_PortBreak(self): # Boolean
return self.get_query_params().get('PortBreak')
def set_PortBreak(self, PortBreak): # Boolean
self.add_query_param('PortBreak', PortBreak)
def get_ExternalPort(self): # String
return self.get_query_params().get('ExternalPort')
def set_ExternalPort(self, ExternalPort): # String
self.add_query_param('ExternalPort', ExternalPort) | null |
164 | from functools import partial
from urllib.parse import unquote, urlparse
from django.conf import settings
from django.urls import reverse
from creme.creme_core.models import FakeContact, FakeOrganisation, FieldsConfig
from creme.creme_core.tests.views.base import ViewsTestCase
class SearchAndViewTestCase(ViewsTestCase):
SEARCHNVIEW_URL = reverse('creme_core__search_n_view_entities')
def assertDetailview(self, response, entity):
self.assertEqual(200, response.status_code)
self.assertRedirects(response, entity.get_absolute_url())
def test_one_model_one_field(self):
user = self.login_as_root_and_get()
phone = '123456789'
url = self.SEARCHNVIEW_URL
data = {
'models': 'creme_core-fakecontact',
'fields': 'phone',
'value': phone,
}
self.assertGET404(url, data=data)
create_contact = partial(FakeContact.objects.create, user=user)
onizuka = create_contact(first_name='Eikichi', last_name='Onizuka')
create_contact(first_name='Ryuji', last_name='Danma', phone='987654', mobile=phone)
self.assertGET404(url, data=data)
onizuka.phone = phone
onizuka.save()
self.assertPOST405(url, data=data)
self.assertDetailview(self.client.get(url, data=data, follow=True), onizuka)
def METHOD_NAME(self):
user = self.login_as_root_and_get()
phone = '999999999'
url = self.SEARCHNVIEW_URL
data = {
'models': 'creme_core-fakecontact',
'fields': 'phone,mobile',
'value': phone,
}
self.assertGET404(url, data=data)
create_contact = partial(FakeContact.objects.create, user=user)
onizuka = create_contact(first_name='Eikichi', last_name='Onizuka', mobile=phone)
create_contact(first_name='Ryuji', last_name='Danma', phone='987654')
self.assertDetailview(self.client.get(url, data=data, follow=True), onizuka)
def test_two_models_two_fields(self):
user = self.login_as_root_and_get()
phone = '696969'
url = self.SEARCHNVIEW_URL
data = {
'models': 'creme_core-fakecontact,creme_core-fakeorganisation',
'fields': 'phone,mobile',
'value': phone,
}
self.assertGET404(url, data=data)
create_contact = partial(FakeContact.objects.create, user=user)
onizuka = create_contact(first_name='Eikichi', last_name='Onizuka', mobile='55555')
create_contact(first_name='Ryuji', last_name='Danma', phone='987654')
onibaku = FakeOrganisation.objects.create(user=user, name='Onibaku', phone=phone)
self.assertDetailview(self.client.get(url, data=data, follow=True), onibaku)
onizuka.mobile = phone
onizuka.save()
self.assertDetailview(self.client.get(url, data=data, follow=True), onizuka)
def test_errors(self):
user = self.login_as_root_and_get()
url = self.SEARCHNVIEW_URL
base_data = {
'models': 'creme_core-fakecontact,creme_core-fakeorganisation',
'fields': 'mobile,phone',
'value': '696969',
}
create_contact = partial(FakeContact.objects.create, user=user)
create_contact(first_name='Eikichi', last_name='Onizuka', mobile='55555')
create_contact(first_name='Ryuji', last_name='Danma', phone='987654')
FakeOrganisation.objects.create(user=user, name='Onibaku', phone='54631357')
self.assertGET404(url, data={**base_data, 'models': 'foo-bar'})
self.assertGET404(url, data={**base_data, 'models': 'foobar'})
self.assertGET404(url, data={**base_data, 'values': ''})
self.assertGET404(url, data={**base_data, 'models': ''})
self.assertGET404(url, data={**base_data, 'fields': ''})
# Not CremeEntity
self.assertGET404(url, data={**base_data, 'models': 'persons-civility'})
def test_credentials(self):
user = self.login_as_standard()
self._set_all_perms_on_own(user)
phone = '44444'
url = self.SEARCHNVIEW_URL
data = {
'models': 'creme_core-fakecontact,creme_core-fakeorganisation',
'fields': 'phone,mobile',
'value': phone,
}
create_contact = FakeContact.objects.create
# Phone is OK but not readable
onizuka = create_contact(
user=self.get_root_user(), first_name='Eikichi', last_name='Onizuka', mobile=phone,
)
# Phone is KO
ryuji = create_contact(
user=user, first_name='Ryuji', last_name='Danma', phone='987654',
)
onibaku = FakeOrganisation.objects.create(
user=user, name='Onibaku', phone=phone,
) # Phone OK and readable
has_perm = user.has_perm_to_view
self.assertFalse(has_perm(onizuka))
self.assertTrue(has_perm(ryuji))
self.assertTrue(has_perm(onibaku))
self.assertDetailview(self.client.get(url, data=data, follow=True), onibaku)
def test_app_credentials(self):
user = self.login_as_standard(allowed_apps=['documents']) # Not 'creme_core'
phone = '31337'
data = {
'models': 'creme_core-fakecontact',
'fields': 'phone',
'value': phone,
}
# Would match if apps was allowed
FakeContact.objects.create(
user=user, first_name='Eikichi', last_name='Onizuka', phone=phone,
)
self.assertGET403(self.SEARCHNVIEW_URL, data=data)
def test_fields_config(self):
"Phone field is hidden."
self.login_as_root()
FieldsConfig.objects.create(
content_type=FakeContact,
descriptions=[('phone', {FieldsConfig.HIDDEN: True})],
)
self.assertGET409(
self.SEARCHNVIEW_URL,
data={
'models': 'creme_core-fakecontact',
'fields': 'phone',
'value': '123456789',
},
)
def test_not_logged(self):
url = self.SEARCHNVIEW_URL
models = 'creme_core-fakecontact'
fields = 'phone'
value = '123456789'
response = self.assertGET200(
url, follow=True,
data={
'models': models,
'fields': fields,
'value': value,
},
)
# NB: problem with order (only python3.5- ?)
# self.assertRedirects(
# response,
# '{login_url}?next={search_url}'
# '%3Fmodels%3Dcreme_core-fakecontact'
# '%26fields%3Dphone'
# '%26value%3D123456789'.format(
# login_url=reverse(settings.LOGIN_URL),
# search_url=url,
# )
# )
self.assertEqual(1, len(response.redirect_chain))
parsed_url = urlparse(response.redirect_chain[0][0])
self.assertEqual(reverse(settings.LOGIN_URL), parsed_url.path)
next_param = parsed_url.query
self.assertStartsWith(next_param, 'next=')
self.assertURLEqual(
f'{url}?models={models}&fields={fields}&value={value}',
unquote(next_param[len('next='):]),
) | null |
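# Illustration: the query string these tests exercise, composed with the
# standard library from the same models/fields/value parameters used above.
from urllib.parse import urlencode
params = {
    'models': 'creme_core-fakecontact,creme_core-fakeorganisation',
    'fields': 'phone,mobile',
    'value': '696969',
}
print(urlencode(params))
# -> models=creme_core-fakecontact%2Ccreme_core-fakeorganisation&fields=phone%2Cmobile&value=696969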
165 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class CreateSslVpnServerRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'CreateSslVpnServer','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_LocalSubnet(self): # String
return self.get_query_params().get('LocalSubnet')
def set_LocalSubnet(self, LocalSubnet): # String
self.add_query_param('LocalSubnet', LocalSubnet)
def get_IDaaSRegionId(self): # String
return self.get_query_params().get('IDaaSRegionId')
def set_IDaaSRegionId(self, IDaaSRegionId): # String
self.add_query_param('IDaaSRegionId', IDaaSRegionId)
def get_EnableMultiFactorAuth(self): # Boolean
return self.get_query_params().get('EnableMultiFactorAuth')
def set_EnableMultiFactorAuth(self, EnableMultiFactorAuth): # Boolean
self.add_query_param('EnableMultiFactorAuth', EnableMultiFactorAuth)
def get_IDaaSInstanceId(self): # String
return self.get_query_params().get('IDaaSInstanceId')
def set_IDaaSInstanceId(self, IDaaSInstanceId): # String
self.add_query_param('IDaaSInstanceId', IDaaSInstanceId)
def get_Cipher(self): # String
return self.get_query_params().get('Cipher')
def set_Cipher(self, Cipher): # String
self.add_query_param('Cipher', Cipher)
def get_ClientIpPool(self): # String
return self.get_query_params().get('ClientIpPool')
def set_ClientIpPool(self, ClientIpPool): # String
self.add_query_param('ClientIpPool', ClientIpPool)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_Compress(self): # Boolean
return self.get_query_params().get('Compress')
def set_Compress(self, Compress): # Boolean
self.add_query_param('Compress', Compress)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_VpnGatewayId(self): # String
return self.get_query_params().get('VpnGatewayId')
def set_VpnGatewayId(self, VpnGatewayId): # String
self.add_query_param('VpnGatewayId', VpnGatewayId)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def METHOD_NAME(self): # Integer
return self.get_query_params().get('Port')
def set_Port(self, Port): # Integer
self.add_query_param('Port', Port)
def get_Proto(self): # String
return self.get_query_params().get('Proto')
def set_Proto(self, Proto): # String
self.add_query_param('Proto', Proto)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name) | null |
166 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbr.endpoint import endpoint_data
class DescribeHanaBackupsAsyncRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'hbr', '2017-09-08', 'DescribeHanaBackupsAsync')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_RecoveryPointInTime(self): # Long
return self.get_query_params().get('RecoveryPointInTime')
def set_RecoveryPointInTime(self, RecoveryPointInTime): # Long
self.add_query_param('RecoveryPointInTime', RecoveryPointInTime)
def METHOD_NAME(self): # Long
return self.get_query_params().get('LogPosition')
def set_LogPosition(self, LogPosition): # Long
self.add_query_param('LogPosition', LogPosition)
def get_VaultId(self): # String
return self.get_query_params().get('VaultId')
def set_VaultId(self, VaultId): # String
self.add_query_param('VaultId', VaultId)
def get_IncludeLog(self): # Boolean
return self.get_query_params().get('IncludeLog')
def set_IncludeLog(self, IncludeLog): # Boolean
self.add_query_param('IncludeLog', IncludeLog)
def get_Source(self): # String
return self.get_query_params().get('Source')
def set_Source(self, Source): # String
self.add_query_param('Source', Source)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_Mode(self): # String
return self.get_query_params().get('Mode')
def set_Mode(self, Mode): # String
self.add_query_param('Mode', Mode)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_IncludeIncremental(self): # Boolean
return self.get_query_params().get('IncludeIncremental')
def set_IncludeIncremental(self, IncludeIncremental): # Boolean
self.add_query_param('IncludeIncremental', IncludeIncremental)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_UseBackint(self): # Boolean
return self.get_query_params().get('UseBackint')
def set_UseBackint(self, UseBackint): # Boolean
self.add_query_param('UseBackint', UseBackint)
def get_DatabaseName(self): # String
return self.get_query_params().get('DatabaseName')
def set_DatabaseName(self, DatabaseName): # String
self.add_query_param('DatabaseName', DatabaseName)
def get_VolumeId(self): # Integer
return self.get_query_params().get('VolumeId')
def set_VolumeId(self, VolumeId): # Integer
self.add_query_param('VolumeId', VolumeId)
def get_SourceClusterId(self): # String
return self.get_query_params().get('SourceClusterId')
def set_SourceClusterId(self, SourceClusterId): # String
self.add_query_param('SourceClusterId', SourceClusterId)
def get_IncludeDifferential(self): # Boolean
return self.get_query_params().get('IncludeDifferential')
def set_IncludeDifferential(self, IncludeDifferential): # Boolean
self.add_query_param('IncludeDifferential', IncludeDifferential)
def get_SystemCopy(self): # Boolean
return self.get_query_params().get('SystemCopy')
def set_SystemCopy(self, SystemCopy): # Boolean
self.add_query_param('SystemCopy', SystemCopy) | null |
167 | #!/usr/bin/python3
#
# Bluetooth connection agent (C) 2019 @kaymes
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, print_function, unicode_literals
from optparse import OptionParser
import sys
import time
import dbus
import dbus.service
import dbus.mainloop.glib
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
BUS_NAME = 'org.bluez'
AGENT_INTERFACE = 'org.bluez.Agent1'
AGENT_PATH = "/moode/agent"
WELL_KNOWN_NAME = 'org.moodeaudio.bluez.agent'
PAIR_MODE_INTERFACE = 'org.moodeaudio.bluez.agent.PairMode'
mainloop = None
class Rejected(dbus.DBusException):
_dbus_error_name = "org.bluez.Error.Rejected"
class Cancelled(dbus.DBusException):
_dbus_error_name = "org.bluez.Error.Canceled"
class Agent(dbus.service.Object):
def __init__(self, conn, object_path, bus_name):
super(Agent, self).__init__(conn, object_path, bus_name)
self.pair_mode_active_until = -float('inf')
@property
def pair_mode_active(self):
return time.time() <= self.pair_mode_active_until
@dbus.service.method(PAIR_MODE_INTERFACE, in_signature="d", out_signature="")
def ActivatePairMode(self, timeout):
print("ActivatePairMode called with %s" % (timeout,))
if timeout > 0:
self.pair_mode_active_until = time.time() + timeout
else:
self.pair_mode_active_until = -float('inf')
@dbus.service.method(AGENT_INTERFACE, in_signature="", out_signature="")
def Release(self):
print("Release")
if mainloop is not None:
mainloop.quit()
@dbus.service.method(AGENT_INTERFACE, in_signature="os", out_signature="")
def AuthorizeService(self, device, uuid):
if self.pair_mode_active:
print("Authorizing service (%s, %s)" % (device, uuid))
return
else:
raise Rejected("Pair mode not activated.")
@dbus.service.method(AGENT_INTERFACE, in_signature="o", out_signature="s")
def RequestPinCode(self, device):
raise Cancelled("Pin code not supported")
@dbus.service.method(AGENT_INTERFACE, in_signature="o", out_signature="u")
def RequestPasskey(self, device):
raise Cancelled("Passkey code not supported")
@dbus.service.method(AGENT_INTERFACE, in_signature="ouq", out_signature="")
def DisplayPasskey(self, device, passkey, entered):
raise Cancelled("Passkey code not supported")
@dbus.service.method(AGENT_INTERFACE, in_signature="os", out_signature="")
def DisplayPinCode(self, device, pincode):
raise Cancelled("Pin code not supported")
@dbus.service.method(AGENT_INTERFACE, in_signature="ou", out_signature="")
def RequestConfirmation(self, device, passkey):
raise Cancelled("Confirmation not supported")
@dbus.service.method(AGENT_INTERFACE, in_signature="o", out_signature="")
def RequestAuthorization(self, device):
if self.pair_mode_active:
print("Authorizing device %s" % (device))
return
else:
raise Rejected("Pair mode not activated.")
@dbus.service.method(AGENT_INTERFACE, in_signature="", out_signature="")
def METHOD_NAME(self):
print("Cancel")
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-a", "--agent", action="store_true", dest="agent", help="Run as Bluetooth agent. If not given, a running agent is contacted to change the pairing mode.")
parser.add_option("-w", "--wait_for_bluez", action="store_true", dest="wait_for_bluez", help="During system startup, bluez might not yet be available. this option causes the agent to re-try repeatedly until bluez has started.")
parser.add_option("-p", "--pair_mode", action="store_true", dest="pair_mode", help="Activate pairing mode.")
parser.add_option("-t", "--timeout", action="store", type="int", dest="timeout", help="Timeout in seconds for pairing mode. If not given, pairing mode will be active indefinitely.")
parser.add_option("-d", "--disable_pair_mode", action="store_true", dest="disable_pair_mode", help="Disable pairing mode.")
parser.add_option("-s", "--disable_pair_mode_switch", action="store_true", dest="disable_pair_mode_switch", help="Don't register a well known name with the dbus. This disables switching pairing mode from another process. Use it if the necessary dbus permissions aren't set up.")
(options, args) = parser.parse_args()
if options.pair_mode and options.disable_pair_mode:
print("The options pair_mode and disable_pair_mode are mutally exclusive.")
sys.exit()
if options.agent:
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
if options.disable_pair_mode_switch:
bus_name = None
else:
bus_name = dbus.service.BusName(WELL_KNOWN_NAME, bus)
agent = Agent(bus, AGENT_PATH, bus_name)
if options.pair_mode:
if options.timeout is not None:
timeout = options.timeout
else:
timeout = float('inf')
agent.ActivatePairMode(timeout)
mainloop = GObject.MainLoop()
while True:
try:
                obj = bus.get_object(BUS_NAME, "/org/bluez")
except dbus.exceptions.DBusException:
if options.wait_for_bluez:
time.sleep(1)
else:
raise
else:
break
manager = dbus.Interface(obj, "org.bluez.AgentManager1")
manager.RegisterAgent(AGENT_PATH, "NoInputNoOutput")
print("Agent registered")
manager.RequestDefaultAgent(AGENT_PATH)
print("Agent registered as default agent.")
mainloop.run()
else:
bus = dbus.SystemBus()
obj = bus.get_object(WELL_KNOWN_NAME, AGENT_PATH)
other_agent = dbus.Interface(obj, PAIR_MODE_INTERFACE)
if options.pair_mode:
if options.timeout is not None:
timeout = options.timeout
else:
timeout = float('inf')
other_agent.ActivatePairMode(float(timeout))
if options.disable_pair_mode:
other_agent.ActivatePairMode(float('-inf')) | null |
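A minimal sketch (not part of the record above) of switching pair mode from another process, assuming the agent is running with its well-known name registered and dbus-python is installed:

    import dbus

    bus = dbus.SystemBus()
    proxy = bus.get_object('org.moodeaudio.bluez.agent', '/moode/agent')
    pair_mode = dbus.Interface(proxy, 'org.moodeaudio.bluez.agent.PairMode')
    pair_mode.ActivatePairMode(60.0)  # accept pairing requests for the next 60 seconds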
168 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DescribeDisksFullStatusRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeDisksFullStatus','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_EventIds(self): # RepeatList
return self.get_query_params().get('EventId')
def set_EventIds(self, EventId): # RepeatList
for depth1 in range(len(EventId)):
self.add_query_param('EventId.' + str(depth1 + 1), EventId[depth1])
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_EventTimeStart(self): # String
return self.get_query_params().get('EventTime.Start')
def set_EventTimeStart(self, EventTimeStart): # String
self.add_query_param('EventTime.Start', EventTimeStart)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_DiskIds(self): # RepeatList
return self.get_query_params().get('DiskId')
def set_DiskIds(self, DiskId): # RepeatList
for depth1 in range(len(DiskId)):
self.add_query_param('DiskId.' + str(depth1 + 1), DiskId[depth1])
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def METHOD_NAME(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_EventTimeEnd(self): # String
return self.get_query_params().get('EventTime.End')
def set_EventTimeEnd(self, EventTimeEnd): # String
self.add_query_param('EventTime.End', EventTimeEnd)
def get_HealthStatus(self): # String
return self.get_query_params().get('HealthStatus')
def set_HealthStatus(self, HealthStatus): # String
self.add_query_param('HealthStatus', HealthStatus)
def get_EventType(self): # String
return self.get_query_params().get('EventType')
def set_EventType(self, EventType): # String
self.add_query_param('EventType', EventType)
def get_Status(self): # String
return self.get_query_params().get('Status')
def set_Status(self, Status): # String
self.add_query_param('Status', Status) | null |
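A minimal usage sketch for the request class above; the access key, secret, and region are placeholders, and do_action_with_exception is the standard aliyunsdkcore.client entry point (the later aliyun-sdk records in this dump are driven the same way):

    from aliyunsdkcore.client import AcsClient

    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = DescribeDisksFullStatusRequest()
    request.set_PageSize(10)
    request.set_HealthStatus('Warning')
    print(client.do_action_with_exception(request))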
169 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcams.endpoint import endpoint_data
import json
class SendChatappMessageRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'cams', '2020-06-06', 'SendChatappMessage')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_MessageType(self): # String
return self.get_body_params().get('MessageType')
def set_MessageType(self, MessageType): # String
self.add_body_params('MessageType', MessageType)
def get_Language(self): # String
return self.get_body_params().get('Language')
def set_Language(self, Language): # String
self.add_body_params('Language', Language)
def get_CustWabaId(self): # String
return self.get_body_params().get('CustWabaId')
def set_CustWabaId(self, CustWabaId): # String
self.add_body_params('CustWabaId', CustWabaId)
def get_Type(self): # String
return self.get_body_params().get('Type')
def set_Type(self, Type): # String
self.add_body_params('Type', Type)
def get_FallBackContent(self): # String
return self.get_body_params().get('FallBackContent')
def set_FallBackContent(self, FallBackContent): # String
self.add_body_params('FallBackContent', FallBackContent)
def get_Content(self): # String
return self.get_query_params().get('Content')
def set_Content(self, Content): # String
self.add_query_param('Content', Content)
def get_TemplateParams(self): # Map
return self.get_body_params().get('TemplateParams')
def set_TemplateParams(self, TemplateParams): # Map
self.add_body_params("TemplateParams", json.dumps(TemplateParams))
def get_Payload(self): # Array
return self.get_query_params().get('Payload')
def set_Payload(self, Payload): # Array
self.add_query_param("Payload", json.dumps(Payload))
def get_ChannelType(self): # String
return self.get_body_params().get('ChannelType')
def set_ChannelType(self, ChannelType): # String
self.add_body_params('ChannelType', ChannelType)
def get_From(self): # String
return self.get_body_params().get('From')
def set_From(self, _From): # String
self.add_body_params('From', _From)
def get_Tag(self): # String
return self.get_body_params().get('Tag')
def set_Tag(self, Tag): # String
self.add_body_params('Tag', Tag)
def get_TrackingData(self): # String
return self.get_body_params().get('TrackingData')
def set_TrackingData(self, TrackingData): # String
self.add_body_params('TrackingData', TrackingData)
def METHOD_NAME(self): # String
return self.get_body_params().get('TaskId')
def set_TaskId(self, TaskId): # String
self.add_body_params('TaskId', TaskId)
def get_IsvCode(self): # String
return self.get_body_params().get('IsvCode')
def set_IsvCode(self, IsvCode): # String
self.add_body_params('IsvCode', IsvCode)
def get_ContextMessageId(self): # String
return self.get_body_params().get('ContextMessageId')
def set_ContextMessageId(self, ContextMessageId): # String
self.add_body_params('ContextMessageId', ContextMessageId)
def get_Label(self): # String
return self.get_body_params().get('Label')
def set_Label(self, Label): # String
self.add_body_params('Label', Label)
def get_FallBackId(self): # String
return self.get_body_params().get('FallBackId')
def set_FallBackId(self, FallBackId): # String
self.add_body_params('FallBackId', FallBackId)
def get_Ttl(self): # Integer
return self.get_body_params().get('Ttl')
def set_Ttl(self, Ttl): # Integer
self.add_body_params('Ttl', Ttl)
def get_FallBackDuration(self): # Integer
return self.get_body_params().get('FallBackDuration')
def set_FallBackDuration(self, FallBackDuration): # Integer
self.add_body_params('FallBackDuration', FallBackDuration)
def get_CustSpaceId(self): # String
return self.get_body_params().get('CustSpaceId')
def set_CustSpaceId(self, CustSpaceId): # String
self.add_body_params('CustSpaceId', CustSpaceId)
def get_To(self): # String
return self.get_body_params().get('To')
def set_To(self, To): # String
self.add_body_params('To', To)
def get_TemplateCode(self): # String
return self.get_body_params().get('TemplateCode')
def set_TemplateCode(self, TemplateCode): # String
self.add_body_params('TemplateCode', TemplateCode) | null |
170 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class UpdateCdrsMonitorRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'CDRS', '2020-11-01', 'UpdateCdrsMonitor')
self.set_method('POST')
def get_CorpId(self):
return self.get_body_params().get('CorpId')
def set_CorpId(self,CorpId):
self.add_body_params('CorpId', CorpId)
def get_Description(self):
return self.get_body_params().get('Description')
def set_Description(self,Description):
self.add_body_params('Description', Description)
def get_RuleName(self):
return self.get_body_params().get('RuleName')
def set_RuleName(self,RuleName):
self.add_body_params('RuleName', RuleName)
def get_PicOperateType(self):
return self.get_body_params().get('PicOperateType')
def set_PicOperateType(self,PicOperateType):
self.add_body_params('PicOperateType', PicOperateType)
def get_AttributeName(self):
return self.get_body_params().get('AttributeName')
def set_AttributeName(self,AttributeName):
self.add_body_params('AttributeName', AttributeName)
def get_AttributeOperateType(self):
return self.get_body_params().get('AttributeOperateType')
def set_AttributeOperateType(self,AttributeOperateType):
self.add_body_params('AttributeOperateType', AttributeOperateType)
def get_RuleExpression(self):
return self.get_body_params().get('RuleExpression')
def set_RuleExpression(self,RuleExpression):
self.add_body_params('RuleExpression', RuleExpression)
def get_NotifierTimeOut(self):
return self.get_body_params().get('NotifierTimeOut')
def set_NotifierTimeOut(self,NotifierTimeOut):
self.add_body_params('NotifierTimeOut', NotifierTimeOut)
def get_TaskId(self):
return self.get_body_params().get('TaskId')
def set_TaskId(self,TaskId):
self.add_body_params('TaskId', TaskId)
def get_DeviceOperateType(self):
return self.get_body_params().get('DeviceOperateType')
def set_DeviceOperateType(self,DeviceOperateType):
self.add_body_params('DeviceOperateType', DeviceOperateType)
def get_PicList(self):
return self.get_body_params().get('PicList')
def set_PicList(self,PicList):
self.add_body_params('PicList', PicList)
def METHOD_NAME(self):
return self.get_body_params().get('AttributeValueList')
def set_AttributeValueList(self,AttributeValueList):
self.add_body_params('AttributeValueList', AttributeValueList)
def get_NotifierAppSecret(self):
return self.get_body_params().get('NotifierAppSecret')
def set_NotifierAppSecret(self,NotifierAppSecret):
self.add_body_params('NotifierAppSecret', NotifierAppSecret)
def get_NotifierExtendValues(self):
return self.get_body_params().get('NotifierExtendValues')
def set_NotifierExtendValues(self,NotifierExtendValues):
self.add_body_params('NotifierExtendValues', NotifierExtendValues)
def get_DeviceList(self):
return self.get_body_params().get('DeviceList')
def set_DeviceList(self,DeviceList):
self.add_body_params('DeviceList', DeviceList)
def get_NotifierUrl(self):
return self.get_body_params().get('NotifierUrl')
def set_NotifierUrl(self,NotifierUrl):
self.add_body_params('NotifierUrl', NotifierUrl)
def get_NotifierType(self):
return self.get_body_params().get('NotifierType')
def set_NotifierType(self,NotifierType):
self.add_body_params('NotifierType', NotifierType)
def get_AlgorithmVendor(self):
return self.get_body_params().get('AlgorithmVendor')
def set_AlgorithmVendor(self,AlgorithmVendor):
		self.add_body_params('AlgorithmVendor', AlgorithmVendor) | null
171 | #!/usr/bin/env python
import argparse
import pathlib
import re
import requests
import toml
HERE = pathlib.Path(__file__).resolve().parent
ROOT = HERE.parent
FALCON_CREATOR = 'kgriffs'
FALCON_REPOSITORY_API = 'https://api.github.com/repos/falconry/falcon'
STABLE_RELEASE_TAG = r'^\d+\.\d+\.\d+(\.post\d+)?$'
AUTHORS_SEPARATOR = '\n(et al.)\n\n'
AUTHORS_LINE = r'^\* (?:(?:.+ \(([\w-]+)\)$)|([\w-]+$))'
RST_CONTRIBUTOR_LINE = r'- `[\w-]+ <https://github.com/([\w-]+)>`__?\n'
RST_CONTRIBUTOR_TEMPLATE = '- `{login} <https://github.com/{login}>`__\n'
def get_latest_tag(headers=None):
uri = f'{FALCON_REPOSITORY_API}/tags'
resp = requests.get(uri, headers=headers)
resp.raise_for_status()
for tag in resp.json():
if re.match(STABLE_RELEASE_TAG, tag['name']):
return tag['name'], tag['commit']['sha']
def iter_commits(until=None, headers=None):
page = 1
uri = f'{FALCON_REPOSITORY_API}/commits'
resp = requests.get(uri, headers=headers)
resp.raise_for_status()
while commits := resp.json():
for commit in commits:
if until and commit['sha'].startswith(until):
return
yield commit
page += 1
uri = f'{FALCON_REPOSITORY_API}/commits?page={page}'
resp = requests.get(uri, headers=headers)
resp.raise_for_status()
def METHOD_NAME(until=None, headers=None):
result = {}
for commit in iter_commits(until, headers=headers):
author = commit.get('author') or {}
login = author.get('login')
if not login:
continue
if login in result:
result.pop(login)
# NOTE(vytas): Exploit dictionary ordering in Python 3.7+.
result[login] = commit['commit']['author']['name']
return dict(item for item in reversed(result.items()))
def _get_towncrier_filename():
with open(ROOT / 'pyproject.toml', 'r') as pyproject_toml:
project = toml.load(pyproject_toml)
return project['tool']['towncrier']['filename']
def _update_authors(contributors):
with open(ROOT / 'AUTHORS', 'r') as authors_file:
content = authors_file.read()
authors, separator, footer = content.partition(AUTHORS_SEPARATOR)
assert separator, 'AUTHORS file structure not understood, please inspect manually'
existing = set({FALCON_CREATOR})
for line in reversed(authors.splitlines()):
match = re.match(AUTHORS_LINE, line)
if not match:
break
login = match.group(1) or match.group(2)
existing.add(login.lower())
with open(ROOT / 'AUTHORS', 'w') as authors_file:
authors_file.write(authors)
for login, name in contributors.items():
if login.lower() in existing:
continue
if login == name:
authors_file.write(f'* {login}\n')
else:
authors_file.write(f'* {name} ({login})\n')
authors_file.write(separator)
authors_file.write(footer)
def _update_towncrier_template(template, contributors):
with open(template, 'r') as template_file:
content = template_file.read()
content, *matches = re.split(RST_CONTRIBUTOR_LINE, content)
contributors = set(contributors)
contributors.update(matches[::2])
for separator in matches[1::2]:
assert (
separator == ''
), f'unexpected separator between contributor lines: {separator!r}'
with open(template, 'w') as template_file:
template_file.write(content)
for login in sorted(contributors, key=lambda s: s.lower()):
template_file.write(RST_CONTRIBUTOR_TEMPLATE.format(login=login))
def main():
towncrier_template = _get_towncrier_filename()
description = (
'Find new contributors to Falcon since the last Git tag. '
'Optionally append them to AUTHORS and the active Towncrier template.'
)
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
'-a', '--auth', help='supply authentication token for GitHub requests'
)
parser.add_argument(
'-t',
'--treeish',
help='aggregate since this commit (default: detect latest tag)',
)
parser.add_argument(
'-n', '--dry-run', action='store_true', help='dry run: do not write any files'
)
parser.add_argument(
'--no-authors', action='store_true', help='do not write AUTHORS'
)
parser.add_argument(
'--no-towncrier', action='store_true', help=f'do not write {towncrier_template}'
)
args = parser.parse_args()
headers = {'Authorization': f'Bearer {args.auth}'} if args.auth else None
if args.treeish:
commit = args.treeish
        info = f'Contributors since commit {commit}:'
else:
tag, commit = get_latest_tag(headers=headers)
info = f'Contributors since the latest stable tag ({tag}):'
contributors = METHOD_NAME(until=commit, headers=headers)
if contributors:
print(info)
for login, name in contributors.items():
print(f' * {name} ({login})')
else:
print('No contributors (with a GitHub account) found since the latest tag.')
return
if not args.dry_run:
if not args.no_authors:
_update_authors(contributors)
if not args.no_towncrier:
_update_towncrier_template(towncrier_template, contributors)
if __name__ == '__main__':
main() | null |
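A minimal sketch of the aggregation helpers above without writing any files; the token is a placeholder and METHOD_NAME is the masked contributor-collection function:

    headers = {'Authorization': 'Bearer <github-token>'}
    tag, commit = get_latest_tag(headers=headers)
    contributors = METHOD_NAME(until=commit, headers=headers)
    print(f'{len(contributors)} new contributor(s) since {tag}')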
172 | import os.path
from unittest import TestCase
from pcs import settings
from pcs.common.reports import codes as report_codes
from pcs.lib.commands import cluster
from pcs_test.tools import fixture
from pcs_test.tools.command_env import get_env_tools
class SuccessMinimal(TestCase):
def setUp(self):
self.env_assist, self.config = get_env_tools(self)
self.nodes = [f"node{i}" for i in range(3)]
def test_live_cib_required(self):
self.config.env.set_cib_data("<cib />")
self.env_assist.assert_raise_library_error(
lambda: cluster.remove_nodes_from_cib(
self.env_assist.get_env(),
self.nodes,
),
[
fixture.error(
report_codes.LIVE_ENVIRONMENT_REQUIRED,
forbidden_options=["CIB"],
)
],
expected_in_processor=False,
)
def test_success_pcmk_running(self):
self.config.services.is_running("pacemaker")
for node in self.nodes:
self.config.runner.pcmk.remove_node(
node,
name=f"remove_node.{node}",
)
cluster.remove_nodes_from_cib(self.env_assist.get_env(), self.nodes)
def test_failure_pcmk_running(self):
err_msg = "an error"
self.config.services.is_running("pacemaker")
self.config.runner.pcmk.remove_node(
self.nodes[0],
)
self.config.runner.pcmk.remove_node(
self.nodes[1],
returncode=1,
stderr=err_msg,
name="remove_node_failure",
)
self.env_assist.assert_raise_library_error(
lambda: cluster.remove_nodes_from_cib(
self.env_assist.get_env(),
self.nodes,
),
[
fixture.error(
report_codes.NODE_REMOVE_IN_PACEMAKER_FAILED,
node_list_to_remove=[self.nodes[1]],
node="",
reason=err_msg,
)
],
expected_in_processor=False,
)
def METHOD_NAME(self):
cmd_env = dict(CIB_file=os.path.join(settings.cib_dir, "cib.xml"))
self.config.services.is_running("pacemaker", return_value=False)
for node in self.nodes:
self.config.runner.place(
[
settings.cibadmin_exec,
"--delete-all",
"--force",
f"--xpath=/cib/configuration/nodes/node[@uname='{node}']",
],
name=f"remove_node.{node}",
env=cmd_env,
)
cluster.remove_nodes_from_cib(self.env_assist.get_env(), self.nodes)
def test_failure_pcmk_not_running(self):
err_msg = "an error"
cmd_env = dict(CIB_file=os.path.join(settings.cib_dir, "cib.xml"))
cmd = [settings.cibadmin_exec, "--delete-all", "--force"]
cmd_xpath = "--xpath=/cib/configuration/nodes/node[@uname='{}']"
self.config.services.is_running("pacemaker", return_value=False)
self.config.runner.place(
cmd + [cmd_xpath.format(self.nodes[0])],
name="remove_node_success",
env=cmd_env,
)
self.config.runner.place(
cmd + [cmd_xpath.format(self.nodes[1])],
returncode=1,
stderr=err_msg,
name="remove_node_failure",
env=cmd_env,
)
self.env_assist.assert_raise_library_error(
lambda: cluster.remove_nodes_from_cib(
self.env_assist.get_env(),
self.nodes,
),
[
fixture.error(
report_codes.NODE_REMOVE_IN_PACEMAKER_FAILED,
node_list_to_remove=[self.nodes[1]],
node="",
reason=err_msg,
)
],
expected_in_processor=False,
) | null |
173 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class DescribeImageListWithBaselineNameRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sas', '2018-12-03', 'DescribeImageListWithBaselineName')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Pod(self): # String
return self.get_query_params().get('Pod')
def set_Pod(self, Pod): # String
self.add_query_param('Pod', Pod)
def get_ClusterName(self): # String
return self.get_query_params().get('ClusterName')
def set_ClusterName(self, ClusterName): # String
self.add_query_param('ClusterName', ClusterName)
def get_Criteria(self): # String
return self.get_query_params().get('Criteria')
def set_Criteria(self, Criteria): # String
self.add_query_param('Criteria', Criteria)
def get_RepoNamespace(self): # String
return self.get_query_params().get('RepoNamespace')
def set_RepoNamespace(self, RepoNamespace): # String
self.add_query_param('RepoNamespace', RepoNamespace)
def get_ImageDigest(self): # String
return self.get_query_params().get('ImageDigest')
def set_ImageDigest(self, ImageDigest): # String
self.add_query_param('ImageDigest', ImageDigest)
def get_ScanRanges(self): # RepeatList
return self.get_query_params().get('ScanRange')
def set_ScanRanges(self, ScanRange): # RepeatList
for depth1 in range(len(ScanRange)):
self.add_query_param('ScanRange.' + str(depth1 + 1), ScanRange[depth1])
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_CriteriaType(self): # String
return self.get_query_params().get('CriteriaType')
def set_CriteriaType(self, CriteriaType): # String
self.add_query_param('CriteriaType', CriteriaType)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_Image(self): # String
return self.get_query_params().get('Image')
def set_Image(self, Image): # String
self.add_query_param('Image', Image)
def get_CurrentPage(self): # Integer
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self, CurrentPage): # Integer
self.add_query_param('CurrentPage', CurrentPage)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def METHOD_NAME(self): # String
return self.get_query_params().get('RepoName')
def set_RepoName(self, RepoName): # String
self.add_query_param('RepoName', RepoName)
def get_Namespace(self): # String
return self.get_query_params().get('Namespace')
def set_Namespace(self, Namespace): # String
self.add_query_param('Namespace', Namespace)
def get_BaselineNameKey(self): # String
return self.get_query_params().get('BaselineNameKey')
def set_BaselineNameKey(self, BaselineNameKey): # String
self.add_query_param('BaselineNameKey', BaselineNameKey)
def get_RepoInstanceId(self): # String
return self.get_query_params().get('RepoInstanceId')
def set_RepoInstanceId(self, RepoInstanceId): # String
self.add_query_param('RepoInstanceId', RepoInstanceId)
def get_ContainerId(self): # String
return self.get_query_params().get('ContainerId')
def set_ContainerId(self, ContainerId): # String
self.add_query_param('ContainerId', ContainerId) | null |
174 | # Copyright (c) 2015 - 2023, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import sys
PYTHON_PATHS = [
"/usr/lib/python3.6/site-packages",
"/usr/lib64/python3.6/site-packages"]
for p in PYTHON_PATHS:
if p not in sys.path:
sys.path.insert(0, p)
import os
import json
import copy
import pbs
from geopmdpy import pio
from geopmdpy import system_files
_SAVED_CONTROLS_PATH = "/run/geopm-pbs-hooks/SAVE_FILES"
_SAVED_CONTROLS_FILE = _SAVED_CONTROLS_PATH + "/power-limit-save-control.json"
_POWER_LIMIT_RESOURCE = "geopm-node-power-limit"
_power_limit_control = {
"name": "MSR::PLATFORM_POWER_LIMIT:PL1_POWER_LIMIT",
"domain_type": "board",
"domain_idx": 0,
"setting": None
}
_controls = [
{"name": "MSR::PLATFORM_POWER_LIMIT:PL1_TIME_WINDOW",
"domain_type": "board",
"domain_idx": 0,
"setting": 0.013}, # SDM Vol. 4. Table 2.39 - Recommends 0xD = 13
{"name": "MSR::PLATFORM_POWER_LIMIT:PL1_CLAMP_ENABLE",
"domain_type": "board",
"domain_idx": 0,
"setting": 1},
_power_limit_control,
{"name": "MSR::PLATFORM_POWER_LIMIT:PL1_LIMIT_ENABLE",
"domain_type": "board",
"domain_idx": 0,
"setting": 1}
]
def read_controls(controls):
try:
for c in controls:
c["setting"] = pio.read_signal(c["name"], c["domain_type"],
c["domain_idx"])
except RuntimeError as e:
reject_event(f"Unable to read signal {c['name']}: {e}")
def write_controls(controls):
try:
for c in controls:
pio.write_control(c["name"], c["domain_type"], c["domain_idx"],
c["setting"])
except RuntimeError as e:
reject_event(f"Unable to write control {c['name']}: {e}")
def resource_to_float(resource_name, resource_str):
try:
value = float(resource_str)
return value
except ValueError:
reject_event(f"Invalid value provided for: {resource_name}")
def METHOD_NAME(file_name, controls):
try:
with open(file_name, "w") as f:
f.write(json.dumps(controls))
except (OSError, ValueError) as e:
reject_event(f"Unable to write to saved controls file: {e}")
def reject_event(msg):
e = pbs.event()
e.job.delete()
e.reject(f"{e.hook_name}: {msg}")
def restore_controls_from_file(file_name):
controls_json = None
try:
with open(file_name) as f:
controls_json = f.read()
except (OSError, ValueError) as e:
reject_event(f"Unable to read saved controls file: {e}")
try:
controls = json.loads(controls_json)
if not controls:
reject_event("Encountered empty saved controls file")
write_controls(controls)
except (json.decoder.JSONDecodeError, KeyError, TypeError) as e:
reject_event(f"Malformed saved controls file: {e}")
os.unlink(file_name)
def do_power_limit_prologue():
e = pbs.event()
job_id = e.job.id
server_job = pbs.server().job(job_id)
power_limit_requested = False
try:
power_limit_str = server_job.Resource_List[_POWER_LIMIT_RESOURCE]
power_limit_requested = bool(power_limit_str)
except KeyError:
pass
if not power_limit_requested:
if os.path.exists(_SAVED_CONTROLS_FILE):
restore_controls_from_file(_SAVED_CONTROLS_FILE)
e.accept()
return
power_limit = resource_to_float(_POWER_LIMIT_RESOURCE, power_limit_str)
pbs.logmsg(pbs.LOG_DEBUG, f"{e.hook_name}: Requested power limit: {power_limit}")
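    # Snapshot the current controls before overwriting them so the epilogue can restore this state.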
current_settings = copy.deepcopy(_controls)
read_controls(current_settings)
system_files.secure_make_dirs(_SAVED_CONTROLS_PATH)
METHOD_NAME(_SAVED_CONTROLS_FILE, current_settings)
_power_limit_control["setting"] = power_limit
write_controls(_controls)
e.accept()
def do_power_limit_epilogue():
e = pbs.event()
if os.path.exists(_SAVED_CONTROLS_FILE):
restore_controls_from_file(_SAVED_CONTROLS_FILE)
e.accept()
def hook_main():
try:
event_type = pbs.event().type
if event_type == pbs.EXECJOB_PROLOGUE:
do_power_limit_prologue()
elif event_type == pbs.EXECJOB_EPILOGUE:
do_power_limit_epilogue()
else:
reject_event("Power limit hook incorrectly configured!")
except SystemExit:
pass
except:
_, e, _ = sys.exc_info()
reject_event(f"Unexpected error: {str(e)}")
# Begin hook...
hook_main() | null |
175 | import time
def _wait_and_get_responses(job_ids, batch_client):
"""Waits on all given jobs to terminate; returns the job descriptions."""
job_responses = {}
for job in job_ids:
terminated = False
while not terminated:
response = batch_client.describe_jobs(jobs=[job])
job_response = response["jobs"][0]
status = job_response["status"]
if status in {"FAILED", "SUCCEEDED"}:
job_responses[job] = job_response
terminated = True
else:
time.sleep(1)
return job_responses
def _get_job_stats(job_responses):
"""Computes and returns some statistics about the given jobs."""
usage_interval_per_instance = {}
stats = {
"num_jobs": 0,
"num_succeeded": 0,
"num_retried": 0,
"num_instances": 0,
"max_duration_sec": 0,
"total_duration_hr": 0,
}
for job_id, resp in job_responses.items():
status = resp["status"]
attempts = resp["attempts"]
job_sec = 0
for attempt in attempts:
started_at = int(attempt["startedAt"])
stopped_at = int(attempt["stoppedAt"])
instance = attempt["container"]["containerInstanceArn"]
job_sec = (stopped_at - started_at) / 1e3
if instance not in usage_interval_per_instance:
usage_interval_per_instance[instance] = (started_at, stopped_at)
else:
curr = usage_interval_per_instance[instance]
usage_interval_per_instance[instance] = (
min(curr[0], started_at),
max(curr[1], stopped_at),
)
stats["max_duration_sec"] = max(stats["max_duration_sec"], job_sec)
stats["num_retried"] += 1 if len(attempts) > 1 else 0
stats["num_succeeded"] += 1 if status == "SUCCEEDED" else 0
stats["num_jobs"] += 1
stats["num_instances"] = len(usage_interval_per_instance.keys())
# Compute aggregate stats.
for instance, interval in usage_interval_per_instance.items():
# The interval is a pair (earliest-job-start, latest-job-stop).
duration_sec = (interval[1] - interval[0]) / 1e3
duration_hr = duration_sec / 3600.0
stats["total_duration_hr"] += duration_hr
return stats
def METHOD_NAME(stats, instance_price_per_hr=1.00):
"""Prints a report from the given stats."""
print("Finished waiting on {} jobs:".format(stats["num_jobs"]))
print("- {} / {} jobs succeeded".format(stats["num_succeeded"], stats["num_jobs"]))
print("- {} jobs had more than 1 attempt.".format(stats["num_retried"]))
print("- {} instances used.".format(stats["num_instances"]))
print(
"- Longest job took {:.3f} seconds (includes retries).".format(
stats["max_duration_sec"]
)
)
# Note on the cost estimate. For each job attempt, we note the instance ID
# it executed on. For every instance, we compute a "usage interval" of
# (earliest-job-start, latest-job-stop). This interval is used to compute
# a duration per instance, which is aggregated into a duration for the total
# compute used.
print(
"- Total cost estimate ${:.2f} ({:.2f} hours @ ${:.4f} / hr)".format(
stats["total_duration_hr"] * instance_price_per_hr,
stats["total_duration_hr"],
instance_price_per_hr,
)
)
def wait_all(job_ids, batch_client, verbose=True, job_name=None):
"""Waits for all given jobs to terminate; returns stats about the jobs."""
if verbose:
msg = "Waiting on {} jobs to terminate...".format(len(job_ids))
if job_name is not None:
msg += " [{}]".format(job_name)
print(msg)
# Wait for all jobs to finish.
job_responses = _wait_and_get_responses(job_ids, batch_client)
# Process response data.
stats = _get_job_stats(job_responses)
# Check for warning.
all_succeeded = stats["num_succeeded"] == stats["num_jobs"]
if verbose and not all_succeeded:
print("\nWARNING: one or more jobs failed.")
return stats | null |
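A minimal driving sketch (job IDs are placeholders; assumes boto3 credentials are configured for AWS Batch):

    import boto3

    batch = boto3.client('batch')
    stats = wait_all(['job-id-1', 'job-id-2'], batch, job_name='example')
    print('{} of {} jobs succeeded'.format(stats['num_succeeded'], stats['num_jobs']))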
176 | # Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common_utils.pipe_cli import pipe_tag_get, pipe_tag_set, pipe_tag_delete
OBJECT_1 = ('key_1', 'value_1', 'string')
OBJECT_2 = ('key_2', 'value_2', 'string')
OBJECT_3 = ('key_3', 'value_3', 'string')
OBJECT_4 = ('key_4', 'value_4', 'string')
NON_EXISTING_OBJECT = ('non-existing', 'value_2', 'string')
NEW_OBJECT_2 = ('key_2', 'new_value', 'string')
def access_denied(entity_class, entity_identifier, token):
expected_tags = prepare_for_role_model_test(entity_class, entity_identifier)
# try to get tags for object
error_text = pipe_tag_get(entity_class, entity_identifier, token=token)[1]
assert_access_is_denied(entity_class, entity_identifier, error_text, expected_tags)
access_is_denied_for_write_operations(entity_class, entity_identifier, expected_tags, token)
def prepare_for_role_model_test(entity_class, entity_identifier):
expected_tags = list()
expected_tags.append(OBJECT_1)
expected_tags.append(OBJECT_2)
# create tags
set_result = pipe_tag_set(entity_class, entity_identifier, args=METHOD_NAME(expected_tags))
assert not set_result[1]
assert set_result[0][0] == "Metadata for {} {} updated.".format(entity_class, entity_identifier)
get_result = pipe_tag_get(entity_class, entity_identifier)
result_tags = parse_output_table(get_result[0])
assert not get_result[1]
assert result_tags == expected_tags
return expected_tags
def access_is_denied_for_write_operations(entity_class, entity_identifier, expected_tags, token):
# try to create tags for object
error_text = pipe_tag_set(entity_class, entity_identifier, token=token,
args=METHOD_NAME(list(OBJECT_3)))[1]
assert_access_is_denied(entity_class, entity_identifier, error_text, expected_tags)
# try to delete keys for object
keys = convert_list_to_input_keys(expected_tags)
error_text = pipe_tag_delete(entity_class, entity_identifier, args=keys, token=token)[1]
assert_access_is_denied(entity_class, entity_identifier, error_text, expected_tags)
# try to update existing key for object
error_text = pipe_tag_set(entity_class, entity_identifier, token=token,
args=METHOD_NAME(list(NEW_OBJECT_2)))[1]
assert_access_is_denied(entity_class, entity_identifier, error_text, expected_tags)
# try to delete all data for object
error_text = pipe_tag_delete(entity_class, entity_identifier, token=token)[1]
assert_access_is_denied(entity_class, entity_identifier, error_text, expected_tags)
def crud_metadata(entity_class, entity_identifier, token=None):
tags_to_set = list()
tags_to_set.append(OBJECT_1)
tags_to_set.append(OBJECT_2)
# create tags
set_result = pipe_tag_set(entity_class, entity_identifier, token=token,
args=METHOD_NAME(tags_to_set))
assert not set_result[1]
assert set_result[0][0] == "Metadata for {} {} updated.".format(entity_class, entity_identifier)
assert_tags(entity_class, entity_identifier, tags_to_set, token)
# delete keys
tags_to_set.remove(OBJECT_2)
keys = convert_list_to_input_keys(tags_to_set)
delete_result = pipe_tag_delete(entity_class, entity_identifier, token=token, args=keys)
tags_to_set.remove(OBJECT_1)
tags_to_set.append(OBJECT_2)
assert not delete_result[1]
assert delete_result[0][0] == "Deleted keys from metadata for {} {}: {}" \
.format(entity_class, entity_identifier, ', '.join(keys))
assert_tags(entity_class, entity_identifier, tags_to_set, token)
# update existing key
tags_to_set.append(NEW_OBJECT_2)
tags_to_set.remove(OBJECT_2)
set_result = pipe_tag_set(entity_class, entity_identifier, token=token,
args=METHOD_NAME(tags_to_set))
assert not set_result[1]
assert set_result[0][0] == "Metadata for {} {} updated.".format(entity_class, entity_identifier)
assert_tags(entity_class, entity_identifier, tags_to_set, token)
# add new keys
tags_to_set.remove(NEW_OBJECT_2)
tags_to_set.append(OBJECT_4)
tags_to_set.append(OBJECT_3)
set_result = pipe_tag_set(entity_class, entity_identifier, token=token,
args=METHOD_NAME(tags_to_set))
assert not set_result[1]
assert set_result[0][0] == "Metadata for {} {} updated.".format(entity_class, entity_identifier)
tags_to_set.append(NEW_OBJECT_2)
assert_tags(entity_class, entity_identifier, tags_to_set, token)
# delete all data
delete_result = pipe_tag_delete(entity_class, entity_identifier, token=token)
assert not delete_result[1]
assert delete_result[0][0] == "Metadata for {} {} deleted.".format(entity_class, entity_identifier)
get_result = pipe_tag_get(entity_class, entity_identifier, token=token)
assert not get_result[1]
assert get_result[0][0] == "No metadata available for {} {}.".format(entity_class, entity_identifier)
# could not delete non existing metadata
delete_result = pipe_tag_delete(entity_class, entity_identifier, token=token)
assert "Error: Failed to fetch data from server. Server responded with message:". \
format(entity_identifier, str(entity_class).upper()) in delete_result[1][0]
def assert_tags(entity_class, entity_identifier, tags_to_set, token):
get_result = pipe_tag_get(entity_class, entity_identifier, token=token)
result_tags = parse_output_table(get_result[0])
assert not get_result[1]
assert result_tags == tags_to_set
def assert_access_is_denied(entity_class, entity_identifier, error_text, expected_tags):
assert 'Access is denied' in error_text[0]
get_result = pipe_tag_get(entity_class, entity_identifier)
assert not get_result[1]
result_tags = parse_output_table(get_result[0])
assert result_tags == expected_tags
def parse_output_table(get_result):
if len(get_result) == 1:
assert "No metadata available for" in get_result[0]
return list()
get_result.pop(0)
get_result.pop(0)
get_result.pop(0) # skip header
result_tags = list()
for item in get_result:
        splitted = list(filter(None, item.replace(" ", "").split("|")))
if not str(splitted[0]).startswith("+"):
result_tags.append((splitted[0], splitted[1], splitted[2]))
return result_tags
def METHOD_NAME(tags):
result = list()
for tag in tags:
result.append('='.join([tag[0], tag[1]]))
return result
def convert_list_to_input_keys(tags):
result = list()
for tag in tags:
result.append(tag[0])
return result | null |
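A minimal sketch of exercising the helpers above; the entity class, identifier, and token are placeholders for whatever the surrounding test suite supplies:

    crud_metadata('pipeline', '<pipeline-id>')
    access_denied('pipeline', '<pipeline-id>', token='<user-token>')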
177 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkalb.endpoint import endpoint_data
class CreateHealthCheckTemplateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Alb', '2020-06-16', 'CreateHealthCheckTemplate','alb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_HealthCheckTimeout(self): # Integer
return self.get_query_params().get('HealthCheckTimeout')
def set_HealthCheckTimeout(self, HealthCheckTimeout): # Integer
self.add_query_param('HealthCheckTimeout', HealthCheckTimeout)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_HealthCheckProtocol(self): # String
return self.get_query_params().get('HealthCheckProtocol')
def set_HealthCheckProtocol(self, HealthCheckProtocol): # String
self.add_query_param('HealthCheckProtocol', HealthCheckProtocol)
def get_UnhealthyThreshold(self): # Integer
return self.get_query_params().get('UnhealthyThreshold')
def set_UnhealthyThreshold(self, UnhealthyThreshold): # Integer
self.add_query_param('UnhealthyThreshold', UnhealthyThreshold)
def get_HealthyThreshold(self): # Integer
return self.get_query_params().get('HealthyThreshold')
def set_HealthyThreshold(self, HealthyThreshold): # Integer
self.add_query_param('HealthyThreshold', HealthyThreshold)
def get_HealthCheckPath(self): # String
return self.get_query_params().get('HealthCheckPath')
def set_HealthCheckPath(self, HealthCheckPath): # String
self.add_query_param('HealthCheckPath', HealthCheckPath)
def get_HealthCheckCodes(self): # Array
return self.get_query_params().get('HealthCheckCodes')
def set_HealthCheckCodes(self, HealthCheckCodes): # Array
for index1, value1 in enumerate(HealthCheckCodes):
self.add_query_param('HealthCheckCodes.' + str(index1 + 1), value1)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_HealthCheckMethod(self): # String
return self.get_query_params().get('HealthCheckMethod')
def set_HealthCheckMethod(self, HealthCheckMethod): # String
self.add_query_param('HealthCheckMethod', HealthCheckMethod)
def get_HealthCheckHost(self): # String
return self.get_query_params().get('HealthCheckHost')
def set_HealthCheckHost(self, HealthCheckHost): # String
self.add_query_param('HealthCheckHost', HealthCheckHost)
def get_HealthCheckInterval(self): # Integer
return self.get_query_params().get('HealthCheckInterval')
def set_HealthCheckInterval(self, HealthCheckInterval): # Integer
self.add_query_param('HealthCheckInterval', HealthCheckInterval)
def get_HealthCheckTemplateName(self): # String
return self.get_query_params().get('HealthCheckTemplateName')
def set_HealthCheckTemplateName(self, HealthCheckTemplateName): # String
self.add_query_param('HealthCheckTemplateName', HealthCheckTemplateName)
def METHOD_NAME(self): # String
return self.get_query_params().get('HealthCheckHttpVersion')
def set_HealthCheckHttpVersion(self, HealthCheckHttpVersion): # String
self.add_query_param('HealthCheckHttpVersion', HealthCheckHttpVersion)
def get_HealthCheckConnectPort(self): # Integer
return self.get_query_params().get('HealthCheckConnectPort')
def set_HealthCheckConnectPort(self, HealthCheckConnectPort): # Integer
self.add_query_param('HealthCheckConnectPort', HealthCheckConnectPort) | null |
178 | """Collection of compose pipelines for segmentation task."""
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from collections.abc import Sequence
from copy import deepcopy
import numpy as np
from mmcv.utils import build_from_cfg
from mmseg.datasets.builder import PIPELINES
from scipy.ndimage import gaussian_filter
# pylint: disable=consider-using-f-string
@PIPELINES.register_module()
class ProbCompose:
"""Compose pipelines in a list and enable or disable them with the probability."""
def __init__(self, transforms, probs):
assert isinstance(transforms, Sequence)
assert isinstance(probs, Sequence)
assert len(transforms) == len(probs)
assert all(p >= 0.0 for p in probs)
sum_probs = float(sum(probs))
assert sum_probs > 0.0
norm_probs = [float(p) / sum_probs for p in probs]
self.limits = np.cumsum([0.0] + norm_probs)
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = build_from_cfg(transform, PIPELINES)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError(f"transform must be callable or a dict, but got {type(transform)}")
def __call__(self, data):
"""Callback function of ProbCompose."""
rand_value = np.random.rand()
transform_id = np.max(np.where(rand_value > self.limits)[0])
transform = self.transforms[transform_id]
data = transform(data)
return data
def __repr__(self):
"""Repr."""
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += f" {t}"
format_string += "\n)"
return format_string
@PIPELINES.register_module()
class MaskCompose:
"""Compose mask-related pipelines in a list and enable or disable them with the probability."""
def __init__(self, transforms, prob, lambda_limits=(4, 16), keep_original=False):
self.keep_original = keep_original
self.prob = prob
assert 0.0 <= self.prob <= 1.0
assert isinstance(lambda_limits, Sequence)
assert len(lambda_limits) == 2
assert 0.0 < lambda_limits[0] < lambda_limits[1]
self.lambda_limits = lambda_limits
assert isinstance(transforms, Sequence)
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = build_from_cfg(transform, PIPELINES)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError(f"transform must be callable or a dict, but got {type(transform)}")
@staticmethod
def _apply_transforms(data, transforms):
for t in transforms:
data = t(data)
if data is None:
return None
return data
@staticmethod
def METHOD_NAME(shape, lambda_limits):
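        # Smooth white noise with a random-width Gaussian, then threshold at the median so roughly half the pixels are True.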
noise = np.random.randn(*shape)
sigma = np.exp(np.log10(np.random.uniform(lambda_limits[0], lambda_limits[1])))
soft_mask = gaussian_filter(noise, sigma=sigma)
threshold = np.median(soft_mask)
hard_mask = soft_mask > threshold
return hard_mask
@staticmethod
def _mix_img(main_img, aux_img, mask):
return np.where(np.expand_dims(mask, axis=2), main_img, aux_img)
def __call__(self, data):
"""Callback function of MaskCompose."""
main_data = self._apply_transforms(deepcopy(data), self.transforms)
assert main_data is not None
if not self.keep_original and np.random.rand() > self.prob:
return main_data
aux_data = self._apply_transforms(deepcopy(data), self.transforms)
assert aux_data is not None
assert main_data["img"].shape == aux_data["img"].shape
mask = self.METHOD_NAME(main_data["img"].shape[:2], self.lambda_limits)
mixed_img = self._mix_img(main_data["img"], aux_data["img"], mask)
if self.keep_original:
main_data["aux_img"] = mixed_img
else:
main_data["img"] = mixed_img
return main_data
def __repr__(self):
"""Repr."""
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += f" {t}"
format_string += "\n)"
return format_string | null |
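A minimal configuration sketch for ProbCompose; the transform entries are illustrative mmseg pipeline configs:

    pipeline = ProbCompose(
        transforms=[
            dict(type='RandomFlip', prob=1.0),
            dict(type='PhotoMetricDistortion'),
        ],
        probs=[0.5, 0.5],
    )
    # Each call applies exactly one of the two transforms, drawn with the normalized probabilities.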
179 | # coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset collection catalog documentation template."""
import abc
import textwrap
from typing import List
import tensorflow_datasets as tfds
from tensorflow_datasets.scripts.documentation import dataset_markdown_builder as dmb
from tensorflow_datasets.scripts.documentation import doc_utils
# Token used to indicate the section shouldn't be displayed
_SKIP_SECTION = object()
class CollectionSection(abc.ABC):
"""Abstract class for a documentation Section (description, homepage, ...)."""
NAME: str
EXTRA_DOC: str = ''
@abc.abstractmethod
def get_key(self, loader: tfds.core.DatasetCollectionLoader) -> dmb.Key:
"""Get the key of the section.
The key is used to merge similar sections across builder configs. For
instance, https://www.tensorflow.org/datasets/catalog/wiki40b only display
once the `FeatureDict`, homepage,... as those sections are the same for
all configs.
Args:
loader: The loader of the DatasetCollection to document.
Returns:
key: The section key.
"""
pass
@abc.abstractmethod
def content(self, loader: tfds.core.DatasetCollectionLoader) -> str:
"""Returns the section content."""
pass
def display(self, loader: tfds.core.DatasetCollectionLoader) -> str:
"""Returns the section content."""
content = self.content(loader)
if content is _SKIP_SECTION:
return ''
header = f'* **{self.NAME}**{self.EXTRA_DOC}'
is_block = isinstance(content, dmb.Block)
if not isinstance(content, dmb.IntentedBlock):
content = content.strip() # Note: `strip()` cast `Block` -> `str`
if is_block:
content = f'{header}:\n\n{content}\n\n'
else:
content = f'{header}: {content}\n\n'
return content
# ------------------------------ Sections -------------------------------------
# pylint:disable = missing-class-docstring
class CollectionHomepageSection(CollectionSection):
NAME = 'Homepage'
def get_key(self, loader: tfds.core.DatasetCollectionLoader):
return loader.collection.info.homepage
def content(self, loader: tfds.core.DatasetCollectionLoader):
homepage = self.get_key(loader)
if homepage:
homepage = doc_utils.format_homepage_url(homepage)
else:
homepage = ''
return dmb.Block(homepage)
class CollectionDescriptionSection(CollectionSection):
NAME = 'Description'
def get_key(self, loader: tfds.core.DatasetCollectionLoader):
return loader.collection.info.description
def content(self, loader: tfds.core.DatasetCollectionLoader):
return dmb.Block(loader.collection.info.description)
class CollectionVersionSection(CollectionSection):
"""Lists versions in the dataset collection, along with their datasets."""
NAME = 'Versions'
def _list_versions(self, loader: tfds.core.DatasetCollectionLoader):
"""List versions."""
# Default version is the latest.
default_version = loader.collection.get_latest_version()
all_versions = [
*loader.collection.info.release_notes,
*loader.collection.datasets,
]
all_versions_and_notes = loader.collection.info.release_notes
all_versions = set(tfds.core.Version(v) for v in all_versions)
for v in sorted(all_versions): # List all available versions.
if v == default_version: # Highlight the default version.
version_name = '**`{}`** (default)'.format(str(v))
else:
version_name = '`{}`'.format(str(v))
description = all_versions_and_notes.get(str(v), 'No release notes.')
yield f' * {version_name}: {description}'
def get_key(self, loader: tfds.core.DatasetCollectionLoader):
release_key = tuple(
(k, v) for k, v in loader.collection.info.release_notes.items()
)
return (tuple(loader.collection.all_versions), release_key)
def content(self, loader: tfds.core.DatasetCollectionLoader):
return dmb.IntentedBlock('\n'.join(self._list_versions(loader)))
class CollectionDatasetsSection(CollectionSection):
NAME = 'Datasets in the default version'
def get_key(self, loader: tfds.core.DatasetCollectionLoader):
return loader.collection.get_latest_version()
def METHOD_NAME(self, reference):
"""Returns the link to a dataset documentation in TFDS catalog."""
prefix = 'https://www.tensorflow.org/datasets/catalog/'
# Links to TFDS catalog's entries don't include versions.
ds_link = prefix + reference.dataset_name
# For configs, return precise link to config documentation.
if reference.config:
ds_link += f'#{reference.config}'
return ds_link
def _list_datasets(
self, collection_loader: tfds.core.DatasetCollectionLoader
):
"""List datasets for the latest version."""
# get_collection retrieves the datasets in the default (= latest) version.
datasets = collection_loader.collection.get_collection()
for name, reference in datasets.items():
ds_link = self.METHOD_NAME(reference)
yield f' * `{name}`: [`{reference.tfds_name()}`]({ds_link})'
def content(self, loader: tfds.core.DatasetCollectionLoader):
return dmb.IntentedBlock('\n'.join(self._list_datasets(loader)))
class CollectionCitationSection(CollectionSection):
NAME = 'Citation'
def get_key(self, loader: tfds.core.DatasetCollectionLoader):
return loader.collection.info.citation
def content(self, loader: tfds.core.DatasetCollectionLoader):
if not loader.collection.info.citation:
return ''
return dmb.Block(
textwrap.dedent(
f"""
```
{tfds.core.utils.indent(loader.collection.info.citation, ' ')}
```
"""
)
)
# --------------------------- Main page ---------------------------
def _display_collection_heading(
collection: tfds.core.DatasetCollectionLoader,
) -> str:
return f"""
# `{collection.collection_name}`
"""
def _display_collection_sections(
loader: tfds.core.DatasetCollectionLoader,
all_sections: List[CollectionSection],
) -> str:
return ''.join([section.display(loader) for section in all_sections])
def get_collection_markdown_string(
*,
collection: tfds.core.DatasetCollectionLoader,
) -> str:
"""Build the collection markdown."""
all_sections = [
CollectionDescriptionSection(),
CollectionHomepageSection(),
CollectionVersionSection(),
CollectionDatasetsSection(),
CollectionCitationSection(),
]
doc_str = [
_display_collection_heading(collection),
_display_collection_sections(
loader=collection, all_sections=all_sections
),
]
return '\n\n'.join([tfds.core.utils.dedent(s) for s in doc_str if s]) | null |
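A minimal sketch of rendering one collection page; the collection name is illustrative and tfds.dataset_collection is the public loader entry point:

    loader = tfds.dataset_collection('xtreme')
    print(get_collection_markdown_string(collection=loader))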
180 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbr.endpoint import endpoint_data
import json
class UpdateBackupPlanRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'hbr', '2017-09-08', 'UpdateBackupPlan')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_VaultId(self): # String
return self.get_query_params().get('VaultId')
def set_VaultId(self, VaultId): # String
self.add_query_param('VaultId', VaultId)
def get_Prefix(self): # String
return self.get_query_params().get('Prefix')
def set_Prefix(self, Prefix): # String
self.add_query_param('Prefix', Prefix)
def get_Rules(self): # RepeatList
return self.get_body_params().get('Rule')
def set_Rules(self, Rule): # RepeatList
for depth1 in range(len(Rule)):
if Rule[depth1].get('Schedule') is not None:
self.add_body_params('Rule.' + str(depth1 + 1) + '.Schedule', Rule[depth1].get('Schedule'))
if Rule[depth1].get('DestinationRegionId') is not None:
self.add_body_params('Rule.' + str(depth1 + 1) + '.DestinationRegionId', Rule[depth1].get('DestinationRegionId'))
if Rule[depth1].get('Disabled') is not None:
self.add_body_params('Rule.' + str(depth1 + 1) + '.Disabled', Rule[depth1].get('Disabled'))
if Rule[depth1].get('RuleName') is not None:
self.add_body_params('Rule.' + str(depth1 + 1) + '.RuleName', Rule[depth1].get('RuleName'))
if Rule[depth1].get('DestinationRetention') is not None:
self.add_body_params('Rule.' + str(depth1 + 1) + '.DestinationRetention', Rule[depth1].get('DestinationRetention'))
if Rule[depth1].get('Retention') is not None:
self.add_body_params('Rule.' + str(depth1 + 1) + '.Retention', Rule[depth1].get('Retention'))
if Rule[depth1].get('BackupType') is not None:
self.add_body_params('Rule.' + str(depth1 + 1) + '.BackupType', Rule[depth1].get('BackupType'))
if Rule[depth1].get('DoCopy') is not None:
self.add_body_params('Rule.' + str(depth1 + 1) + '.DoCopy', Rule[depth1].get('DoCopy'))
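    # Sketch of the flattening (hypothetical values): calling
    #   request.set_Rules([{'Schedule': 'I|1602673264|P1D', 'Retention': 7}])
    # produces the body parameters Rule.1.Schedule and Rule.1.Retention,
    # following the RepeatList convention used by the HBR API.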
def get_Paths(self): # RepeatList
return self.get_query_params().get('Path')
def set_Paths(self, Path): # RepeatList
for depth1 in range(len(Path)):
self.add_query_param('Path.' + str(depth1 + 1), Path[depth1])
def get_PlanName(self): # String
return self.get_query_params().get('PlanName')
def set_PlanName(self, PlanName): # String
self.add_query_param('PlanName', PlanName)
def get_Options(self): # String
return self.get_body_params().get('Options')
def set_Options(self, Options): # String
self.add_body_params('Options', Options)
def get_SourceType(self): # String
return self.get_query_params().get('SourceType')
def set_SourceType(self, SourceType): # String
self.add_query_param('SourceType', SourceType)
def get_Exclude(self): # String
return self.get_body_params().get('Exclude')
def set_Exclude(self, Exclude): # String
self.add_body_params('Exclude', Exclude)
def get_Retention(self): # Long
return self.get_query_params().get('Retention')
def set_Retention(self, Retention): # Long
self.add_query_param('Retention', Retention)
def METHOD_NAME(self): # String
return self.get_body_params().get('Include')
def set_Include(self, Include): # String
self.add_body_params('Include', Include)
def get_KeepLatestSnapshots(self): # Long
return self.get_query_params().get('KeepLatestSnapshots')
def set_KeepLatestSnapshots(self, KeepLatestSnapshots): # Long
self.add_query_param('KeepLatestSnapshots', KeepLatestSnapshots)
def get_Schedule(self): # String
return self.get_query_params().get('Schedule')
def set_Schedule(self, Schedule): # String
self.add_query_param('Schedule', Schedule)
def get_OtsDetail(self): # Struct
return self.get_body_params().get('OtsDetail')
def set_OtsDetail(self, OtsDetail): # Struct
self.add_body_params("OtsDetail", json.dumps(OtsDetail))
def get_SpeedLimit(self): # String
return self.get_query_params().get('SpeedLimit')
def set_SpeedLimit(self, SpeedLimit): # String
self.add_query_param('SpeedLimit', SpeedLimit)
def get_PlanId(self): # String
return self.get_query_params().get('PlanId')
def set_PlanId(self, PlanId): # String
self.add_query_param('PlanId', PlanId)
def get_Detail(self): # String
return self.get_query_params().get('Detail')
def set_Detail(self, Detail): # String
self.add_query_param('Detail', Detail)
def get_UpdatePaths(self): # Boolean
return self.get_query_params().get('UpdatePaths')
def set_UpdatePaths(self, UpdatePaths): # Boolean
self.add_query_param('UpdatePaths', UpdatePaths) | null |
181 | from __future__ import annotations
import logging
import os
from xia2.Decorators.DecoratorFactory import DecoratorFactory
from xia2.Driver.DriverFactory import DriverFactory
from xia2.Handlers.Phil import PhilIndex
from xia2.lib.bits import transpose_loggraph
from xia2.Wrappers.CCP4.Ctruncate import Ctruncate
from xia2.Wrappers.XIA.FrenchWilson import FrenchWilson
logger = logging.getLogger("xia2.Wrappers.CCP4.Truncate")
def METHOD_NAME(DriverType=None):
"""A factory for TruncateWrapper classes."""
if PhilIndex.params.ccp4.truncate.program == "ctruncate":
return Ctruncate(DriverType)
elif PhilIndex.params.ccp4.truncate.program == "cctbx":
return FrenchWilson(DriverType)
DriverInstance = DriverFactory.Driver(DriverType)
CCP4DriverInstance = DecoratorFactory.Decorate(DriverInstance, "ccp4")
class TruncateWrapper(CCP4DriverInstance.__class__):
"""A wrapper for Truncate, using the CCP4-ified Driver."""
def __init__(self):
# generic things
CCP4DriverInstance.__class__.__init__(self)
self.set_executable(os.path.join(os.environ.get("CBIN", ""), "truncate"))
self._anomalous = False
self._nres = 0
# should we do wilson scaling?
self._wilson = True
self._b_factor = 0.0
self._moments = None
self._wilson_fit_grad = 0.0
self._wilson_fit_grad_sd = 0.0
self._wilson_fit_m = 0.0
self._wilson_fit_m_sd = 0.0
self._wilson_fit_range = None
# numbers of reflections in and out, and number of absences
# counted
self._nref_in = 0
self._nref_out = 0
self._nabsent = 0
self._xmlout = None
def set_anomalous(self, anomalous):
self._anomalous = anomalous
def set_wilson(self, wilson):
"""Set the use of Wilson scaling - if you set this to False
Wilson scaling will be switched off..."""
self._wilson = wilson
def get_xmlout(self):
return self._xmlout
def truncate(self):
"""Actually perform the truncation procedure."""
self.check_hklin()
self.check_hklout()
self.start()
if self._anomalous:
self.input("anomalous yes")
else:
self.input("anomalous no")
if self._nres:
self.input("nres %d" % self._nres)
if not self._wilson:
self.input("scale 1")
self.close_wait()
try:
self.check_for_errors()
self.check_ccp4_errors()
except RuntimeError:
try:
os.remove(self.get_hklout())
except Exception:
pass
raise RuntimeError("truncate failure")
# parse the output for interesting things, including the
# numbers of reflections in and out (isn't that a standard CCP4
# report?) and the number of absent reflections.
self._nref_in, self._nref_out = self.read_nref_hklin_hklout(
self.get_all_output()
)
# FIXME guess I should be reading this properly...
self._nabsent = self._nref_in - self._nref_out
for line in self.get_all_output():
if "Least squares straight line gives" in line:
                    # avoid shadowing the built-in list(), which is called
                    # further down in this loop
                    values = line.replace("=", " ").split()
                    if "***" not in values[6]:
                        self._b_factor = float(values[6])
else:
logger.debug("no B factor available")
if "LSQ Line Gradient" in line:
self._wilson_fit_grad = float(line.split()[-1])
if self._wilson_fit_grad > 0:
logger.debug("Positive gradient but not much wilson plot")
if "Uncertainty in Gradient" in line:
self._wilson_fit_grad_sd = float(line.split()[-1])
if "X Intercept" in line:
self._wilson_fit_m = float(line.split()[-1])
if "Uncertainty in Intercept" in line:
self._wilson_fit_m_sd = float(line.split()[-1])
if "Resolution range" in line:
self._wilson_fit_range = list(map(float, line.split()[-2:]))
results = self.parse_ccp4_loggraph()
moments = transpose_loggraph(
results["Acentric Moments of E for k = 1,3,4,6,8"]
)
# keys we want in this are "Resln_Range" "1/resol^2" and
# MomentZ2. The last of these should be around two, but is
# likely to be a little different to this.
self._moments = moments
def get_b_factor(self):
return self._b_factor
def get_wilson_fit(self):
return (
self._wilson_fit_grad,
self._wilson_fit_grad_sd,
self._wilson_fit_m,
self._wilson_fit_m_sd,
)
def get_wilson_fit_range(self):
return self._wilson_fit_range
def get_moments(self):
return self._moments
def get_nref_in(self):
return self._nref_in
def get_nref_out(self):
return self._nref_out
def get_nabsent(self):
return self._nabsent
def read_nref_hklin_hklout(self, records):
"""Look to see how many reflections came in through HKLIN, and
how many went out again in HKLOUT."""
nref_in = 0
nref_out = 0
current_logical = None
for record in records:
if "Logical Name" in record:
current_logical = record.split()[2]
assert current_logical in ["HKLIN", "HKLOUT", "SYMINFO"]
if "Number of Reflections" in record:
if current_logical == "HKLIN":
nref_in = int(record.split()[-1])
elif current_logical == "HKLOUT":
nref_out = int(record.split()[-1])
return nref_in, nref_out
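        # Illustrative sketch (not verbatim CCP4 output) of the records the
        # method above expects:
        #   Logical Name: HKLIN   Filename: truncate_in.mtz
        #   ...
        #   Number of Reflections = 12345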
return TruncateWrapper() | null |
182 | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Wallet encryption"""
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
assert_greater_than,
assert_greater_than_or_equal,
)
class WalletEncryptionTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def METHOD_NAME(self):
self.skip_if_no_wallet()
def run_test(self):
passphrase = "WalletPassphrase"
passphrase2 = "SecondWalletPassphrase"
# Make sure the wallet isn't encrypted first
address = self.nodes[0].getnewaddress()
privkey = self.nodes[0].dumpprivkey(address)
assert_equal(privkey[:1], "c")
assert_equal(len(privkey), 52)
assert_raises_rpc_error(-15, "Error: running with an unencrypted wallet, but walletpassphrase was called", self.nodes[0].walletpassphrase, 'ff', 1)
assert_raises_rpc_error(-15, "Error: running with an unencrypted wallet, but walletpassphrasechange was called.", self.nodes[0].walletpassphrasechange, 'ff', 'ff')
# Encrypt the wallet
assert_raises_rpc_error(-8, "passphrase can not be empty", self.nodes[0].encryptwallet, '')
self.nodes[0].encryptwallet(passphrase)
# Test that the wallet is encrypted
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
assert_raises_rpc_error(-15, "Error: running with an encrypted wallet, but encryptwallet was called.", self.nodes[0].encryptwallet, 'ff')
assert_raises_rpc_error(-8, "passphrase can not be empty", self.nodes[0].walletpassphrase, '', 1)
assert_raises_rpc_error(-8, "passphrase can not be empty", self.nodes[0].walletpassphrasechange, '', 'ff')
# Check that walletpassphrase works
self.nodes[0].walletpassphrase(passphrase, 2)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
# Check that the timeout is right
time.sleep(2)
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test wrong passphrase
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10)
# Test walletlock
self.nodes[0].walletpassphrase(passphrase, 84600)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test passphrase changes
self.nodes[0].walletpassphrasechange(passphrase, passphrase2)
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10)
self.nodes[0].walletpassphrase(passphrase2, 10)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
# Test timeout bounds
assert_raises_rpc_error(-8, "Timeout cannot be negative.", self.nodes[0].walletpassphrase, passphrase2, -10)
# Check the timeout
# Check a time less than the limit
MAX_VALUE = 100000000
expected_time = int(time.time()) + MAX_VALUE - 600
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE - 600)
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_greater_than_or_equal(actual_time, expected_time)
assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
# Check a time greater than the limit
expected_time = int(time.time()) + MAX_VALUE - 1
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE + 1000)
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_greater_than_or_equal(actual_time, expected_time)
assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
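        # The assertions above pass because the node clamps the requested
        # timeout to MAX_VALUE (100000000 seconds), so 'unlocked_until' never
        # exceeds roughly now + MAX_VALUE.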
if __name__ == '__main__':
WalletEncryptionTest().main() | null |
183 | """
Author : Dhruv B Kakadiya
"""
import pygame as pg
from .statics import *
from .pieces import *
# checker board creation
class checker_board:
def __init__(self):
self.board = []
self.selected = None
self.black_l = self.white_l = 12
self.black_k = self.white_k = 0
self.METHOD_NAME()
# to design the board
def draw_cubes(self, window):
window.fill(green)
for row in range(rows):
for col in range(row % 2, cols, 2):
pg.draw.rect(
window, yellow, (row * sq_size, col * sq_size, sq_size, sq_size)
)
def move(self, piece, row, col):
self.board[piece.row][piece.col], self.board[row][col] = (
self.board[row][col],
self.board[piece.row][piece.col],
)
piece.move(row, col)
if row == rows - 1 or row == 0:
piece.make_king()
if piece.color == white:
self.white_k += 1
else:
self.black_k += 1
# to get piece whatever they want
def get_piece(self, row, col):
return self.board[row][col]
def METHOD_NAME(self):
for row in range(rows):
self.board.append([])
for col in range(cols):
if col % 2 == ((row + 1) % 2):
if row < 3:
self.board[row].append(pieces(row, col, white))
elif row > 4:
self.board[row].append(pieces(row, col, black))
else:
self.board[row].append(0)
else:
self.board[row].append(0)
def draw(self, window):
self.draw_cubes(window)
for row in range(rows):
for col in range(cols):
piece = self.board[row][col]
if piece != 0:
piece.draw(window)
def get_valid_moves(self, piece):
moves = {}
l = piece.col - 1
r = piece.col + 1
row = piece.row
if piece.color == black or piece.king:
moves.update(
self._traverse_l(row - 1, max(row - 3, -1), -1, piece.color, l)
)
moves.update(
self._traverse_r(row - 1, max(row - 3, -1), -1, piece.color, r)
)
if piece.color == white or piece.king:
moves.update(
self._traverse_l(row + 1, min(row + 3, rows), 1, piece.color, l)
)
moves.update(
self._traverse_r(row + 1, min(row + 3, rows), 1, piece.color, r)
)
return moves
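    # Illustrative: for a black piece at (5, 2) with empty diagonals, this
    # returns simple moves such as {(4, 1): [], (4, 3): []}; after a jump,
    # each value list holds the captured pieces to remove.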
def remove(self, pieces):
for piece in pieces:
self.board[piece.row][piece.col] = 0
if piece != 0:
if piece.color == black:
self.black_l -= 1
else:
self.white_l -= 1
def winner(self):
if self.black_l <= 0:
return white
elif self.white_l <= 0:
return black
return None
# Traversal Left
def _traverse_l(self, start, stop, step, color, l, skip=[]):
moves = {}
last = []
for r in range(start, stop, step):
if l < 0:
break
current = self.board[r][l]
if current == 0:
if skip and not last:
break
elif skip:
moves[(r, l)] = last + skip
else:
moves[(r, l)] = last
if last:
if step == -1:
row = max(r - 3, 0)
else:
row = min(r + 3, rows)
moves.update(
self._traverse_l(r + step, row, step, color, l - 1, skip=last)
)
moves.update(
self._traverse_r(r + step, row, step, color, l + 1, skip=last)
)
break
elif current.color == color:
break
else:
last = [current]
l -= 1
return moves
# Traversal Right
def _traverse_r(self, start, stop, step, color, right, skip=[]):
moves = {}
last = []
for r in range(start, stop, step):
if right >= cols:
break
current = self.board[r][right]
if current == 0:
if skip and not last:
break
elif skip:
moves[(r, right)] = last + skip
else:
moves[(r, right)] = last
if last:
if step == -1:
row = max(r - 3, 0)
else:
row = min(r + 3, rows)
moves.update(
self._traverse_l(
r + step, row, step, color, right - 1, skip=last
)
)
moves.update(
self._traverse_r(
r + step, row, step, color, right + 1, skip=last
)
)
break
elif current.color == color:
break
else:
last = [current]
right += 1
return moves | null |
184 | # GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# character generation, racial enemy (GUICG15)
# this file can't handle leveling up over two boundaries (more than 1 new
# racial enemy), but that will only affect testers (6-10 level gain or more)
import GemRB
import GUICommon
import CharOverview
import CommonTables
from GUIDefines import *
from ie_stats import IE_CLASS, IE_LEVELRANGER, IE_HATEDRACE, IE_HATEDRACE2
RaceWindow = 0
TextAreaControl = 0
DoneButton = 0
RacialEnemyTable = 0
RaceCount = 0
TopIndex = 0
CharGen = 0
RacialEnemies = [255] * 9
RacialStats = [0] * 9
def DisplayRaces():
global TopIndex
TopIndex=GemRB.GetVar("TopIndex")
for i in range(11):
Button = RaceWindow.GetControl(i+22)
Val = RacialEnemyTable.GetValue(i+TopIndex,0)
raceIDS = RacialEnemyTable.GetValue (i+TopIndex, 1)
if Val == 0:
Button.SetText("")
Button.SetState(IE_GUI_BUTTON_DISABLED)
# also disable already picked ones
elif raceIDS in RacialEnemies:
Button.SetText (Val)
Button.SetState (IE_GUI_BUTTON_DISABLED)
else:
Button.SetText(Val)
Button.SetState(IE_GUI_BUTTON_ENABLED)
Button.OnPress (RacePress)
Button.SetVarAssoc ("HatedRace", raceIDS)
return
def METHOD_NAME():
OpenEnemyWindow (1)
def OpenEnemyWindow(chargen=0):
global RaceWindow, TextAreaControl, DoneButton
global RacialEnemyTable, RaceCount, TopIndex
global CharGen
CharGen = chargen
rankDiff = 0
if chargen:
RaceWindow = GemRB.LoadWindow (15, "GUICG")
CharOverview.PositionCharGenWin (RaceWindow)
pc = GemRB.GetVar ("Slot")
Class = GemRB.GetPlayerStat (pc, IE_CLASS)
else:
RaceWindow = GemRB.LoadWindow (16, "GUIREC")
pc = GemRB.GameGetSelectedPCSingle ()
Class = GemRB.GetVar ("LUClass") + 1
LevelDiff = GemRB.GetVar ("LevelDiff")
rangerLevel = GemRB.GetPlayerStat (pc, IE_LEVELRANGER)
rankDiff = (rangerLevel+LevelDiff)//5 - rangerLevel//5
ClassName = GUICommon.GetClassRowName (Class, "class")
TableName = CommonTables.ClassSkills.GetValue(ClassName, "HATERACE")
if TableName == "*":
print("Skipping Racial enemies: chosen class doesn't know the concept!")
NextPress (0)
return
# at this point it is already guaranteed that we have a ranger
# but they get new racial enemies only at level 5 and each 5th level
if not chargen and rankDiff == 0:
print("Skipping Racial enemies: iwd2 gives them every 5th level!")
NextPress (0)
return
RacialEnemyTable = GemRB.LoadTable(TableName)
RaceCount = RacialEnemyTable.GetRowCount()-11
if RaceCount<0:
RaceCount=0
GenerateHateLists (pc)
for i in range(11):
Button = RaceWindow.GetControl(i+22)
Button.SetFlags(IE_GUI_BUTTON_RADIOBUTTON,OP_OR)
if chargen:
BackButton = RaceWindow.GetControl (10)
BackButton.SetText (15416)
BackButton.MakeEscape()
BackButton.OnPress (BackPress)
else:
RaceWindow.DeleteControl (10)
DoneButton = RaceWindow.GetControl(11)
DoneButton.SetText(11973)
DoneButton.MakeDefault()
DoneButton.SetState(IE_GUI_BUTTON_DISABLED)
TextAreaControl = RaceWindow.GetControl(8)
TextAreaControl.SetText(17256)
TopIndex = 0
GemRB.SetVar("TopIndex",0)
ScrollBarControl = RaceWindow.GetControl(1)
ScrollBarControl.SetVarAssoc("TopIndex",RaceCount)
ScrollBarControl.OnChange (DisplayRaces)
DoneButton.OnPress (NextPress)
if not chargen:
RaceWindow.ShowModal (MODAL_SHADOW_GRAY)
else:
RaceWindow.Focus()
DisplayRaces()
return
def GenerateHateLists (pc):
global RacialEnemies, RacialStats
RacialEnemies[0] = GemRB.GetPlayerStat (pc, IE_HATEDRACE, 1)
RacialStats[0] = IE_HATEDRACE
for i in range(1, len(RacialEnemies)-1):
RacialStats[i] = IE_HATEDRACE2 + i-1
RacialEnemies[i] = GemRB.GetPlayerStat (pc, RacialStats[i], 1) % 255
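# Note on the modulo above: unset hated-race stats read back as 255, and
# 255 % 255 == 0, so empty slots become 0 while real race IDs (assumed to be
# below 255) pass through unchanged.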
def RacePress():
Race = GemRB.GetVar("HatedRace")
Row = RacialEnemyTable.FindValue(1, Race)
TextAreaControl.SetText(RacialEnemyTable.GetValue(Row, 2) )
DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
return
def BackPress():
if RaceWindow:
RaceWindow.Close ()
GemRB.SetVar("HatedRace",0) #scrapping the race value
GemRB.SetNextScript("CharGen6")
return
def NextPress(save=1):
if RaceWindow:
RaceWindow.Close ()
if CharGen:
GemRB.SetNextScript("Skills")
return
if save:
# find the index past the last set stat
last = RacialEnemies.index (0)
# save, but note that racial enemies are stored in many stats
pc = GemRB.GameGetSelectedPCSingle ()
newHated = GemRB.GetVar ("HatedRace")
GemRB.SetPlayerStat (pc, RacialStats[last], newHated)
# open up the next levelup window
import Skills
Skills.OpenSkillsWindow (0)
return | null |
185 | import unittest
from decimal import Decimal
from pathlib import Path
from typing import Dict
from unittest.mock import patch
import yaml
from hummingbot.client.config.config_helpers import ClientConfigAdapter
from hummingbot.client.config.config_var import ConfigVar
from hummingbot.client.settings import ConnectorSetting, ConnectorType
from hummingbot.core.data_type.trade_fee import TradeFeeSchema
from hummingbot.strategy.hedge.hedge_config_map_pydantic import HedgeConfigMap, MarketConfigMap
class HedgeConfigMapPydanticTest(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.base_asset = "BTC"
cls.quote_asset = "USDT"
cls.trading_pair = f"{cls.base_asset}-{cls.quote_asset}"
cls.hedge_connector = "bybit_perpetual_testnet"
cls.connector = "binance"
@patch("hummingbot.client.settings.AllConnectorSettings.get_exchange_names")
@patch("hummingbot.client.settings.AllConnectorSettings.get_connector_settings")
def setUp(self, get_connector_settings_mock, get_exchange_names_mock) -> None:
super().setUp()
config_settings = self.get_default_map()
get_exchange_names_mock.return_value = set(self.METHOD_NAME().keys())
get_connector_settings_mock.return_value = self.METHOD_NAME()
self.config_map = ClientConfigAdapter(HedgeConfigMap(**config_settings))
def METHOD_NAME(self):
conf_var_connector_maker = ConfigVar(key='mock_paper_exchange', prompt="")
conf_var_connector_maker.value = 'mock_paper_exchange'
settings = {
"mock_paper_exchange": ConnectorSetting(
name='mock_paper_exchange',
type=ConnectorType.Exchange,
example_pair='BTC-ETH',
centralised=True,
use_ethereum_wallet=False,
trade_fee_schema=TradeFeeSchema(
percent_fee_token=None,
maker_percent_fee_decimal=Decimal('0.001'),
taker_percent_fee_decimal=Decimal('0.001'),
buy_percent_fee_deducted_from_returns=False,
maker_fixed_fees=[],
taker_fixed_fees=[]),
config_keys={
'connector': conf_var_connector_maker
},
is_sub_domain=False,
parent_name=None,
domain_parameter=None,
use_eth_gas_lookup=False)
}
return settings
def get_default_map(self) -> Dict[str, str]:
config_settings = {
"hedge_connector": self.hedge_connector,
"hedge_markets": self.trading_pair,
"connector_0": {
"connector": self.connector,
"markets": [self.trading_pair],
"offsets": [0]},
"connector_1": 'n',
"connector_2": 'n',
"connector_3": 'n',
"connector_4": 'n',
}
return config_settings
def test_hedge_markets_prompt(self):
self.config_map.hedge_connector = self.connector
self.config_map.hedge_markets = self.trading_pair
self.config_map.value_mode = True
self.assertEqual(
self.config_map.hedge_markets_prompt(self.config_map)[:12],
"Value mode: ",
)
self.config_map.value_mode = False
self.assertEqual(
self.config_map.hedge_markets_prompt(self.config_map)[:13],
"Amount mode: "
)
def test_hedge_offsets_prompt(self):
self.config_map.hedge_connector = self.connector
self.config_map.hedge_markets = self.trading_pair
self.config_map.value_mode = True
base = self.trading_pair.split("-")[0]
self.assertEqual(
self.config_map.hedge_offsets_prompt(self.config_map),
f"Enter the offset for {base}. (Example: 0.1 = +0.1{base} used in calculation of hedged value)"
)
self.config_map.value_mode = False
self.assertEqual(
self.config_map.hedge_offsets_prompt(self.config_map),
"Enter the offsets to use to hedge the markets comma seperated. "
"(Example: 0.1,-0.2 = +0.1BTC,-0.2ETH, 0LTC will be offset for the exchange amount "
"if markets is BTC-USDT,ETH-USDT,LTC-USDT)"
)
def test_trading_pair_prompt(self):
connector_map = MarketConfigMap(
connector=self.connector,
            markets=self.trading_pair,
            offsets=[Decimal("0")]
)
connector_map.trading_pair_prompt(connector_map)
def test_load_configs_from_yaml(self):
cur_dir = Path(__file__).parent
f_path = cur_dir / "test_config.yml"
with open(f_path, "r") as file:
data = yaml.safe_load(file)
loaded_config_map = ClientConfigAdapter(HedgeConfigMap(**data))
self.assertIsInstance(loaded_config_map, ClientConfigAdapter) | null |
186 | #!/usr/bin/env conda-execute
# conda execute
# env:
# - python
# - click
# - jinja2
# - requests
# - ruamel.yaml
# - conda-smithy
# - pygithub
# - fuzzywuzzy
# channels:
# - conda-forge
# run_with: python
import click
import conda_smithy.feedstocks as feedstocks
import jinja2
import json
import requests
import ruamel.yaml
from ruamel.yaml.scanner import ScannerError
import os
from github import Github
import conda_smithy.github as smithy_github
from fuzzywuzzy import process
# patch over differences between PY2 and PY3
try:
text_type = unicode
except NameError:
text_type = str
class NullUndefined(jinja2.Undefined):
def __unicode__(self):
return text_type(self._undefined_name)
def __getattr__(self, name):
return text_type('{}.{}'.format(self, name))
def __getitem__(self, name):
return '{}["{}"]'.format(self, name)
env = jinja2.Environment(undefined=NullUndefined)
@click.group()
def cli():
"""Match package names in pr against existing feedstocks.
Tools to match package names in from all the recipes in a pr against
the existing conda-forge feedstocks.
"""
pass
@cli.command('build-feedstock-index', help='create json index of feedstocks.')
@click.argument('filename')
@click.option('--gh-org', default='conda-forge', help='Set GitHub organization name.')
def build_feedstock_index(filename, gh_org='conda-forge'):
"Iterate over feedstocks and return dict of pkg-name:feedstock"
pkg_index = {}
for repo in feedstocks.feedstock_repos(gh_org):
try:
meta = repo.get_file_contents(path='recipe/meta.yaml').decoded_content
pkg_name = _extract_package_name(meta)
except (AttributeError, KeyError, ScannerError) as err:
# unable to parse the bob.io.image-feedstock
print('Unable to parse meta.yaml for {}'.format(repo.url))
print('guessing pkg name from feedstock url')
print('Traceback: \n', err)
pkg_name = repo.url.split('/')[-1].split('-feedstock')[0].lower()
pkg_index[pkg_name] = repo.full_name
with open(filename, 'w') as f:
json.dump(pkg_index, f)
print('feedstocks index written to {}'.format(filename))
@cli.command('build-pr-index', help='create json index of pull requests.')
@click.argument('filename')
@click.option('--gh-org', default='conda-forge', help='Set GitHub organization name.')
@click.option('--staged-recipes-repo', default='staged-recipes', help='Set staged recipe repo.')
def build_pr_index(filename, gh_org='conda-forge', staged_recipes_repo='staged-recipes'):
"Iterate over open pull requests in staged_recipes and return dict of pr:pkg-name"
token = smithy_github.gh_token()
gh = Github(token)
org = gh.get_organization(gh_org)
repo = org.get_repo(staged_recipes_repo)
pkg_index = {}
for pr in list(repo.get_pulls()):
for f in pr.get_files():
if f.filename.lower().endswith('meta.yaml'):
                try:
                    meta = requests.get(f.raw_url).content
                    pkg_name = _extract_package_name(meta)
                except (AttributeError, ScannerError) as err:
                    # parsing failed; fall back to an unknown package name so
                    # that idx is still defined for this file
                    pkg_name = None
                    print('Unable to parse meta.yaml for pr #{}'.format(pr.number))
                    print('setting pkg_name to None')
                    print('Traceback: \n', err)
                idx = 'pr {} ({}) /{}'.format(pr.number, pkg_name, f.filename)
                pkg_index[idx] = pkg_name
with open(filename, 'w') as f:
json.dump(pkg_index, f)
print('pull requests index written to {}'.format(filename))
@cli.command('compare-indices', help='compare pr index to feedstock index.')
@click.argument('pr-index')
@click.argument('feedstock-index')
@click.option('--threshold', default=85, help='only return matches with scores above threshold')
@click.option('--limit', default=2, help='maximum number of matches')
def compare_indices(pr_index, feedstock_index, threshold, limit):
pr_index = json.load(open(pr_index))
feedstock_index = json.load(open(feedstock_index))
matches = {}
for pr, name in list(pr_index.items()):
m = METHOD_NAME(name, feedstock_index, threshold=threshold, limit=limit)
if len(m) > 0:
matches[pr] = m
_format_output(matches, threshold, limit)
@cli.command('check-pr', help='check pr against feedstock index.')
@click.argument('pr', type=int)
@click.argument('feedstock-index')
@click.option('--threshold', default=85, help='only return matches with scores above threshold')
@click.option('--limit', default=2, help='maximum number of matches')
@click.option('--gh-org', default='conda-forge', help='Set GitHub organization name.')
@click.option('--staged-recipes-repo', default='staged-recipes', help='Set staged recipe repo.')
def check_pr(pr, feedstock_index, threshold, limit, gh_org, staged_recipes_repo):
feedstock_index = json.load(open(feedstock_index))
token = smithy_github.gh_token()
gh = Github(token)
org = gh.get_organization(gh_org)
repo = org.get_repo(staged_recipes_repo)
pr = repo.get_pull(pr)
packages = {}
for f in pr.get_files():
if f.filename.lower().endswith('meta.yaml'):
            try:
                meta = requests.get(f.raw_url).content
                pkg_name = _extract_package_name(meta)
            except AttributeError:
                # parsing failed; record the file with an unknown package name
                pkg_name = None
            idx = 'pr {} ({}) /{}'.format(pr.number, pkg_name, f.filename)
            packages[idx] = pkg_name
matches = {}
for k, pkg_name in packages.items():
matches[k] = METHOD_NAME(pkg_name, feedstock_index, threshold, limit)
_format_output(matches, threshold, limit)
@cli.command('check-pkg', help='check pkg name against feedstock index.')
@click.argument('name')
@click.argument('feedstock-index')
@click.option('--threshold', default=85, help='only return matches with scores above threshold')
@click.option('--limit', default=2, help='maximum number of matches')
def check_pkg(name, feedstock_index, threshold, limit):
feedstock_index = json.load(open(feedstock_index))
matches = METHOD_NAME(name, feedstock_index, threshold, limit)
_format_output({name: matches}, threshold, limit)
def _format_output(matches, threshold, limit):
vals = (threshold, limit)
print('-------------------------------------------')
print('match(es) found using threshold={}, limit={}'.format(*vals))
print('-------------------------------------------')
for k, repo in sorted(matches.items()):
for recipe in repo:
if len(recipe) > 0:
print('{} matches --> pkg={}, score={}, feedstock={}'.format(k, *recipe))
def METHOD_NAME(name, feedstock_index, threshold, limit):
choices = list(feedstock_index.keys())
matches = process.extract(name, choices, limit=limit)
result = []
for match in matches:
pkg, score = match
if score >= threshold:
result.append((pkg, score, feedstock_index[pkg]))
return result
def _extract_package_name(meta):
"""Extract package name from meta.yaml"""
content = env.from_string(meta.decode('utf8')).render(os=os)
meta = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader)
return meta['package']['name'].lower()
if __name__ == '__main__':
cli() | null |
187 | import gradio as gr
import numpy as np
import os
import sentencepiece as spm
import time
import ctranslate2
from onmt.utils.logging import init_logger
from onmt.translate.translator import build_translator
from onmt.inputters.text_utils import textbatch_to_tensor
from onmt.inputters.inputter import IterOnDevice
from onmt.transforms import get_transforms_cls, TransformPipe
from onmt.transforms import make_transforms
import onmt.opts as opts
from onmt.utils.parse import ArgumentParser
from onmt.utils.misc import use_gpu, set_random_seed
inf_type = "-py"
# inf_type = "ct2"
CACHE = {}
tokenizer_dir = "llama"
max_context_length = 4096
def make_prompt(chat_history):
task_description = "Below is an instruction that describes a task. Write a response that appropriately completes the request.⦅newline⦆⦅newline⦆" # noqa:E501
sp = CACHE["tokenizer"]
nb_user_tokens = []
nb_bot_tokens = [0]
parsed_instructions = []
parsed_responses = []
def parse_instruction(text):
parsed_text = f"### Instruction:⦅newline⦆ {text} ⦅newline⦆⦅newline⦆"
parsed_text_sp = parsed_text.replace("⦅newline⦆", "\n")
tokens = sp.encode(parsed_text_sp, out_type=str)
nb_user_tokens.append(len(tokens))
return parsed_text
def parse_response(text):
parsed_text = f"### Response:⦅newline⦆{text}"
tokens = sp.encode(parsed_text, out_type=str)
nb_bot_tokens.append(len(tokens))
return parsed_text
out = [task_description]
for _user_message, _bot_message in chat_history:
parsed_instructions.append(parse_instruction(_user_message))
if _bot_message is not None:
parsed_responses.append(parse_response(_bot_message))
else:
parsed_responses.append("### Response:⦅newline⦆")
keep_indices = prune_history(
nb_user_tokens, nb_bot_tokens, max_context_length - len(task_description)
)
for i in keep_indices:
out.append(parsed_instructions[i])
out.append(parsed_responses[i])
prompt = "".join(out)
return prompt
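# Illustrative prompt layout (hypothetical history): a single exchange renders
# roughly as the task description, then "### Instruction:⦅newline⦆ <user text>",
# then "### Response:⦅newline⦆<bot text>", with ⦅newline⦆ standing in for "\n".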
def prune_history(x, y, L):
reversed_indices = list(range(len(x)))[::-1]
keep_indices = []
_x, _y = x[::-1], y[::-1]
z = [sum(i) for i in zip(_x, _y)]
for i, n in enumerate(np.cumsum(z)):
if n < L:
keep_indices.append(reversed_indices[i])
keep_indices.reverse()
return keep_indices
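# Worked example (hypothetical counts): x=[5, 7], y=[0, 3], L=15 -> reversed
# pair sums z=[10, 5], cumsum=[10, 15]; only 10 < 15, so keep_indices == [1]
# and just the most recent turn is kept under the token budget.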
######################
# Inference with CT2 #
######################
model_dir = "finetuned_llama7B/llama7B-vicuna-onmt_step_4000.concat_CT2"
def load_models(model_dir, tokenizer_dir):
if CACHE.get("generator", None) is None:
CACHE["generator"] = ctranslate2.Generator(model_dir, device="cuda")
CACHE["tokenizer"] = spm.SentencePieceProcessor(
os.path.join(tokenizer_dir, "tokenizer.model")
)
def generate_words(prompt, add_bos=True):
generator, sp = CACHE["generator"], CACHE["tokenizer"]
prompt_tokens = sp.encode(prompt, out_type=str)
if add_bos:
prompt_tokens.insert(0, "<s>")
step_results = generator.generate_tokens(
prompt_tokens, sampling_temperature=0.1, sampling_topk=40, max_length=512
)
output_ids = []
for step_result in step_results:
is_new_word = step_result.token.startswith("▁")
if is_new_word and output_ids:
yield " " + sp.decode(output_ids)
output_ids = []
output_ids.append(step_result.token_id)
if output_ids:
yield " " + sp.decode(output_ids)
def make_bot_message_ct2(prompt):
prompt = prompt.replace("⦅newline⦆", "\n")
words = []
for _out in generate_words(prompt):
words.append(_out)
bot_message = "".join(words[:-1])
return bot_message
######################
# Inference with -py #
######################
ckpt_path = "finetuned_llama7B/llama7B-vicuna-onmt_step_4000.concat_added_key.pt"
# ckpt_path = "finetuned_llama7B/llama7B-vicuna-onmt_step_4000.pt"
translation_opts_config = "translate_opts.yaml"
def _get_parser():
parser = ArgumentParser(description="translate.py")
opts.config_opts(parser)
opts.translate_opts(parser, dynamic=True)
return parser
def METHOD_NAME(opt):
if CACHE.get("translator", None) is None:
ArgumentParser.validate_translate_opts(opt)
ArgumentParser._get_all_transform_translate(opt)
ArgumentParser._validate_transforms_opts(opt)
ArgumentParser.validate_translate_opts_dynamic(opt)
logger = init_logger(opt.log_file)
set_random_seed(opt.seed, use_gpu(opt))
CACHE["translator"] = build_translator(opt, logger=logger, report_score=True)
CACHE["tokenizer"] = spm.SentencePieceProcessor(
os.path.join(tokenizer_dir, "tokenizer.model")
)
transforms_cls = get_transforms_cls(opt._all_transform)
transforms = make_transforms(opt, transforms_cls, CACHE["translator"].vocabs)
data_transform = [
transforms[name] for name in opt.transforms if name in transforms
]
CACHE["transform"] = TransformPipe.build_from(data_transform)
CACHE["device"] = (
CACHE["translator"]._dev.index if CACHE["translator"]._use_cuda else -1
)
def make_bot_message_py(prompt):
    # we receive the text box content
    # might be good to also split on full stops (later)
prompt = prompt.replace("\n", "⦅newline⦆")
batch = []
ex = {"src": prompt.split(" "), "tgt": ""}
batch.append((ex, None, "infer"))
trf_batch = CACHE["transform"].batch_apply(
batch, is_train=False, corpus_name="infer"
)
# we reformat the transformed batch to be numericalized / tensorified
batch = []
for ex, _, cid in trf_batch:
ex["src"] = {"src": " ".join(ex["src"])}
ex["tgt"] = {"tgt": " ".join(ex["tgt"])}
batch.append(ex)
infer_iter = textbatch_to_tensor(CACHE["translator"].vocabs, batch)
infer_iter = IterOnDevice(infer_iter, CACHE["device"])
scores, predictions = CACHE["translator"]._translate(
infer_iter, transform=CACHE["transform"]
)
print("\n".join([predictions[i][0] for i in range(len(predictions))]))
bot_message = "\n".join(sent[0] for sent in predictions)
bot_message = bot_message.replace("⦅newline⦆", "\n")
return bot_message
######
# UI #
######
with gr.Blocks() as demo:
chatbot = gr.Chatbot()
msg = gr.Textbox()
submit = gr.Button("Submit")
clear = gr.Button("Clear")
if inf_type == "ct2":
load_models(model_dir, tokenizer_dir)
elif inf_type == "-py":
parser = _get_parser()
base_args = (
["-model", ckpt_path]
+ ["-src", "dummy"]
+ ["-config", translation_opts_config]
)
opt = parser.parse_args(base_args)
METHOD_NAME(opt)
def user(user_message, history):
return "", history + [[user_message, None]]
def bot(history):
prompt = make_prompt(history)
if inf_type == "ct2":
bot_message = make_bot_message_ct2(prompt)
elif inf_type == "-py":
bot_message = make_bot_message_py(prompt)
history[-1][1] = ""
for character in bot_message:
history[-1][1] += character
time.sleep(0)
yield history
submit.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
bot, chatbot, chatbot
)
msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
bot, chatbot, chatbot
)
clear.click(lambda: None, None, chatbot, queue=False)
demo.queue()
demo.launch(server_port=1851, server_name="0.0.0.0")
# What are the 3 best french cities ?
# Which one is better if I like outdoor activities ?
# Which one is better if I like cultural outings?
# What are the best neighborhoods in these 5 cities? | null |
188 | # pylint: disable=redefined-outer-name
# -*- coding: utf-8 -*-
#
# This file is part of INGInious. See the LICENSE and the COPYRIGHTS files for
# more information about the licensing of this file.
import pytest
import os
import tempfile
import shutil
import copy
from inginious.common.base import directory_compare_from_hash, directory_content_with_hash, hash_file, id_checker, \
load_json_or_yaml, \
write_json_or_yaml
@pytest.fixture()
def init_tmp_dir(request):
""" Create a temporary folder """
dir_path = tempfile.mkdtemp()
    yield dir_path
    # the function under test may have created content under dir_path
shutil.rmtree(dir_path)
class TestIdChecker(object):
""" Test the id checker """
def test_id_checker_valid_1(self):
assert id_checker("azertyuiopZERTYUIO65456_5-a") is True
def test_id_checker_invalid_1(self):
assert id_checker("a@a") is False
def test_id_checker_invalid_2(self):
assert id_checker("") is False
def test_id_checker_invalid_3(self):
assert id_checker("test/test") is False
class TestJSONYAMLReaderWriter(object):
""" Test the functions load_json_or_yaml and write_json_or_yaml """
def METHOD_NAME(self, init_tmp_dir):
tmp_dir = init_tmp_dir
with open(os.path.join(tmp_dir, "input.json"), "w") as f:
f.write('{"key1":"data1","key2":{"key3":[1,2]}}')
assert load_json_or_yaml(os.path.join(tmp_dir, "input.json")) == {'key1': 'data1',
'key2': {'key3': [1, 2]}}
def test_json_write(self, init_tmp_dir):
tmp_dir = init_tmp_dir
write_json_or_yaml(os.path.join(tmp_dir, "output.json"), {'key1': 'data1', 'key2': {'key3': [1, 2]}})
assert load_json_or_yaml(os.path.join(tmp_dir, "output.json")) == {'key1': 'data1',
'key2': {'key3': [1, 2]}}
def test_yaml_read(self, init_tmp_dir):
tmp_dir = init_tmp_dir
with open(os.path.join(tmp_dir, "input.yaml"), "w") as f:
f.write("""
key1: data1
key2:
key3:
- 1
- 2
""")
assert load_json_or_yaml(os.path.join(tmp_dir, "input.yaml")) == {'key1': 'data1',
'key2': {'key3': [1, 2]}}
def test_yaml_write(self,init_tmp_dir):
tmp_dir = init_tmp_dir
write_json_or_yaml(os.path.join(tmp_dir, "output.yaml"), {'key1': 'data1', 'key2': {'key3': [1, 2]}})
assert load_json_or_yaml(os.path.join(tmp_dir, "output.yaml")) == {'key1': 'data1',
'key2': {'key3': [1, 2]}}
class TestDirectoryHash(object):
""" Test all the functions that involves file hash """
def test_hash_file(self):
with tempfile.TemporaryFile() as tmp:
tmp.write(b"some random text")
tmp.flush()
tmp.seek(0)
the_hash = hash_file(tmp)
assert the_hash == "07671a038c0eb43723d421693b073c3b"
def test_directory_content_with_hash(self, init_tmp_dir):
temp_dir = init_tmp_dir
test_dir = os.path.join(temp_dir, "test1")
# Create data
os.mkdir(test_dir)
os.mkdir(os.path.join(test_dir, "subdir"))
goal = {}
with open(os.path.join(test_dir, "file1"), "w") as f:
f.write("random text 1")
goal["file1"] = ("d7e62e68f60f6974309b263192d5fea2", os.stat(os.path.join(test_dir, "file1")).st_mode)
with open(os.path.join(test_dir, "file2"), "w") as f:
f.write("random text 2")
goal["file2"] = ("5ae848320fda7796dc2f3a1a68300e07", os.stat(os.path.join(test_dir, "file2")).st_mode)
with open(os.path.join(test_dir, "subdir", "file3"), "w") as f:
f.write("random text 3")
goal["subdir/file3"] = (
"312aa75e0816015cdb5ef1989de7bf3f", os.stat(os.path.join(test_dir, "subdir", "file3")).st_mode)
# Test the function
assert directory_content_with_hash(test_dir) == goal
def test_directory_compare_from_hash(self, init_tmp_dir):
temp_dir = init_tmp_dir
test_dir = os.path.join(temp_dir, "test2")
# Create data
os.mkdir(test_dir)
os.mkdir(os.path.join(test_dir, "subdir"))
with open(os.path.join(test_dir, "file1"), "w") as f:
f.write("random text 1")
with open(os.path.join(test_dir, "file2"), "w") as f:
f.write("random text 2")
with open(os.path.join(test_dir, "subdir", "file3"), "w") as f:
f.write("random text 3")
with open(os.path.join(test_dir, "file4"), "w") as f:
f.write("random text 4")
with open(os.path.join(test_dir, "file5"), "w") as f:
f.write("random text 5")
with open(os.path.join(test_dir, "file6"), "w") as f:
f.write("random text 6")
l1 = directory_content_with_hash(test_dir)
l2 = copy.deepcopy(l1)
# Perturb the data
l2["file1"] = (l2["file1"][0], 0)
l2["file2"] = ("not a valid hash", l2["file2"])
l2["file4"] = ("not a valid hash", 0)
del l2["file5"]
# Compare and test
to_update, to_delete = directory_compare_from_hash(l2, l1)
assert set(to_update) == set(["file1", "file2", "file4"])
assert set(to_delete) == set(["file5"]) | null |
189 | import pytest
from conftest import SYSTEM_RELEASE_ENV
from envparse import env
def assign_packages(packages=None):
# If nothing was passed down to packages, set it to an empty list
if not packages:
packages = []
ol_7_pkgs = ["oracle-release-el7", "usermode", "rhn-setup", "oracle-logos"]
ol_8_pkgs = ["oraclelinux-release-el8", "usermode", "rhn-setup", "oracle-logos"]
cos_7_pkgs = ["centos-release", "usermode", "rhn-setup", "python-syspurpose", "centos-logos"]
cos_8_pkgs = ["centos-linux-release", "usermode", "rhn-setup", "python3-syspurpose", "centos-logos"]
# The packages 'python-syspurpose' and 'python3-syspurpose' were removed in Oracle Linux 7.9
# and Oracle Linux 8.2 respectively.
if "centos-7" in SYSTEM_RELEASE_ENV:
packages += cos_7_pkgs
elif "centos-8" in SYSTEM_RELEASE_ENV:
packages += cos_8_pkgs
elif "oracle-7" in SYSTEM_RELEASE_ENV:
packages += ol_7_pkgs
elif "oracle-8" in SYSTEM_RELEASE_ENV:
packages += ol_8_pkgs
return packages
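# Illustrative: on a centos-8 host this returns the caller's packages plus
# centos-linux-release, usermode, rhn-setup, python3-syspurpose and
# centos-logos.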
def install_packages(shell, packages):
"""
Helper function.
Install packages that cause trouble/needs to be checked during/after rollback.
Some packages were removed during the conversion and were not backed up/installed back when the rollback occurred.
"""
packages_to_remove_at_cleanup = []
for package in packages:
if f"{package} is not installed" in shell(f"rpm -q {package}").output:
packages_to_remove_at_cleanup.append(package)
# Run this only once as package managers take too long to figure out
# dependencies and install the packages.
print(f"PREP: Setting up {','.join(packages_to_remove_at_cleanup)}")
assert shell(f"yum install -y {' '.join(packages_to_remove_at_cleanup)}").returncode == 0
return packages_to_remove_at_cleanup
def METHOD_NAME(shell, packages):
"""
Helper function.
Remove additionally installed packages.
"""
if not packages:
return
print(f"CLEAN: Removing {','.join(packages)}")
assert shell(f"yum remove -y {' '.join(packages)}").returncode == 0
def is_installed_post_rollback(shell, packages):
"""
Helper function.
Iterate over list of packages and verify that untracked packages remain installed after the rollback.
"""
for package in packages:
print(f"CHECK: Checking for {package}")
query = shell(f"rpm -q {package}")
assert f"{package} is not installed" not in query.output
def terminate_and_assert_good_rollback(c2r):
"""
Helper function.
Run conversion and terminate it to start the rollback.
"""
# Use 'Ctrl + c' first to check for unexpected behaviour
# of the rollback feature after process termination
c2r.sendcontrol("c")
# Assert the rollback finished all tasks by going through its last task
assert c2r.expect("Rollback: Remove installed RHSM certificate", timeout=120) == 0
assert c2r.exitstatus != 1
@pytest.mark.test_rhsm_cleanup
def test_proper_rhsm_clean_up(shell, convert2rhel):
"""
Verify that the system has been successfully unregistered after the rollback.
Verify that usermode, rhn-setup and os-release packages are not removed.
"""
packages_to_remove_at_cleanup = install_packages(shell, assign_packages())
with convert2rhel(
"--serverurl {} --username {} --password {} --pool {} --debug --no-rpm-va".format(
env.str("RHSM_SERVER_URL"),
env.str("RHSM_USERNAME"),
env.str("RHSM_PASSWORD"),
env.str("RHSM_POOL"),
)
) as c2r:
c2r.expect("Continue with the system conversion?")
c2r.sendline("y")
assert c2r.expect("Successfully attached a subscription") == 0
c2r.sendcontrol("c")
c2r.expect("Calling command 'subscription-manager unregister'")
c2r.expect("System unregistered successfully.")
assert c2r.exitstatus != 0
is_installed_post_rollback(shell, assign_packages())
METHOD_NAME(shell, packages_to_remove_at_cleanup)
@pytest.mark.test_packages_untracked_graceful_rollback
def test_check_untrack_pkgs_graceful(convert2rhel, shell):
"""
Provide c2r with incorrect username and password, so the registration fails and c2r performs rollback.
Primary issue - checking for python/3-syspurpose not being removed.
"""
username = "foo"
password = "bar"
packages_to_remove_at_cleanup = install_packages(shell, assign_packages())
with convert2rhel(f"--debug -y --no-rpm-va --username {username} --password {password}") as c2r:
assert c2r.exitstatus != 0
is_installed_post_rollback(shell, assign_packages())
METHOD_NAME(shell, packages_to_remove_at_cleanup)
@pytest.mark.test_packages_untracked_forced_rollback
def test_check_untrack_pkgs_force(convert2rhel, shell):
"""
Terminate the c2r process forcefully, so the rollback is performed.
Primary issue - verify that python-syspurpose is not removed.
"""
packages_to_remove_at_cleanup = install_packages(shell, assign_packages())
with convert2rhel("--debug -y --no-rpm-va") as c2r:
c2r.expect("Username")
c2r.sendcontrol("c")
assert c2r.exitstatus != 0
is_installed_post_rollback(shell, assign_packages())
METHOD_NAME(shell, packages_to_remove_at_cleanup)
@pytest.mark.test_terminate_on_registration_start
def test_terminate_registration_start(convert2rhel):
"""
Send termination signal immediately after c2r tries the registration.
Verify that c2r goes successfully through the rollback.
"""
with convert2rhel(
"--debug -y --no-rpm-va --serverurl {} --username {} --password {}".format(
env.str("RHSM_SERVER_URL"),
env.str("RHSM_USERNAME"),
env.str("RHSM_PASSWORD"),
),
unregister=True,
) as c2r:
if c2r.expect("Registering the system using subscription-manager") == 0:
terminate_and_assert_good_rollback(c2r)
@pytest.mark.test_terminate_on_registration_success
def test_terminate_registration_success(convert2rhel):
"""
Send termination signal immediately after c2r successfully finishes the registration.
Verify that c2r goes successfully through the rollback.
Verify that the subscription is auto-attached.
"""
with convert2rhel(
"--debug -y --no-rpm-va --serverurl {} --username {} --password {}".format(
env.str("RHSM_SERVER_URL"),
env.str("RHSM_USERNAME"),
env.str("RHSM_PASSWORD"),
),
unregister=True,
) as c2r:
c2r.expect("Registering the system using subscription-manager")
assert c2r.expect("System registration succeeded.", timeout=180) == 0
# Verify auto-attachment of the subscription
assert c2r.expect("Auto-attaching compatible subscriptions to the system ...", timeout=180) == 0
assert c2r.expect("DEBUG - Calling command 'subscription-manager attach --auto'", timeout=180) == 0
if c2r.expect("Status: Subscribed", timeout=180) == 0:
terminate_and_assert_good_rollback(c2r)
@pytest.mark.test_terminate_on_username
def test_terminate_on_username_prompt(convert2rhel):
"""
Send termination signal on the user prompt for username.
Verify that c2r goes successfully through the rollback.
"""
with convert2rhel("--debug -y --no-rpm-va") as c2r:
if c2r.expect("Username:") == 0:
terminate_and_assert_good_rollback(c2r)
@pytest.mark.test_terminate_on_password
def test_terminate_on_password_prompt(convert2rhel):
"""
Send termination signal on the user prompt for password.
Verify that c2r goes successfully through the rollback.
"""
with convert2rhel("--debug -y --no-rpm-va --username {}".format(env.str("RHSM_USERNAME"))) as c2r:
if c2r.expect("Password:") == 0:
terminate_and_assert_good_rollback(c2r) | null |
190 | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function, unicode_literals
#################################################################################################
import time
from datetime import datetime
from ..helper import LazyLogger
#################################################################################################
LOG = LazyLogger(__name__)
#################################################################################################
class Credentials(object):
credentials = None
def __init__(self):
LOG.debug("Credentials initializing...")
def set_credentials(self, credentials):
self.credentials = credentials
def get_credentials(self):
return self.get()
def _ensure(self):
if not self.credentials:
try:
LOG.info(self.credentials)
if not isinstance(self.credentials, dict):
raise ValueError("invalid credentials format")
            except Exception as e:  # credentials are unset or in an invalid format
LOG.warning(e)
self.credentials = {}
LOG.debug("credentials initialized with: %s", self.credentials)
self.credentials['Servers'] = self.credentials.setdefault('Servers', [])
def get(self):
self._ensure()
return self.credentials
def set(self, data):
if data:
self.credentials.update(data)
else:
self._clear()
LOG.debug("credentialsupdated")
def _clear(self):
self.credentials.clear()
def METHOD_NAME(self, server, user):
for existing in server.setdefault('Users', []):
if existing['Id'] == user['Id']:
# Merge the data
existing['IsSignedInOffline'] = True
break
else:
server['Users'].append(user)
def add_update_server(self, servers, server):
if server.get('Id') is None:
raise KeyError("Server['Id'] cannot be null or empty")
# Add default DateLastAccessed if doesn't exist.
server.setdefault('DateLastAccessed', "1970-01-01T00:00:00Z")
for existing in servers:
if existing['Id'] == server['Id']:
# Merge the data
if server.get('DateLastAccessed') and self._date_object(server['DateLastAccessed']) > self._date_object(existing['DateLastAccessed']):
existing['DateLastAccessed'] = server['DateLastAccessed']
if server.get('UserLinkType'):
existing['UserLinkType'] = server['UserLinkType']
if server.get('AccessToken'):
existing['AccessToken'] = server['AccessToken']
existing['UserId'] = server['UserId']
if server.get('ExchangeToken'):
existing['ExchangeToken'] = server['ExchangeToken']
if server.get('ManualAddress'):
existing['ManualAddress'] = server['ManualAddress']
if server.get('LocalAddress'):
existing['LocalAddress'] = server['LocalAddress']
if server.get('Name'):
existing['Name'] = server['Name']
if server.get('LastConnectionMode') is not None:
existing['LastConnectionMode'] = server['LastConnectionMode']
if server.get('ConnectServerId'):
existing['ConnectServerId'] = server['ConnectServerId']
return existing
servers.append(server)
return server
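    # Sketch of the merge (hypothetical data): an incoming server whose 'Id'
    # matches an existing entry only overwrites the fields it carries, and
    # 'DateLastAccessed' is only replaced when the incoming value is newer.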
def _date_object(self, date):
# Convert string to date
try:
date_obj = time.strptime(date, "%Y-%m-%dT%H:%M:%SZ")
except (ImportError, TypeError):
# TypeError: attribute of type 'NoneType' is not callable
# Known Kodi/python error
date_obj = datetime(*(time.strptime(date, "%Y-%m-%dT%H:%M:%SZ")[0:6]))
return date_obj | null |
191 | import pytest
from api_tests.utils import UserRoles
from osf_tests.factories import AuthUserFactory
from osf_tests.factories import NodeFactory, CollectionFactory, CollectionProviderFactory
from osf.migrations import update_provider_auth_groups
from osf.models import CollectionSubmission
from osf.utils.workflows import CollectionSubmissionsTriggers, CollectionSubmissionStates
GET_URL = '/v2/collection_submissions/{}/actions/'
@pytest.fixture()
def collection_provider():
collection_provider = CollectionProviderFactory()
update_provider_auth_groups()
return collection_provider
@pytest.fixture()
def METHOD_NAME(collection_provider):
METHOD_NAME = NodeFactory(is_public=True)
METHOD_NAME.save()
return METHOD_NAME
@pytest.fixture()
def collection(collection_provider):
collection = CollectionFactory(is_public=True)
collection.provider = collection_provider
collection.save()
return collection
@pytest.fixture()
def collection_submission(METHOD_NAME, collection):
collection_submission = CollectionSubmission(
guid=METHOD_NAME.guids.first(),
collection=collection,
creator=METHOD_NAME.creator,
)
collection_submission.save()
return collection_submission
def configure_test_auth(METHOD_NAME, user_role):
if user_role is UserRoles.UNAUTHENTICATED:
return None
user = AuthUserFactory()
if user_role is UserRoles.MODERATOR:
collection_submission = CollectionSubmission.objects.get(guid=METHOD_NAME.guids.first())
collection_submission.collection.provider.get_group('moderator').user_set.add(user)
elif user_role in UserRoles.contributor_roles():
METHOD_NAME.add_contributor(user, user_role.get_permissions_string())
return user.auth
@pytest.mark.django_db
class TestCollectionSubmissionsActionsDetailGETPermissions:
@pytest.mark.parametrize('user_role', UserRoles)
def test_status_code__200(self, app, METHOD_NAME, user_role, collection_submission):
test_auth = configure_test_auth(METHOD_NAME, user_role)
resp = app.get(GET_URL.format(collection_submission._id), auth=test_auth, expect_errors=True)
assert resp.status_code == 200
@pytest.mark.parametrize('user_role', [UserRoles.UNAUTHENTICATED, UserRoles.NONCONTRIB])
def test_private_collection_noncontribs(self, app, METHOD_NAME, collection, user_role, collection_submission):
collection.is_public = False
collection.save()
test_auth = configure_test_auth(METHOD_NAME, user_role)
resp = app.get(GET_URL.format(collection_submission._id), auth=test_auth, expect_errors=True)
assert resp.status_code in (401, 403)
@pytest.mark.parametrize('user_role', UserRoles.excluding(*[UserRoles.UNAUTHENTICATED, UserRoles.NONCONTRIB]))
def test_private_collection_contribs(self, app, METHOD_NAME, collection, user_role, collection_submission):
collection.is_public = False
collection.save()
test_auth = configure_test_auth(METHOD_NAME, user_role)
resp = app.get(GET_URL.format(collection_submission._id), auth=test_auth, expect_errors=True)
assert resp.status_code == 200
@pytest.mark.django_db
class TestCollectionSubmissionsActionsDetailGETBehavior:
def test_return_action(self, app, METHOD_NAME, collection_submission):
collection_submission_action = collection_submission.actions.last()
resp = app.get(GET_URL.format(collection_submission._id), expect_errors=True)
data = resp.json['data'][0]
assert data['id'] == collection_submission_action._id
assert data['attributes']['from_state'] == CollectionSubmissionStates.IN_PROGRESS.db_name
assert data['attributes']['to_state'] == CollectionSubmissionStates.ACCEPTED.db_name
assert data['attributes']['trigger'] == CollectionSubmissionsTriggers.SUBMIT.db_name
assert data['attributes']['comment'] == 'Initial submission action'
assert data['relationships']['creator']['data']['id'] == collection_submission.creator._id
assert data['relationships']['collection']['data']['id'] == collection_submission.collection._id
assert data['relationships']['target']['data']['id'] \
== f'{collection_submission.guid._id}-{collection_submission.collection._id}'
assert resp.status_code == 200
@pytest.mark.django_db
class TestCollectionSubmissionsActionsDetailUnsupportedMethods:
@pytest.mark.parametrize('user_role', UserRoles)
def test_cannot_PATCH(self, app, user_role, METHOD_NAME, collection_submission):
auth = configure_test_auth(METHOD_NAME, user_role)
resp = app.patch_json_api(GET_URL.format(collection_submission._id), auth=auth, expect_errors=True)
assert resp.status_code == 405
@pytest.mark.parametrize('user_role', UserRoles)
def test_cannot_POST(self, app, user_role, METHOD_NAME, collection_submission):
auth = configure_test_auth(METHOD_NAME, user_role)
resp = app.post_json_api(GET_URL.format(collection_submission._id), auth=auth, expect_errors=True)
assert resp.status_code == 405
@pytest.mark.parametrize('user_role', UserRoles)
def test_cannot_PUT(self, app, user_role, METHOD_NAME, collection_submission):
auth = configure_test_auth(METHOD_NAME, user_role)
resp = app.put_json_api(GET_URL.format(collection_submission._id), auth=auth, expect_errors=True)
assert resp.status_code == 405
@pytest.mark.parametrize('user_role', UserRoles)
def test_cannot_DELETE(self, app, user_role, METHOD_NAME, collection_submission):
auth = configure_test_auth(METHOD_NAME, user_role)
resp = app.delete_json_api(GET_URL.format(collection_submission._id), auth=auth, expect_errors=True)
assert resp.status_code == 405 | null |
192 | import sys
import types
import py
from py.builtin import set, frozenset
def test_enumerate():
l = [0,1,2]
for i,x in enumerate(l):
assert i == x
def test_any():
assert not py.builtin.any([0,False, None])
assert py.builtin.any([0,False, None,1])
def test_all():
assert not py.builtin.all([True, 1, False])
assert py.builtin.all([True, 1, object])
def test_BaseException():
assert issubclass(IndexError, py.builtin.BaseException)
assert issubclass(Exception, py.builtin.BaseException)
assert issubclass(KeyboardInterrupt, py.builtin.BaseException)
class MyRandomClass(object):
pass
assert not issubclass(MyRandomClass, py.builtin.BaseException)
assert py.builtin.BaseException.__module__ in ('exceptions', 'builtins')
assert Exception.__name__ == 'Exception'
def test_GeneratorExit():
assert py.builtin.GeneratorExit.__module__ in ('exceptions', 'builtins')
assert issubclass(py.builtin.GeneratorExit, py.builtin.BaseException)
def test_reversed():
reversed = py.builtin.reversed
r = reversed("hello")
assert iter(r) is r
s = "".join(list(r))
assert s == "olleh"
assert list(reversed(list(reversed("hello")))) == ['h','e','l','l','o']
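    # A reversed iterator is not itself a sequence, so reversing it again
    # raises TypeError.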
py.test.raises(TypeError, reversed, reversed("hello"))
def test_simple():
s = set([1, 2, 3, 4])
assert s == set([3, 4, 2, 1])
s1 = s.union(set([5, 6]))
assert 5 in s1
assert 1 in s1
def test_frozenset():
s = set([frozenset([0, 1]), frozenset([1, 0])])
assert len(s) == 1
def test_print_simple():
from py.builtin import print_
py.test.raises(TypeError, "print_(hello=3)")
f = py.io.TextIO()
print_("hello", "world", file=f)
s = f.getvalue()
assert s == "hello world\n"
f = py.io.TextIO()
print_("hello", end="", file=f)
s = f.getvalue()
assert s == "hello"
f = py.io.TextIO()
print_("xyz", "abc", sep="", end="", file=f)
s = f.getvalue()
assert s == "xyzabc"
class X:
def __repr__(self): return "rep"
f = py.io.TextIO()
print_(X(), file=f)
assert f.getvalue() == "rep\n"
def test_execfile(tmpdir):
test_file = tmpdir.join("test.py")
test_file.write("x = y\ndef f(): pass")
ns = {"y" : 42}
py.builtin.execfile(str(test_file), ns)
assert ns["x"] == 42
assert py.code.getrawcode(ns["f"]).co_filename == str(test_file)
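    # execfile() without explicit namespaces executes in the caller's frame;
    # inside the class body the class-local y=3 is visible, so "x = y"
    # rebinds the class attribute x from 4 to 3.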
class A:
y = 3
x = 4
py.builtin.execfile(str(test_file))
assert A.x == 3
def test_getfuncdict():
def f():
raise NotImplementedError
f.x = 4
assert py.builtin._getfuncdict(f)["x"] == 4
assert py.builtin._getfuncdict(2) is None
def test_callable():
class A: pass
assert py.builtin.callable(test_callable)
assert py.builtin.callable(A)
assert py.builtin.callable(list)
assert py.builtin.callable(id)
assert not py.builtin.callable(4)
assert not py.builtin.callable("hi")
def test_totext():
py.builtin._totext("hello", "UTF-8")
def METHOD_NAME():
if sys.version_info[0] < 3:
assert py.builtin.text == unicode
assert py.builtin.bytes == str
else:
assert py.builtin.text == str
assert py.builtin.bytes == bytes
def test_totext_badutf8():
    # Seen in printouts within the pytest testsuite: _totext would fail on
    # invalid UTF-8 unless an error handler is passed.
if sys.version_info >= (3,):
errors = 'surrogateescape'
    else:  # Python 2 lacks surrogateescape; fall back to 'replace'
errors = 'replace'
py.builtin._totext("\xa6", "UTF-8", errors)
def test_reraise():
from py.builtin import _reraise
try:
raise Exception()
except Exception:
cls, val, tb = sys.exc_info()
excinfo = py.test.raises(Exception, "_reraise(cls, val, tb)")
def test_exec():
l = []
py.builtin.exec_("l.append(1)")
assert l == [1]
d = {}
py.builtin.exec_("x=4", d)
assert d['x'] == 4
def test_tryimport():
py.test.raises(ImportError, py.builtin._tryimport, 'xqwe123')
x = py.builtin._tryimport('asldkajsdl', 'py')
assert x == py
x = py.builtin._tryimport('asldkajsdl', 'py.path')
assert x == py.path
def test_getcode():
code = py.builtin._getcode(test_getcode)
assert isinstance(code, types.CodeType)
assert py.builtin._getcode(4) is None | null |
193 | from django.urls import reverse
from .base import AuthenticatedAPITestCase
from pydis_site.apps.api.models import DocumentationLink
class UnauthedDocumentationLinkAPITests(AuthenticatedAPITestCase):
def setUp(self):
super().setUp()
self.client.force_authenticate(user=None)
def test_detail_lookup_returns_401(self):
url = reverse('api:bot:documentationlink-detail', args=('whatever',))
response = self.client.get(url)
self.assertEqual(response.status_code, 401)
def test_list_returns_401(self):
url = reverse('api:bot:documentationlink-list')
response = self.client.get(url)
self.assertEqual(response.status_code, 401)
def METHOD_NAME(self):
url = reverse('api:bot:documentationlink-list')
response = self.client.post(url, data={'hi': 'there'})
self.assertEqual(response.status_code, 401)
def test_delete_returns_401(self):
url = reverse('api:bot:documentationlink-detail', args=('whatever',))
response = self.client.delete(url)
self.assertEqual(response.status_code, 401)
class EmptyDatabaseDocumentationLinkAPITests(AuthenticatedAPITestCase):
def test_detail_lookup_returns_404(self):
url = reverse('api:bot:documentationlink-detail', args=('whatever',))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_list_all_returns_empty_list(self):
url = reverse('api:bot:documentationlink-list')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), [])
def test_delete_returns_404(self):
url = reverse('api:bot:documentationlink-detail', args=('whatever',))
response = self.client.delete(url)
self.assertEqual(response.status_code, 404)
class DetailLookupDocumentationLinkAPITests(AuthenticatedAPITestCase):
@classmethod
def setUpTestData(cls):
cls.doc_link = DocumentationLink.objects.create(
package='testpackage',
base_url='https://example.com/',
inventory_url='https://example.com'
)
cls.doc_json = {
'package': cls.doc_link.package,
'base_url': cls.doc_link.base_url,
'inventory_url': cls.doc_link.inventory_url
}
def test_detail_lookup_unknown_package_returns_404(self):
url = reverse('api:bot:documentationlink-detail', args=('whatever',))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_detail_lookup_created_package_returns_package(self):
url = reverse('api:bot:documentationlink-detail', args=(self.doc_link.package,))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), self.doc_json)
def test_list_all_packages_shows_created_package(self):
url = reverse('api:bot:documentationlink-list')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), [self.doc_json])
def test_create_invalid_body_returns_400(self):
url = reverse('api:bot:documentationlink-list')
response = self.client.post(url, data={'i': 'am', 'totally': 'valid'})
self.assertEqual(response.status_code, 400)
def test_create_invalid_url_returns_400(self):
body = {
'package': 'example',
'base_url': 'https://example.com',
'inventory_url': 'totally an url'
}
url = reverse('api:bot:documentationlink-list')
response = self.client.post(url, data=body)
self.assertEqual(response.status_code, 400)
def test_create_invalid_package_name_returns_400(self):
test_cases = ("InvalidPackage", "invalid package", "i\u0150valid")
for case in test_cases:
with self.subTest(package_name=case):
body = self.doc_json.copy()
body['package'] = case
url = reverse('api:bot:documentationlink-list')
response = self.client.post(url, data=body)
self.assertEqual(response.status_code, 400)
class DocumentationLinkCreationTests(AuthenticatedAPITestCase):
def setUp(self):
super().setUp()
self.body = {
'package': 'example',
'base_url': 'https://example.com/',
'inventory_url': 'https://docs.example.com'
}
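        # Create the link through the API itself (not the ORM) so every test
        # in this class also exercises the creation endpoint.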
url = reverse('api:bot:documentationlink-list')
response = self.client.post(url, data=self.body)
self.assertEqual(response.status_code, 201)
def test_package_in_full_list(self):
url = reverse('api:bot:documentationlink-list')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), [self.body])
def test_detail_lookup_works_with_package(self):
url = reverse('api:bot:documentationlink-detail', args=(self.body['package'],))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), self.body)
class DocumentationLinkDeletionTests(AuthenticatedAPITestCase):
@classmethod
def setUpTestData(cls):
cls.doc_link = DocumentationLink.objects.create(
package='example',
base_url='https://example.com',
inventory_url='https://docs.example.com'
)
def test_unknown_package_returns_404(self):
url = reverse('api:bot:documentationlink-detail', args=('whatever',))
response = self.client.delete(url)
self.assertEqual(response.status_code, 404)
def test_delete_known_package_returns_204(self):
url = reverse('api:bot:documentationlink-detail', args=(self.doc_link.package,))
response = self.client.delete(url)
self.assertEqual(response.status_code, 204) | null |
194 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecd.endpoint import endpoint_data
class CreateADConnectorOfficeSiteRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'ecd', '2020-09-30', 'CreateADConnectorOfficeSite')
self.set_method('POST')
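        # The hasattr guards presumably keep the generated code compatible
        # with older aliyunsdkcore releases that lack endpoint support.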
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_CenId(self): # String
return self.get_query_params().get('CenId')
def set_CenId(self, CenId): # String
self.add_query_param('CenId', CenId)
def get_SubDomainDnsAddresss(self): # RepeatList
return self.get_query_params().get('SubDomainDnsAddress')
def set_SubDomainDnsAddresss(self, SubDomainDnsAddress): # RepeatList
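        # RepeatList parameters are flattened into 1-indexed query keys:
        # SubDomainDnsAddress.1, SubDomainDnsAddress.2, ...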
for depth1 in range(len(SubDomainDnsAddress)):
self.add_query_param('SubDomainDnsAddress.' + str(depth1 + 1), SubDomainDnsAddress[depth1])
def get_CenOwnerId(self): # Long
return self.get_query_params().get('CenOwnerId')
def set_CenOwnerId(self, CenOwnerId): # Long
self.add_query_param('CenOwnerId', CenOwnerId)
def get_EnableInternetAccess(self): # Boolean
return self.get_query_params().get('EnableInternetAccess')
def set_EnableInternetAccess(self, EnableInternetAccess): # Boolean
self.add_query_param('EnableInternetAccess', EnableInternetAccess)
def get_SubDomainName(self): # String
return self.get_query_params().get('SubDomainName')
def set_SubDomainName(self, SubDomainName): # String
self.add_query_param('SubDomainName', SubDomainName)
def get_DomainPassword(self): # String
return self.get_query_params().get('DomainPassword')
def set_DomainPassword(self, DomainPassword): # String
self.add_query_param('DomainPassword', DomainPassword)
def get_VerifyCode(self): # String
return self.get_query_params().get('VerifyCode')
def set_VerifyCode(self, VerifyCode): # String
self.add_query_param('VerifyCode', VerifyCode)
def get_EnableAdminAccess(self): # Boolean
return self.get_query_params().get('EnableAdminAccess')
def METHOD_NAME(self, EnableAdminAccess): # Boolean
self.add_query_param('EnableAdminAccess', EnableAdminAccess)
def get_Bandwidth(self): # Integer
return self.get_query_params().get('Bandwidth')
def set_Bandwidth(self, Bandwidth): # Integer
self.add_query_param('Bandwidth', Bandwidth)
def get_DesktopAccessType(self): # String
return self.get_query_params().get('DesktopAccessType')
def set_DesktopAccessType(self, DesktopAccessType): # String
self.add_query_param('DesktopAccessType', DesktopAccessType)
def get_AdHostname(self): # String
return self.get_query_params().get('AdHostname')
def set_AdHostname(self, AdHostname): # String
self.add_query_param('AdHostname', AdHostname)
def get_DomainName(self): # String
return self.get_query_params().get('DomainName')
def set_DomainName(self, DomainName): # String
self.add_query_param('DomainName', DomainName)
def get_Specification(self): # Long
return self.get_query_params().get('Specification')
def set_Specification(self, Specification): # Long
self.add_query_param('Specification', Specification)
def get_OfficeSiteName(self): # String
return self.get_query_params().get('OfficeSiteName')
def set_OfficeSiteName(self, OfficeSiteName): # String
self.add_query_param('OfficeSiteName', OfficeSiteName)
def get_MfaEnabled(self): # Boolean
return self.get_query_params().get('MfaEnabled')
def set_MfaEnabled(self, MfaEnabled): # Boolean
self.add_query_param('MfaEnabled', MfaEnabled)
def get_DomainUserName(self): # String
return self.get_query_params().get('DomainUserName')
def set_DomainUserName(self, DomainUserName): # String
self.add_query_param('DomainUserName', DomainUserName)
def get_CidrBlock(self): # String
return self.get_query_params().get('CidrBlock')
def set_CidrBlock(self, CidrBlock): # String
self.add_query_param('CidrBlock', CidrBlock)
def get_ProtocolType(self): # String
return self.get_query_params().get('ProtocolType')
def set_ProtocolType(self, ProtocolType): # String
self.add_query_param('ProtocolType', ProtocolType)
def get_DnsAddresss(self): # RepeatList
return self.get_query_params().get('DnsAddress')
def set_DnsAddresss(self, DnsAddress): # RepeatList
for depth1 in range(len(DnsAddress)):
self.add_query_param('DnsAddress.' + str(depth1 + 1), DnsAddress[depth1]) | null |
195 | from django.contrib.contenttypes.models import ContentType
from django.utils.translation import gettext as _
from creme.creme_core.core.deletion import (
REPLACERS_MAP,
FixedValueReplacer,
SETReplacer,
)
from creme.creme_core.models import (
FakeCivility,
FakeContact,
FakeDocument,
FakeDocumentCategory,
FakeOrganisation,
FakeSector,
FakeTicket,
FakeTicketPriority,
)
from ..base import CremeTestCase
class DeletionTestCase(CremeTestCase):
def test_replacer_by_fixed_value01(self):
civ = FakeCivility.objects.create(title='Kun')
model_field = FakeContact._meta.get_field('civility')
replacer1 = FixedValueReplacer(model_field=model_field, value=civ)
self.assertEqual(model_field, replacer1.model_field)
self.assertEqual(civ, replacer1._fixed_value)
serialized = {
'ctype': ContentType.objects.get_for_model(FakeContact).natural_key(),
'field': 'civility',
'pk': civ.pk,
}
self.assertEqual(serialized, replacer1.as_dict())
replacer2 = FixedValueReplacer.from_dict(serialized)
self.assertIsInstance(replacer2, FixedValueReplacer)
self.assertEqual(model_field, replacer2.model_field)
self.assertEqual(civ, replacer2.get_value())
self.assertEqual(
_('In «{model} - {field}», replace by «{new}»').format(
model='Test Contact',
field=_('Civility'),
new=civ.title,
),
str(replacer1),
)
def test_replacer_by_fixed_value02(self):
"<None> value + other ContentType."
model_field = FakeOrganisation._meta.get_field('sector')
replacer1 = FixedValueReplacer(model_field=model_field, value=None)
serialized = {
'ctype': ContentType.objects.get_for_model(FakeOrganisation).natural_key(),
'field': 'sector',
}
self.assertEqual(serialized, replacer1.as_dict())
replacer2 = FixedValueReplacer.from_dict(serialized)
self.assertIsInstance(replacer2, FixedValueReplacer)
self.assertEqual(model_field, replacer2.model_field)
self.assertIsNone(replacer2.get_value())
self.assertEqual(
_('Empty «{model} - {field}»').format(
model='Test Organisation',
field=_('Sector'),
),
str(replacer1),
)
def test_replacer_by_fixed_value03(self):
"Explicit & implicit values."
self.assertEqual(
_('Empty «{model} - {field}»').format(
model='Test Contact',
field=_('Civility'),
),
str(FixedValueReplacer(
model_field=FakeContact._meta.get_field('civility')
)),
)
sector = FakeSector.objects.create(title='Ninja')
self.assertEqual(
_('In «{model} - {field}», replace by «{new}»').format(
model='Test Organisation',
field=_('Sector'),
new=sector.title,
),
str(FixedValueReplacer(
model_field=FakeOrganisation._meta.get_field('sector'),
value=sector,
))
)
def METHOD_NAME(self):
"ManyToMany."
cat = FakeDocumentCategory.objects.create(name='PNGs')
m2m = FakeDocument._meta.get_field('categories')
self.assertEqual(
_('In «{model} - {field}», replace by «{new}»').format(
model='Test Document',
field=_('Categories'),
new=cat.name,
),
str(FixedValueReplacer(model_field=m2m, value=cat)),
)
self.assertEqual(
_('Remove from «{model} - {field}»').format(
model='Test Document',
field=_('Categories'),
),
str(FixedValueReplacer(model_field=m2m)),
)
def test_replacer_for_SET(self):
self.assertFalse(FakeTicketPriority.objects.filter(name='Deleted'))
model_field = FakeTicket._meta.get_field('priority')
replacer1 = SETReplacer(model_field=model_field)
self.assertEqual(model_field, replacer1.model_field)
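        # get_value() lazily creates the 'Deleted' fallback row, which the
        # assertion above checked was absent.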
value = replacer1.get_value()
self.assertIsInstance(value, FakeTicketPriority)
self.assertEqual('Deleted', value.name)
serialized = {
'ctype': ContentType.objects.get_for_model(FakeTicket).natural_key(),
'field': 'priority',
}
self.assertEqual(serialized, replacer1.as_dict())
replacer2 = SETReplacer.from_dict(serialized)
self.assertIsInstance(replacer2, SETReplacer)
self.assertEqual(model_field, replacer2.model_field)
self.assertEqual(value, replacer2.get_value())
self.assertEqual(
_('In «{model} - {field}», replace by a fallback value').format(
model='Test Ticket',
field=_('Priority'),
),
str(replacer1),
)
def test_registry01(self):
"FixedValueReplacer."
sector = FakeSector.objects.first()
field1 = FakeOrganisation._meta.get_field('sector')
field2 = FakeContact._meta.get_field('sector')
replacer1 = FixedValueReplacer(model_field=field1, value=None)
replacer2 = FixedValueReplacer(model_field=field2, value=sector)
get_ct = ContentType.objects.get_for_model
serialized = [
[
'fixed_value',
{
'ctype': get_ct(FakeOrganisation).natural_key(),
'field': 'sector',
},
], [
'fixed_value',
{
'ctype': get_ct(FakeContact).natural_key(),
'field': 'sector',
'pk': sector.pk,
},
],
]
self.assertEqual(
serialized,
REPLACERS_MAP.serialize([replacer1, replacer2])
)
replacers = REPLACERS_MAP.deserialize(serialized)
self.assertIsList(replacers, length=2)
d_replacer1 = replacers[0]
self.assertIsInstance(d_replacer1, FixedValueReplacer)
self.assertEqual(field1, d_replacer1.model_field)
self.assertIsNone(d_replacer1.get_value())
d_replacer2 = replacers[1]
self.assertIsInstance(d_replacer2, FixedValueReplacer)
self.assertEqual(field2, d_replacer2.model_field)
self.assertEqual(sector, d_replacer2.get_value())
def test_registry02(self):
"SETReplacer."
field = FakeTicket._meta.get_field('priority')
replacer = SETReplacer(model_field=field)
serialized = [
[
'SET',
{
'ctype': ContentType.objects.get_for_model(FakeTicket).natural_key(),
'field': 'priority',
},
],
]
self.assertEqual(serialized, REPLACERS_MAP.serialize([replacer]))
replacers = REPLACERS_MAP.deserialize(serialized)
self.assertIsList(replacers, length=1)
d_replacer = replacers[0]
self.assertIsInstance(d_replacer, SETReplacer)
self.assertEqual(field, d_replacer.model_field) | null |
196 | import shutil
import os
import stat
import bpy
import arm.utils
from arm import log
if arm.is_reload(__name__):
log = arm.reload_module(log)
arm.utils = arm.reload_module(arm.utils)
else:
arm.enable_reload(__name__)
assets = []
reserved_names = ['return.']
khafile_params = []
khafile_defs = []
khafile_defs_last = []
embedded_data = []
shaders = []
shaders_last = []
shaders_external = []
shader_datas = []
shader_passes = []
shader_passes_assets = {}
shader_cons = {}
def reset():
global assets
global khafile_params
global khafile_defs
global khafile_defs_last
global embedded_data
global shaders
global shaders_last
global shaders_external
global shader_datas
global shader_passes
global shader_cons
assets = []
khafile_params = []
khafile_defs_last = khafile_defs
khafile_defs = []
embedded_data = []
shaders_last = shaders
shaders = []
shaders_external = []
shader_datas = []
shader_passes = []
shader_cons = {}
shader_cons['mesh_vert'] = []
shader_cons['depth_vert'] = []
shader_cons['depth_frag'] = []
shader_cons['voxel_vert'] = []
shader_cons['voxel_frag'] = []
shader_cons['voxel_geom'] = []
def add(asset_file):
global assets
# Asset already exists, do nothing
if asset_file in assets:
return
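    # Skip files whose basename is already registered: assets appear to be
    # copied into a flat output directory, where equal basenames would collide.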
asset_file_base = os.path.basename(asset_file)
for f in assets:
f_file_base = os.path.basename(f)
if f_file_base == asset_file_base:
return
assets.append(asset_file)
# Reserved file name
for f in reserved_names:
if f in asset_file:
log.warn(f'File "{asset_file}" contains reserved keyword, this will break C++ builds!')
def add_khafile_def(d):
global khafile_defs
if d not in khafile_defs:
khafile_defs.append(d)
def add_khafile_param(p):
global khafile_params
if p not in khafile_params:
khafile_params.append(p)
def add_embedded_data(file):
global embedded_data
if file not in embedded_data:
embedded_data.append(file)
def add_shader(file):
global shaders
global shaders_last
if file not in shaders:
shaders.append(file)
def add_shader_data(file):
global shader_datas
if file not in shader_datas:
shader_datas.append(file)
def add_shader_pass(data_name):
global shader_passes
    # Shader data for passes are written into a single shader_datas.arm file
add_shader_data(arm.utils.get_fp_build() + '/compiled/Shaders/shader_datas.arm')
if data_name not in shader_passes:
shader_passes.append(data_name)
def add_shader_external(file):
global shaders_external
shaders_external.append(file)
name = file.split('/')[-1].split('\\')[-1]
add_shader(arm.utils.get_fp_build() + '/compiled/Shaders/' + name)
invalidate_enabled = True  # set to False to disable cache invalidation during the build process
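# shutil.rmtree onerror hook: clear the read-only bit (common on Windows
# checkouts) and retry the failed operation.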
def remove_readonly(func, path, excinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
def invalidate_shader_cache(self, context):
# compiled.inc changed, recompile all shaders next time
global invalidate_enabled
if invalidate_enabled is False:
return
fp = arm.utils.get_fp_build()
if os.path.isdir(fp + '/compiled/Shaders'):
shutil.rmtree(fp + '/compiled/Shaders', onerror=remove_readonly)
if os.path.isdir(fp + '/debug/html5-resources'):
shutil.rmtree(fp + '/debug/html5-resources', onerror=remove_readonly)
if os.path.isdir(fp + '/krom-resources'):
shutil.rmtree(fp + '/krom-resources', onerror=remove_readonly)
if os.path.isdir(fp + '/debug/krom-resources'):
shutil.rmtree(fp + '/debug/krom-resources', onerror=remove_readonly)
if os.path.isdir(fp + '/windows-resources'):
shutil.rmtree(fp + '/windows-resources', onerror=remove_readonly)
if os.path.isdir(fp + '/linux-resources'):
shutil.rmtree(fp + '/linux-resources', onerror=remove_readonly)
if os.path.isdir(fp + '/osx-resources'):
shutil.rmtree(fp + '/osx-resources', onerror=remove_readonly)
def invalidate_compiled_data(self, context):
global invalidate_enabled
if invalidate_enabled is False:
return
fp = arm.utils.get_fp_build()
if os.path.isdir(fp + '/compiled'):
shutil.rmtree(fp + '/compiled', onerror=remove_readonly)
def invalidate_mesh_data(self, context):
fp = arm.utils.get_fp_build()
if os.path.isdir(fp + '/compiled/Assets/meshes'):
shutil.rmtree(fp + '/compiled/Assets/meshes', onerror=remove_readonly)
def invalidate_envmap_data(self, context):
fp = arm.utils.get_fp_build()
if os.path.isdir(fp + '/compiled/Assets/envmaps'):
shutil.rmtree(fp + '/compiled/Assets/envmaps', onerror=remove_readonly)
def invalidate_unpacked_data(self, context):
fp = arm.utils.get_fp_build()
if os.path.isdir(fp + '/compiled/Assets/unpacked'):
shutil.rmtree(fp + '/compiled/Assets/unpacked', onerror=remove_readonly)
def METHOD_NAME(self, context):
if context.object is None or context.object.data is None:
return
context.object.data.arm_cached = False
def invalidate_instance_cache(self, context):
if context.object is None or context.object.data is None:
return
METHOD_NAME(self, context)
for slot in context.object.material_slots:
slot.material.arm_cached = False
def invalidate_compiler_cache(self, context):
bpy.data.worlds['Arm'].arm_recompile = True
def shader_equal(sh, ar, shtype):
# Merge equal shaders
for e in ar:
if sh.is_equal(e):
sh.context.data[shtype] = e.context.data[shtype]
sh.is_linked = True
return
ar.append(sh)
def vs_equal(c, ar):
shader_equal(c.vert, ar, 'vertex_shader')
def fs_equal(c, ar):
shader_equal(c.frag, ar, 'fragment_shader')
def gs_equal(c, ar):
shader_equal(c.geom, ar, 'geometry_shader')
def tcs_equal(c, ar):
shader_equal(c.tesc, ar, 'tesscontrol_shader')
def tes_equal(c, ar):
shader_equal(c.tese, ar, 'tesseval_shader') | null |
197 | #!/usr/bin/env python3
#
# Copyright (c) 2015 - 2023, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import sys
import unittest
import subprocess
import io
import json
import geopmpy.agent
import geopmpy.launcher
import geopmdpy.topo
from integration.test import geopm_test_launcher
from integration.test import util
@util.skip_unless_msr_access('Skipped pending resolution of issue #2017.')
@util.skip_unless_no_service_or_root()
@util.skip_unless_do_launch()
@util.skip_unless_batch()
class TestIntegrationEnforcePolicy(unittest.TestCase):
"""Test the static policy enforcement feature of the agent interface
"""
@classmethod
def setUpClass(cls):
script_dir = os.path.dirname(os.path.realpath(__file__))
cls._app_exec_path = os.path.join(script_dir, '.libs', 'test_enforce_policy')
# note: if /etc/geopm/environment-*.json sets the same variables, this
# test will not work.
for env_file in ['/etc/geopm/environment-default.json',
'/etc/geopm/environment-override.json']:
try:
stdout = io.StringIO()
stderr = io.StringIO()
geopm_test_launcher.allocation_node_test('dummy -- cat {}'.format(env_file),
stdout, stderr)
file_contents = stdout.getvalue()
if "GEOPM_POLICY" in file_contents:
raise RuntimeError("{} contains GEOPM_POLICY".format(env_file))
if "GEOPM_AGENT" in file_contents:
raise RuntimeError("{} contains GEOPM_AGENT".format(env_file))
except subprocess.CalledProcessError:
pass
def setUp(self):
self._old_max_freq = geopm_test_launcher.geopmread("MSR::PERF_CTL:FREQ board 0")
self._old_max_power = geopm_test_launcher.geopmread("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0")
# make sure controls are at default
self._max_freq = geopm_test_launcher.geopmread("CPU_FREQUENCY_MAX_AVAIL board 0")
geopm_test_launcher.geopmwrite("CPU_FREQUENCY_MAX_CONTROL board 0 {}".format(self._max_freq))
self._tdp_power = geopm_test_launcher.geopmread("CPU_POWER_LIMIT_DEFAULT package 0")
geopm_test_launcher.geopmwrite("CPU_POWER_LIMIT_CONTROL board 0 {}".format(self._tdp_power))
self._sticker_freq = geopm_test_launcher.geopmread("CPU_FREQUENCY_STICKER board 0")
self._step_freq = geopm_test_launcher.geopmread("CPU_FREQUENCY_STEP board 0")
self._stdout = None
self._stderr = None
def tearDown(self):
geopm_test_launcher.geopmwrite("MSR::PERF_CTL:FREQ board 0 {}".format(self._old_max_freq))
geopm_test_launcher.geopmwrite("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0 {}".format(self._old_max_power))
def run_tool(self, agent_name, policy_setting):
test_name = 'test_enforce_policy'
self._report_path = test_name + '.report'
self._agent_conf_path = test_name + '-agent-config.json'
agent_conf = geopmpy.agent.AgentConf(self._agent_conf_path,
agent_name,
policy_setting)
agent_conf.write()
# set environment in launch using geopmlaunch options instead of
# files from /etc/geopm
environ = os.environ.copy()
environ.update({
"GEOPM_POLICY": agent_conf.get_path(),
"GEOPM_AGENT": agent_name
})
# detect correct launcher type and set options
# but run without geopmlaunch
launcher_type = geopm_test_launcher.detect_launcher()
argv = ["dummy", launcher_type, "--geopm-ctl-disable"]
launcher = geopmpy.launcher.Factory().create(argv, num_rank=1, num_node=1)
num_rank = launcher.num_rank_option(False)
num_node = launcher.num_node_option(False)
argv = [launcher_type] + num_rank + num_node + [self._app_exec_path]
prog = subprocess.Popen(argv,
env=environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self._stdout, self._stderr = prog.communicate(timeout=10)
except TypeError:
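            # Python 2's Popen.communicate() has no timeout argument and
            # raises TypeError; retry without it.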
self._stdout, self._stderr = prog.communicate()
def test_monitor_no_policy(self):
# check that the monitor doesn't change anything
start_freq = geopm_test_launcher.geopmread("MSR::PERF_CTL:FREQ board 0")
start_power = geopm_test_launcher.geopmread("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0")
self.run_tool('monitor', {})
end_freq = geopm_test_launcher.geopmread("MSR::PERF_CTL:FREQ board 0")
end_power = geopm_test_launcher.geopmread("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0")
self.assertEqual(start_freq, end_freq)
self.assertEqual(start_power, end_power)
def test_freq_map_max_freq(self):
test_freq = self._sticker_freq - 2 * self._step_freq
current_freq = geopm_test_launcher.geopmread("MSR::PERF_CTL:FREQ board 0")
self.assertNotEqual(test_freq, current_freq)
self.run_tool('frequency_map', {'FREQ_CPU_DEFAULT': test_freq})
current_freq = geopm_test_launcher.geopmread("MSR::PERF_CTL:FREQ board 0")
self.assertEqual(test_freq, current_freq)
def test_power_governor_power_cap(self):
num_pkg = geopmdpy.topo.num_domain('package')
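        # Request a budget 20 W below the combined package TDP so it differs
        # from the current limit and the write is observable.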
test_power = self._tdp_power * num_pkg - 20
current_power = geopm_test_launcher.geopmread("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0")
self.assertNotEqual(test_power, current_power)
self.run_tool('power_governor', {'power_budget': test_power})
current_power = geopm_test_launcher.geopmread("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0")
self.assertEqual(test_power, current_power)
def METHOD_NAME(self):
num_pkg = geopmdpy.topo.num_domain('package')
test_power = self._tdp_power * num_pkg - 20
current_power = geopm_test_launcher.geopmread("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0")
self.assertNotEqual(test_power, current_power)
self.run_tool('power_balancer', {'power_budget': test_power})
current_power = geopm_test_launcher.geopmread("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0")
self.assertEqual(test_power, current_power)
if __name__ == '__main__':
unittest.main() | null |
198 | """Behavior (validation, encoding, and transformations) for vobjects."""
from .base import (
Component,
ContentLine,
NativeError,
ValidateError,
VObjectError,
defaultSerialize,
)
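# A minimal subclassing sketch (illustrative only; registerBehavior lives in
# vobject.base and is referenced in the docstring below):
#
#     class VEvent(Behavior):
#         name = 'VEVENT'
#         isComponent = True
#         knownChildren = {'DTSTART': (1, 1, None)}
#
#     registerBehavior(VEvent)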
class Behavior:
"""Abstract class to describe vobject options, requirements and encodings.
Behaviors are used for root components like VCALENDAR, for subcomponents
like VEVENT, and for individual lines in components.
Behavior subclasses are not meant to be instantiated, all methods should
be classmethods.
@cvar name:
The uppercase name of the object described by the class, or a generic
name if the class defines behavior for many objects.
@cvar description:
A brief excerpt from the RFC explaining the function of the component or
line.
@cvar versionString:
The string associated with the component, for instance, 2.0 if there's a
line like VERSION:2.0, an empty string otherwise.
@cvar knownChildren:
A dictionary with uppercased component/property names as keys and a
        tuple (min, max, id) as value, where id is the id used by
        L{registerBehavior}, and min/max give the least and greatest number of
        times this child may occur. None denotes no maximum or no id.
@cvar quotedPrintable:
A boolean describing whether the object should be encoded and decoded
using quoted printable line folding and character escaping.
@cvar defaultBehavior:
Behavior to apply to ContentLine children when no behavior is found.
@cvar hasNative:
A boolean describing whether the object can be transformed into a more
Pythonic object.
@cvar isComponent:
A boolean, True if the object should be a Component.
@cvar sortFirst:
The lower-case list of children which should come first when sorting.
@cvar allowGroup:
Whether or not vCard style group prefixes are allowed.
"""
name = ''
description = ''
versionString = ''
knownChildren = {}
quotedPrintable = False
defaultBehavior = None
hasNative = False
isComponent = False
allowGroup = False
forceUTC = False
sortFirst = []
def __init__(self):
raise VObjectError("Behavior subclasses are not meant to be instantiated")
@classmethod
def validate(cls, obj, raiseException=False, complainUnrecognized=False):
"""Check if the object satisfies this behavior's requirements.
@param obj:
The L{ContentLine<base.ContentLine>} or
L{Component<base.Component>} to be validated.
@param raiseException:
If True, raise a L{base.ValidateError} on validation failure.
Otherwise return a boolean.
@param complainUnrecognized:
            If True, fail to validate if an unrecognized parameter or child is
found. Otherwise log the lack of recognition.
"""
if not cls.allowGroup and obj.group is not None:
raise VObjectError(str(obj) + " has a group, but this object doesn't support groups")
if isinstance(obj, ContentLine):
return cls.lineValidate(obj, raiseException, complainUnrecognized)
elif isinstance(obj, Component):
count = {}
for child in obj.getChildren():
if not child.validate(raiseException, complainUnrecognized):
return False
name = child.name.upper()
count[name] = count.get(name, 0) + 1
for key, val in cls.knownChildren.items():
if count.get(key, 0) < val[0]:
if raiseException:
raise ValidateError(
f'{cls.name} components must contain at least {val[0]} {key}'
)
return False
if val[1] and count.get(key, 0) > val[1]:
if raiseException:
raise ValidateError(
f'{cls.name} components cannot contain more than {val[1]} {key}'
)
return False
return True
raise VObjectError(f"{obj} is not a Component or ContentLine")
@classmethod
def lineValidate(cls, line, raiseException, complainUnrecognized):
"""Examine a line's parameters and values, return True if valid."""
return True
@classmethod
def decode(cls, line):
if line.encoded:
line.encoded = 0
@classmethod
def encode(cls, line):
if not line.encoded:
line.encoded = 1
@classmethod
def transformToNative(cls, obj):
"""Turn a ContentLine or Component into a Python-native representation.
If appropriate, turn dates or datetime strings into Python objects.
Components containing VTIMEZONEs turn into VtimezoneComponents.
"""
return obj
@classmethod
def METHOD_NAME(cls, obj):
"""Inverse of transformToNative."""
raise NativeError("No transformFromNative defined")
@classmethod
def generateImplicitParameters(cls, obj):
"""Generate any required information that don't yet exist."""
pass
@classmethod
def serialize(cls, obj, buf, lineLength, validate=True):
"""Set implicit parameters, do encoding, return unicode string.
If validate is True, raise VObjectError if the line doesn't validate
after implicit parameters are generated.
Default is to call base.defaultSerialize.
"""
cls.generateImplicitParameters(obj)
if validate:
cls.validate(obj, raiseException=True)
if obj.isNative:
transformed = obj.METHOD_NAME()
undoTransform = True
else:
transformed = obj
undoTransform = False
out = defaultSerialize(transformed, buf, lineLength)
if undoTransform:
obj.transformToNative()
return out | null |
199 | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Any, Dict, Optional
from pydantic import Extra, BaseModel, Field
from lightly.openapi_generated.swagger_client.models.docker_worker_type import DockerWorkerType
from lightly.openapi_generated.swagger_client.models.selection_config import SelectionConfig
class DockerWorkerConfig(BaseModel):
"""
DockerWorkerConfig
"""
worker_type: DockerWorkerType = Field(..., alias="workerType")
docker: Optional[Dict[str, Any]] = Field(None, description="docker run configurations, keys should match the structure of https://github.com/lightly-ai/lightly-core/blob/develop/onprem-docker/lightly_worker/src/lightly_worker/resources/docker/docker.yaml ")
lightly: Optional[Dict[str, Any]] = Field(None, description="lightly configurations which are passed to a docker run, keys should match structure of https://github.com/lightly-ai/lightly/blob/master/lightly/cli/config/config.yaml ")
selection: Optional[SelectionConfig] = None
__properties = ["workerType", "docker", "lightly", "selection"]
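    # __properties drives the strict unknown-field check in the from-dict
    # constructor below.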
class Config:
"""Pydantic configuration"""
allow_population_by_field_name = True
validate_assignment = True
use_enum_values = True
extra = Extra.forbid
def to_str(self, by_alias: bool = False) -> str:
"""Returns the string representation of the model"""
return pprint.pformat(self.dict(by_alias=by_alias))
def to_json(self, by_alias: bool = False) -> str:
"""Returns the JSON representation of the model"""
return json.dumps(self.to_dict(by_alias=by_alias))
@classmethod
def from_json(cls, json_str: str) -> DockerWorkerConfig:
"""Create an instance of DockerWorkerConfig from a JSON string"""
return cls.METHOD_NAME(json.loads(json_str))
def to_dict(self, by_alias: bool = False):
"""Returns the dictionary representation of the model"""
_dict = self.dict(by_alias=by_alias,
exclude={
},
exclude_none=True)
# override the default output from pydantic by calling `to_dict()` of selection
if self.selection:
_dict['selection' if by_alias else 'selection'] = self.selection.to_dict(by_alias=by_alias)
# set to None if docker (nullable) is None
# and __fields_set__ contains the field
if self.docker is None and "docker" in self.__fields_set__:
_dict['docker' if by_alias else 'docker'] = None
# set to None if lightly (nullable) is None
# and __fields_set__ contains the field
if self.lightly is None and "lightly" in self.__fields_set__:
_dict['lightly' if by_alias else 'lightly'] = None
return _dict
@classmethod
def METHOD_NAME(cls, obj: dict) -> DockerWorkerConfig:
"""Create an instance of DockerWorkerConfig from a dict"""
if obj is None:
return None
if not isinstance(obj, dict):
return DockerWorkerConfig.parse_obj(obj)
# raise errors for additional fields in the input
for _key in obj.keys():
if _key not in cls.__properties:
raise ValueError("Error due to additional fields (not defined in DockerWorkerConfig) in the input: " + str(obj))
_obj = DockerWorkerConfig.parse_obj({
"worker_type": obj.get("workerType"),
"docker": obj.get("docker"),
"lightly": obj.get("lightly"),
"selection": SelectionConfig.METHOD_NAME(obj.get("selection")) if obj.get("selection") is not None else None
})
return _obj
| null |