id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses 1 value) |
---|---|---|
/sbswebsite-0.0.23.tar.gz/sbswebsite-0.0.23/website/update_config.py | from ConfigParser import SafeConfigParser
import os.path
import sys
import time
default_config = SafeConfigParser()
user_config = SafeConfigParser()
default_config.read('config.cfg.default')
#TODO Add code to verify file exists, and only read if the file exists
if not os.path.isfile('config.cfg'):
default_config.write(open('config.cfg','w+'))
print "No existing config file, new file created"
print "Please make sure to update the existing file"
sys.exit(0)
user_config.read('config.cfg')
timestamp = '.' + str(int(time.time()*1000))
backup_file = 'config.cfg.user' + timestamp
log_file = 'log' + timestamp
log = open(log_file,'w+')
user_config.write(open(backup_file,'w+'))
new_sections = [x for x in default_config.sections() if x not in user_config.sections()]
deprecated_sections = [x for x in user_config.sections() if x not in default_config.sections()]
existing_sections = [x for x in user_config.sections() if x in default_config.sections()]
log.write("The following sections have been deprecated\n")
for deprecate in deprecated_sections:
log.write("\t" + deprecate + "\n")
user_config.remove_section(deprecate)
log.write("The following sections have been added:\n")
for new in new_sections:
log.write("\t" + new + "\n")
for opt in default_config.options(new):
user_config.set(new,opt,default_config.get(new,opt))
log.write("The following sections have been modified:\n")
for existing in existing_sections:
log.write("\t" + existing + "\n")
deprecated_options = [x for x in user_config.options(existing) if x not in default_config.options(existing)]
new_options = [x for x in default_config.options(existing) if x not in user_config.options(existing)]
for deprecate in deprecated_options:
log.write("\t\tDEPRECATED: " + deprecate + "\n")
user_config.remove_option(existing,deprecate)
for new in new_options:
log.write("\t\tNEW: " + new + "\n")
user_config.set(existing,new,default_config.get(existing,new))
user_config.write(open('config.cfg','w'))
log.close() | PypiClean |
/atooms-pp-3.3.3.tar.gz/atooms-pp-3.3.3/atooms/postprocessing/partial.py | import logging
from .helpers import filter_species
from . import core
_log = logging.getLogger(__name__)
# TODO: add output_path to all interfaces
class Partial(object):
def __init__(self, corr_cls, species, *args, **kwargs):
"""The first positional argument must be the trajectory instance."""
# Instantiate correlation objects
# with args passed upon construction
self.partial = {}
self.nbodies = corr_cls.nbodies
self.species = species
self._output_path = core.pp_output_path
if self.nbodies == 1:
for i in range(len(self.species)):
isp = self.species[i]
self.partial[isp] = corr_cls(*args, **kwargs)
self.partial[isp].add_filter(filter_species, isp)
self.partial[isp].tag = str(isp)
self.partial[isp].tag_description = 'species %s' % isp
elif self.nbodies == 2:
for i in range(len(self.species)):
for j in range(len(self.species)):
isp = self.species[i]
jsp = self.species[j]
self.partial[(isp, jsp)] = corr_cls(*args, **kwargs)
self.partial[(isp, jsp)].add_filter(filter_species, isp)
# Slight optimization: avoid filtering twice when isp==jsp
if isp != jsp:
self.partial[(isp, jsp)].add_filter(filter_species, jsp)
self.partial[(isp, jsp)].tag = '%s-%s' % (isp, jsp)
self.partial[(isp, jsp)].tag_description = 'species pair %s-%s' % (isp, jsp)
@property
def output_path(self):
return self._output_path
@output_path.setter
def output_path(self, path):
self._output_path = path
for key in self.partial:
self.partial[key].output_path = path
def add_weight(self, trajectory=None, field=None, fluctuations=False):
for key in self.partial:
self.partial[key].add_weight(trajectory, field, fluctuations)
def need_update(self):
need = False
for partial in self.partial.values():
if partial.need_update():
need = True
break
return need
def compute(self):
if self.nbodies == 1:
for i in range(len(self.species)):
isp = self.species[i]
self.partial[isp].compute()
elif self.nbodies == 2:
for i in range(len(self.species)):
for j in range(len(self.species)):
isp = self.species[i]
jsp = self.species[j]
if j >= i or not self.partial[(isp, jsp)]._symmetric:
self.partial[(isp, jsp)].compute()
else:
# The isp-jsp has been already calculated
self.partial[(isp, jsp)].grid = self.partial[(jsp, isp)].grid
self.partial[(isp, jsp)].value = self.partial[(jsp, isp)].value
self.partial[(isp, jsp)].analysis = self.partial[(jsp, isp)].analysis
def write(self):
for partial in self.partial.values():
partial.write()
def do(self, update=False):
if update and not self.need_update():
for partial in self.partial.values():
partial.read()
return
self.compute()
for partial in self.partial.values():
try:
partial.analyze()
except ImportError as e:
_log.warn('Could not analyze due to missing modules, continuing...')
_log.warn(e)
partial.write() | PypiClean |
/sam_ml_py-0.13.0-py3-none-any.whl/sam_ml/models/main_pipeline.py | import copy
import pandas as pd
from sam_ml.config import setup_logger
from sam_ml.data.preprocessing import (
Embeddings_builder,
Sampler,
SamplerPipeline,
Scaler,
Selector,
)
from .main_classifier import Classifier
from .RandomForestClassifier import RFC
logger = setup_logger(__name__)
class Pipeline(Classifier):
""" classifier pipeline class """
def __init__(self, vectorizer: str | Embeddings_builder | None = None, scaler: str | Scaler | None = None, selector: str | tuple[str, int] | Selector | None = None, sampler: str | Sampler | SamplerPipeline | None = None, model: Classifier = RFC(), model_name: str = "pipe"):
"""
@params:
vectorizer: type of "data.embeddings.Embeddings_builder" or Embeddings_builder class object for automatic string column vectorizing (None for no vectorizing)
scaler: type of "data.scaler.Scaler" or Scaler class object for scaling the data (None for no scaling)
selector: type of "data.feature_selection.Selector" or Selector class object for feature selection (None for no selecting)
sampling: type of "data.sampling.Sampler" or Sampler class object for sampling the train data (None for no sampling)
model: Classifier class object
model_name: name of the model
"""
if issubclass(type(model), Classifier):
super().__init__(model_object=model.model, model_name=model_name, model_type=model.model_type, grid=model.grid)
# Inherit methods and attributes from model
for attribute_name in dir(model):
attribute_value = getattr(model, attribute_name)
# Check if the attribute is a method or a variable (excluding private attributes)
if callable(attribute_value) and not attribute_name.startswith("__"):
if not hasattr(self, attribute_name):
setattr(self, attribute_name, attribute_value)
elif not attribute_name.startswith("__"):
if not hasattr(self, attribute_name):
self.__dict__[attribute_name] = attribute_value
self.__classifier = model
else:
raise ValueError(f"wrong input '{model}' for model")
if vectorizer in Embeddings_builder.params()["vec"]:
self.vectorizer = Embeddings_builder(algorithm=vectorizer)
elif type(vectorizer) == Embeddings_builder or vectorizer is None:
self.vectorizer = vectorizer
else:
raise ValueError(f"wrong input '{vectorizer}' for vectorizer")
if scaler in Scaler.params()["scaler"]:
self.scaler = Scaler(algorithm=scaler)
elif type(scaler) == Scaler or scaler is None:
self.scaler = scaler
else:
raise ValueError(f"wrong input '{scaler}' for scaler")
if selector in Selector.params()["algorithm"]:
self.selector = Selector(algorithm=selector)
elif type(selector) == tuple and len(selector) == 2:
if selector[0] in Selector.params()["algorithm"] and type(selector[1])==int:
if selector[1] > 0:
self.selector = Selector(algorithm=selector[0], num_features=selector[1])
else:
raise ValueError(f"wrong input '{selector}' for selector -> integer in tuple has to be greater 0")
else:
raise ValueError(f"wrong input '{selector}' for selector -> tuple incorrect")
elif type(selector) == Selector or selector is None:
self.selector = selector
else:
raise ValueError(f"wrong input '{selector}' for selector")
if sampler in Sampler.params()["algorithm"]:
self.sampler = Sampler(algorithm=sampler)
elif type(sampler) ==str and SamplerPipeline.check_is_valid_algorithm(sampler):
self.sampler = SamplerPipeline(algorithm=sampler)
elif type(sampler) in (Sampler, SamplerPipeline) or sampler is None:
self.sampler = sampler
else:
raise ValueError(f"wrong input '{sampler}' for sampler")
self.vectorizer_dict: dict[str, Embeddings_builder] = {}
# keep track if model was trained for warm_start
self._data_classes_trained: bool = False
def __repr__(self) -> str:
params: str = ""
for step in self.steps:
params += step[0]+"="+step[1].__str__()+", "
params += f"model_name='{self.model_name}'"
return f"Pipeline({params})"
@property
def steps(self) -> list[tuple[str, any]]:
return [("vectorizer", self.vectorizer), ("scaler", self.scaler), ("selector", self.selector), ("sampler", self.sampler), ("model", self.__classifier)]
def __auto_vectorizing(self, X: pd.DataFrame, train_on: bool = True) -> pd.DataFrame:
""" detects string columns, creates a vectorizer for each, and vectorizes them """
if train_on:
X = X.convert_dtypes()
string_columns = list(X.select_dtypes(include="string").columns)
self._string_columns = string_columns
self.vectorizer_dict = dict(zip(self._string_columns, [copy.deepcopy(self.vectorizer) for i in range(len(string_columns))]))
for col in self._string_columns:
X = pd.concat([X, self.vectorizer_dict[col].vectorize(X[col], train_on=train_on)], axis=1)
X_vec = X.drop(columns=self._string_columns)
return X_vec
def __data_prepare(self, X: pd.DataFrame, y: pd.Series, train_on: bool = True) -> tuple[pd.DataFrame, pd.Series]:
""" runs data class objects on data to prepare them for the model """
if self.vectorizer is not None:
X = self.__auto_vectorizing(X, train_on=train_on)
if self.scaler is not None:
X = self.scaler.scale(X, train_on=train_on)
if self.selector is not None:
X = self.selector.select(X, y, train_on=train_on)
if self.sampler is not None and train_on:
X, y = self.sampler.sample(X, y)
self._data_classes_trained = True
return X, y
def fit(self, x_train: pd.DataFrame, y_train: pd.Series, **kwargs):
x_train_pre, y_train_pre = self.__data_prepare(x_train, y_train, train_on=True)
self.feature_names = list(x_train_pre.columns)
return super().fit(x_train_pre, y_train_pre, **kwargs)
def fit_warm_start(self, x_train: pd.DataFrame, y_train: pd.Series, **kwargs):
x_train_pre, y_train_pre = self.__data_prepare(x_train, y_train, train_on = not self._data_classes_trained)
self.feature_names = list(x_train_pre.columns)
return super().fit(x_train_pre, y_train_pre, **kwargs)
def predict(self, x_test: pd.DataFrame) -> list:
x_test_pre, _ = self.__data_prepare(x_test, None, train_on=False)
return super().predict(x_test_pre)
def get_params(self, deep: bool = True) -> dict[str, any]:
return dict(self.steps) | PypiClean |
/shotstack-sdk-0.2.6.tar.gz/shotstack-sdk-0.2.6/shotstack_sdk/model/skew_transformation.py | import re # noqa: F401
import sys # noqa: F401
from shotstack_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from shotstack_sdk.exceptions import ApiAttributeError
class SkewTransformation(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'x': (float,), # noqa: E501
'y': (float,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'x': 'x', # noqa: E501
'y': 'y', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""SkewTransformation - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
x (float): Skew the clip along its x axis. [optional] # noqa: E501
y (float): Skew the clip along its y axis. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""SkewTransformation - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
x (float): Skew the clip along its x axis. [optional] # noqa: E501
y (float): Skew the clip along its y axis. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | PypiClean |
/nb_hdr_plotter-0.1.2-py3-none-any.whl/nb_hdr_plotter/hdr_manipulation.py | import datetime
from functools import reduce
from hdrh.log import HistogramLogReader
from hdrh.histogram import HdrHistogram
# CONSTANTS
# 'values' stored in histograms are in ns, we need ms
VALUE_FACTOR = 1.0e6
# Loader
def loadHdrSlices(filename):
slices = []
baseHistogram = None
lReader = HistogramLogReader(filename, baseHistogram)
while True:
tSlice = lReader.get_next_interval_histogram()
if tSlice is None:
break
else:
slices.append(tSlice)
#
return slices
# Single-slice functions
def sliceStartTimestamp(slice):
return slice.start_time_stamp_msec
def sliceEndTimestamp(slice):
return slice.end_time_stamp_msec
def sliceMaxValue(slice, rawFlag):
if rawFlag:
return slice.max_value
else:
return slice.max_value / VALUE_FACTOR
def sliceMinValue(slice, rawFlag):
if rawFlag:
return slice.min_value
else:
return slice.min_value / VALUE_FACTOR
def sliceValueCount(slice):
return slice.total_count
# Slice-list functions
def slicesStartTimestamp(slices):
earliestMsec = min(sliceStartTimestamp(sl) for sl in slices)
return earliestMsec
def slicesEndTimestamp(slices):
latestMsec = max(sliceEndTimestamp(sl) for sl in slices)
return latestMsec
def slicesMinValue(slices, rawFlag):
return min(sliceMinValue(sl, rawFlag=rawFlag) for sl in slices)
def slicesMaxValue(slices, rawFlag):
return max(sliceMaxValue(sl, rawFlag=rawFlag) for sl in slices)
def slicesCountNonempty(slices):
return sum(1 if sliceValueCount(sl) > 0 else 0 for sl in slices)
def slicesValueCount(slices):
return sum(sliceValueCount(sl) for sl in slices)
# Utilities
def timestampToDate(tstamp):
return datetime.datetime.fromtimestamp(tstamp / 1000.0)
def valueUnitName(rawFlag):
# used to properly label histo-value units returned from other functions
return "RU" if rawFlag else "ms"
def aggregateSlices(slices, sigFigures):
# in this case we always stay on the 'raw' units as we are working
# under the hood, merging histograms.
metricMax = slicesMaxValue(slices, rawFlag=True)
fullHistogram = HdrHistogram(1, int(1 + metricMax), sigFigures)
reduce(lambda _, b: fullHistogram.add(b), slices, None)
return fullHistogram
# Histogram functions
def histogramGetValueAtPercentile(histogram, percentile, rawFlag):
if rawFlag:
return histogram.get_value_at_percentile(percentile)
else:
# in the histogram we have ns, we want to make them into ms
return histogram.get_value_at_percentile(percentile) / VALUE_FACTOR
# Extraction for plots
def normalizedDistribution(histogram, x_incr, max_percentile, rawFlag):
vf = 1.0 if rawFlag else VALUE_FACTOR
# NOTE: x_incr is expected to be passed in ms if not rawFlag
# in any case here this is made into the 'raw' unit as found in the histo:
x_incr_rw = x_incr * vf
if sliceValueCount(histogram) > 0:
cursor = histogram.get_linear_iterator(value_units_per_bucket=x_incr_rw)
xs0, ys0 = zip(
*(
(
0.5 * (step.value_iterated_from + step.value_iterated_to),
step.count_added_in_this_iter_step,
)
for step in cursor
if step.percentile <= max_percentile
)
)
#
xs = [x / vf for x in xs0]
# integral must be == 1 for ease of comparisons:
ys = [y / (histogram.total_count * x_incr) for y in ys0]
#
return xs, ys
else:
return [], [] | PypiClean |
/dataone.util-3.5.2-py3-none-any.whl/d1_util/xml_apply_xslt.py |
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Apply XSLT transform to XML document.
This is an example on how to use the DataONE Science Metadata library for Python. It
shows how to:
- Deserialize, process and serialize XML docs.
- Apply an XSLT transform.
- Display or save the resulting XML doc.
"""
import argparse
import logging
import d1_scimeta.util
import d1_client.command_line
log = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("xslt_path", help="Path to XSLT file to apply")
parser.add_argument("xml_path", help="Path to XML file to process")
parser.add_argument("--update", action="store_true", help="Update the XML file")
parser.add_argument("--debug", action="store_true", help="Debug level logging")
args = parser.parse_args()
d1_client.command_line.log_setup(is_debug=args.debug)
xml_tree = d1_scimeta.util.load_xml_file_to_tree(args.xml_path)
proc_xml_tree = d1_scimeta.util.apply_xslt_transform(xml_tree, args.xslt_path)
d1_scimeta.util.dump_pretty_tree(
proc_xml_tree, "Result of XSLT processing", log.info
)
if args.update:
d1_scimeta.util.save_tree_to_file(proc_xml_tree, args.xml_path)
def _log(msg, indent=0, log_=log.info, extra_indent=False, extra_line=False):
if extra_line:
log_("")
log_("{}{}".format(" " * (indent + (1 if extra_indent else 0)), msg))
class ResolveError(Exception):
pass
if __name__ == "__main__":
main() | PypiClean |
/bmw-lobster-monolithic-0.9.12.tar.gz/bmw-lobster-monolithic-0.9.12/lobster/tools/json/json.py |
import sys
import os.path
import json
from pprint import pprint
from lobster.tool import LOBSTER_Per_File_Tool
from lobster.items import Tracing_Tag, Activity
from lobster.location import File_Reference
class Malformed_Input(Exception):
def __init__(self, msg, data):
super().__init__(msg)
self.msg = msg
self.data = data
def get_item(root, path, required):
assert isinstance(path, str)
assert isinstance(required, bool)
if path == "":
return root
if "." in path:
field, tail = path.split(".", 1)
else:
field = path
tail = ""
if isinstance(root, dict):
if field in root:
return get_item(root[field], tail, required)
elif required:
raise Malformed_Input("object does not contain %s" % field,
root)
else:
return None
elif required:
raise Malformed_Input("not an object", root)
else:
return None
def syn_test_name(file_name):
components = []
head = os.path.dirname(file_name)
while True:
head, tail = os.path.split(head)
components = [tail] + components
if not head:
break
components.append(os.path.basename(file_name).replace(".json", ""))
components = [item
for item in components
if item and item != "."]
return ".".join(components)
class LOBSTER_Json(LOBSTER_Per_File_Tool):
def __init__(self):
super().__init__(
name = "json",
description = "Extract tracing data from JSON files.",
extensions = ["json"],
official = True)
self.add_argument("--test-list",
default = "",
help = ("Member name indicator resulting in a"
" list containing objects carrying test"
" data."))
self.add_argument("--name-attribute",
default = None,
help = "Member name indicator for test name.")
self.add_argument("--tag-attribute",
default = None,
required = True,
help = ("Member name indicator for test "
" tracing tags."))
self.add_argument("--justification-attribute",
default = None,
help = ("Member name indicator for "
" justifications."))
def process_tool_options(self, options, work_list):
self.schema = Activity
return True
@classmethod
def process(cls, options, file_name):
with open(file_name, "r", encoding="UTF-8") as fd:
data = json.load(fd)
# First we follow the test-list items to get the actual data
# we're interested in.
try:
data = get_item(root = data,
path = options.test_list,
required = True)
except Malformed_Input as err:
pprint(err.data)
print("%s: malformed input: %s" % (file_name, err.msg))
return False, []
# Ensure we actually have a list now
if not isinstance(data, list):
if options.test_list:
pprint(data)
print("%s: item described by %s is not a list" %
(file_name, options.test_list))
else:
print("%s: top-level item is not a list. use --test-list "
"to select a suitable list" % file_name)
return False, []
# Convert individual items
items = []
ok = True
for item_id, item in enumerate(data, 1):
try:
if options.name_attribute:
item_name = get_item(root = item,
path = options.name_attribute,
required = True)
else:
item_name = "%s.%u" % (syn_test_name(file_name),
item_id)
if not isinstance(item_name, str):
raise Malformed_Input("name is not a string",
item_name)
item_tags = get_item(root = item,
path = options.tag_attribute,
required = False)
if isinstance(item_tags, list):
pass
elif isinstance(item_tags, str):
item_tags = [item_tags]
elif item_tags is None:
item_tags = []
else:
raise Malformed_Input("tags are not a string or list",
item_name)
if options.justification_attribute:
item_just = get_item(
root = item,
path = options.justification_attribute,
required = False)
else:
item_just = []
if isinstance(item_just, list):
pass
elif isinstance(item_just, str):
item_just = [item_just]
elif item_just is None:
item_just = []
else:
raise Malformed_Input("justification is not a string"
" or list",
item_just)
l_item = Activity(
tag = Tracing_Tag(namespace = "json",
tag = item_name),
location = File_Reference(file_name),
framework = "JSON",
kind = "Test Vector")
for tag in item_tags:
l_item.add_tracing_target(
Tracing_Tag(namespace = "req",
tag = tag))
for just_up in item_just:
l_item.just_up.append(just_up)
items.append(l_item)
except Malformed_Input as err:
pprint(err.data)
print("%s: malformed input: %s" % (file_name, err.msg))
ok = False
return ok, items
def main():
tool = LOBSTER_Json()
return tool.execute()
if __name__ == "__main__":
sys.exit(main()) | PypiClean |
/rpi_ws281x-5.0.0.tar.gz/rpi_ws281x-5.0.0/README.rst | rpi\_ws281x
===========
Userspace Raspberry Pi library for controlling WS281X LEDs. This
includes WS2812 and SK6812RGB RGB LEDs. Preliminary support is now
included for SK6812RGBW LEDs (yes, RGB + W). The LEDs can be controlled
by either the PWM (2 independent channels) or PCM controller (1 channel)
or the SPI interface (1 channel).
Background:
-----------
The BCM2835 in the Raspberry Pi has both a PWM and a PCM module that are
well suited to driving individually controllable WS281X LEDs. Using the
DMA, PWM or PCM FIFO, and serial mode in the PWM, it's possible to
control almost any number of WS281X LEDs in a chain connected to the
appropriate output pin. For SPI the Raspbian spidev driver is used
(``/dev/spidev0.0``). This library and test program set the clock rate
to 3X the desired output frequency and create a bit pattern in RAM from
an array of colors where each bit is represented by 3 bits as follows.
::
Bit 1 - 1 1 0
Bit 0 - 1 0 0
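As a rough illustration (a sketch, not taken from the library source), one
8-bit color value could be expanded into these 3-bit symbols as follows,
assuming the most significant bit is sent first::

    def encode_byte(value):
        """Expand one 8-bit color value into 24 signal bits (3 per data bit)."""
        bits = []
        for i in range(7, -1, -1):      # iterate from MSB to LSB
            if (value >> i) & 1:
                bits.extend([1, 1, 0])  # symbol for a '1' data bit
            else:
                bits.extend([1, 0, 0])  # symbol for a '0' data bit
        return bits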
GPIO Usage:
-----------
The GPIOs that can be used are limited by the hardware of the Pi and
will vary based on the method used to drive them (PWM, PCM or SPI).
Beware that the GPIO numbers are not the same as the physical pin
numbers on the header.
PWM:
::
PWM0, which can be set to use GPIOs 12, 18, 40, and 52.
Only 12 (pin 32) and 18 (pin 12) are available on the B+/2B/3B
PWM1 which can be set to use GPIOs 13, 19, 41, 45 and 53.
Only 13 is available on the B+/2B/PiZero/3B, on pin 33
PCM:
::
PCM_DOUT, which can be set to use GPIOs 21 and 31.
Only 21 is available on the B+/2B/PiZero/3B, on pin 40.
SPI:
::
SPI0-MOSI is available on GPIOs 10 and 38.
Only GPIO 10 is available on all models.
See also note for RPi 3 below.
Power and voltage requirements
------------------------------
WS281X LEDs are generally driven at 5V. Depending on your actual LED
model and data line length you might be able to successfully drive the
data input with 3.3V. However in the general case you probably want to
use a level shifter to convert from the Raspberry Pi GPIO/PWM to 5V.
It is also possible to run the LEDs from a 3.3V - 3.6V power source, and
connect the GPIO directly at a cost of brightness, but this isn't
recommended.
The test program is designed to drive an 8x8 grid of LEDs, e.g. from
Adafruit (http://www.adafruit.com/products/1487) or Pimoroni
(https://shop.pimoroni.com/products/unicorn-hat). Please see the
Adafruit and Pimoroni websites for more information.
Know what you're doing with the hardware and electricity. I take no
responsibility for damage, harm, or mistakes.
Important warning about DMA channels
------------------------------------
You must make sure that the DMA channel you choose to use for the LEDs
is not `already in
use <https://www.raspberrypi.org/forums/viewtopic.php?p=609380#p609380>`__
by the operating system.
For example, **using DMA channel 5 will cause filesystem corruption**
on the Raspberry Pi 3 Model B.
See: https://github.com/jgarff/rpi_ws281x/issues/224
The default DMA channel (10) should be safe for the Raspberry Pi 3 Model
B, but this may change in future software releases.
Limitations:
------------
PWM
~~~
Since this library and the onboard Raspberry Pi audio both use the PWM,
they cannot be used together. You will need to blacklist the Broadcom
audio kernel module by creating a file
``/etc/modprobe.d/snd-blacklist.conf`` with
::
blacklist snd_bcm2835
If the audio device is still loading after blacklisting, you may also
need to comment it out in the /etc/modules file.
On headless systems you may also need to force audio through hdmi Edit
config.txt and add:
::
hdmi_force_hotplug=1
hdmi_force_edid_audio=1
A reboot is required for this change to take effect
Some distributions use audio by default, even if nothing is being
played. If audio is needed, you can use a USB audio device instead.
PCM
~~~
When using PCM you cannot use digital audio devices which use I2S since
I2S uses the PCM hardware, but you can use analog audio.
SPI
~~~
When using SPI the ledstring is the only device which can be connected
to the SPI bus. Both digital (I2S/PCM) and analog (PWM) audio can be
used.
Many distributions have a maximum SPI transfer of 4096 bytes. This can
be changed in ``/boot/cmdline.txt`` by appending
::
spidev.bufsiz=32768
On a RPi 3 you have to change the GPU core frequency to 250 MHz,
otherwise the SPI clock has the wrong frequency. Do this by adding the
following line to /boot/config.txt and reboot.
::
core_freq=250
On an RPi 4, dynamic frequency clocking has to be disabled, since it will
desync the SPI clock. Do this by adding this line to
``/boot/config.txt``. (``core_freq`` does not have to be changed, since
the default value of 500MHz is SPI compatible)
::
core_freq_min=500
SPI requires you to be in the ``gpio`` group if you wish to control your
LEDs without root.
Comparison PWM/PCM/SPI
----------------------
Both PWM and PCM use DMA transfer to output the control signal for the
LEDs. The max size of a DMA transfer is 65536 bytes. Since each LED
needs 12 bytes (4 colors, 8 symbols per color, 3 bits per symbol) this
means you can control approximately 5400 LEDs for a single strand in PCM
and 2700 LEDs per string for PWM (only PWM can control 2 independent
strings simultaneously). SPI uses the SPI device driver in the kernel.
For transfers larger than 96 bytes the kernel driver also uses DMA. Of
course there are practical limits on power and signal quality. These
will be more constraining in practice than the theoretical limits above.
When controlling an LED string of 240 LEDs, the CPU load on the original
Pi 2 (BCM2836) is: PWM 5%, PCM 5%, SPI 1%.
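The arithmetic behind those figures can be sketched as follows (an
illustrative Python fragment, not part of the library, assuming the two PWM
strings share the same DMA buffer)::

    bits_per_led = 4 * 8 * 3             # 4 color channels, 8 bits each, 3 signal bits per data bit
    bytes_per_led = bits_per_led // 8    # 12 bytes per LED
    max_dma_bytes = 65536                # maximum size of a single DMA transfer
    print(max_dma_bytes // bytes_per_led)        # ~5461 LEDs on a single PCM strand
    print(max_dma_bytes // (2 * bytes_per_led))  # ~2730 LEDs per string when PWM drives 2 strings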
| PypiClean |
/fake_bpy_module_2.80-20230117-py3-none-any.whl/bl_ui/space_properties.py | import sys
import typing
import bpy_types
GenericType = typing.TypeVar("GenericType")
class PROPERTIES_HT_header(bpy_types.Header, bpy_types._GenericUI):
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PROPERTIES_PT_navigation_bar(bpy_types.Panel, bpy_types._GenericUI):
bl_label = None
''' '''
bl_options = None
''' '''
bl_region_type = None
''' '''
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass | PypiClean |
/multi_emotion_recognition-0.1.2.tar.gz/multi_emotion_recognition-0.1.2/src/utils/utils.py | import json
import os
import pickle
import random
from collections import Counter
import emotlib
import numpy as np
from tqdm import tqdm
from src.utils.data import dataset_info, data_keys
import pandas as pd
def nrc_hashtag_lexicon(nrc_lexicon):
hashtag_vocab = []
for nl in nrc_lexicon:
hashtag_vocab.append(nl.split('\t')[1].replace("#", ""))
return hashtag_vocab
def spanish_hashtag_lexicon(emo_lexicon):
hashtag_lexicon = []
for el in emo_lexicon[1:]:
hashtag_lexicon.append(el.split('\t')[0])
return hashtag_lexicon
def get_hashtag_inputs(tokens, hashtag_dict):
hashtag_inputs = []
cur_word = []
tid = 0
while tid < len(tokens):
if tokens[tid] in ['[PAD]', '[SEP]']:
hashtag_inputs.extend([0] * (len(tokens) - len(hashtag_inputs)))
break
if tid < len(tokens) and tokens[tid].startswith('##'):
while tid < len(tokens) and tokens[tid].startswith('##'):
cur_word.append(tokens[tid][2:])
tid += 1
# let tid point to the last token of the word
tid -= 1
else:
cur_word = [tokens[tid]]
if ''.join(cur_word) in hashtag_dict:
hashtag_id = hashtag_dict[''.join(cur_word)]
# the hashtags of word: #, xx, ##xx, ##xx are all 1
if 0 < (tid - len(cur_word)) < len(tokens) and tokens[tid - len(cur_word)] == "#":
hashtag_inputs[-1] = hashtag_id
hashtag_inputs.extend([hashtag_id] * len(cur_word))
else:
hashtag_inputs.extend([0] * len(cur_word))
tid += 1
return hashtag_inputs
def update_dataset_dict(
idx, dataset_dict, input_ids, hashtag_inputs, max_length, tokenizer, text, token_offsets, label=None):
input_ids = [tokenizer.cls_token_id] + input_ids + [tokenizer.sep_token_id]
hashtag_inputs = [0] + hashtag_inputs + [0]
num_tokens = len(input_ids)
truncated_pos = token_offsets[-1][1] if text else 0
num_pad_tokens = max_length - num_tokens
assert num_pad_tokens >= 0
input_ids += [tokenizer.pad_token_id] * num_pad_tokens
attention_mask = [1] * num_tokens + [0] * num_pad_tokens
hashtag_inputs += [0] * num_pad_tokens
if token_offsets is not None:
token_offsets = [(0, 0)] + token_offsets + [(0, 0)]
token_offsets += [(0, 0)] * num_pad_tokens
dataset_dict['item_idx'].append(idx)
dataset_dict['input_ids'].append(input_ids)
dataset_dict['hashtag_ids'].append(hashtag_inputs)
dataset_dict['attention_mask'].append(attention_mask)
if label is not None:
dataset_dict['label'].append(label)
dataset_dict['offsets'].append(token_offsets)
dataset_dict['truncated_texts'].append(text[: truncated_pos])
return dataset_dict
def preprocess_dataset(text):
text = text.replace("\\n", " ")
text = text.lower()
text = emotlib.demojify(text)
return text
def transform_data(tokenizer, hashtag_dict, text, max_length):
encode_dict = tokenizer(text, add_special_tokens=False, return_offsets_mapping=True)
raw_tokens = encode_dict.encodings[0].tokens
hashtag_inputs = get_hashtag_inputs(raw_tokens, hashtag_dict)
# num_tokens = encode_dict['attention_mask'].count(1)
input_ids = encode_dict['input_ids']
offset_mapping = encode_dict['offset_mapping']
# if num_tokens > actual_max_length:
# actual_max_length = num_tokens
input_length = min(len(input_ids), max_length - 2)
input_ids = input_ids[:input_length]
offset_mapping = offset_mapping[:input_length]
hashtag_inputs = hashtag_inputs[:input_length]
return input_ids, hashtag_inputs, offset_mapping
def generate_output_file(dataset, tokenizer, input_ids, probabilities, output_dir=None, targets=None):
texts = []
for i in range(len(input_ids)):
text = tokenizer.decode(input_ids[i], skip_special_tokens=True)
texts.append(text)
labels = [','.join([dataset_info[dataset]['classes'][i] for i, pp in enumerate(p) if pp > 0.5]) for p in probabilities]
labels = ['neutral' if len(l) == 0 else l for l in labels]
probs = [json.dumps([{dataset_info[dataset]['classes'][i]: pp.item()} for i, pp in enumerate(p)]) for p in probabilities]
results = [{'text': t, 'pred_label': l, 'probability': p} for t, l, p in zip(texts, labels, probs)]
result_df = pd.DataFrame({'text': texts, 'prediction': labels, 'prob': probs})
if targets is not None:
targets = [','.join([dataset_info[dataset]['classes'][i] for i, tt in enumerate(t) if tt != 0]) for t in
targets]
result_df['target'] = targets
if output_dir:
result_df.to_csv(os.path.join(output_dir, 'result.csv'), index=False)
return results
def stratified_sampling(data, num_samples):
num_instances = len(data)
assert num_samples < num_instances
counter_dict = Counter(data)
unique_vals = list(counter_dict.keys())
val_counts = list(counter_dict.values())
num_unique_vals = len(unique_vals)
assert num_unique_vals > 1
num_stratified_samples = [int(c*num_samples/num_instances) for c in val_counts]
assert sum(num_stratified_samples) <= num_samples
if sum(num_stratified_samples) < num_samples:
delta = num_samples - sum(num_stratified_samples)
delta_samples = np.random.choice(range(num_unique_vals), replace=True, size=delta)
for val in delta_samples:
num_stratified_samples[unique_vals.index(val)] += 1
assert sum(num_stratified_samples) == num_samples
sampled_indices = []
for i, val in enumerate(unique_vals):
candidates = np.where(data == val)[0]
sampled_indices += list(np.random.choice(candidates, replace=False, size=num_stratified_samples[i]))
random.shuffle(sampled_indices)
return sampled_indices
def sample_dataset(data_path, dataset_dict, split, num_samples, seed):
sampled_split_filename = f'{split}_split_{num_samples}_{seed}.pkl'
if os.path.exists(os.path.join(data_path, sampled_split_filename)):
with open(os.path.join(data_path, sampled_split_filename), 'rb') as f:
sampled_split = pickle.load(f)
else:
sampled_split = stratified_sampling(dataset_dict['label'], num_samples)
with open(os.path.join(data_path, sampled_split_filename), 'wb') as f:
pickle.dump(sampled_split, f)
for key in data_keys:
dataset_dict[key] = sampled_split if key == 'item_idx' else [dataset_dict[key][i] for i in sampled_split]
return dataset_dict
def save_datadict(data_path, dataset_dict, split, num_samples, seed):
for key in tqdm(data_keys, desc=f'Saving {split} dataset'):
if key in dataset_dict:
filename = f'{key}.pkl' if num_samples is None else f'{key}_{num_samples}_{seed}.pkl'
with open(os.path.join(data_path, filename), 'wb') as f:
pickle.dump(dataset_dict[key], f) | PypiClean |
/winevtrc-20220106.tar.gz/winevtrc-20220106/docs/sources/eventlog-providers/Provider-Application-Hang.md | ## Application Hang
Seen on:
* Windows 2003
* Windows XP 32-bit
* Windows XP 64-bit
<table border="1" class="docutils">
<tbody>
<tr>
<td><b>Log source(s):</b></td>
<td>Application Hang</td>
</tr>
<tr>
<td><b>Log type:</b></td>
<td>Application</td>
</tr>
<tr>
<td><b>Event message file(s):</b></td>
<td>%systemroot%\system32\faultrep.dll</td>
</tr>
</tbody>
</table>
Seen on:
* Windows 10 (1511, 1607, 1703, 1709, 1803, 1809, 1903, 1909, 2004, 20H2)
* Windows 11 (21H2)
* Windows 2008
* Windows 2012
* Windows 7
* Windows 8.0
* Windows 8.1
* Windows Vista
<table border="1" class="docutils">
<tbody>
<tr>
<td><b>Log source(s):</b></td>
<td>Application Hang</td>
</tr>
<tr>
<td><b>Log type:</b></td>
<td>Application</td>
</tr>
<tr>
<td><b>Event message file(s):</b></td>
<td>%systemroot%\system32\wersvc.dll</td>
</tr>
</tbody>
</table>
| PypiClean |
/cheat-zh-0.0.1.tar.gz/cheat-zh-0.0.1/cheat/sheets.py | import os
from cheat import cheatsheets
from cheat.utils import die
def default_path():
""" Returns the default cheatsheet path """
# determine the default cheatsheet dir
default_sheets_dir = os.environ.get('DEFAULT_CHEAT_DIR') or os.path.join('~', '.cheat')
default_sheets_dir = os.path.expanduser(os.path.expandvars(default_sheets_dir))
# create the DEFAULT_CHEAT_DIR if it does not exist
if not os.path.isdir(default_sheets_dir):
try:
# @kludge: unclear on why this is necessary
os.umask(0000)
os.mkdir(default_sheets_dir)
except OSError:
die('无法创建缺省目录DEFAULT_CHEAT_DIR')
# assert that the DEFAULT_CHEAT_DIR is readable and writable
if not os.access(default_sheets_dir, os.R_OK):
die('缺省目录DEFAULT_CHEAT_DIR (' + default_sheets_dir +') 不可读.')
if not os.access(default_sheets_dir, os.W_OK):
die('缺省目录DEFAULT_CHEAT_DIR (' + default_sheets_dir +') 不可写.')
# return the default dir
return default_sheets_dir
def get():
""" Assembles a dictionary of cheatsheets as name => file-path """
cheats = {}
# otherwise, scan the filesystem
for cheat_dir in reversed(paths()):
cheats.update(
dict([
(cheat, os.path.join(cheat_dir, cheat))
for cheat in os.listdir(cheat_dir)
if not cheat.startswith('.')
and not cheat.startswith('__')
])
)
return cheats
def paths():
""" Assembles a list of directories containing cheatsheets """
sheet_paths = [
default_path(),
cheatsheets.sheets_dir()[0],
]
# merge the CHEATPATH paths into the sheet_paths
if 'CHEATPATH' in os.environ and os.environ['CHEATPATH']:
for path in os.environ['CHEATPATH'].split(os.pathsep):
if os.path.isdir(path):
sheet_paths.append(path)
if not sheet_paths:
die('缺省目录DEFAULT_CHEAT_DIR不存在 或 CHEATPATH 未配置.')
return sheet_paths
def list():
""" Lists the available cheatsheets """
sheet_list = ''
pad_length = max([len(x) for x in get().keys()]) + 4
for sheet in sorted(get().items()):
sheet_list += sheet[0].ljust(pad_length) + sheet[1] + "\n"
return sheet_list
def search(term):
""" Searches all cheatsheets for the specified term """
result = ''
for cheatsheet in sorted(get().items()):
match = ''
for line in open(cheatsheet[1]):
if term in line:
match += ' ' + line
if match != '':
result += cheatsheet[0] + ":\n" + match + "\n"
return result | PypiClean |
/chaostoolkit-1.15.1.tar.gz/chaostoolkit-1.15.1/CHANGELOG.md | # Changelog
## [Unreleased][]
[Unreleased]: https://github.com/chaostoolkit/chaostoolkit/compare/1.15.1...HEAD
## [1.15.1][] - 2023-04-03
[1.15.1]: https://github.com/chaostoolkit/chaostoolkit/compare/1.15.0...1.15.1
### Fixed
- As we now depend on recent versions of `importlib_metadata`, we need to
abide by its breaking compatibility changes, especially those described at the
bottom of the section [here](https://docs.python.org/3/library/importlib.metadata.html#entry-points).
Fixes [#279][279]
[279]: https://github.com/chaostoolkit/chaostoolkit/issues/279
## [1.15.0][] - 2023-01-29
[1.15.0]: https://github.com/chaostoolkit/chaostoolkit/compare/1.14.0...1.15.0
### Added
- Support to define the runtime strategies for hypothesis and rollbacks from
within the experiment itself, using the following block:
```json
{
"runtime": {
"hypoethesis": {
"strategy": "after-method-only"
},
"rollbacks": {
"strategy": "always"
}
}
}
```
Only one of the two may be set. In all cases, these can be overridden by
the corresponding CLI flags.
## [1.14.0][] - 2023-01-27
[1.14.0]: https://github.com/chaostoolkit/chaostoolkit/compare/1.13.0...1.14.0
- Addressed issue #272 - removed vulnerabilities on container image built using Dockerfile-full
- Addressed issue #272 - removed vulnerabilities on container image built using Dockerfile-basic
- Add support for control-file arguments to the `chaos run` command
## [1.13.0][] - 2022-09-29
[1.13.0]: https://github.com/chaostoolkit/chaostoolkit/compare/1.12.0...1.13.0
### Changed
- Updated dependencies
- Upgraded GitHub actions
## [1.12.0][] - 2022-02-05
[1.12.0]: https://github.com/chaostoolkit/chaostoolkit/compare/1.11.0...1.12.0
### Added
* Full and basic container images based on Debian bullseye
* `--log-file-level`: a new root level flag to set the log level of the
`chaostoolkit.log` file
## [1.11.0][] - 2022-01-05
[1.11.0]: https://github.com/chaostoolkit/chaostoolkit/compare/1.10.1...1.11.0
### Changed
* Bump to Python 3.7 as a baseline since Python 3.6 is EOL
### Added
* Add Python 3.10 support
## [1.10.1][] - 2021-10-27
[1.10.1]: https://github.com/chaostoolkit/chaostoolkit/compare/1.10.0...1.10.1
### Changed
* Upgraded to python-json-logger 2.0+
* Fixed json logger format as per [#251][251]
[251]: https://github.com/chaostoolkit/chaostoolkit/issues/251
## [1.10.0][] - 2021-10-04
[1.10.0]: https://github.com/chaostoolkit/chaostoolkit/compare/1.9.6...1.10.0
### Changed
* Updated to chaostoolkit-lib 1.22+ to support new `dry` flag
* Updated release workflow to remove duplicate `needs` properties
### Added
* Added `.github/workflows/check_pr.yaml` which checks if a PR has modified the
CHANGELOG.md and if it changed/added tests
* Moved `pip install --upgrade pip setuptools wheel` step from `install-dev` to
`install` in the Makefile
* Updated `.github/workflows/check_pr.yaml` to check that both `CHANGELOG.md`
and `chaostoolkit/__init__.py` get updated in line with a new version
* Updated `Makefile` to specify `python3` instead of `python`
* Added dry run options (an example invocation follows the list):
* `--dry=activities` runs all activities as dry run. (same as the old flag)
* `--dry=actions` runs all actions as dry run.
* `--dry=probes` runs all probes as dry run.
* `--dry=pause` runs all activities with no pausing.
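For instance (the experiment file name is illustrative):
```
$ chaos run --dry=actions experiment.json
```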
## [1.9.6][] - 2021-08-26
[1.9.6]: https://github.com/chaostoolkit/chaostoolkit/compare/1.9.5...1.9.6
### Fixed
* Actually update the version of the lib on release
## [1.9.5][] - 2021-08-26
[1.9.5]: https://github.com/chaostoolkit/chaostoolkit/compare/1.9.4...1.9.5
### Fixed
* Fixed version extracting in `setup.py`
## [1.9.4][] - 2021-08-26
[1.9.4]: https://github.com/chaostoolkit/chaostoolkit/compare/1.9.3...1.9.4
### Changed
* Bump version of `chaostoolkit-lib` to `~=1.21`
* Switched from pycodestyle/pylama to `black`, `flake8`, `isort`
* Update CI builds to build, lint, and test
* Applied `black`, `flake8`, and `isort` across the codebase
* Ran `pyupgrade --py36-plus`
## [1.9.3][] - 2021-08-24
[1.9.3]: https://github.com/chaostoolkit/chaostoolkit/compare/1.9.2...1.9.3
### Changed
* Fixed typo in `chaos init` prompt from `reognised` to `recognised`
* Changed `--hypothesis-strategy` method `continously` to `continuously`
* Changed `Schedule` parameter from `continous_hypothesis_frequency` to
`continuous_hypothesis_frequency`
* Changed other minor typos
## [1.9.2][] - 2021-08-16
[1.9.2]: https://github.com/chaostoolkit/chaostoolkit/compare/1.9.1...1.9.2
### Added
* Add `.github/workflows/close_stale_issues.yaml` to mark issues stale after `365` days
and close them after `7` days of being `Stale`
### Changed
* Dockerfile now requires `--build-arg ctkversion=<version>` when building
* `.github/workflows/release.yaml` now uses a retry step for Docker builds to ensure we don't
lose the race between PyPI publication and our build step
## [1.9.1][] - 2021-05-31
[1.9.1]: https://github.com/chaostoolkit/chaostoolkit/compare/1.9.0...1.9.1
### Changed
- Fixed `--var` and `--var-file` arguments parsing by addition of missing
return into `validate_vars` function
## [1.9.0][] - 2021-02-18
[1.9.0]: https://github.com/chaostoolkit/chaostoolkit/compare/1.8.1...1.9.0
### Changed
- Remove Python 3.5 support. [Requires 3.6][208] at least now until end of 2021
when [Python 3.6 reaches EOL][pyeol].
[pyeol]: https://devguide.python.org/#status-of-python-branches
[208]: https://github.com/chaostoolkit/chaostoolkit/issues/208
## [1.8.1][] - 2021-02-17
[1.8.1]: https://github.com/chaostoolkit/chaostoolkit/compare/1.8.0...1.8.1
**LAST VERSION SUPPORTING PYTHON 3.5**
### Changed
- Updated build scripts to support latest setuptools to build package properly
## [1.8.0][] - 2021-02-17
[1.8.0]: https://github.com/chaostoolkit/chaostoolkit/compare/1.7.1...1.8.0
### Changed
- Updated pyyaml due to CVE-2020-14343 [#206][206]
- Pinning Chaos Toolkit lib to strictly less than 1.19.0 as it'll be the last
one supporting Python 3.5 [#194][194]
- Moved to setup.cfg to manage build
[206]: https://github.com/chaostoolkit/chaostoolkit/issues/206
[194]: https://github.com/chaostoolkit/chaostoolkit/issues/194
## [1.7.1][] - 2020-11-03
[1.7.1]: https://github.com/chaostoolkit/chaostoolkit/compare/1.7.0...1.7.1
### Changed
- Pass extra variables down to the runner for substitution
[chaostoolkit-lib#192][ctklib192]
[ctklib192]: https://github.com/chaostoolkit/chaostoolkit-lib/issues/192
## [1.7.0][] - 2020-09-07
[1.7.0]: https://github.com/chaostoolkit/chaostoolkit/compare/1.6.0...1.7.0
### Added
* Add the `--hypothesis-strategy` flag to the `run` command. It defines how the
steady-state hypothesis is applied. One of:
* `default` is the classic mode where the hypothesis is run before and after
the method
* `before-method-only` runs the hypothesis once only before the method
* `after-method-only` runs the hypothesis once only after the method. This is
useful when you know your environment is not in the appropriate state
before the conditions are applied
* `during-method-only` runs the hypothesis repeatedly during the method but
not before nor after
* `continously` runs the hypothesis repeatedly during the method as well as
before and after as usual
* Add the `--hypothesis-frequency` flag to the `run` command. This flag is
only meaningful with `--hypothesis-strategy=during-method-only|continously`.
It takes a floating number indicating how many seconds to wait between two
executions of the hypothesis
* Add the `--fail-fast` flag to the `run` command. This flag is
only meaningful with `--hypothesis-strategy=during-method-only|continously`.
If set, this indicates the experiment should be marked as deviating
immediately. When not provided, the hypothesis runs until the end of the
method without terminating the experiment (a combined example is shown below).
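For instance, a run combining these flags might look like this (the
experiment file name and frequency value are illustrative):
```
$ chaos run --hypothesis-strategy=during-method-only --hypothesis-frequency=5 --fail-fast experiment.json
```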
### Changed
- Bump dependency on chaostoolkit-lib to 1.13.0 to support the steady state
strategy
## [1.6.0][] - 2020-08-17
[1.6.0]: https://github.com/chaostoolkit/chaostoolkit/compare/1.5.0...1.6.0
### Added
- The `--var` and `--var-file` flags to override values in the configuration,
and secrets for var files, blocks of the experiments. They take precedence
for inlined values and allow data files to be managed externally to the
experiment itself when environment variables are not an option, for example
(an illustrative invocation is shown below).
[#175][175]
[175]: https://github.com/chaostoolkit/chaostoolkit-lib/issues/175
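For instance (the variable name, value and file names are illustrative):
```
$ chaos run --var instance_count=3 --var-file ./secrets.yaml experiment.json
```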
## [1.5.0][] - 2020-07-06
[1.5.0]: https://github.com/chaostoolkit/chaostoolkit/compare/1.4.2...1.5.0
### Added
- Commands to get/set/remove an entry from the settings as well as show
the settings file entirely [#65][65] (an illustrative session appears at the end of this section)
- Rollbacks runtime strategy flag [#176][176]
Backwards-compatible default strategy to run the rollbacks. This will run
unless a probe failed in the hypothesis or a control interrupted
the experiment (not passing the flag is equivalent to this):
```
$ chaos run --rollback-strategy=default experiment.json
```
Always run the rollbacks:
```
$ chaos run --rollback-strategy=always experiment.json
```
Never run the rollbacks:
```
$ chaos run --rollback-strategy=never experiment.json
```
Run the rollbacks only when deviated:
```
$ chaos run --rollback-strategy=deviated experiment.json
```
[65]: https://github.com/chaostoolkit/chaostoolkit-lib/issues/65
[176]: https://github.com/chaostoolkit/chaostoolkit-lib/issues/176
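An illustrative session with the settings commands (the key and value are
made up; check `chaos settings --help` for the exact subcommand syntax):
```
$ chaos settings show
$ chaos settings set auths.mytoken abc123
$ chaos settings get auths.mytoken
$ chaos settings remove auths.mytoken
```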
## [1.4.2][] - 2020-04-29
[1.4.2]: https://github.com/chaostoolkit/chaostoolkit/compare/1.4.1...1.4.2
### Added
* New flag `--no-verify-tls` to `chaos run` and `chaos validate`commands;
it disables TLS certificate verification when source is downloaded
over a self-signed certificate endpoint.
### Changed
* Migrates CI/CD from TravisCI to Github Actions
* [Potentially breaking] Build the Docker image with a non-root user by default (rootless container).
This is a potentially breaking change if you created your own docker image
using the chaostoolkit/chaostoolkit as a base image.
* Allow validating experiments downloaded from URL: `chaos validate http://...`
## [1.4.1][] - 2020-02-20
[1.4.1]: https://github.com/chaostoolkit/chaostoolkit/compare/1.4.0...1.4.1
### Added
* Added build for Python 3.8
### Changed
* Fixed `importlib_metadata` naming differences between Python 3.8 and
earlier [#162][162]
[162]: https://github.com/chaostoolkit/chaostoolkit-lib/issues/162
## [1.4.0][] - 2020-02-20
[1.4.0]: https://github.com/chaostoolkit/chaostoolkit/compare/1.3.0...1.4.0
### Added
- Add critical level color to the logger
- Add the ability for `chaos init` to also export the experiment in YAML format
```
chaos init --experiment-path prod-experiment.yaml
```
### Changed
* Fixed Dockerfile so the right dependencies are installed at build time
* Replaced pkg_resources usage with the Python 3.8 backport importlib_metadata
* Bump chaostoolkit-lib dependency to 1.8.0
## [1.3.0][] - 2019-09-03
[1.3.0]: https://github.com/chaostoolkit/chaostoolkit/compare/1.2.0...1.3.0
### Added
- Load global controls before we even read the experiments so we can apply
them before and after loading the experiment.
## [1.2.0][] - 2018-04-17
[1.2.0]: https://github.com/chaostoolkit/chaostoolkit/compare/1.1.0...1.2.0
### Added
- Support for structured logging [#122][122]
[122]: https://github.com/chaostoolkit/chaostoolkit/issues/122
### Changed
- Moved loading global controls back into `run_experiment` itself
[chaostoolkit-lib#116][116]
[116]: https://github.com/chaostoolkit/chaostoolkit-lib/issues/116
## [1.1.0][] - 2018-04-17
[1.1.0]: https://github.com/chaostoolkit/chaostoolkit/compare/1.0.0...1.1.0
### Added
- Bump to Chaos Toolkit library 1.2.0
- Allow declaring and loading controls from the settings so they are globally
applied to all your runs [chaostoolkit-lib#99][99]
In your settings file, at `~/.chaostoolkit-lib/settings.yaml` add, for
instance:
```yaml
controls:
my-own-control:
provider:
module: mypackage.mycontrole_module
type: python
```
This will load `mypackage/mycontrole_module.py` from your `PYTHONPATH`
and use it as a [control][].
- Remove MacOSX build. Way too long for any benefits.
- Build against stable Python 3.7
- Ensure exit code is set in all cases
[control]: https://docs.chaostoolkit.org/reference/extending/create-control-extension/
[99]: https://github.com/chaostoolkit/chaostoolkit-lib/issues/99
## [1.0.0][] - 2018-02-21
[1.0.0]: https://github.com/chaostoolkit/chaostoolkit/compare/1.0.0rc4...1.0.0
### Changed
- Cleaned up package metadata
## [1.0.0rc4][] - 2018-02-21
[1.0.0rc4]: https://github.com/chaostoolkit/chaostoolkit/compare/1.0.0rc3...1.0.0rc4
### Added
- Ensure requirements-dev.txt is bundled with the package
- Bumped chaostoolkit-lib to 1.0.0
- Ensure we don't create installation problems by forcing a specific version
## [1.0.0rc3][] - 2018-01-29
[1.0.0rc3]: https://github.com/chaostoolkit/chaostoolkit/compare/1.0.0rc2...1.0.0rc3
### Changed
- Bump to chaostoolkit-lib 1.0.0rc3
## [1.0.0rc2][] - 2018-01-28
[1.0.0rc2]: https://github.com/chaostoolkit/chaostoolkit/compare/1.0.0rc1...1.0.0rc2
### Changed
- Bump to chaostoolkit-lib 1.0.0rc2
- Enable MacOSX travis build to ensure Chaos Toolkit does build there
## [1.0.0rc1][] - 2018-11-30
[1.0.0rc1]: https://github.com/chaostoolkit/chaostoolkit/compare/0.17.1...1.0.0rc1
### Changed
- Handle RC versioning when building release
- Pin dependency versions
## [0.17.1][] - 2018-11-30
[0.17.1]: https://github.com/chaostoolkit/chaostoolkit/compare/0.17.0...0.17.1
### Changed
* Remove `NoReturn` import as it is not available prior to Python 3.6.5 [#90][90]
[90]: https://github.com/chaostoolkit/chaostoolkit/issues/90
## [0.17.0][] - 2018-11-29
[0.17.0]: https://github.com/chaostoolkit/chaostoolkit/compare/0.16.0...0.17.0
### Added
- add an `info` command to display basic information such as the version of the
  toolkit core library or of installed extensions. It can also display the
  current settings (see the sketch below) [#64][64]
[64]: https://github.com/chaostoolkit/chaostoolkit/issues/64
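As a sketch of how the command might be invoked (the targets shown below are
assumptions for illustration, not an exhaustive list):

```
$ chaos info core
$ chaos info extensions
$ chaos info settings
```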
### Changed
- strip the command name before sending it to check for a newer version, as
  sometimes we get a tabulation character in there
- swap `logger.warn` for `logger.warning` as the former is obsolete
## [0.16.0][] - 2018-09-19
[0.16.0]: https://github.com/chaostoolkit/chaostoolkit/compare/0.15.0...0.16.0
### Added
- send a `RunFlowEvent.RunDeviated` event in addition to other events when the
steady state deviated after the experimental method [#56][56]
[56]: https://github.com/chaostoolkit/chaostoolkit/issues/56
## [0.15.0][] - 2018-08-09
[0.15.0]: https://github.com/chaostoolkit/chaostoolkit/compare/0.14.0...0.15.0
### Added
- a new global flag `chaos --settings <path>` to explicitly specify the
  location of the Chaos Toolkit settings file
- experiments can now also be loaded from an HTTP(S) resource (with or without
  auth) as per [#53][53]; both additions are illustrated below
[53]: https://github.com/chaostoolkit/chaostoolkit/issues/53
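As an illustrative sketch of both additions (the settings path and URL are
placeholders):

```
$ chaos --settings ./my-settings.yaml run https://example.com/experiment.json
```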
### Changed
- by default, the run command will now set the exit code to 1 when the
experiment is not successful (interrupted, aborted or failed). This can be
bypassed by plugins so they have the opportunity to process the journal as
well. In that case, they must set the exit code themselves to play nicely.
## [0.14.0][] - 2018-04-27
[0.14.0]: https://github.com/chaostoolkit/chaostoolkit/compare/0.13.0...0.14.0
### Changed
- Do not notify of experiment validation when running it (too noisy)
- Encode date, datetime, decimal and UUID to JSON explicitly
## [0.13.0][] - 2018-02-20
[0.13.0]: https://github.com/chaostoolkit/chaostoolkit/compare/0.12.0...0.13.0
### Changed
- Publish events for each step of the flow
## [0.12.0][] - 2018-02-09
[0.12.0]: https://github.com/chaostoolkit/chaostoolkit/compare/0.11.0...0.12.0
### Changed
- New `chaos init` wizard instructions
## [0.11.0][] - 2018-02-08
[0.11.0]: https://github.com/chaostoolkit/chaostoolkit/compare/0.10.0...0.11.0
### Changed
- Returning journal and experiment from run and validate commands for
downstream applications
- Better guidance on init
## [0.10.0][] - 2018-02-06
[0.10.0]: https://github.com/chaostoolkit/chaostoolkit/compare/0.9.0...0.10.0
### Changed
- Create an empty experiment when no discovery was run beforehand [#27][27]
- Returns the generated experiment to external callers
- Name of the journal output from the run command is now `journal.json` rather
than `chaos-report.json` [#31][31]
- Renamed the debug log from `experiment.log` to `chaostoolkit.log` because
it is used for any commands, even when the experiment is not required
- The debug log is now appending
- The command being run is logged into the debug log
- You can bypass arguments in the init command via an empty string [#29][29]
- Allow to create steady-state hypothesis from init command [#28][28]
- Allow to set rollbacks from init command [#30][30]
- Pass the command executed to the checker for compatibility [#36][36]
- Better logging of failed discovery [chaostoolkit-lib#29][29lib]
- Depending now on chaostoolkit-lib 0.14.0
[27]: https://github.com/chaostoolkit/chaostoolkit/issues/27
[28]: https://github.com/chaostoolkit/chaostoolkit/issues/28
[29]: https://github.com/chaostoolkit/chaostoolkit/issues/29
[30]: https://github.com/chaostoolkit/chaostoolkit/issues/30
[31]: https://github.com/chaostoolkit/chaostoolkit/issues/31
[36]: https://github.com/chaostoolkit/chaostoolkit/issues/36
[29lib]: https://github.com/chaostoolkit/chaostoolkit-lib/issues/29
## [0.9.0][] - 2018-01-17
[0.9.0]: https://github.com/chaostoolkit/chaostoolkit/compare/0.8.0...0.9.0
### Added
- Steady state hypothesis is not mandatory when exploring weaknesses [#18][18]
[18]: https://github.com/chaostoolkit/chaostoolkit/issues/18
## [0.8.0][] - 2018-01-16
[0.8.0]: https://github.com/chaostoolkit/chaostoolkit/compare/0.7.0...0.8.0
### Added
- New init feature [#23][23]
[23]: https://github.com/chaostoolkit/chaostoolkit/issues/23
## [0.7.0][] - 2018-01-16
[0.7.0]: https://github.com/chaostoolkit/chaostoolkit/compare/0.6.0...0.7.0
### Added
- New discovery feature
## [0.6.0][] - 2017-12-19
[0.6.0]: https://github.com/chaostoolkit/chaostoolkit/compare/0.5.0...0.6.0
### Changed
- Version check is now done server-side to remove semver dependency
## [0.5.0][] - 2017-12-17
[0.5.0]: https://github.com/chaostoolkit/chaostoolkit/compare/0.4.0...0.5.0
### Changed
- Log to file by default and added a flag to disable it
- Updated to chaostoolkit-lib 0.8.0
## [0.4.0][] - 2017-12-12
[0.4.0]: https://github.com/chaostoolkit/chaostoolkit/compare/0.3.0...0.4.0
### Added
- Added log-file flag to log the run (at DEBUG level) to a file
### Changed
- Bumped to chaostoolkit-lib 0.7.0
## [0.3.0][] - 2017-12-06
[0.3.0]: https://github.com/chaostoolkit/chaostoolkit/compare/0.2.8...0.3.0
### Changed
- Proper contact email address
- Ensuring latest spec version support from chaostoolkit-lib 0.6.0
## [0.2.8][] - 2017-11-30
[0.2.8]: https://github.com/chaostoolkit/chaostoolkit/compare/0.2.5...0.2.8
### Changed
- Minor improvements of the version check
- Triggering the documentation build on new releases so the doc is updated
with the latest tag information
## [0.2.5][] - 2017-11-23
[0.2.5]: https://github.com/chaostoolkit/chaostoolkit/compare/0.2.4...0.2.5
### Added
- Checking for newer release of the toolkit at runtime
## [0.2.4][] - 2017-10-12
[0.2.4]: https://github.com/chaostoolkit/chaostoolkit/compare/0.2.3...0.2.4
### Added
- Enable CLI extensions
- Provide a change directory argument when using the CLI
### Changed
- Proper verbose log level
## [0.2.3][] - 2017-10-07
[0.2.3]: https://github.com/chaostoolkit/chaostoolkit/compare/0.2.2...0.2.3
### Changed
- Not a universal wheel distribution
## [0.2.2][] - 2017-10-06
[0.2.2]: https://github.com/chaostoolkit/chaostoolkit/compare/0.2.1...0.2.2
### Changed
- Removed old dependencies
## [0.2.1][] - 2017-10-06
[0.2.1]: https://github.com/chaostoolkit/chaostoolkit/compare/0.2.0...0.2.1
### Changed
- Package up extra files when installed from source
## [0.2.0][] - 2017-10-06
[0.2.0]: https://github.com/chaostoolkit/chaostoolkit/compare/0.1.12...0.2.0
### Changed
- Core code now lives in a dedicated project [chaoslib][chaoslib].
chaostoolkit is now just the CLI of running experiments [#3][3]
[chaoslib]: https://github.com/chaostoolkit/chaostoolkit-lib
[3]: https://github.com/chaostoolkit/chaostoolkit/issues/3
## [0.1.12][] - 2017-10-03
[0.1.12]: https://github.com/chaostoolkit/chaostoolkit/compare/0.1.11...0.1.12
### Removed
- Documentation has moved to its own project
## [0.1.11][] - 2017-10-02
[0.1.11]: https://github.com/chaostoolkit/chaostoolkit/compare/0.1.10...0.1.11
### Added
- Ensure CNAME is set for the docs to be resolved via chaostoolkit.org
## [0.1.10][] - 2017-10-02
[0.1.10]: https://github.com/chaostoolkit/chaostoolkit/compare/0.1.9...0.1.10
### Added
- Installing dependencies along with the command
- Using a regular user to run from a Docker container
## [0.1.9][] - 2017-10-01
[0.1.9]: https://github.com/chaostoolkit/chaostoolkit/compare/0.1.8...0.1.9
### Changed
- Switched to an alpine based Docker image for smaller footprint
## [0.1.8][] - 2017-10-01
[0.1.8]: https://github.com/chaostoolkit/chaostoolkit/compare/0.1.7...0.1.8
### Changed
- Better installation docs
## [0.1.0][] - 2017-10-01
[0.1.0]: https://github.com/chaostoolkit/chaostoolkit/tree/0.1.0
### Added
- Initial release
| PypiClean |
/scml-agents-0.4.2.tar.gz/scml-agents-0.4.2/scml_agents/scml2022/oneshot/team_107/other_agents/agent_team73.py | from abc import ABC
from collections import defaultdict
from typing import List
from negmas import MechanismState, ResponseType
from negmas.outcomes import Outcome
from scml.oneshot import *
Buy = 0
Sell = 1
Offer = 0
Accept = 1
INF = 1000
__all__ = [
"SimpleAgent",
"BetterAgent",
"AdaptiveAgent",
"Gentle",
]
class SimpleAgent(OneShotAgent, ABC):
"""A greedy agent based on OneShotAgent"""
def __init__(self, owner=None, ufun=None, name=None):
super().__init__(owner, ufun, name)
self.secured = 0
def init(self):
pass
def step(self):
self.secured = 0
def on_negotiation_success(self, contract, mechanism):
self.secured += contract.agreement["quantity"]
def propose(self, negotiator_id: str, state) -> "Outcome":
return self.best_offer(negotiator_id)
def respond(self, negotiator_id, state):
offer = state.current_offer
if not offer:
return ResponseType.REJECT_OFFER
my_needs = self._needed(negotiator_id)
if my_needs <= 0:
return ResponseType.END_NEGOTIATION
return (
ResponseType.ACCEPT_OFFER
if offer[QUANTITY] <= my_needs
else ResponseType.REJECT_OFFER
)
def best_offer(self, negotiator_id):
my_needs = self._needed(negotiator_id)
if my_needs <= 0:
return None
nmi = self.get_nmi(negotiator_id)
if not nmi:
return None
quantity_issue = nmi.issues[QUANTITY]
unit_price_issue = nmi.issues[UNIT_PRICE]
offer = [-1] * 3
offer[QUANTITY] = max(
min(my_needs, quantity_issue.max_value), quantity_issue.min_value
)
offer[TIME] = self.awi.current_step
if self._is_selling(nmi):
offer[UNIT_PRICE] = unit_price_issue.max_value
else:
offer[UNIT_PRICE] = unit_price_issue.min_value
return tuple(offer)
def _needed(self, negotiator_id=None):
return (
self.awi.current_exogenous_input_quantity
+ self.awi.current_exogenous_output_quantity
- self.secured
)
def _is_selling(self, nmi):
return nmi.annotation["product"] == self.awi.my_output_product
class BetterAgent(SimpleAgent, ABC):
"""A greedy agent based on OneShotAgent with more sane strategy"""
def __init__(self, *args, concession_exponent=0.2, **kwargs):
super().__init__(*args, **kwargs)
self._e = concession_exponent
def propose(self, negotiator_id: str, state) -> "Outcome":
offer = super().propose(negotiator_id, state)
if not offer:
return None
offer = list(offer)
offer[UNIT_PRICE] = self._find_good_price(self.get_nmi(negotiator_id), state)
return tuple(offer)
def respond(self, negotiator_id, state):
offer = state.current_offer
if not offer:
return ResponseType.REJECT_OFFER
response = super().respond(negotiator_id, state)
if response != ResponseType.ACCEPT_OFFER:
return response
nmi = self.get_nmi(negotiator_id)
return (
response
if self._is_good_price(nmi, state, offer[UNIT_PRICE])
else ResponseType.REJECT_OFFER
)
def _is_good_price(self, nmi, state, price):
"""Checks if a given price is good enough at this stage"""
mn, mx = self._price_range(nmi)
th = self._th(state.step, nmi.n_steps)
# a good price is one better than the threshold
if self._is_selling(nmi):
return (price - mn) >= th * (mx - mn)
else:
return (mx - price) >= th * (mx - mn)
def _find_good_price(self, nmi, state):
"""Finds a good-enough price conceding linearly over time"""
mn, mx = self._price_range(nmi)
th = self._th(state.step, nmi.n_steps)
# offer a price that is around th of your best possible price
if self._is_selling(nmi):
return mn + th * (mx - mn)
else:
return mx - th * (mx - mn)
def _price_range(self, nmi):
"""Finds the minimum and maximum prices"""
mn = nmi.issues[UNIT_PRICE].min_value
mx = nmi.issues[UNIT_PRICE].max_value
return mn, mx
def _th(self, step, n_steps):
"""calculates a descending threshold (0 <= th <= 1)"""
return ((n_steps - step - 1) / (n_steps - 1)) ** self._e
class AdaptiveAgent(BetterAgent, ABC):
"""Considers best price offers received when making its decisions"""
def __init__(self, *args, concession_exponent=0.2, **kwargs):
super().__init__(*args, concession_exponent=concession_exponent, **kwargs)
self._best_selling, self._best_buying = 0.0, float("inf")
def init(self):
super().init()
def step(self):
super().step()
self._best_selling, self._best_buying = 0.0, float("inf")
def respond(self, negotiator_id, state):
"""Save the best price received"""
offer = state.current_offer
if not offer:
return ResponseType.REJECT_OFFER
response = super().respond(negotiator_id, state)
nmi = self.get_nmi(negotiator_id)
if self._is_selling(nmi):
self._best_selling = max(offer[UNIT_PRICE], self._best_selling)
else:
self._best_buying = min(offer[UNIT_PRICE], self._best_buying)
return response
def _price_range(self, nmi):
"""Limits the price by the best price received"""
mn, mx = super()._price_range(nmi)
if self._is_selling(nmi):
mn = max(mn, self._best_selling)
else:
mx = min(mx, self._best_buying)
return mn, mx
class Gentle(AdaptiveAgent, ABC):
better_agent = BetterAgent(concession_exponent=0.2)
def __init__(
self,
*args,
acc_price_slack=float("inf"),
step_price_slack=float("inf"),
opp_price_slack=0.0,
opp_acc_price_slack=0.2,
range_slack=0.03,
concession_exponent=0.1,
worst_opp_acc_price_slack=0.0,
first_offer_price_slack=INF,
**kwargs,
):
super().__init__(*args, **kwargs)
self._e = concession_exponent
self._best_acc_selling, self._best_acc_buying = 0.0, float("inf")
self._best_opp_selling = defaultdict(float)
self._best_opp_buying = defaultdict(lambda: float("inf"))
self._best_opp_acc_selling = defaultdict(float)
self._best_opp_acc_buying = defaultdict(lambda: float("inf"))
self._acc_price_slack = acc_price_slack
self._step_price_slack = step_price_slack
self._opp_price_slack = opp_price_slack
self._opp_acc_price_slack = opp_acc_price_slack
self._range_slack = range_slack
        self.new_price_selling, self.new_price_buying = float("inf"), 0.0  # negotiation prices after a price change
        self.new_price_slack = 0.05
        self.concession_threshold = 3  # threshold on the rate of change of concessions
        self.worst_opp_acc_price_slack = (
            worst_opp_acc_price_slack  # slack variable for the agreement price that is best for the opponent
        )
        self.first_offer_price_slack = (
            first_offer_price_slack  # slack variable for the offer price to partners without any agreement
        )
        # trading information
        self.success_list = defaultdict(lambda: list())  # trade data from successful negotiations
        self.success_contracts = []  # list of successfully agreed contracts
        self.failure_opp_list = []  # list of partners whose negotiation failed on each day
        self.opp_offer_list = defaultdict(lambda: list())  # list of the opponents' offers
        self.my_offer_list = defaultdict(lambda: list())  # list of my own offers
        self.nego_info = {}  # negotiation information
def init(self):
"""Initialize the quantities and best prices received so far"""
super().init()
def step(self):
"""Initialize the quantities and best prices received for next step"""
super().step()
self._best_opp_selling = defaultdict(float)
self._best_opp_buying = defaultdict(lambda: float("inf"))
self.failure_opp_list = []
self._opp_price_slack = 0.0
self._opp_acc_price_slack = 0.0
self.worst_opp_acc_price_slack = 0.0
self.first_offer_price_slack = INF
def on_negotiation_start(self, negotiator_id: str, state: MechanismState) -> None:
is_selling = self._is_selling(self.get_nmi(negotiator_id))
nmi = self.get_nmi(negotiator_id)
if is_selling:
self.nego_info["my_name"] = shorten_name(
self.get_nmi(negotiator_id).annotation["seller"]
)
else:
self.nego_info["my_name"] = shorten_name(
self.get_nmi(negotiator_id).annotation["buyer"]
)
def on_negotiation_success(self, contract, mechanism):
"""Record sales/supplies secured"""
super().on_negotiation_success(contract, mechanism)
# update my current best price to use for limiting concession in other
# negotiations
up = contract.agreement["unit_price"]
if self._is_selling(mechanism):
partner = contract.annotation["buyer"]
self._best_acc_selling = max(up, self._best_acc_selling)
self._best_opp_acc_selling[partner] = max(
up, self._best_opp_acc_selling[partner]
)
else:
partner = contract.annotation["seller"]
self._best_acc_buying = min(up, self._best_acc_buying)
self._best_opp_acc_buying[partner] = min(
up, self._best_opp_acc_buying[partner]
)
        # record the trade data
if self._is_selling(mechanism):
self.success_list[shorten_name(contract.partners[0])].append(
[
contract.agreement["quantity"],
self.awi.current_step,
contract.agreement["unit_price"],
]
)
else:
self.success_list[shorten_name(contract.partners[1])].append(
[
contract.agreement["quantity"],
self.awi.current_step,
contract.agreement["unit_price"],
]
)
self.success_contracts.append(contract)
def propose(self, negotiator_id: str, state) -> "Outcome":
        self.nego_info["negotiation_step"] = state.step  # record the negotiation step
offer = super().propose(negotiator_id, state)
if offer is None:
return None
offer = list(offer)
offer[QUANTITY] = min(self.awi.profile.n_lines, offer[QUANTITY])
        self._record_information({shorten_name(negotiator_id): offer}, True)  # store the offer
        # for debugging
        # if self.nego_info["negotiation_step"] == 19:
        #     print_log(["step", "proposer name"], [self.awi.current_step, self.nego_info["my_name"]])
        #     print_log("offer", offer)
return tuple(offer)
def respond(self, negotiator_id, state):
offer = state.current_offer
if not offer:
return ResponseType.REJECT_OFFER
# find the quantity I still need and end negotiation if I need nothing more
        self.nego_info["negotiation_step"] = state.step  # record the negotiation step
        self._record_information(
            {shorten_name(negotiator_id): offer}, False
        )  # store the offer
# update my current best price to use for limiting concession in other
# negotiations
nmi = self.get_nmi(negotiator_id)
up = offer[UNIT_PRICE]
if self._is_selling(nmi):
partner = nmi.annotation["buyer"]
self._best_opp_selling[partner] = max(up, self._best_selling)
else:
partner = nmi.annotation["seller"]
self._best_opp_buying[partner] = min(up, self._best_buying)
response = super().respond(negotiator_id, state)
        # for debugging
        # if self.nego_info["negotiation_step"] == 19:
        #     print_log(["step", "responder name", "to"], [self.awi.current_step, self.nego_info["my_name"], negotiator_id])
return response
def _find_good_price(self, nmi, state):
"""Finds a good-enough price conceding linearly over time"""
is_selling = self._is_selling(nmi)
# offer a price that is around th of your best possible price
        # parameter setup
name = nmi.annotation["buyer"] if is_selling else nmi.annotation["seller"]
success_agreements = opponent_agreements(
nmi, is_selling, self.success_contracts
)
accept_agreements = [
_
for _ in success_agreements
if _.mechanism_state["current_proposer"] == self.nego_info["my_name"]
]
offer_agreements = [
_
for _ in success_agreements
if _.mechanism_state["current_proposer"] != self.nego_info["my_name"]
]
step = state.step
rank = opponent_rank(
list(self.active_negotiators.keys()), is_selling, self.success_contracts
)
good_price_range = self._good_price_range(nmi)
std = good_price_range["max"] if is_selling else good_price_range["min"]
# std = (good_price_range["min"] + good_price_range["max"]) / 2
# std = self.awi.trading_prices[1]
pattern = ["offer"]
if self.nego_info["negotiation_step"] >= nmi.n_steps - 1:
if self._environment_factor(nmi) >= 0.5:
pattern.append("good_env")
if success_agreements:
if accept_agreements:
pattern.append("accept_agreements")
if price_comparison(
is_selling,
worst_opp_acc_price(
nmi, is_selling, self.success_contracts
),
std,
):
pattern.append("good")
else:
pattern.append("bad")
else:
pattern.append("offer_agreements")
if list(rank.keys())[0] == name:
step = min(state.step + 1, nmi.n_steps - 1)
elif self.nego_info["negotiation_step"] >= nmi.n_steps - 1:
pattern.append("first_offer")
else:
pattern.append("") # パラメタを変更しない
else:
pattern.append("bad_env")
if success_agreements:
if accept_agreements:
pattern.append("accept_agreements")
if price_comparison(
is_selling,
worst_opp_acc_price(
nmi, is_selling, self.success_contracts
),
std,
):
pattern.append("good")
else:
pattern.append("bad")
else:
pattern.append("offer_agreements")
if list(rank.keys())[0] == name:
step = min(state.step + 1, nmi.n_steps - 1)
elif self.nego_info["negotiation_step"] >= nmi.n_steps - 1:
pattern.append("first_offer")
else:
pattern.append("") # パラメタを変更しない
self._set_param(pattern)
mn, mx = self._price_range(nmi)
th = self._th(step, nmi.n_steps)
if is_selling:
return mn + th * (mx - mn)
else:
return mx - th * (mx - mn)
def _is_good_price(self, nmi, state, price):
"""Checks if a given price is good enough at this stage"""
is_selling = self._is_selling(nmi)
        # decide based on the opponent's concession rate
name = nmi.annotation["buyer"] if is_selling else nmi.annotation["seller"]
success_agreements = opponent_agreements(
nmi, is_selling, self.success_contracts
)
pattern = ["accept"]
if self.nego_info["negotiation_step"] >= nmi.n_steps - 1:
            # change in the concession rate
if self._opp_concession_rate_change(name):
pattern.append("concession")
else:
if success_agreements:
pattern.append("persist")
else:
up = nmi.issues[UNIT_PRICE]
if is_selling:
return price >= (up.min_value + up.max_value) / 2
else:
return price <= (up.min_value + up.max_value) / 2
# env-factor
if self._environment_factor(nmi) > 0.5:
pattern.append("good_env")
else:
pattern.append("bad_env")
self._set_param(pattern)
mn, mx = self._price_range(nmi)
th = self._th(state.step, nmi.n_steps)
# a good price is one better than the threshold
if self._is_selling(nmi):
return (price - mn) >= th * (mx - mn)
else:
return (mx - price) >= th * (mx - mn)
def _good_price_range(self, nmi):
"""エージェントにとって良い価格帯を見つける"""
is_selling = self._is_selling(nmi)
mx = nmi.issues[UNIT_PRICE].max_value
mn = nmi.issues[UNIT_PRICE].min_value
price_range = {
"min": mn
+ Gentle.better_agent._th(nmi.n_steps - 2, nmi.n_steps) * (mx - mn),
"max": mx
- Gentle.better_agent._th(nmi.n_steps - 2, nmi.n_steps) * (mx - mn),
}
return price_range
def _first_offer_price(self, name: str):
"""合意のない相手に対するofferの価格を決定"""
nmi = self.get_nmi(name)
is_selling = self._is_selling(nmi)
time = t(self.awi.current_step, self.awi.n_steps)
        # determine the reference price
up = nmi.issues[UNIT_PRICE]
std = (up.max_value + up.min_value) / 2
        # decide the price (concede smoothly)
strong_range = {"max": 0.2, "min": -0.3}
rng = strong_range["max"] - strong_range["min"]
th = [0.0, 0.3]
if time < th[0]:
strong_degree = strong_range["max"]
elif th[0] <= time <= th[1] or 0.5 < self._self_factor(nmi):
strong_degree = strong_range["max"] - rng * min(
time - th[0] / th[1] - th[0], 1
)
else:
strong_degree = strong_range["min"] - 0.1
price = std * (1 + TF_sign(is_selling) * strong_degree)
return price
def _price_range(self, nmi):
"""Limits the price by the best price received"""
mn = nmi.issues[UNIT_PRICE].min_value
mx = nmi.issues[UNIT_PRICE].max_value
is_selling = self._is_selling(nmi)
name = nmi.annotation["buyer"] if is_selling else nmi.annotation["seller"]
if self._is_selling(nmi):
partner = nmi.annotation["buyer"]
mn = min(
mx * (1 - self._range_slack),
# self.new_price_selling * (1 - self.new_price_slack),
# min([_[UNIT_PRICE] for _ in self.my_offer_list[partner]] + [float("inf")]),
# min([_.agreement["unit_price"] for _ in success_agreements] + [float("inf")]),
worst_opp_acc_price(nmi, is_selling, self.success_contracts)
* (1 + self.worst_opp_acc_price_slack),
max(
[mn]
+ [
p * (1 - slack)
for p, slack in (
(self._best_selling, self._step_price_slack),
# (self._best_acc_selling, self._acc_price_slack),
(self._best_opp_selling[partner], self._opp_price_slack),
(
self._best_opp_acc_selling[partner],
self._opp_acc_price_slack,
),
(
self._first_offer_price(name),
self.first_offer_price_slack,
),
)
]
),
)
else:
partner = nmi.annotation["seller"]
mx = max(
mn * (1 + self._range_slack),
# self.new_price_buying * (1 + self.new_price_slack),
# max([_[UNIT_PRICE] for _ in self.my_offer_list[partner]] + [0]),
# max([_.agreement["unit_price"] for _ in success_agreements] + [0]),
worst_opp_acc_price(nmi, is_selling, self.success_contracts)
* (1 - self.worst_opp_acc_price_slack),
min(
[mx]
+ [
p * (1 + slack)
for p, slack in (
(self._best_buying, self._step_price_slack),
# (self._best_acc_buying, self._acc_price_slack),
(self._best_opp_buying[partner], self._opp_price_slack),
(
self._best_opp_acc_buying[partner],
self._opp_acc_price_slack,
),
# (self.opp_next_price(partner), self._opp_price_slack),
(
self._first_offer_price(name),
self.first_offer_price_slack,
),
)
]
),
)
return mn, mx
def _self_factor(self, nmi):
"""自身の交渉の進捗を評価"""
prev_agreement = 0 # 前日合意できたか
agreement_ratio = 0 # 相手との交渉成功割合
good_agreement = 0 # 良い値段で合意できたか
w_prev = 4
w_good = 2
w_prev, w_good = param_normalization([w_prev, w_good])
        # success ratio of negotiations so far
success_agreements = [
[_.agreement, _.mechanism_state["current_proposer"]]
for _ in self.success_contracts
]
if success_agreements:
simulation_steps = {lis[0]["time"] for lis in success_agreements}
prev_agreement = len(simulation_steps) / (self.awi.current_step + 1)
else:
prev_agreement = 1
        # whether agreements have been reached at good prices
success_agreements = opponent_agreements(
nmi, self._is_selling(nmi), self.success_contracts
)
if success_agreements:
tp = self.awi.trading_prices[1]
prev_up = success_agreements[-1].agreement["unit_price"]
if self._is_selling(nmi):
good_agreement = min(max(0.5 - (prev_up - tp) / tp, 0), 1)
else:
good_agreement = max(min(0.5 + (prev_up - tp) / tp, 1), 0)
else:
good_agreement = 0.5
        # for debugging
        # print_log("params", param_normalization([w_prev, w_ratio, w_good]))
        # print_log("params", [prev_agreement, agreement_ratio, good_agreement])
        # print_log("self_factor", w_prev * prev_agreement + w_good * good_agreement)
        # weighted sum of the factors
return w_prev * prev_agreement + w_good * good_agreement
def _environment_factor(self, nmi):
"""マーケットの状況を評価"""
if self._is_selling(nmi):
n_sellers = len(self.awi.all_suppliers[1])
n_buyers = len(self.awi.my_consumers)
return min(n_buyers / n_sellers / 2, 1)
else:
n_sellers = len(self.awi.my_suppliers)
n_buyers = len(self.awi.all_consumers[1])
return min(n_sellers / n_buyers / 2, 1)
def _opp_concession_rate_change(self, name: str):
"""相手の譲歩の変化率を計算"""
nmi = self.get_nmi(name)
offers = [
_
for _ in self.opp_offer_list[shorten_name(name)]
if _[TIME] == self.awi.current_step
]
if len(offers) >= 3:
prev = offers[-2][UNIT_PRICE] - offers[-3][UNIT_PRICE]
now = offers[-1][UNIT_PRICE] - offers[-2][UNIT_PRICE]
if prev == 0:
return 0
return now / prev > self.concession_threshold
else:
return False
def _record_information(self, offers: dict, mine: bool):
"""offer や utilを保存"""
# offerを保存
if mine:
for k, v in offers.items():
o = list(v)
o.append(self.nego_info["negotiation_step"])
self.my_offer_list[shorten_name(k)].append(o)
else:
for k, v in offers.items():
o = list(v)
o.append(self.nego_info["negotiation_step"])
self.opp_offer_list[shorten_name(k)].append(o)
def _set_param(self, pattern: List[str]) -> None:
"""
        Set the various parameters according to the given pattern.
:param pattern:
:return: None
"""
r = 0.1
if pattern[0] == "accept":
self.first_offer_price_slack = INF
if pattern[1] == "concession":
self._opp_price_slack = 0.0
self._opp_acc_price_slack = 0.2
self.worst_opp_acc_price_slack = INF
elif pattern[1] == "persist":
# self._step_price_slack = 0.0
self._opp_price_slack = 0.0
self._opp_acc_price_slack = 0.0
self.worst_opp_acc_price_slack = INF
elif pattern[0] == "offer":
self._step_price_slack = INF
if pattern[1] == "good_env":
if pattern[2] == "accept_agreements":
self.first_offer_price_slack = INF
if pattern[3] == "good":
self._opp_price_slack = 0.0
self._opp_acc_price_slack = -INF
self.worst_opp_acc_price_slack = 0.0
elif pattern[3] == "bad":
self._opp_price_slack = 0.0
self._opp_acc_price_slack = -INF
self.worst_opp_acc_price_slack = r
elif pattern[2] == "offer_agreements":
self._opp_price_slack = 0.0
self._opp_acc_price_slack = -INF
self.worst_opp_acc_price_slack = 0.0
self.first_offer_price_slack = INF
elif pattern[2] == "first_offer":
self._opp_price_slack = 0.0
self._opp_acc_price_slack = 0.0
self.worst_opp_acc_price_slack = 0.0
self.first_offer_price_slack = 0.0
elif pattern[1] == "bad_env":
if pattern[2] == "accept_agreements":
self.first_offer_price_slack = INF
if pattern[3] == "good":
self._opp_price_slack = 0.0
self._opp_acc_price_slack = -INF
self.worst_opp_acc_price_slack = 0.0
elif pattern[3] == "bad":
self._opp_price_slack = 0.0
self._opp_acc_price_slack = -INF
self.worst_opp_acc_price_slack = r
elif pattern[2] == "offer_agreements":
self._opp_price_slack = 0.0
self._opp_acc_price_slack = -INF
self.worst_opp_acc_price_slack = 0.0
self.first_offer_price_slack = INF
elif pattern[2] == "first_offer":
self._opp_price_slack = 0.0
self._opp_acc_price_slack = 0.0
self.worst_opp_acc_price_slack = 0.0
self.first_offer_price_slack = 0.0
def print_log(names, values, on=False):
if on:
if type(names) == str:
pass # print(f"{names}:{values}")
if type(names) == list:
for name, value in dict(zip(names, values)).items():
pass # print(f"{name}:{value}", end=" ")
pass # print()
from statistics import mean
from typing import List
from negmas import SAONMI
QUANTITY = 0
TIME = 1
UNIT_PRICE = 2
def param_normalization(params: list):
params_sum = sum(params)
return [_ / params_sum for _ in params]
def t(step, n_steps):
return (step + 1) / n_steps
def shorten_name(name: str):
return name.split("-")[0]
def opponent_agreements(nmi: SAONMI, is_selling: bool, success_contracts: list) -> list:
"""指定された相手との合意(contract)を返す"""
if is_selling:
opponent_name = nmi.annotation["buyer"]
success_agreements = [
_ for _ in success_contracts if _.partners[0] == opponent_name
]
else:
opponent_name = nmi.annotation["seller"]
success_agreements = [
_ for _ in success_contracts if _.partners[1] == opponent_name
]
return success_agreements
def worst_opp_acc_price(
nmi: SAONMI, is_selling: bool, success_contracts: list
) -> float:
"""
    Return the price that is best for the opponent among the agreements made
    with the specified opponent. Return 0 or inf when there is no agreement.
:param nmi:
:param is_selling:
:param success_contracts:
:return worst_opp_acc_price:
"""
success_agreements = opponent_agreements(nmi, is_selling, success_contracts)
if is_selling:
price = min(
[_.agreement["unit_price"] for _ in success_agreements] + [float("inf")]
)
else:
price = max([_.agreement["unit_price"] for _ in success_agreements] + [0])
return price
def TF_sign(x: bool):
"""Trueなら1,Falseなら-1を返す"""
if x:
return 1
else:
return -1
def opponent_rank(opponent_names: List[str], is_selling: bool, success_contract: list):
"""相手を合意価格によって順位付け"""
rank = {}
if is_selling:
for name in opponent_names:
agreements = [
_.agreement["unit_price"]
for _ in success_contract
if _.partners[0] == name
]
rank[name] = mean(agreements) if agreements else 0
        rank = dict(sorted(rank.items(), key=lambda x: x[1], reverse=True))  # keep the descending order
else:
for name in opponent_names:
agreements = [
_.agreement["unit_price"]
for _ in success_contract
if _.partners[1] == name
]
rank[name] = mean(agreements) if agreements else float("inf")
        rank = dict(sorted(rank.items(), key=lambda x: x[1], reverse=False))  # keep the ascending order
return rank
def price_comparison(is_selling: bool, x: float, y: float) -> bool:
"""
    Return True if, of the two given prices x and y, x is the better one for the agent.
:param is_selling:
:param x:
:param y:
:return: True or False
"""
if is_selling:
return x >= y
else:
return x <= y | PypiClean |
/pyfolio-tekton-1.3.tar.gz/pyfolio-tekton-1.3/pyfolio/tears.py | from __future__ import division
import warnings
from time import time
import empyrical as ep
from IPython.display import display, Markdown
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats
from . import _seaborn as sns
from . import capacity
from . import perf_attrib
from . import plotting
from . import pos
from . import risk
from . import round_trips
from . import timeseries
from . import txn
from . import utils
try:
from . import bayesian
have_bayesian = True
except ImportError:
warnings.warn(
"Could not import bayesian submodule due to missing pymc3 dependency.",
ImportWarning)
have_bayesian = False
FACTOR_PARTITIONS = {
'style': ['momentum', 'size', 'value', 'reversal_short_term',
'volatility'],
'sector': ['basic_materials', 'consumer_cyclical', 'financial_services',
'real_estate', 'consumer_defensive', 'health_care',
'utilities', 'communication_services', 'energy', 'industrials',
'technology']
}
def timer(msg_body, previous_time):
current_time = time()
run_time = current_time - previous_time
message = "\nFinished " + msg_body + " (required {:.2f} seconds)."
print(message.format(run_time))
return current_time
def create_full_tear_sheet(returns,
positions=None,
transactions=None,
market_data=None,
benchmark_rets=None,
slippage=None,
live_start_date=None,
sector_mappings=None,
bayesian=False,
round_trips=False,
estimate_intraday='infer',
hide_positions=False,
cone_std=(1.0, 1.5, 2.0),
bootstrap=False,
unadjusted_returns=None,
style_factor_panel=None,
sectors=None,
caps=None,
shares_held=None,
volumes=None,
percentile=None,
turnover_denom='AGB',
set_context=True,
factor_returns=None,
factor_loadings=None,
pos_in_dollars=True,
header_rows=None,
factor_partitions=FACTOR_PARTITIONS):
"""
Generate a number of tear sheets that are useful
for analyzing a strategy's performance.
- Fetches benchmarks if needed.
- Creates tear sheets for returns, and significant events.
If possible, also creates tear sheets for position analysis,
transaction analysis, and Bayesian analysis.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- Time series with decimal returns.
- Example:
2015-07-16 -0.012143
2015-07-17 0.045350
2015-07-20 0.030957
2015-07-21 0.004902
positions : pd.DataFrame, optional
Daily net position values.
- Time series of dollar amount invested in each position and cash.
- Days where stocks are not held can be represented by 0 or NaN.
- Non-working capital is labelled 'cash'
- Example:
index 'AAPL' 'MSFT' cash
2004-01-09 13939.3800 -14012.9930 711.5585
2004-01-12 14492.6300 -14624.8700 27.1821
2004-01-13 -13853.2800 13653.6400 -43.6375
transactions : pd.DataFrame, optional
Executed trade volumes and fill prices.
- One row per trade.
- Trades on different names that occur at the
same time will have identical indicies.
- Example:
index amount price symbol
2004-01-09 12:18:01 483 324.12 'AAPL'
2004-01-09 12:18:01 122 83.10 'MSFT'
2004-01-13 14:12:23 -75 340.43 'AAPL'
market_data : pd.Panel, optional
Panel with items axis of 'price' and 'volume' DataFrames.
The major and minor axes should match those of the
        passed positions DataFrame (same dates and symbols).
slippage : int/float, optional
Basis points of slippage to apply to returns before generating
tearsheet stats and plots.
If a value is provided, slippage parameter sweep
plots will be generated from the unadjusted returns.
Transactions and positions must also be passed.
- See txn.adjust_returns_for_slippage for more details.
live_start_date : datetime, optional
The point in time when the strategy began live trading,
after its backtest period. This datetime should be normalized.
hide_positions : bool, optional
If True, will not output any symbol names.
bayesian: boolean, optional
If True, causes the generation of a Bayesian tear sheet.
round_trips: boolean, optional
If True, causes the generation of a round trip tear sheet.
sector_mappings : dict or pd.Series, optional
Security identifier to sector mapping.
Security ids as keys, sectors as values.
estimate_intraday: boolean or str, optional
Instead of using the end-of-day positions, use the point in the day
where we have the most $ invested. This will adjust positions to
better approximate and represent how an intraday strategy behaves.
By default, this is 'infer', and an attempt will be made to detect
an intraday strategy. Specifying this value will prevent detection.
cone_std : float, or tuple, optional
If float, The standard deviation to use for the cone plots.
If tuple, Tuple of standard deviation values to use for the cone plots
- The cone is a normal distribution with this standard deviation
centered around a linear regression.
bootstrap : boolean (optional)
Whether to perform bootstrap analysis for the performance
metrics. Takes a few minutes longer.
turnover_denom : str
Either AGB or portfolio_value, default AGB.
- See full explanation in txn.get_turnover.
factor_returns : pd.Dataframe, optional
Returns by factor, with date as index and factors as columns
factor_loadings : pd.Dataframe, optional
Factor loadings for all days in the date range, with date and
ticker as index, and factors as columns.
pos_in_dollars : boolean, optional
indicates whether positions is in dollars
header_rows : dict or OrderedDict, optional
Extra rows to display at the top of the perf stats table.
set_context : boolean, optional
If True, set default plotting style context.
- See plotting.context().
factor_partitions : dict, optional
dict specifying how factors should be separated in perf attrib
factor returns and risk exposures plots
- See create_perf_attrib_tear_sheet().
"""
if (unadjusted_returns is None) and (slippage is not None) and\
(transactions is not None):
unadjusted_returns = returns.copy()
returns = txn.adjust_returns_for_slippage(returns, positions,
transactions, slippage)
positions = utils.check_intraday(estimate_intraday, returns,
positions, transactions)
create_returns_tear_sheet(
returns,
positions=positions,
transactions=transactions,
live_start_date=live_start_date,
cone_std=cone_std,
benchmark_rets=benchmark_rets,
bootstrap=bootstrap,
turnover_denom=turnover_denom,
header_rows=header_rows,
set_context=set_context)
create_interesting_times_tear_sheet(returns,
benchmark_rets=benchmark_rets,
set_context=set_context)
if positions is not None:
create_position_tear_sheet(returns, positions,
hide_positions=hide_positions,
set_context=set_context,
sector_mappings=sector_mappings,
estimate_intraday=False)
if transactions is not None:
create_txn_tear_sheet(returns, positions, transactions,
unadjusted_returns=unadjusted_returns,
estimate_intraday=False,
set_context=set_context)
if round_trips:
create_round_trip_tear_sheet(
returns=returns,
positions=positions,
transactions=transactions,
sector_mappings=sector_mappings,
estimate_intraday=False)
if market_data is not None:
create_capacity_tear_sheet(returns, positions, transactions,
market_data,
liquidation_daily_vol_limit=0.2,
last_n_days=125,
estimate_intraday=False)
if style_factor_panel is not None:
create_risk_tear_sheet(positions, style_factor_panel, sectors,
caps, shares_held, volumes, percentile)
if factor_returns is not None and factor_loadings is not None:
create_perf_attrib_tear_sheet(returns, positions, factor_returns,
factor_loadings, transactions,
pos_in_dollars=pos_in_dollars,
factor_partitions=factor_partitions)
if bayesian:
create_bayesian_tear_sheet(returns,
live_start_date=live_start_date,
benchmark_rets=benchmark_rets,
set_context=set_context)
@plotting.customize
def create_simple_tear_sheet(returns,
positions=None,
transactions=None,
benchmark_rets=None,
slippage=None,
estimate_intraday='infer',
live_start_date=None,
turnover_denom='AGB',
header_rows=None):
"""
Simpler version of create_full_tear_sheet; generates summary performance
statistics and important plots as a single image.
- Plots: cumulative returns, rolling beta, rolling Sharpe, underwater,
exposure, top 10 holdings, total holdings, long/short holdings,
daily turnover, transaction time distribution.
- Never accept market_data input (market_data = None)
- Never accept sector_mappings input (sector_mappings = None)
- Never perform bootstrap analysis (bootstrap = False)
    - Never hide positions on top 10 holdings plot (hide_positions = False)
- Always use default cone_std (cone_std = (1.0, 1.5, 2.0))
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- Time series with decimal returns.
- Example:
2015-07-16 -0.012143
2015-07-17 0.045350
2015-07-20 0.030957
2015-07-21 0.004902
positions : pd.DataFrame, optional
Daily net position values.
- Time series of dollar amount invested in each position and cash.
- Days where stocks are not held can be represented by 0 or NaN.
- Non-working capital is labelled 'cash'
- Example:
index 'AAPL' 'MSFT' cash
2004-01-09 13939.3800 -14012.9930 711.5585
2004-01-12 14492.6300 -14624.8700 27.1821
2004-01-13 -13853.2800 13653.6400 -43.6375
transactions : pd.DataFrame, optional
Executed trade volumes and fill prices.
- One row per trade.
- Trades on different names that occur at the
same time will have identical indicies.
- Example:
index amount price symbol
2004-01-09 12:18:01 483 324.12 'AAPL'
2004-01-09 12:18:01 122 83.10 'MSFT'
2004-01-13 14:12:23 -75 340.43 'AAPL'
benchmark_rets : pd.Series, optional
Daily returns of the benchmark, noncumulative.
slippage : int/float, optional
Basis points of slippage to apply to returns before generating
tearsheet stats and plots.
If a value is provided, slippage parameter sweep
plots will be generated from the unadjusted returns.
Transactions and positions must also be passed.
- See txn.adjust_returns_for_slippage for more details.
live_start_date : datetime, optional
The point in time when the strategy began live trading,
after its backtest period. This datetime should be normalized.
turnover_denom : str, optional
Either AGB or portfolio_value, default AGB.
- See full explanation in txn.get_turnover.
header_rows : dict or OrderedDict, optional
Extra rows to display at the top of the perf stats table.
set_context : boolean, optional
If True, set default plotting style context.
"""
positions = utils.check_intraday(estimate_intraday, returns,
positions, transactions)
if (slippage is not None) and (transactions is not None):
returns = txn.adjust_returns_for_slippage(returns, positions,
transactions, slippage)
always_sections = 4
positions_sections = 4 if positions is not None else 0
transactions_sections = 2 if transactions is not None else 0
live_sections = 1 if live_start_date is not None else 0
benchmark_sections = 1 if benchmark_rets is not None else 0
vertical_sections = sum([
always_sections,
positions_sections,
transactions_sections,
live_sections,
benchmark_sections,
])
if live_start_date is not None:
live_start_date = ep.utils.get_utc_timestamp(live_start_date)
plotting.show_perf_stats(returns,
benchmark_rets,
positions=positions,
transactions=transactions,
turnover_denom=turnover_denom,
live_start_date=live_start_date,
header_rows=header_rows)
fig = plt.figure(figsize=(14, vertical_sections * 6))
gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
ax_rolling_returns = plt.subplot(gs[:2, :])
i = 2
if benchmark_rets is not None:
ax_rolling_beta = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_rolling_sharpe = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_underwater = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
plotting.plot_rolling_returns(returns,
factor_returns=benchmark_rets,
live_start_date=live_start_date,
cone_std=(1.0, 1.5, 2.0),
ax=ax_rolling_returns)
ax_rolling_returns.set_title('Cumulative returns')
if benchmark_rets is not None:
plotting.plot_rolling_beta(returns, benchmark_rets, ax=ax_rolling_beta)
plotting.plot_rolling_sharpe(returns, ax=ax_rolling_sharpe)
plotting.plot_drawdown_underwater(returns, ax=ax_underwater)
if positions is not None:
# Plot simple positions tear sheet
ax_exposures = plt.subplot(gs[i, :])
i += 1
ax_top_positions = plt.subplot(gs[i, :], sharex=ax_exposures)
i += 1
ax_holdings = plt.subplot(gs[i, :], sharex=ax_exposures)
i += 1
ax_long_short_holdings = plt.subplot(gs[i, :])
i += 1
positions_alloc = pos.get_percent_alloc(positions)
plotting.plot_exposures(returns, positions, ax=ax_exposures)
plotting.show_and_plot_top_positions(returns,
positions_alloc,
show_and_plot=0,
hide_positions=False,
ax=ax_top_positions)
plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings)
plotting.plot_long_short_holdings(returns, positions_alloc,
ax=ax_long_short_holdings)
if transactions is not None:
# Plot simple transactions tear sheet
ax_turnover = plt.subplot(gs[i, :])
i += 1
ax_txn_timings = plt.subplot(gs[i, :])
i += 1
plotting.plot_turnover(returns,
transactions,
positions,
ax=ax_turnover)
plotting.plot_txn_time_hist(transactions, ax=ax_txn_timings)
for ax in fig.axes:
plt.setp(ax.get_xticklabels(), visible=True)
@plotting.customize
def create_returns_tear_sheet(returns, positions=None,
transactions=None,
live_start_date=None,
cone_std=(1.0, 1.5, 2.0),
benchmark_rets=None,
bootstrap=False,
turnover_denom='AGB',
header_rows=None,
return_fig=False):
"""
Generate a number of plots for analyzing a strategy's returns.
- Fetches benchmarks, then creates the plots on a single figure.
- Plots: rolling returns (with cone), rolling beta, rolling sharpe,
rolling Fama-French risk factors, drawdowns, underwater plot, monthly
and annual return plots, daily similarity plots,
and return quantile box plot.
- Will also print the start and end dates of the strategy,
performance statistics, drawdown periods, and the return range.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame, optional
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame, optional
Executed trade volumes and fill prices.
- See full explanation in create_full_tear_sheet.
live_start_date : datetime, optional
The point in time when the strategy began live trading,
after its backtest period.
cone_std : float, or tuple, optional
If float, The standard deviation to use for the cone plots.
If tuple, Tuple of standard deviation values to use for the cone plots
- The cone is a normal distribution with this standard deviation
centered around a linear regression.
benchmark_rets : pd.Series, optional
Daily noncumulative returns of the benchmark.
- This is in the same style as returns.
bootstrap : boolean, optional
Whether to perform bootstrap analysis for the performance
metrics. Takes a few minutes longer.
turnover_denom : str, optional
Either AGB or portfolio_value, default AGB.
- See full explanation in txn.get_turnover.
header_rows : dict or OrderedDict, optional
Extra rows to display at the top of the perf stats table.
return_fig : boolean, optional
If True, returns the figure that was plotted on.
"""
if benchmark_rets is not None:
returns = utils.clip_returns_to_benchmark(returns, benchmark_rets)
plotting.show_perf_stats(returns, benchmark_rets,
positions=positions,
transactions=transactions,
turnover_denom=turnover_denom,
bootstrap=bootstrap,
live_start_date=live_start_date,
header_rows=header_rows)
plotting.show_worst_drawdown_periods(returns)
vertical_sections = 11
if live_start_date is not None:
vertical_sections += 1
live_start_date = ep.utils.get_utc_timestamp(live_start_date)
if benchmark_rets is not None:
vertical_sections += 1
if bootstrap:
vertical_sections += 1
fig = plt.figure(figsize=(14, vertical_sections * 6))
gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
ax_rolling_returns = plt.subplot(gs[:2, :])
i = 2
ax_rolling_returns_vol_match = plt.subplot(gs[i, :],
sharex=ax_rolling_returns)
i += 1
ax_rolling_returns_log = plt.subplot(gs[i, :],
sharex=ax_rolling_returns)
i += 1
ax_returns = plt.subplot(gs[i, :],
sharex=ax_rolling_returns)
i += 1
if benchmark_rets is not None:
ax_rolling_beta = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_rolling_volatility = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_rolling_sharpe = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_drawdown = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_underwater = plt.subplot(gs[i, :], sharex=ax_rolling_returns)
i += 1
ax_monthly_heatmap = plt.subplot(gs[i, 0])
ax_annual_returns = plt.subplot(gs[i, 1])
ax_monthly_dist = plt.subplot(gs[i, 2])
i += 1
ax_return_quantiles = plt.subplot(gs[i, :])
i += 1
plotting.plot_rolling_returns(
returns,
factor_returns=benchmark_rets,
live_start_date=live_start_date,
cone_std=cone_std,
ax=ax_rolling_returns)
ax_rolling_returns.set_title(
'Cumulative returns')
plotting.plot_rolling_returns(
returns,
factor_returns=benchmark_rets,
live_start_date=live_start_date,
cone_std=None,
volatility_match=(benchmark_rets is not None),
legend_loc=None,
ax=ax_rolling_returns_vol_match)
ax_rolling_returns_vol_match.set_title(
'Cumulative returns volatility matched to benchmark')
plotting.plot_rolling_returns(
returns,
factor_returns=benchmark_rets,
logy=True,
live_start_date=live_start_date,
cone_std=cone_std,
ax=ax_rolling_returns_log)
ax_rolling_returns_log.set_title(
'Cumulative returns on logarithmic scale')
plotting.plot_returns(
returns,
live_start_date=live_start_date,
ax=ax_returns,
)
ax_returns.set_title(
'Returns')
if benchmark_rets is not None:
plotting.plot_rolling_beta(
returns, benchmark_rets, ax=ax_rolling_beta)
plotting.plot_rolling_volatility(
returns, factor_returns=benchmark_rets, ax=ax_rolling_volatility)
plotting.plot_rolling_sharpe(
returns, ax=ax_rolling_sharpe)
# Drawdowns
plotting.plot_drawdown_periods(
returns, top=5, ax=ax_drawdown)
plotting.plot_drawdown_underwater(
returns=returns, ax=ax_underwater)
plotting.plot_monthly_returns_heatmap(returns, ax=ax_monthly_heatmap)
plotting.plot_annual_returns(returns, ax=ax_annual_returns)
plotting.plot_monthly_returns_dist(returns, ax=ax_monthly_dist)
plotting.plot_return_quantiles(
returns,
live_start_date=live_start_date,
ax=ax_return_quantiles)
if bootstrap and (benchmark_rets is not None):
ax_bootstrap = plt.subplot(gs[i, :])
plotting.plot_perf_stats(returns, benchmark_rets,
ax=ax_bootstrap)
elif bootstrap:
raise ValueError('bootstrap requires passing of benchmark_rets.')
for ax in fig.axes:
plt.setp(ax.get_xticklabels(), visible=True)
if return_fig:
return fig
@plotting.customize
def create_position_tear_sheet(returns, positions,
show_and_plot_top_pos=2, hide_positions=False,
return_fig=False, sector_mappings=None,
transactions=None, estimate_intraday='infer'):
"""
Generate a number of plots for analyzing a
strategy's positions and holdings.
- Plots: gross leverage, exposures, top positions, and holdings.
- Will also print the top positions held.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
show_and_plot_top_pos : int, optional
By default, this is 2, and both prints and plots the
top 10 positions.
If this is 0, it will only plot; if 1, it will only print.
hide_positions : bool, optional
If True, will not output any symbol names.
Overrides show_and_plot_top_pos to 0 to suppress text output.
return_fig : boolean, optional
If True, returns the figure that was plotted on.
sector_mappings : dict or pd.Series, optional
Security identifier to sector mapping.
Security ids as keys, sectors as values.
transactions : pd.DataFrame, optional
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
estimate_intraday: boolean or str, optional
Approximate returns for intraday strategies.
See description in create_full_tear_sheet.
"""
positions = utils.check_intraday(estimate_intraday, returns,
positions, transactions)
if hide_positions:
show_and_plot_top_pos = 0
vertical_sections = 7 if sector_mappings is not None else 6
fig = plt.figure(figsize=(14, vertical_sections * 6))
gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
ax_exposures = plt.subplot(gs[0, :])
ax_top_positions = plt.subplot(gs[1, :], sharex=ax_exposures)
ax_max_median_pos = plt.subplot(gs[2, :], sharex=ax_exposures)
ax_holdings = plt.subplot(gs[3, :], sharex=ax_exposures)
ax_long_short_holdings = plt.subplot(gs[4, :])
ax_gross_leverage = plt.subplot(gs[5, :], sharex=ax_exposures)
positions_alloc = pos.get_percent_alloc(positions)
plotting.plot_exposures(returns, positions, ax=ax_exposures)
plotting.show_and_plot_top_positions(
returns,
positions_alloc,
show_and_plot=show_and_plot_top_pos,
hide_positions=hide_positions,
ax=ax_top_positions)
plotting.plot_max_median_position_concentration(positions,
ax=ax_max_median_pos)
plotting.plot_holdings(returns, positions_alloc, ax=ax_holdings)
plotting.plot_long_short_holdings(returns, positions_alloc,
ax=ax_long_short_holdings)
plotting.plot_gross_leverage(returns, positions,
ax=ax_gross_leverage)
if sector_mappings is not None:
sector_exposures = pos.get_sector_exposures(positions,
sector_mappings)
if len(sector_exposures.columns) > 1:
sector_alloc = pos.get_percent_alloc(sector_exposures)
sector_alloc = sector_alloc.drop('cash', axis='columns')
ax_sector_alloc = plt.subplot(gs[6, :], sharex=ax_exposures)
plotting.plot_sector_allocations(returns, sector_alloc,
ax=ax_sector_alloc)
for ax in fig.axes:
plt.setp(ax.get_xticklabels(), visible=True)
if return_fig:
return fig
@plotting.customize
def create_txn_tear_sheet(returns, positions, transactions,
unadjusted_returns=None, estimate_intraday='infer',
return_fig=False):
"""
Generate a number of plots for analyzing a strategy's transactions.
Plots: turnover, daily volume, and a histogram of daily volume.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
unadjusted_returns : pd.Series, optional
Daily unadjusted returns of the strategy, noncumulative.
        Will plot additional slippage sweep analysis.
        - See pyfolio.plotting.plot_slippage_sweep and
          pyfolio.plotting.plot_slippage_sensitivity
estimate_intraday: boolean or str, optional
Approximate returns for intraday strategies.
See description in create_full_tear_sheet.
return_fig : boolean, optional
If True, returns the figure that was plotted on.
"""
positions = utils.check_intraday(estimate_intraday, returns,
positions, transactions)
vertical_sections = 6 if unadjusted_returns is not None else 4
fig = plt.figure(figsize=(14, vertical_sections * 6))
gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
ax_turnover = plt.subplot(gs[0, :])
ax_daily_volume = plt.subplot(gs[1, :], sharex=ax_turnover)
ax_turnover_hist = plt.subplot(gs[2, :])
ax_txn_timings = plt.subplot(gs[3, :])
plotting.plot_turnover(
returns,
transactions,
positions,
ax=ax_turnover)
plotting.plot_daily_volume(returns, transactions, ax=ax_daily_volume)
try:
plotting.plot_daily_turnover_hist(transactions, positions,
ax=ax_turnover_hist)
except ValueError:
warnings.warn('Unable to generate turnover plot.', UserWarning)
plotting.plot_txn_time_hist(transactions, ax=ax_txn_timings)
if unadjusted_returns is not None:
ax_slippage_sweep = plt.subplot(gs[4, :])
plotting.plot_slippage_sweep(unadjusted_returns,
positions,
transactions,
ax=ax_slippage_sweep
)
ax_slippage_sensitivity = plt.subplot(gs[5, :])
plotting.plot_slippage_sensitivity(unadjusted_returns,
positions,
transactions,
ax=ax_slippage_sensitivity
)
for ax in fig.axes:
plt.setp(ax.get_xticklabels(), visible=True)
if return_fig:
return fig
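# Illustrative usage (variable names are placeholders; inputs would normally
# come from a backtest, e.g. via utils.extract_rets_pos_txn_from_zipline):
#
#     returns, positions, transactions = \
#         utils.extract_rets_pos_txn_from_zipline(backtest_results)
#     create_txn_tear_sheet(returns, positions, transactions)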
@plotting.customize
def create_round_trip_tear_sheet(returns, positions, transactions,
sector_mappings=None,
estimate_intraday='infer', return_fig=False):
"""
Generate a number of figures and plots describing the duration,
frequency, and profitability of trade "round trips."
A round trip is started when a new long or short position is
opened and is only completed when the number of shares in that
position returns to or crosses zero.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
sector_mappings : dict or pd.Series, optional
Security identifier to sector mapping.
Security ids as keys, sectors as values.
estimate_intraday: boolean or str, optional
Approximate returns for intraday strategies.
See description in create_full_tear_sheet.
return_fig : boolean, optional
If True, returns the figure that was plotted on.
"""
positions = utils.check_intraday(estimate_intraday, returns,
positions, transactions)
transactions_closed = round_trips.add_closing_transactions(positions,
transactions)
# extract_round_trips requires BoD portfolio_value
trades = round_trips.extract_round_trips(
transactions_closed,
portfolio_value=positions.sum(axis='columns') / (1 + returns)
)
if len(trades) < 5:
warnings.warn(
"""Fewer than 5 round-trip trades made.
Skipping round trip tearsheet.""", UserWarning)
return
round_trips.print_round_trip_stats(trades)
plotting.show_profit_attribution(trades)
if sector_mappings is not None:
sector_trades = round_trips.apply_sector_mappings_to_round_trips(
trades, sector_mappings)
plotting.show_profit_attribution(sector_trades)
fig = plt.figure(figsize=(14, 3 * 6))
gs = gridspec.GridSpec(3, 2, wspace=0.5, hspace=0.5)
ax_trade_lifetimes = plt.subplot(gs[0, :])
ax_prob_profit_trade = plt.subplot(gs[1, 0])
ax_holding_time = plt.subplot(gs[1, 1])
ax_pnl_per_round_trip_dollars = plt.subplot(gs[2, 0])
ax_pnl_per_round_trip_pct = plt.subplot(gs[2, 1])
plotting.plot_round_trip_lifetimes(trades, ax=ax_trade_lifetimes)
plotting.plot_prob_profit_trade(trades, ax=ax_prob_profit_trade)
trade_holding_times = [x.days for x in trades['duration']]
sns.distplot(trade_holding_times, kde=False, ax=ax_holding_time)
ax_holding_time.set(xlabel='Holding time in days')
sns.distplot(trades.pnl, kde=False, ax=ax_pnl_per_round_trip_dollars)
ax_pnl_per_round_trip_dollars.set(xlabel='PnL per round-trip trade in $')
sns.distplot(trades.returns.dropna() * 100, kde=False,
ax=ax_pnl_per_round_trip_pct)
ax_pnl_per_round_trip_pct.set(
xlabel='Round-trip returns in %')
gs.tight_layout(fig)
if return_fig:
return fig
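# Illustrative usage (assumes returns/positions/transactions are already
# loaded; the sector mapping below is a made-up example):
#
#     sector_map = {'AAPL': 'Technology', 'ABT': 'Health Care'}
#     create_round_trip_tear_sheet(returns, positions, transactions,
#                                  sector_mappings=sector_map)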
@plotting.customize
def create_interesting_times_tear_sheet(
returns, benchmark_rets=None, legend_loc='best', return_fig=False):
"""
Generate a number of returns plots around interesting points in time,
like the flash crash and 9/11.
    Plots: returns around the dotcom bubble burst, Lehman Brothers' failure,
9/11, US downgrade and EU debt crisis, Fukushima meltdown, US housing
bubble burst, EZB IR, Great Recession (August 2007, March and September
of 2008, Q1 & Q2 2009), flash crash, April and October 2014.
benchmark_rets must be passed, as it is meaningless to analyze performance
during interesting times without some benchmark to refer to.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
benchmark_rets : pd.Series
Daily noncumulative returns of the benchmark.
- This is in the same style as returns.
legend_loc : plt.legend_loc, optional
The legend's location.
return_fig : boolean, optional
If True, returns the figure that was plotted on.
"""
rets_interesting = timeseries.extract_interesting_date_ranges(returns)
if not rets_interesting:
        warnings.warn('Passed returns do not overlap with any '
                      'interesting times.', UserWarning)
return
utils.print_table(pd.DataFrame(rets_interesting)
.describe().transpose()
.loc[:, ['mean', 'min', 'max']] * 100,
name='Stress Events',
float_format='{0:.2f}%'.format)
if benchmark_rets is not None:
returns = utils.clip_returns_to_benchmark(returns, benchmark_rets)
bmark_interesting = timeseries.extract_interesting_date_ranges(
benchmark_rets)
num_plots = len(rets_interesting)
# 2 plots, 1 row; 3 plots, 2 rows; 4 plots, 2 rows; etc.
num_rows = int((num_plots + 1) / 2.0)
fig = plt.figure(figsize=(14, num_rows * 6.0))
gs = gridspec.GridSpec(num_rows, 2, wspace=0.5, hspace=0.5)
for i, (name, rets_period) in enumerate(rets_interesting.items()):
# i=0 -> 0, i=1 -> 0, i=2 -> 1 ;; i=0 -> 0, i=1 -> 1, i=2 -> 0
ax = plt.subplot(gs[int(i / 2.0), i % 2])
ep.cum_returns(rets_period).plot(
ax=ax, color='forestgreen', label='algo', alpha=0.7, lw=2)
if benchmark_rets is not None:
ep.cum_returns(bmark_interesting[name]).plot(
ax=ax, color='gray', label='benchmark', alpha=0.6)
ax.legend(['Algo',
'benchmark'],
loc=legend_loc, frameon=True, framealpha=0.5)
else:
ax.legend(['Algo'],
loc=legend_loc, frameon=True, framealpha=0.5)
ax.set_title(name)
ax.set_ylabel('Returns')
ax.set_xlabel('')
if return_fig:
return fig
@plotting.customize
def create_capacity_tear_sheet(returns, positions, transactions,
market_data,
liquidation_daily_vol_limit=0.2,
trade_daily_vol_limit=0.05,
last_n_days=utils.APPROX_BDAYS_PER_MONTH * 6,
days_to_liquidate_limit=1,
estimate_intraday='infer'):
"""
Generates a report detailing portfolio size constraints set by
least liquid tickers. Plots a "capacity sweep," a curve describing
projected sharpe ratio given the slippage penalties that are
applied at various capital bases.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
market_data : pd.Panel
Panel with items axis of 'price' and 'volume' DataFrames.
The major and minor axes should match those of the
the passed positions DataFrame (same dates and symbols).
liquidation_daily_vol_limit : float
Max proportion of a daily bar that can be consumed in the
process of liquidating a position in the
"days to liquidation" analysis.
trade_daily_vol_limit : float
Flag daily transaction totals that exceed proportion of
daily bar.
last_n_days : integer
Compute max position allocation and dollar volume for only
the last N days of the backtest
days_to_liquidate_limit : integer
Display all tickers with greater max days to liquidation.
estimate_intraday: boolean or str, optional
Approximate returns for intraday strategies.
See description in create_full_tear_sheet.
"""
positions = utils.check_intraday(estimate_intraday, returns,
positions, transactions)
print("Max days to liquidation is computed for each traded name "
"assuming a 20% limit on daily bar consumption \n"
"and trailing 5 day mean volume as the available bar volume.\n\n"
"Tickers with >1 day liquidation time at a"
" constant $1m capital base:")
max_days_by_ticker = capacity.get_max_days_to_liquidate_by_ticker(
positions, market_data,
max_bar_consumption=liquidation_daily_vol_limit,
capital_base=1e6,
mean_volume_window=5)
max_days_by_ticker.index = (
max_days_by_ticker.index.map(utils.format_asset))
print("Whole backtest:")
utils.print_table(
max_days_by_ticker[max_days_by_ticker.days_to_liquidate >
days_to_liquidate_limit])
max_days_by_ticker_lnd = capacity.get_max_days_to_liquidate_by_ticker(
positions, market_data,
max_bar_consumption=liquidation_daily_vol_limit,
capital_base=1e6,
mean_volume_window=5,
last_n_days=last_n_days)
max_days_by_ticker_lnd.index = (
max_days_by_ticker_lnd.index.map(utils.format_asset))
print("Last {} trading days:".format(last_n_days))
utils.print_table(
max_days_by_ticker_lnd[max_days_by_ticker_lnd.days_to_liquidate > 1])
llt = capacity.get_low_liquidity_transactions(transactions, market_data)
llt.index = llt.index.map(utils.format_asset)
    print('Tickers with daily transactions consuming >{}% of daily bar \n'
          'over the whole backtest:'.format(trade_daily_vol_limit * 100))
utils.print_table(
llt[llt['max_pct_bar_consumed'] > trade_daily_vol_limit * 100])
llt = capacity.get_low_liquidity_transactions(
transactions, market_data, last_n_days=last_n_days)
print("Last {} trading days:".format(last_n_days))
utils.print_table(
llt[llt['max_pct_bar_consumed'] > trade_daily_vol_limit * 100])
bt_starting_capital = positions.iloc[0].sum() / (1 + returns.iloc[0])
fig, ax_capacity_sweep = plt.subplots(figsize=(14, 6))
plotting.plot_capacity_sweep(returns, transactions, market_data,
bt_starting_capital,
min_pv=100000,
max_pv=300000000,
step_size=1000000,
ax=ax_capacity_sweep)
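# Illustrative usage (assumes `market_data` is a panel holding 'price' and
# 'volume' DataFrames aligned with `positions`, as described in the docstring):
#
#     create_capacity_tear_sheet(returns, positions, transactions, market_data,
#                                last_n_days=utils.APPROX_BDAYS_PER_MONTH * 3)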
@plotting.customize
def create_bayesian_tear_sheet(returns, benchmark_rets=None,
live_start_date=None, samples=2000,
return_fig=False, stoch_vol=False,
progressbar=True):
"""
Generate a number of Bayesian distributions and a Bayesian
cone plot of returns.
Plots: Sharpe distribution, annual volatility distribution,
annual alpha distribution, beta distribution, predicted 1 and 5
day returns distributions, and a cumulative returns cone plot.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
benchmark_rets : pd.Series, optional
Daily noncumulative returns of the benchmark.
- This is in the same style as returns.
live_start_date : datetime, optional
The point in time when the strategy began live
trading, after its backtest period.
samples : int, optional
Number of posterior samples to draw.
return_fig : boolean, optional
If True, returns the figure that was plotted on.
stoch_vol : boolean, optional
If True, run and plot the stochastic volatility model
progressbar : boolean, optional
If True, show a progress bar
"""
if not have_bayesian:
raise NotImplementedError(
"Bayesian tear sheet requirements not found.\n"
"Run 'pip install pyfolio[bayesian]' to install "
"bayesian requirements."
)
if live_start_date is None:
raise NotImplementedError(
'Bayesian tear sheet requires setting of live_start_date'
)
live_start_date = ep.utils.get_utc_timestamp(live_start_date)
df_train = returns.loc[returns.index < live_start_date]
df_test = returns.loc[returns.index >= live_start_date]
# Run T model with missing data
print("Running T model")
previous_time = time()
# track the total run time of the Bayesian tear sheet
start_time = previous_time
trace_t, ppc_t = bayesian.run_model('t', df_train,
returns_test=df_test,
samples=samples, ppc=True,
progressbar=progressbar)
previous_time = timer("T model", previous_time)
# Compute BEST model
print("\nRunning BEST model")
trace_best = bayesian.run_model('best', df_train,
returns_test=df_test,
samples=samples,
progressbar=progressbar)
previous_time = timer("BEST model", previous_time)
# Plot results
fig = plt.figure(figsize=(14, 10 * 2))
gs = gridspec.GridSpec(9, 2, wspace=0.3, hspace=0.3)
axs = []
row = 0
# Plot Bayesian cone
ax_cone = plt.subplot(gs[row, :])
bayesian.plot_bayes_cone(df_train, df_test, ppc_t, ax=ax_cone)
previous_time = timer("plotting Bayesian cone", previous_time)
# Plot BEST results
row += 1
axs.append(plt.subplot(gs[row, 0]))
axs.append(plt.subplot(gs[row, 1]))
row += 1
axs.append(plt.subplot(gs[row, 0]))
axs.append(plt.subplot(gs[row, 1]))
row += 1
axs.append(plt.subplot(gs[row, 0]))
axs.append(plt.subplot(gs[row, 1]))
row += 1
# Effect size across two
axs.append(plt.subplot(gs[row, :]))
bayesian.plot_best(trace=trace_best, axs=axs)
previous_time = timer("plotting BEST results", previous_time)
# Compute Bayesian predictions
row += 1
ax_ret_pred_day = plt.subplot(gs[row, 0])
ax_ret_pred_week = plt.subplot(gs[row, 1])
day_pred = ppc_t[:, 0]
p5 = scipy.stats.scoreatpercentile(day_pred, 5)
sns.distplot(day_pred,
ax=ax_ret_pred_day
)
ax_ret_pred_day.axvline(p5, linestyle='--', linewidth=3.)
ax_ret_pred_day.set_xlabel('Predicted returns 1 day')
ax_ret_pred_day.set_ylabel('Frequency')
ax_ret_pred_day.text(0.4, 0.9, 'Bayesian VaR = %.2f' % p5,
verticalalignment='bottom',
horizontalalignment='right',
transform=ax_ret_pred_day.transAxes)
previous_time = timer("computing Bayesian predictions", previous_time)
# Plot Bayesian VaRs
week_pred = (
np.cumprod(ppc_t[:, :5] + 1, 1) - 1)[:, -1]
p5 = scipy.stats.scoreatpercentile(week_pred, 5)
sns.distplot(week_pred,
ax=ax_ret_pred_week
)
ax_ret_pred_week.axvline(p5, linestyle='--', linewidth=3.)
ax_ret_pred_week.set_xlabel('Predicted cum returns 5 days')
ax_ret_pred_week.set_ylabel('Frequency')
ax_ret_pred_week.text(0.4, 0.9, 'Bayesian VaR = %.2f' % p5,
verticalalignment='bottom',
horizontalalignment='right',
transform=ax_ret_pred_week.transAxes)
previous_time = timer("plotting Bayesian VaRs estimate", previous_time)
# Run alpha beta model
if benchmark_rets is not None:
print("\nRunning alpha beta model")
benchmark_rets = benchmark_rets.loc[df_train.index]
trace_alpha_beta = bayesian.run_model('alpha_beta', df_train,
bmark=benchmark_rets,
samples=samples,
progressbar=progressbar)
previous_time = timer("running alpha beta model", previous_time)
# Plot alpha and beta
row += 1
ax_alpha = plt.subplot(gs[row, 0])
ax_beta = plt.subplot(gs[row, 1])
sns.distplot((1 + trace_alpha_beta['alpha'][100:])**252 - 1,
ax=ax_alpha)
sns.distplot(trace_alpha_beta['beta'][100:], ax=ax_beta)
ax_alpha.set_xlabel('Annual Alpha')
ax_alpha.set_ylabel('Belief')
ax_beta.set_xlabel('Beta')
ax_beta.set_ylabel('Belief')
previous_time = timer("plotting alpha beta model", previous_time)
if stoch_vol:
# run stochastic volatility model
returns_cutoff = 400
print(
"\nRunning stochastic volatility model on "
"most recent {} days of returns.".format(returns_cutoff)
)
if df_train.size > returns_cutoff:
df_train_truncated = df_train[-returns_cutoff:]
_, trace_stoch_vol = bayesian.model_stoch_vol(df_train_truncated)
previous_time = timer(
"running stochastic volatility model", previous_time)
# plot latent volatility
row += 1
ax_volatility = plt.subplot(gs[row, :])
bayesian.plot_stoch_vol(
df_train_truncated, trace=trace_stoch_vol, ax=ax_volatility)
previous_time = timer(
"plotting stochastic volatility model", previous_time)
total_time = time() - start_time
print("\nTotal runtime was {:.2f} seconds.".format(total_time))
gs.tight_layout(fig)
if return_fig:
return fig
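# Illustrative usage (requires the optional bayesian dependencies; the date
# below is a placeholder):
#
#     create_bayesian_tear_sheet(returns, benchmark_rets=benchmark_rets,
#                                live_start_date='2017-01-01')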
@plotting.customize
def create_risk_tear_sheet(positions,
style_factor_panel=None,
sectors=None,
caps=None,
shares_held=None,
volumes=None,
percentile=None,
returns=None,
transactions=None,
estimate_intraday='infer',
return_fig=False):
'''
Creates risk tear sheet: computes and plots style factor exposures, sector
exposures, market cap exposures and volume exposures.
Parameters
----------
positions : pd.DataFrame
Daily equity positions of algorithm, in dollars.
- DataFrame with dates as index, equities as columns
- Last column is cash held
- Example:
Equity(24 Equity(62
[AAPL]) [ABT]) cash
2017-04-03 -108062.40 4401.540 2.247757e+07
2017-04-04 -108852.00 4373.820 2.540999e+07
2017-04-05 -119968.66 4336.200 2.839812e+07
style_factor_panel : pd.Panel
Panel where each item is a DataFrame that tabulates style factor per
equity per day.
- Each item has dates as index, equities as columns
- Example item:
Equity(24 Equity(62
[AAPL]) [ABT])
2017-04-03 -0.51284 1.39173
2017-04-04 -0.73381 0.98149
2017-04-05 -0.90132 1.13981
sectors : pd.DataFrame
Daily Morningstar sector code per asset
- DataFrame with dates as index and equities as columns
- Example:
Equity(24 Equity(62
[AAPL]) [ABT])
2017-04-03 311.0 206.0
2017-04-04 311.0 206.0
2017-04-05 311.0 206.0
caps : pd.DataFrame
Daily market cap per asset
- DataFrame with dates as index and equities as columns
- Example:
Equity(24 Equity(62
[AAPL]) [ABT])
2017-04-03 1.327160e+10 6.402460e+10
2017-04-04 1.329620e+10 6.403694e+10
2017-04-05 1.297464e+10 6.397187e+10
shares_held : pd.DataFrame
Daily number of shares held by an algorithm.
- Example:
Equity(24 Equity(62
[AAPL]) [ABT])
2017-04-03 1915 -2595
2017-04-04 1968 -3272
2017-04-05 2104 -3917
volumes : pd.DataFrame
Daily volume per asset
- DataFrame with dates as index and equities as columns
- Example:
Equity(24 Equity(62
[AAPL]) [ABT])
2017-04-03 34940859.00 4665573.80
2017-04-04 35603329.10 4818463.90
2017-04-05 41846731.75 4129153.10
percentile : float
Percentile to use when computing and plotting volume exposures.
- Defaults to 10th percentile
'''
positions = utils.check_intraday(estimate_intraday, returns,
positions, transactions)
idx = positions.index & style_factor_panel.iloc[0].index & sectors.index \
& caps.index & shares_held.index & volumes.index
positions = positions.loc[idx]
vertical_sections = 0
if style_factor_panel is not None:
vertical_sections += len(style_factor_panel.items)
new_style_dict = {}
for item in style_factor_panel.items:
new_style_dict.update({item:
style_factor_panel.loc[item].loc[idx]})
style_factor_panel = pd.Panel()
style_factor_panel = style_factor_panel.from_dict(new_style_dict)
if sectors is not None:
vertical_sections += 4
sectors = sectors.loc[idx]
if caps is not None:
vertical_sections += 4
caps = caps.loc[idx]
    if (shares_held is not None) and (volumes is not None) \
            and (percentile is not None):
vertical_sections += 3
shares_held = shares_held.loc[idx]
volumes = volumes.loc[idx]
if percentile is None:
percentile = 0.1
fig = plt.figure(figsize=[14, vertical_sections * 6])
gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.5, hspace=0.5)
if style_factor_panel is not None:
style_axes = []
style_axes.append(plt.subplot(gs[0, :]))
for i in range(1, len(style_factor_panel.items)):
style_axes.append(plt.subplot(gs[i, :], sharex=style_axes[0]))
j = 0
for name, df in style_factor_panel.iteritems():
sfe = risk.compute_style_factor_exposures(positions, df)
risk.plot_style_factor_exposures(sfe, name, style_axes[j])
j += 1
if sectors is not None:
i += 1
ax_sector_longshort = plt.subplot(gs[i:i+2, :], sharex=style_axes[0])
i += 2
ax_sector_gross = plt.subplot(gs[i, :], sharex=style_axes[0])
i += 1
ax_sector_net = plt.subplot(gs[i, :], sharex=style_axes[0])
long_exposures, short_exposures, gross_exposures, net_exposures \
= risk.compute_sector_exposures(positions, sectors)
risk.plot_sector_exposures_longshort(long_exposures, short_exposures,
ax=ax_sector_longshort)
risk.plot_sector_exposures_gross(gross_exposures, ax=ax_sector_gross)
risk.plot_sector_exposures_net(net_exposures, ax=ax_sector_net)
if caps is not None:
i += 1
ax_cap_longshort = plt.subplot(gs[i:i+2, :], sharex=style_axes[0])
i += 2
ax_cap_gross = plt.subplot(gs[i, :], sharex=style_axes[0])
i += 1
ax_cap_net = plt.subplot(gs[i, :], sharex=style_axes[0])
long_exposures, short_exposures, gross_exposures, net_exposures \
= risk.compute_cap_exposures(positions, caps)
risk.plot_cap_exposures_longshort(long_exposures, short_exposures,
ax_cap_longshort)
risk.plot_cap_exposures_gross(gross_exposures, ax_cap_gross)
risk.plot_cap_exposures_net(net_exposures, ax_cap_net)
if volumes is not None:
i += 1
ax_vol_longshort = plt.subplot(gs[i:i+2, :], sharex=style_axes[0])
i += 2
ax_vol_gross = plt.subplot(gs[i, :], sharex=style_axes[0])
longed_threshold, shorted_threshold, grossed_threshold \
= risk.compute_volume_exposures(positions, volumes, percentile)
risk.plot_volume_exposures_longshort(longed_threshold,
shorted_threshold, percentile,
ax_vol_longshort)
risk.plot_volume_exposures_gross(grossed_threshold, percentile,
ax_vol_gross)
for ax in fig.axes:
plt.setp(ax.get_xticklabels(), visible=True)
if return_fig:
return fig
@plotting.customize
def create_perf_attrib_tear_sheet(returns,
positions,
factor_returns,
factor_loadings,
transactions=None,
pos_in_dollars=True,
return_fig=False,
factor_partitions=FACTOR_PARTITIONS):
"""
Generate plots and tables for analyzing a strategy's performance.
Parameters
----------
returns : pd.Series
Returns for each day in the date range.
positions: pd.DataFrame
Daily holdings (in dollars or percentages), indexed by date.
Will be converted to percentages if positions are in dollars.
Short positions show up as cash in the 'cash' column.
factor_returns : pd.DataFrame
Returns by factor, with date as index and factors as columns
factor_loadings : pd.DataFrame
Factor loadings for all days in the date range, with date
and ticker as index, and factors as columns.
transactions : pd.DataFrame, optional
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
- Default is None.
pos_in_dollars : boolean, optional
Flag indicating whether `positions` are in dollars or percentages
If True, positions are in dollars.
return_fig : boolean, optional
If True, returns the figure that was plotted on.
factor_partitions : dict
dict specifying how factors should be separated in factor returns
and risk exposures plots
- Example:
{'style': ['momentum', 'size', 'value', ...],
'sector': ['technology', 'materials', ... ]}
"""
portfolio_exposures, perf_attrib_data = perf_attrib.perf_attrib(
returns, positions, factor_returns, factor_loadings, transactions,
pos_in_dollars=pos_in_dollars
)
display(Markdown("## Performance Relative to Common Risk Factors"))
# aggregate perf attrib stats and show summary table
perf_attrib.show_perf_attrib_stats(returns, positions, factor_returns,
factor_loadings, transactions,
pos_in_dollars)
# one section for the returns plot, and for each factor grouping
# one section for factor returns, and one for risk exposures
vertical_sections = 1 + 2 * max(len(factor_partitions), 1)
current_section = 0
fig = plt.figure(figsize=[14, vertical_sections * 6])
gs = gridspec.GridSpec(vertical_sections, 1,
wspace=0.5, hspace=0.5)
perf_attrib.plot_returns(perf_attrib_data,
ax=plt.subplot(gs[current_section]))
current_section += 1
if factor_partitions is not None:
        for factor_type, partitions in factor_partitions.items():
columns_to_select = perf_attrib_data.columns.intersection(
partitions
)
perf_attrib.plot_factor_contribution_to_perf(
perf_attrib_data[columns_to_select],
ax=plt.subplot(gs[current_section]),
title=(
'Cumulative common {} returns attribution'
).format(factor_type)
)
current_section += 1
        for factor_type, partitions in factor_partitions.items():
perf_attrib.plot_risk_exposures(
portfolio_exposures[portfolio_exposures.columns
.intersection(partitions)],
ax=plt.subplot(gs[current_section]),
title='Daily {} factor exposures'.format(factor_type)
)
current_section += 1
else:
perf_attrib.plot_factor_contribution_to_perf(
perf_attrib_data,
ax=plt.subplot(gs[current_section])
)
current_section += 1
perf_attrib.plot_risk_exposures(
portfolio_exposures,
ax=plt.subplot(gs[current_section])
)
gs.tight_layout(fig)
if return_fig:
        return fig
/nnisgf-0.4-py3-none-manylinux1_x86_64.whl/nnisgf-0.4.data/data/nni/node_modules/moment/src/lib/format/format.js
import zeroFill from '../utils/zero-fill';
import isFunction from '../utils/is-function';
export var formattingTokens = /(\[[^\[]*\])|(\\)?([Hh]mm(ss)?|Mo|MM?M?M?|Do|DDDo|DD?D?D?|ddd?d?|do?|w[o|w]?|W[o|W]?|Qo?|YYYYYY|YYYYY|YYYY|YY|gg(ggg?)?|GG(GGG?)?|e|E|a|A|hh?|HH?|kk?|mm?|ss?|S{1,9}|x|X|zz?|ZZ?|.)/g;
var localFormattingTokens = /(\[[^\[]*\])|(\\)?(LTS|LT|LL?L?L?|l{1,4})/g;
var formatFunctions = {};
export var formatTokenFunctions = {};
// token: 'M'
// padded: ['MM', 2]
// ordinal: 'Mo'
// callback: function () { this.month() + 1 }
export function addFormatToken (token, padded, ordinal, callback) {
var func = callback;
if (typeof callback === 'string') {
func = function () {
return this[callback]();
};
}
if (token) {
formatTokenFunctions[token] = func;
}
if (padded) {
formatTokenFunctions[padded[0]] = function () {
return zeroFill(func.apply(this, arguments), padded[1], padded[2]);
};
}
if (ordinal) {
formatTokenFunctions[ordinal] = function () {
return this.localeData().ordinal(func.apply(this, arguments), token);
};
}
}
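// Illustrative usage note (not part of this module): moment's unit modules
// register their tokens roughly like this, e.g. the month token:
//
//     addFormatToken('M', ['MM', 2], 'Mo', function () {
//         return this.month() + 1;
//     });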
function removeFormattingTokens(input) {
if (input.match(/\[[\s\S]/)) {
return input.replace(/^\[|\]$/g, '');
}
return input.replace(/\\/g, '');
}
function makeFormatFunction(format) {
var array = format.match(formattingTokens), i, length;
for (i = 0, length = array.length; i < length; i++) {
if (formatTokenFunctions[array[i]]) {
array[i] = formatTokenFunctions[array[i]];
} else {
array[i] = removeFormattingTokens(array[i]);
}
}
return function (mom) {
var output = '', i;
for (i = 0; i < length; i++) {
output += isFunction(array[i]) ? array[i].call(mom, format) : array[i];
}
return output;
};
}
// format date using native date object
export function formatMoment(m, format) {
if (!m.isValid()) {
return m.localeData().invalidDate();
}
format = expandFormat(format, m.localeData());
formatFunctions[format] = formatFunctions[format] || makeFormatFunction(format);
return formatFunctions[format](m);
}
export function expandFormat(format, locale) {
var i = 5;
function replaceLongDateFormatTokens(input) {
return locale.longDateFormat(input) || input;
}
localFormattingTokens.lastIndex = 0;
while (i >= 0 && localFormattingTokens.test(format)) {
format = format.replace(localFormattingTokens, replaceLongDateFormatTokens);
localFormattingTokens.lastIndex = 0;
i -= 1;
}
return format;
} | PypiClean |
/python-sources-3.10.5.tar.gz/python-sources-3.10.5/Python-3.10.5/Doc/library/urllib.parse.rst
:mod:`urllib.parse` --- Parse URLs into components
==================================================
.. module:: urllib.parse
:synopsis: Parse URLs into or assemble them from components.
**Source code:** :source:`Lib/urllib/parse.py`
.. index::
single: WWW
single: World Wide Web
single: URL
pair: URL; parsing
pair: relative; URL
--------------
This module defines a standard interface to break Uniform Resource Locator (URL)
strings up in components (addressing scheme, network location, path etc.), to
combine the components back into a URL string, and to convert a "relative URL"
to an absolute URL given a "base URL."
The module has been designed to match the internet RFC on Relative Uniform
Resource Locators. It supports the following URL schemes: ``file``, ``ftp``,
``gopher``, ``hdl``, ``http``, ``https``, ``imap``, ``mailto``, ``mms``,
``news``, ``nntp``, ``prospero``, ``rsync``, ``rtsp``, ``rtspu``, ``sftp``,
``shttp``, ``sip``, ``sips``, ``snews``, ``svn``, ``svn+ssh``, ``telnet``,
``wais``, ``ws``, ``wss``.
The :mod:`urllib.parse` module defines functions that fall into two broad
categories: URL parsing and URL quoting. These are covered in detail in
the following sections.
URL Parsing
-----------
The URL parsing functions focus on splitting a URL string into its components,
or on combining URL components into a URL string.
.. function:: urlparse(urlstring, scheme='', allow_fragments=True)
Parse a URL into six components, returning a 6-item :term:`named tuple`. This
corresponds to the general structure of a URL:
``scheme://netloc/path;parameters?query#fragment``.
Each tuple item is a string, possibly empty. The components are not broken up
into smaller parts (for example, the network location is a single string), and %
escapes are not expanded. The delimiters as shown above are not part of the
result, except for a leading slash in the *path* component, which is retained if
present. For example:
.. doctest::
:options: +NORMALIZE_WHITESPACE
>>> from urllib.parse import urlparse
>>> urlparse("scheme://netloc/path;parameters?query#fragment")
      ParseResult(scheme='scheme', netloc='netloc', path='/path', params='parameters',
                  query='query', fragment='fragment')
>>> o = urlparse("http://docs.python.org:80/3/library/urllib.parse.html?"
... "highlight=params#url-parsing")
>>> o
ParseResult(scheme='http', netloc='docs.python.org:80',
path='/3/library/urllib.parse.html', params='',
query='highlight=params', fragment='url-parsing')
>>> o.scheme
'http'
>>> o.netloc
'docs.python.org:80'
>>> o.hostname
'docs.python.org'
>>> o.port
80
>>> o._replace(fragment="").geturl()
'http://docs.python.org:80/3/library/urllib.parse.html?highlight=params'
Following the syntax specifications in :rfc:`1808`, urlparse recognizes
a netloc only if it is properly introduced by '//'. Otherwise the
input is presumed to be a relative URL and thus to start with
a path component.
.. doctest::
:options: +NORMALIZE_WHITESPACE
>>> from urllib.parse import urlparse
>>> urlparse('//www.cwi.nl:80/%7Eguido/Python.html')
ParseResult(scheme='', netloc='www.cwi.nl:80', path='/%7Eguido/Python.html',
params='', query='', fragment='')
>>> urlparse('www.cwi.nl/%7Eguido/Python.html')
ParseResult(scheme='', netloc='', path='www.cwi.nl/%7Eguido/Python.html',
params='', query='', fragment='')
>>> urlparse('help/Python.html')
ParseResult(scheme='', netloc='', path='help/Python.html', params='',
query='', fragment='')
The *scheme* argument gives the default addressing scheme, to be
used only if the URL does not specify one. It should be the same type
(text or bytes) as *urlstring*, except that the default value ``''`` is
always allowed, and is automatically converted to ``b''`` if appropriate.
If the *allow_fragments* argument is false, fragment identifiers are not
recognized. Instead, they are parsed as part of the path, parameters
or query component, and :attr:`fragment` is set to the empty string in
the return value.
The return value is a :term:`named tuple`, which means that its items can
be accessed by index or as named attributes, which are:
+------------------+-------+-------------------------+------------------------+
| Attribute | Index | Value | Value if not present |
+==================+=======+=========================+========================+
| :attr:`scheme` | 0 | URL scheme specifier | *scheme* parameter |
+------------------+-------+-------------------------+------------------------+
| :attr:`netloc` | 1 | Network location part | empty string |
+------------------+-------+-------------------------+------------------------+
| :attr:`path` | 2 | Hierarchical path | empty string |
+------------------+-------+-------------------------+------------------------+
   | :attr:`params`   | 3     | Parameters for last     | empty string           |
   |                  |       | path element            |                        |
+------------------+-------+-------------------------+------------------------+
| :attr:`query` | 4 | Query component | empty string |
+------------------+-------+-------------------------+------------------------+
| :attr:`fragment` | 5 | Fragment identifier | empty string |
+------------------+-------+-------------------------+------------------------+
| :attr:`username` | | User name | :const:`None` |
+------------------+-------+-------------------------+------------------------+
| :attr:`password` | | Password | :const:`None` |
+------------------+-------+-------------------------+------------------------+
| :attr:`hostname` | | Host name (lower case) | :const:`None` |
+------------------+-------+-------------------------+------------------------+
| :attr:`port` | | Port number as integer, | :const:`None` |
| | | if present | |
+------------------+-------+-------------------------+------------------------+
Reading the :attr:`port` attribute will raise a :exc:`ValueError` if
an invalid port is specified in the URL. See section
:ref:`urlparse-result-object` for more information on the result object.
Unmatched square brackets in the :attr:`netloc` attribute will raise a
:exc:`ValueError`.
Characters in the :attr:`netloc` attribute that decompose under NFKC
normalization (as used by the IDNA encoding) into any of ``/``, ``?``,
``#``, ``@``, or ``:`` will raise a :exc:`ValueError`. If the URL is
decomposed before parsing, no error will be raised.
As is the case with all named tuples, the subclass has a few additional methods
and attributes that are particularly useful. One such method is :meth:`_replace`.
The :meth:`_replace` method will return a new ParseResult object replacing specified
fields with new values.
.. doctest::
:options: +NORMALIZE_WHITESPACE
>>> from urllib.parse import urlparse
>>> u = urlparse('//www.cwi.nl:80/%7Eguido/Python.html')
>>> u
ParseResult(scheme='', netloc='www.cwi.nl:80', path='/%7Eguido/Python.html',
params='', query='', fragment='')
>>> u._replace(scheme='http')
ParseResult(scheme='http', netloc='www.cwi.nl:80', path='/%7Eguido/Python.html',
params='', query='', fragment='')
.. versionchanged:: 3.2
Added IPv6 URL parsing capabilities.
.. versionchanged:: 3.3
The fragment is now parsed for all URL schemes (unless *allow_fragment* is
false), in accordance with :rfc:`3986`. Previously, an allowlist of
schemes that support fragments existed.
.. versionchanged:: 3.6
Out-of-range port numbers now raise :exc:`ValueError`, instead of
returning :const:`None`.
.. versionchanged:: 3.8
Characters that affect netloc parsing under NFKC normalization will
now raise :exc:`ValueError`.
.. function:: parse_qs(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace', max_num_fields=None, separator='&')
Parse a query string given as a string argument (data of type
:mimetype:`application/x-www-form-urlencoded`). Data are returned as a
dictionary. The dictionary keys are the unique query variable names and the
values are lists of values for each name.
The optional argument *keep_blank_values* is a flag indicating whether blank
values in percent-encoded queries should be treated as blank strings. A true value
indicates that blanks should be retained as blank strings. The default false
value indicates that blank values are to be ignored and treated as if they were
not included.
The optional argument *strict_parsing* is a flag indicating what to do with
parsing errors. If false (the default), errors are silently ignored. If true,
errors raise a :exc:`ValueError` exception.
The optional *encoding* and *errors* parameters specify how to decode
percent-encoded sequences into Unicode characters, as accepted by the
:meth:`bytes.decode` method.
The optional argument *max_num_fields* is the maximum number of fields to
read. If set, then throws a :exc:`ValueError` if there are more than
*max_num_fields* fields read.
The optional argument *separator* is the symbol to use for separating the
query arguments. It defaults to ``&``.
Use the :func:`urllib.parse.urlencode` function (with the ``doseq``
parameter set to ``True``) to convert such dictionaries into query
strings.
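   For example:
      >>> from urllib.parse import parse_qs
      >>> parse_qs('key1=value1&key1=value2&key2=value3')
      {'key1': ['value1', 'value2'], 'key2': ['value3']}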
.. versionchanged:: 3.2
Add *encoding* and *errors* parameters.
.. versionchanged:: 3.8
Added *max_num_fields* parameter.
.. versionchanged:: 3.10
Added *separator* parameter with the default value of ``&``. Python
versions earlier than Python 3.10 allowed using both ``;`` and ``&`` as
query parameter separator. This has been changed to allow only a single
separator key, with ``&`` as the default separator.
.. function:: parse_qsl(qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace', max_num_fields=None, separator='&')
Parse a query string given as a string argument (data of type
:mimetype:`application/x-www-form-urlencoded`). Data are returned as a list of
name, value pairs.
The optional argument *keep_blank_values* is a flag indicating whether blank
values in percent-encoded queries should be treated as blank strings. A true value
indicates that blanks should be retained as blank strings. The default false
value indicates that blank values are to be ignored and treated as if they were
not included.
The optional argument *strict_parsing* is a flag indicating what to do with
parsing errors. If false (the default), errors are silently ignored. If true,
errors raise a :exc:`ValueError` exception.
The optional *encoding* and *errors* parameters specify how to decode
percent-encoded sequences into Unicode characters, as accepted by the
:meth:`bytes.decode` method.
The optional argument *max_num_fields* is the maximum number of fields to
read. If set, then throws a :exc:`ValueError` if there are more than
*max_num_fields* fields read.
The optional argument *separator* is the symbol to use for separating the
query arguments. It defaults to ``&``.
Use the :func:`urllib.parse.urlencode` function to convert such lists of pairs into
query strings.
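   For example:
      >>> from urllib.parse import parse_qsl
      >>> parse_qsl('key1=value1&key1=value2&key2=value3')
      [('key1', 'value1'), ('key1', 'value2'), ('key2', 'value3')]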
.. versionchanged:: 3.2
Add *encoding* and *errors* parameters.
.. versionchanged:: 3.8
Added *max_num_fields* parameter.
.. versionchanged:: 3.10
Added *separator* parameter with the default value of ``&``. Python
versions earlier than Python 3.10 allowed using both ``;`` and ``&`` as
query parameter separator. This has been changed to allow only a single
separator key, with ``&`` as the default separator.
.. function:: urlunparse(parts)
Construct a URL from a tuple as returned by ``urlparse()``. The *parts*
argument can be any six-item iterable. This may result in a slightly
different, but equivalent URL, if the URL that was parsed originally had
unnecessary delimiters (for example, a ``?`` with an empty query; the RFC
states that these are equivalent).
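   For example:
      >>> from urllib.parse import urlunparse
      >>> urlunparse(('https', 'www.python.org', '/doc/', '', '', ''))
      'https://www.python.org/doc/'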
.. function:: urlsplit(urlstring, scheme='', allow_fragments=True)
This is similar to :func:`urlparse`, but does not split the params from the URL.
This should generally be used instead of :func:`urlparse` if the more recent URL
syntax allowing parameters to be applied to each segment of the *path* portion
of the URL (see :rfc:`2396`) is wanted. A separate function is needed to
separate the path segments and parameters. This function returns a 5-item
:term:`named tuple`::
(addressing scheme, network location, path, query, fragment identifier).
The return value is a :term:`named tuple`, its items can be accessed by index
or as named attributes:
+------------------+-------+-------------------------+----------------------+
| Attribute | Index | Value | Value if not present |
+==================+=======+=========================+======================+
| :attr:`scheme` | 0 | URL scheme specifier | *scheme* parameter |
+------------------+-------+-------------------------+----------------------+
| :attr:`netloc` | 1 | Network location part | empty string |
+------------------+-------+-------------------------+----------------------+
| :attr:`path` | 2 | Hierarchical path | empty string |
+------------------+-------+-------------------------+----------------------+
| :attr:`query` | 3 | Query component | empty string |
+------------------+-------+-------------------------+----------------------+
| :attr:`fragment` | 4 | Fragment identifier | empty string |
+------------------+-------+-------------------------+----------------------+
| :attr:`username` | | User name | :const:`None` |
+------------------+-------+-------------------------+----------------------+
| :attr:`password` | | Password | :const:`None` |
+------------------+-------+-------------------------+----------------------+
| :attr:`hostname` | | Host name (lower case) | :const:`None` |
+------------------+-------+-------------------------+----------------------+
| :attr:`port` | | Port number as integer, | :const:`None` |
| | | if present | |
+------------------+-------+-------------------------+----------------------+
Reading the :attr:`port` attribute will raise a :exc:`ValueError` if
an invalid port is specified in the URL. See section
:ref:`urlparse-result-object` for more information on the result object.
Unmatched square brackets in the :attr:`netloc` attribute will raise a
:exc:`ValueError`.
Characters in the :attr:`netloc` attribute that decompose under NFKC
normalization (as used by the IDNA encoding) into any of ``/``, ``?``,
``#``, ``@``, or ``:`` will raise a :exc:`ValueError`. If the URL is
decomposed before parsing, no error will be raised.
Following the `WHATWG spec`_ that updates RFC 3986, ASCII newline
``\n``, ``\r`` and tab ``\t`` characters are stripped from the URL.
.. versionchanged:: 3.6
Out-of-range port numbers now raise :exc:`ValueError`, instead of
returning :const:`None`.
.. versionchanged:: 3.8
Characters that affect netloc parsing under NFKC normalization will
now raise :exc:`ValueError`.
.. versionchanged:: 3.10
ASCII newline and tab characters are stripped from the URL.
.. _WHATWG spec: https://url.spec.whatwg.org/#concept-basic-url-parser
.. function:: urlunsplit(parts)
Combine the elements of a tuple as returned by :func:`urlsplit` into a
complete URL as a string. The *parts* argument can be any five-item
iterable. This may result in a slightly different, but equivalent URL, if the
URL that was parsed originally had unnecessary delimiters (for example, a ?
with an empty query; the RFC states that these are equivalent).
.. function:: urljoin(base, url, allow_fragments=True)
Construct a full ("absolute") URL by combining a "base URL" (*base*) with
another URL (*url*). Informally, this uses components of the base URL, in
particular the addressing scheme, the network location and (part of) the
path, to provide missing components in the relative URL. For example:
>>> from urllib.parse import urljoin
>>> urljoin('http://www.cwi.nl/%7Eguido/Python.html', 'FAQ.html')
'http://www.cwi.nl/%7Eguido/FAQ.html'
The *allow_fragments* argument has the same meaning and default as for
:func:`urlparse`.
.. note::
If *url* is an absolute URL (that is, it starts with ``//`` or ``scheme://``),
the *url*'s hostname and/or scheme will be present in the result. For example:
.. doctest::
>>> urljoin('http://www.cwi.nl/%7Eguido/Python.html',
... '//www.python.org/%7Eguido')
'http://www.python.org/%7Eguido'
If you do not want that behavior, preprocess the *url* with :func:`urlsplit` and
:func:`urlunsplit`, removing possible *scheme* and *netloc* parts.
.. versionchanged:: 3.5
Behavior updated to match the semantics defined in :rfc:`3986`.
.. function:: urldefrag(url)
If *url* contains a fragment identifier, return a modified version of *url*
with no fragment identifier, and the fragment identifier as a separate
string. If there is no fragment identifier in *url*, return *url* unmodified
and an empty string.
The return value is a :term:`named tuple`, its items can be accessed by index
or as named attributes:
+------------------+-------+-------------------------+----------------------+
| Attribute | Index | Value | Value if not present |
+==================+=======+=========================+======================+
| :attr:`url` | 0 | URL with no fragment | empty string |
+------------------+-------+-------------------------+----------------------+
| :attr:`fragment` | 1 | Fragment identifier | empty string |
+------------------+-------+-------------------------+----------------------+
See section :ref:`urlparse-result-object` for more information on the result
object.
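   For example:
      >>> from urllib.parse import urldefrag
      >>> urldefrag('http://www.python.org/doc/#frag')
      DefragResult(url='http://www.python.org/doc/', fragment='frag')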
.. versionchanged:: 3.2
Result is a structured object rather than a simple 2-tuple.
.. function:: unwrap(url)
Extract the url from a wrapped URL (that is, a string formatted as
``<URL:scheme://host/path>``, ``<scheme://host/path>``, ``URL:scheme://host/path``
or ``scheme://host/path``). If *url* is not a wrapped URL, it is returned
without changes.
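   For example:
      >>> from urllib.parse import unwrap
      >>> unwrap('<URL:scheme://host/path>')
      'scheme://host/path'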
.. _parsing-ascii-encoded-bytes:
Parsing ASCII Encoded Bytes
---------------------------
The URL parsing functions were originally designed to operate on character
strings only. In practice, it is useful to be able to manipulate properly
quoted and encoded URLs as sequences of ASCII bytes. Accordingly, the
URL parsing functions in this module all operate on :class:`bytes` and
:class:`bytearray` objects in addition to :class:`str` objects.
If :class:`str` data is passed in, the result will also contain only
:class:`str` data. If :class:`bytes` or :class:`bytearray` data is
passed in, the result will contain only :class:`bytes` data.
Attempting to mix :class:`str` data with :class:`bytes` or
:class:`bytearray` in a single function call will result in a
:exc:`TypeError` being raised, while attempting to pass in non-ASCII
byte values will trigger :exc:`UnicodeDecodeError`.
To support easier conversion of result objects between :class:`str` and
:class:`bytes`, all return values from URL parsing functions provide
either an :meth:`encode` method (when the result contains :class:`str`
data) or a :meth:`decode` method (when the result contains :class:`bytes`
data). The signatures of these methods match those of the corresponding
:class:`str` and :class:`bytes` methods (except that the default encoding
is ``'ascii'`` rather than ``'utf-8'``). Each produces a value of a
corresponding type that contains either :class:`bytes` data (for
:meth:`encode` methods) or :class:`str` data (for
:meth:`decode` methods).
Applications that need to operate on potentially improperly quoted URLs
that may contain non-ASCII data will need to do their own decoding from
bytes to characters before invoking the URL parsing methods.
The behaviour described in this section applies only to the URL parsing
functions. The URL quoting functions use their own rules when producing
or consuming byte sequences as detailed in the documentation of the
individual URL quoting functions.
.. versionchanged:: 3.2
URL parsing functions now accept ASCII encoded byte sequences
.. _urlparse-result-object:
Structured Parse Results
------------------------
The result objects from the :func:`urlparse`, :func:`urlsplit` and
:func:`urldefrag` functions are subclasses of the :class:`tuple` type.
These subclasses add the attributes listed in the documentation for
those functions, the encoding and decoding support described in the
previous section, as well as an additional method:
.. method:: urllib.parse.SplitResult.geturl()
Return the re-combined version of the original URL as a string. This may
differ from the original URL in that the scheme may be normalized to lower
case and empty components may be dropped. Specifically, empty parameters,
queries, and fragment identifiers will be removed.
For :func:`urldefrag` results, only empty fragment identifiers will be removed.
For :func:`urlsplit` and :func:`urlparse` results, all noted changes will be
made to the URL returned by this method.
The result of this method remains unchanged if passed back through the original
parsing function:
>>> from urllib.parse import urlsplit
>>> url = 'HTTP://www.Python.org/doc/#'
>>> r1 = urlsplit(url)
>>> r1.geturl()
'http://www.Python.org/doc/'
>>> r2 = urlsplit(r1.geturl())
>>> r2.geturl()
'http://www.Python.org/doc/'
The following classes provide the implementations of the structured parse
results when operating on :class:`str` objects:
.. class:: DefragResult(url, fragment)
Concrete class for :func:`urldefrag` results containing :class:`str`
data. The :meth:`encode` method returns a :class:`DefragResultBytes`
instance.
.. versionadded:: 3.2
.. class:: ParseResult(scheme, netloc, path, params, query, fragment)
Concrete class for :func:`urlparse` results containing :class:`str`
data. The :meth:`encode` method returns a :class:`ParseResultBytes`
instance.
.. class:: SplitResult(scheme, netloc, path, query, fragment)
Concrete class for :func:`urlsplit` results containing :class:`str`
data. The :meth:`encode` method returns a :class:`SplitResultBytes`
instance.
The following classes provide the implementations of the parse results when
operating on :class:`bytes` or :class:`bytearray` objects:
.. class:: DefragResultBytes(url, fragment)
Concrete class for :func:`urldefrag` results containing :class:`bytes`
data. The :meth:`decode` method returns a :class:`DefragResult`
instance.
.. versionadded:: 3.2
.. class:: ParseResultBytes(scheme, netloc, path, params, query, fragment)
Concrete class for :func:`urlparse` results containing :class:`bytes`
data. The :meth:`decode` method returns a :class:`ParseResult`
instance.
.. versionadded:: 3.2
.. class:: SplitResultBytes(scheme, netloc, path, query, fragment)
Concrete class for :func:`urlsplit` results containing :class:`bytes`
data. The :meth:`decode` method returns a :class:`SplitResult`
instance.
.. versionadded:: 3.2
URL Quoting
-----------
The URL quoting functions focus on taking program data and making it safe
for use as URL components by quoting special characters and appropriately
encoding non-ASCII text. They also support reversing these operations to
recreate the original data from the contents of a URL component if that
task isn't already covered by the URL parsing functions above.
.. function:: quote(string, safe='/', encoding=None, errors=None)
Replace special characters in *string* using the ``%xx`` escape. Letters,
digits, and the characters ``'_.-~'`` are never quoted. By default, this
function is intended for quoting the path section of a URL. The optional
*safe* parameter specifies additional ASCII characters that should not be
quoted --- its default value is ``'/'``.
*string* may be either a :class:`str` or a :class:`bytes` object.
.. versionchanged:: 3.7
Moved from :rfc:`2396` to :rfc:`3986` for quoting URL strings. "~" is now
included in the set of unreserved characters.
The optional *encoding* and *errors* parameters specify how to deal with
non-ASCII characters, as accepted by the :meth:`str.encode` method.
*encoding* defaults to ``'utf-8'``.
*errors* defaults to ``'strict'``, meaning unsupported characters raise a
:class:`UnicodeEncodeError`.
*encoding* and *errors* must not be supplied if *string* is a
:class:`bytes`, or a :class:`TypeError` is raised.
Note that ``quote(string, safe, encoding, errors)`` is equivalent to
``quote_from_bytes(string.encode(encoding, errors), safe)``.
Example: ``quote('/El Niño/')`` yields ``'/El%20Ni%C3%B1o/'``.
.. function:: quote_plus(string, safe='', encoding=None, errors=None)
Like :func:`quote`, but also replace spaces with plus signs, as required for
quoting HTML form values when building up a query string to go into a URL.
Plus signs in the original string are escaped unless they are included in
*safe*. It also does not have *safe* default to ``'/'``.
Example: ``quote_plus('/El Niño/')`` yields ``'%2FEl+Ni%C3%B1o%2F'``.
.. function:: quote_from_bytes(bytes, safe='/')
Like :func:`quote`, but accepts a :class:`bytes` object rather than a
:class:`str`, and does not perform string-to-bytes encoding.
Example: ``quote_from_bytes(b'a&\xef')`` yields
``'a%26%EF'``.
.. function:: unquote(string, encoding='utf-8', errors='replace')
Replace ``%xx`` escapes with their single-character equivalent.
The optional *encoding* and *errors* parameters specify how to decode
percent-encoded sequences into Unicode characters, as accepted by the
:meth:`bytes.decode` method.
*string* may be either a :class:`str` or a :class:`bytes` object.
*encoding* defaults to ``'utf-8'``.
*errors* defaults to ``'replace'``, meaning invalid sequences are replaced
by a placeholder character.
Example: ``unquote('/El%20Ni%C3%B1o/')`` yields ``'/El Niño/'``.
.. versionchanged:: 3.9
*string* parameter supports bytes and str objects (previously only str).
.. function:: unquote_plus(string, encoding='utf-8', errors='replace')
Like :func:`unquote`, but also replace plus signs with spaces, as required
for unquoting HTML form values.
*string* must be a :class:`str`.
Example: ``unquote_plus('/El+Ni%C3%B1o/')`` yields ``'/El Niño/'``.
.. function:: unquote_to_bytes(string)
Replace ``%xx`` escapes with their single-octet equivalent, and return a
:class:`bytes` object.
*string* may be either a :class:`str` or a :class:`bytes` object.
If it is a :class:`str`, unescaped non-ASCII characters in *string*
are encoded into UTF-8 bytes.
Example: ``unquote_to_bytes('a%26%EF')`` yields ``b'a&\xef'``.
.. function:: urlencode(query, doseq=False, safe='', encoding=None, \
errors=None, quote_via=quote_plus)
Convert a mapping object or a sequence of two-element tuples, which may
contain :class:`str` or :class:`bytes` objects, to a percent-encoded ASCII
text string. If the resultant string is to be used as a *data* for POST
operation with the :func:`~urllib.request.urlopen` function, then
it should be encoded to bytes, otherwise it would result in a
:exc:`TypeError`.
The resulting string is a series of ``key=value`` pairs separated by ``'&'``
characters, where both *key* and *value* are quoted using the *quote_via*
function. By default, :func:`quote_plus` is used to quote the values, which
means spaces are quoted as a ``'+'`` character and '/' characters are
encoded as ``%2F``, which follows the standard for GET requests
(``application/x-www-form-urlencoded``). An alternate function that can be
passed as *quote_via* is :func:`quote`, which will encode spaces as ``%20``
and not encode '/' characters. For maximum control of what is quoted, use
``quote`` and specify a value for *safe*.
When a sequence of two-element tuples is used as the *query*
argument, the first element of each tuple is a key and the second is a
value. The value element in itself can be a sequence and in that case, if
the optional parameter *doseq* evaluates to ``True``, individual
``key=value`` pairs separated by ``'&'`` are generated for each element of
the value sequence for the key. The order of parameters in the encoded
string will match the order of parameter tuples in the sequence.
The *safe*, *encoding*, and *errors* parameters are passed down to
*quote_via* (the *encoding* and *errors* parameters are only passed
when a query element is a :class:`str`).
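   For example:
      >>> from urllib.parse import urlencode
      >>> urlencode({'name': 'El Niño', 'tag': ['python', 'url']}, doseq=True)
      'name=El+Ni%C3%B1o&tag=python&tag=url'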
To reverse this encoding process, :func:`parse_qs` and :func:`parse_qsl` are
provided in this module to parse query strings into Python data structures.
Refer to :ref:`urllib examples <urllib-examples>` to find out how the
:func:`urllib.parse.urlencode` method can be used for generating the query
string of a URL or data for a POST request.
.. versionchanged:: 3.2
*query* supports bytes and string objects.
.. versionadded:: 3.5
*quote_via* parameter.
.. seealso::
`WHATWG`_ - URL Living standard
Working Group for the URL Standard that defines URLs, domains, IP addresses, the
application/x-www-form-urlencoded format, and their API.
:rfc:`3986` - Uniform Resource Identifiers
This is the current standard (STD66). Any changes to urllib.parse module
should conform to this. Certain deviations could be observed, which are
mostly for backward compatibility purposes and for certain de-facto
parsing requirements as commonly observed in major browsers.
:rfc:`2732` - Format for Literal IPv6 Addresses in URL's.
This specifies the parsing requirements of IPv6 URLs.
:rfc:`2396` - Uniform Resource Identifiers (URI): Generic Syntax
Document describing the generic syntactic requirements for both Uniform Resource
Names (URNs) and Uniform Resource Locators (URLs).
:rfc:`2368` - The mailto URL scheme.
Parsing requirements for mailto URL schemes.
:rfc:`1808` - Relative Uniform Resource Locators
This Request For Comments includes the rules for joining an absolute and a
relative URL, including a fair number of "Abnormal Examples" which govern the
treatment of border cases.
:rfc:`1738` - Uniform Resource Locators (URL)
This specifies the formal syntax and semantics of absolute URLs.
.. _WHATWG: https://url.spec.whatwg.org/
| PypiClean |
/geo_espresso-0.3.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/espresso/_magnetotelluric_1D/examples/field_MT.ipynb
# Inversion of field MT data from the Coompana Province, South Australia
## MT site KN_1
### Data source: https://ecat.ga.gov.au/geonetwork/srv/eng/catalog.search#/metadata/112942
### Results report: https://ecat.ga.gov.au/geonetwork/srv/eng/catalog.search#/metadata/112981 (Figure 5)
## 0. Import modules
```
import numpy as np
import matplotlib.pyplot as plt
import cofi
from espresso import Magnetotelluric1D
from espresso._magnetotelluric_1D import load_data
mt = Magnetotelluric1D()
```
#### 0.1 Load the data
```
filename = '16-A_KN2.dat'
freqs, dobs, derr = load_data(filename, error_floor = 0.05)
```
The data in the *16-A_KN2.dat* file has been transformed from the original *16-A_KN2.edi* file.
The original file contains information about the full impedance tensor $Z$ and its error. For the purpose of the 1D inversion presented here, we first computed the determinant of the complex impedance tensor, and from it the apparent resistivity, phase, and their respective errors.
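For context, the determinant-based quantities follow the usual definitions (stated here only as a reminder; the conversion itself was done beforehand, outside this notebook):

$$Z_{\mathrm{det}} = \sqrt{Z_{xx} Z_{yy} - Z_{xy} Z_{yx}}, \qquad \rho_a = \frac{\left|Z_{\mathrm{det}}\right|^2}{\omega \mu_0}, \qquad \phi = \arg\left(Z_{\mathrm{det}}\right)$$

where $\omega$ is the angular frequency and $\mu_0$ is the magnetic permeability of free space.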
#### 0.2 Set-up and plot the data
```
#set observed data
mt.set_obs_data(dobs, derr, freqs)
# plot the data
fig = mt.plot_data(mt.data, Cm = mt.covariance_matrix)
```
#### 0.3. Define a starting 1D mesh and model for the inversion
```
# the mesh contains many cells to produce a smooth model
nLayers, min_thickness, vertical_growth= 100, 3, 1.1
thicknesses = [min_thickness * vertical_growth**i for i in range(nLayers)]
starting_depths = np.cumsum(thicknesses)
starting_model = np.ones((len(starting_depths)+1)) * 2 # 100 ohm.m starting model (log10 scale)
print('Model bottom: %.2f m'%starting_depths[-1])
```
#### 0.4. Set new starting model and mesh
```
mt.set_start_model(starting_model)
mt.set_start_mesh(starting_depths)
```
## 1. Define the problem
```
# define CoFI BaseProblem
mt_problem = cofi.BaseProblem()
mt_problem.set_initial_model(mt.starting_model)
# add regularization: smoothing
smoothing_factor = 50
reg_smoothing = smoothing_factor * cofi.utils.QuadraticReg("smoothing", (mt.model_size,))
reg = reg_smoothing
def objective_func(model, reg):
dpred = mt.forward(model)
data_misfit = mt.misfit(mt.data,dpred,mt.inverse_covariance_matrix)
model_reg = reg(model)
return data_misfit + model_reg
mt_problem.set_objective(objective_func, args=[reg])
#mt_problem.summary()
```
## 2. Define the inversion options
```
#mt_problem.suggest_tools();
my_options = cofi.InversionOptions()
my_options.set_tool("scipy.optimize.minimize")
my_options.set_params(method="L-BFGS-B",options={'ftol':1e-3,'maxiter': 100})
#my_options.summary()
#my_options.suggest_solver_params()
```
## 3. Start an inversion
```
print("Running inversion...")
inv = cofi.Inversion(mt_problem, my_options)
inv_result = inv.run()
print(" done!")
inv_result.summary()
```
## 4. Plotting inversion results
```
fig1 = mt.plot_model(inv_result.model, max_depth = -500, title='Smooth inversion'); # inverted model
fig = mt.plot_data(mt.data, mt.forward(inv_result.model), Cm = mt.covariance_matrix)
nRMSE = np.sqrt(mt.misfit(mt.data, mt.forward(inv_result.model), Cm_inv = mt.inverse_covariance_matrix)/mt.data_size)
print('nRMSE = %.3f'%nRMSE)
```
| PypiClean |
/intake-esm-2023.7.7.tar.gz/intake-esm-2023.7.7/docs/source/how-to/define-and-use-derived-variable-registry.md | ---
jupytext:
text_representation:
format_name: myst
kernelspec:
display_name: Python 3
name: python3
---
# Define and use derived variable registry
## What is a derived variable ?
A derived variable is a variable that is not present in the original dataset, but is computed from one or more variables in the dataset. For example, a derived variable could be temperature in degrees Fahrenheit. Oftentimes, climate models write temperature in Celsius or Kelvin, but the user may want degrees Fahrenheit!
This is a really simple example; derived variables could include more sophisticated diagnostic output like aggregations of terms in a tracer budget or gradients in a particular field.
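As a quick illustration of the idea, a derivation like this is just a plain function applied to an xarray dataset (the `TEMP` variable name and its Kelvin units are hypothetical and not part of the catalog used below):

```python
def kelvin_to_fahrenheit(ds):
    # hypothetical: assumes the dataset carries a ``TEMP`` variable in Kelvin
    ds['TEMP_degF'] = (ds.TEMP - 273.15) * 9 / 5 + 32
    ds['TEMP_degF'].attrs = {'units': 'degF', 'long_name': 'Temperature'}
    return ds
```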
```{note}
Currently, the derived variable implementation requires variables on the same grid, etc.; i.e., it assumes that all variables involved can be merged within **the same dataset**.
```
A traditional workflow for derived variables might consist of the following:
- Load the data
- Apply some function to the loaded datasets
- Plot the output
But what if we could couple those first two steps? What if we could have some set of **variable definitions**, consisting of variable requirements, such as `dependent variables`, and a function which derives the quantity? This is what the `derived_variable` functionality offers in `intake-esm`! This enables users to share a "registry" of derived variables across catalogs!
Let's get started with an example!
```{code-cell} ipython3
import intake
from intake_esm import DerivedVariableRegistry
```
## How to define a derived variable
Let's compute a derived variable - wind speed! This can be derived from using the zonal (`U`) and meridional (`V`) components of the wind.
### Step 1: define a function to compute `wind speed`
```{code-cell} ipython3
import numpy as np
def calc_wind_speed(ds):
ds['wind_speed'] = np.sqrt(ds.U ** 2 + ds.V ** 2)
ds['wind_speed'].attrs = {'units': 'm/s',
'long_name': 'Wind Speed',
'derived_by': 'intake-esm'}
return ds
```
### Step 2: create our derived variable registry
We need to instantiate our derived variable registry, which will store our derived variable information! We use the variable `dvr` for this (**D**erived**V**ariable**R**egistry).
```{code-cell} ipython3
dvr = DerivedVariableRegistry()
```
In order to add our derived variable to the registry, we need to add a [decorator](https://www.python.org/dev/peps/pep-0318/) to our function. This allows us to define our derived variable, dependent variables, and the function associated with the calculation.
```{note}
For more in-depth details about decorators, check this tutorial: [Primer on Python Decorators](https://realpython.com/primer-on-python-decorators/)
```
```{code-cell} ipython3
@dvr.register(variable='wind_speed', query={'variable': ['U', 'V']})
def calc_wind_speed(ds):
ds['wind_speed'] = np.sqrt(ds.U ** 2 + ds.V ** 2)
ds['wind_speed'].attrs = {'units': 'm/s',
'long_name': 'Wind Speed',
'derived_by': 'intake-esm'}
return ds
```
The `register` function has two required arguments: `variable` and `query`. In this particular example, the derived variable `wind_speed` is derived from `U` and `V`. It is possible to specify additional required metadata in the query, e.g. `U` and `V` from monthly control runs (e.g. `query={'variable': ['U', 'V'], 'experiment': 'CTRL', 'frequency': 'monthly'}` in the case of the CESM Large Ensemble).
You'll notice `dvr` now has a registered variable, `wind_speed`, which was defined in the cell above!
```{code-cell} ipython3
dvr
```
```{warning}
All fields (keys) specified in the query argument when registering a derived variable must be present in the catalog otherwise you will get a validation error when connecting a derived variable registry to an intake-esm catalog.
```
### Step 3: connect our derived variable registry to an intake-esm catalog
The derived variable registry is now ready to be used with an intake-esm catalog. To do this, we need to add the registry to the catalog. In this case, we will use data from the CESM Large Ensemble (LENS). This is a climate model ensemble, a subset of which is hosted on the AWS Cloud. If you are interested in learning more about this dataset, check out the [LENS on AWS documentation page](https://ncar.github.io/cesm-lens-aws/).
We connect our derived variable registry to a catalog by using the `registry` argument when instantiating the catalog:
```{code-cell} ipython3
data_catalog = intake.open_esm_datastore(
'https://raw.githubusercontent.com/NCAR/cesm-lens-aws/master/intake-catalogs/aws-cesm1-le.json',
registry=dvr,
)
```
You'll notice we have a new field - `derived_variable` which has 1 unique value. This is because we have only registered one derived variable, `wind_speed`.
```{code-cell} ipython3
data_catalog
```
Let's also subset for monthly frequency and the RCP 8.5 (RCP85) experiment.
```{code-cell} ipython3
catalog_subset = data_catalog.search(
variable=['wind_speed'], frequency='monthly', experiment='RCP85'
)
catalog_subset
```
When loading in the data, `intake-esm` will lazily add our calculation for `wind_speed` to the appropriate datasets!
```{code-cell} ipython3
dsets = catalog_subset.to_dataset_dict(
xarray_open_kwargs={'backend_kwargs': {'storage_options': {'anon': True}}}
)
dsets.keys()
```
Let's look at a single dataset from this dictionary of datasets... using the key `atm.RCP85.monthly`. You'll notice upon reading in the dataset, we have three variables:
- `U`
- `V`
- `wind_speed`
```{code-cell} ipython3
ds = dsets['atm.RCP85.monthly']
ds
```
```{code-cell} ipython3
---
tags: [hide-input, hide-output]
---
import intake_esm # just to display version information
intake_esm.show_versions()
```
| PypiClean |
/allenact_plugins-0.5.3.tar.gz/allenact_plugins-0.5.3/allenact_plugins/gym_plugin/gym_models.py | from typing import Dict, Union, Optional, Tuple, Any, Sequence, cast
import gym
import torch
import torch.nn as nn
from allenact.algorithms.onpolicy_sync.policy import (
ActorCriticModel,
DistributionType,
)
from allenact.base_abstractions.misc import ActorCriticOutput, Memory
from allenact_plugins.gym_plugin.gym_distributions import GaussianDistr
class MemorylessActorCritic(ActorCriticModel[GaussianDistr]):
"""ActorCriticModel for gym tasks with continuous control in the range [-1,
1]."""
def __init__(
self,
input_uuid: str,
action_space: gym.spaces.Box,
observation_space: gym.spaces.Dict,
action_std: float = 0.5,
mlp_hidden_dims: Sequence[int] = (64, 32),
):
super().__init__(action_space, observation_space)
self.input_uuid = input_uuid
assert len(observation_space[self.input_uuid].shape) == 1
state_dim = observation_space[self.input_uuid].shape[0]
assert len(action_space.shape) == 1
action_dim = action_space.shape[0]
mlp_hidden_dims = (state_dim,) + tuple(mlp_hidden_dims)
# action mean range -1 to 1
self.actor = nn.Sequential(
*self.make_mlp_hidden(nn.Tanh, *mlp_hidden_dims),
nn.Linear(mlp_hidden_dims[-1], action_dim),  # width of the last hidden layer (32 by default)
nn.Tanh(),
)
# critic
self.critic = nn.Sequential(
*self.make_mlp_hidden(nn.Tanh, *mlp_hidden_dims), nn.Linear(mlp_hidden_dims[-1], 1),
)
# maximum standard deviation
self.register_buffer(
"action_std",
torch.tensor([action_std] * action_dim).view(1, 1, -1),
persistent=False,
)
@staticmethod
def make_mlp_hidden(nl, *dims):
res = []
for it, dim in enumerate(dims[:-1]):
res.append(nn.Linear(dim, dims[it + 1]),)
res.append(nl())
return res
def _recurrent_memory_specification(self):
return None
def forward( # type:ignore
self,
observations: Dict[str, Union[torch.FloatTensor, Dict[str, Any]]],
memory: Memory,
prev_actions: Any,
masks: torch.FloatTensor,
) -> Tuple[ActorCriticOutput[DistributionType], Optional[Memory]]:
means = self.actor(observations[self.input_uuid])
values = self.critic(observations[self.input_uuid])
return (
ActorCriticOutput(
cast(DistributionType, GaussianDistr(loc=means, scale=self.action_std)),
values,
{},
),
None, # no Memory
) | PypiClean |
/PyHive-abmn614-0.6.5.tar.gz/PyHive-abmn614-0.6.5/pyhive/sqlalchemy_presto.py | from __future__ import absolute_import
from __future__ import unicode_literals
import re
from sqlalchemy import exc
from sqlalchemy import types
from sqlalchemy import util
# TODO shouldn't use mysql type
from sqlalchemy.databases import mysql
from sqlalchemy.engine import default
from sqlalchemy.sql import compiler
from sqlalchemy.sql.compiler import SQLCompiler
from pyhive import presto
from pyhive.common import UniversalSet
class PrestoIdentifierPreparer(compiler.IdentifierPreparer):
# Just quote everything to make things simpler / easier to upgrade
reserved_words = UniversalSet()
_type_map = {
'boolean': types.Boolean,
'tinyint': mysql.MSTinyInteger,
'smallint': types.SmallInteger,
'integer': types.Integer,
'bigint': types.BigInteger,
'real': types.Float,
'double': types.Float,
'varchar': types.String,
'timestamp': types.TIMESTAMP,
'date': types.DATE,
'varbinary': types.VARBINARY,
}
class PrestoCompiler(SQLCompiler):
def visit_char_length_func(self, fn, **kw):
return 'length{}'.format(self.function_argspec(fn, **kw))
class PrestoTypeCompiler(compiler.GenericTypeCompiler):
def visit_CLOB(self, type_, **kw):
raise ValueError("Presto does not support the CLOB column type.")
def visit_NCLOB(self, type_, **kw):
raise ValueError("Presto does not support the NCLOB column type.")
def visit_DATETIME(self, type_, **kw):
raise ValueError("Presto does not support the DATETIME column type.")
def visit_FLOAT(self, type_, **kw):
return 'DOUBLE'
def visit_TEXT(self, type_, **kw):
if type_.length:
return 'VARCHAR({:d})'.format(type_.length)
else:
return 'VARCHAR'
class PrestoDialect(default.DefaultDialect):
name = 'presto'
driver = 'rest'
paramstyle = 'pyformat'
preparer = PrestoIdentifierPreparer
statement_compiler = PrestoCompiler
supports_alter = False
supports_pk_autoincrement = False
supports_default_values = False
supports_empty_insert = False
supports_multivalues_insert = True
supports_unicode_statements = True
supports_unicode_binds = True
returns_unicode_strings = True
description_encoding = None
supports_native_boolean = True
type_compiler = PrestoTypeCompiler
@classmethod
def dbapi(cls):
return presto
def create_connect_args(self, url):
db_parts = (url.database or 'hive').split('/')
kwargs = {
'host': url.host,
'port': url.port or 8080,
'username': url.username,
'password': url.password
}
kwargs.update(url.query)
if len(db_parts) == 1:
kwargs['catalog'] = db_parts[0]
elif len(db_parts) == 2:
kwargs['catalog'] = db_parts[0]
kwargs['schema'] = db_parts[1]
else:
raise ValueError("Unexpected database format {}".format(url.database))
return [], kwargs
def get_schema_names(self, connection, **kw):
return [row.Schema for row in connection.execute('SHOW SCHEMAS')]
def _get_table_columns(self, connection, table_name, schema):
full_table = self.identifier_preparer.quote_identifier(table_name)
if schema:
full_table = self.identifier_preparer.quote_identifier(schema) + '.' + full_table
try:
return connection.execute('SHOW COLUMNS FROM {}'.format(full_table))
except (presto.DatabaseError, exc.DatabaseError) as e:
# Normally SQLAlchemy should wrap this exception in sqlalchemy.exc.DatabaseError, which
# it successfully does in the Hive version. The difference with Presto is that this
# error is raised when fetching the cursor's description rather than the initial execute
# call. SQLAlchemy doesn't handle this. Thus, we catch the unwrapped
# presto.DatabaseError here.
# Does the table exist?
msg = (
e.args[0].get('message') if e.args and isinstance(e.args[0], dict)
else e.args[0] if e.args and isinstance(e.args[0], str)
else None
)
regex = r"Table\ \'.*{}\'\ does\ not\ exist".format(re.escape(table_name))
if msg and re.search(regex, msg):
raise exc.NoSuchTableError(table_name)
else:
raise
def has_table(self, connection, table_name, schema=None):
try:
self._get_table_columns(connection, table_name, schema)
return True
except exc.NoSuchTableError:
return False
def get_columns(self, connection, table_name, schema=None, **kw):
rows = self._get_table_columns(connection, table_name, schema)
result = []
for row in rows:
try:
coltype = _type_map[row.Type]
except KeyError:
util.warn("Did not recognize type '%s' of column '%s'" % (row.Type, row.Column))
coltype = types.NullType
result.append({
'name': row.Column,
'type': coltype,
# newer Presto no longer includes this column
'nullable': getattr(row, 'Null', True),
'default': None,
})
return result
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
# Presto has no support for foreign keys.
return []
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
# Presto has no support for primary keys.
return []
def get_indexes(self, connection, table_name, schema=None, **kw):
rows = self._get_table_columns(connection, table_name, schema)
col_names = []
for row in rows:
part_key = 'Partition Key'
# Presto puts this information in one of 3 places depending on version
# - a boolean column named "Partition Key"
# - a string in the "Comment" column
# - a string in the "Extra" column
is_partition_key = (
(part_key in row and row[part_key])
or row['Comment'].startswith(part_key)
or ('Extra' in row and 'partition key' in row['Extra'])
)
if is_partition_key:
col_names.append(row['Column'])
if col_names:
return [{'name': 'partition', 'column_names': col_names, 'unique': False}]
else:
return []
def get_table_names(self, connection, schema=None, **kw):
query = 'SHOW TABLES'
if schema:
query += ' FROM ' + self.identifier_preparer.quote_identifier(schema)
return [row.Table for row in connection.execute(query)]
def do_rollback(self, dbapi_connection):
# No transactions for Presto
pass
def _check_unicode_returns(self, connection, additional_tests=None):
# requests gives back Unicode strings
return True
def _check_unicode_description(self, connection):
# requests gives back Unicode strings
return True | PypiClean |
/django-ra-erp-1.3.1.tar.gz/django-ra-erp-1.3.1/ra/static/ra/plugins/datatables/DataTables-1.10.20/js/dataTables.bootstrap.min.js | var $jscomp=$jscomp||{};$jscomp.scope={};$jscomp.findInternal=function(a,b,c){a instanceof String&&(a=String(a));for(var e=a.length,d=0;d<e;d++){var k=a[d];if(b.call(c,k,d,a))return{i:d,v:k}}return{i:-1,v:void 0}};$jscomp.ASSUME_ES5=!1;$jscomp.ASSUME_NO_NATIVE_MAP=!1;$jscomp.ASSUME_NO_NATIVE_SET=!1;$jscomp.SIMPLE_FROUND_POLYFILL=!1;
$jscomp.defineProperty=$jscomp.ASSUME_ES5||"function"==typeof Object.defineProperties?Object.defineProperty:function(a,b,c){a!=Array.prototype&&a!=Object.prototype&&(a[b]=c.value)};$jscomp.getGlobal=function(a){return"undefined"!=typeof window&&window===a?a:"undefined"!=typeof global&&null!=global?global:a};$jscomp.global=$jscomp.getGlobal(this);
$jscomp.polyfill=function(a,b,c,e){if(b){c=$jscomp.global;a=a.split(".");for(e=0;e<a.length-1;e++){var d=a[e];d in c||(c[d]={});c=c[d]}a=a[a.length-1];e=c[a];b=b(e);b!=e&&null!=b&&$jscomp.defineProperty(c,a,{configurable:!0,writable:!0,value:b})}};$jscomp.polyfill("Array.prototype.find",function(a){return a?a:function(a,c){return $jscomp.findInternal(this,a,c).v}},"es6","es3");
(function(a){"function"===typeof define&&define.amd?define(["jquery","datatables.net"],function(b){return a(b,window,document)}):"object"===typeof exports?module.exports=function(b,c){b||(b=window);c&&c.fn.dataTable||(c=require("datatables.net")(b,c).$);return a(c,b,b.document)}:a(jQuery,window,document)})(function(a,b,c,e){var d=a.fn.dataTable;a.extend(!0,d.defaults,{dom:"<'row'<'col-sm-6'l><'col-sm-6'f>><'row'<'col-sm-12'tr>><'row'<'col-sm-5'i><'col-sm-7'p>>",renderer:"bootstrap"});a.extend(d.ext.classes,
{sWrapper:"dataTables_wrapper form-inline dt-bootstrap",sFilterInput:"form-control input-sm",sLengthSelect:"form-control input-sm",sProcessing:"dataTables_processing panel panel-default"});d.ext.renderer.pageButton.bootstrap=function(b,l,v,w,m,r){var k=new d.Api(b),x=b.oClasses,n=b.oLanguage.oPaginate,y=b.oLanguage.oAria.paginate||{},g,h,t=0,u=function(c,d){var e,l=function(b){b.preventDefault();a(b.currentTarget).hasClass("disabled")||k.page()==b.data.action||k.page(b.data.action).draw("page")};
var q=0;for(e=d.length;q<e;q++){var f=d[q];if(a.isArray(f))u(c,f);else{h=g="";switch(f){case "ellipsis":g="…";h="disabled";break;case "first":g=n.sFirst;h=f+(0<m?"":" disabled");break;case "previous":g=n.sPrevious;h=f+(0<m?"":" disabled");break;case "next":g=n.sNext;h=f+(m<r-1?"":" disabled");break;case "last":g=n.sLast;h=f+(m<r-1?"":" disabled");break;default:g=f+1,h=m===f?"active":""}if(g){var p=a("<li>",{"class":x.sPageButton+" "+h,id:0===v&&"string"===typeof f?b.sTableId+"_"+f:null}).append(a("<a>",
{href:"#","aria-controls":b.sTableId,"aria-label":y[f],"data-dt-idx":t,tabindex:b.iTabIndex}).html(g)).appendTo(c);b.oApi._fnBindAction(p,{action:f},l);t++}}}};try{var p=a(l).find(c.activeElement).data("dt-idx")}catch(z){}u(a(l).empty().html('<ul class="pagination"/>').children("ul"),w);p!==e&&a(l).find("[data-dt-idx="+p+"]").focus()};return d}); | PypiClean |
/xai-benchmark-0.3.0.tar.gz/xai-benchmark-0.3.0/xaib/metrics/example_selection/parameter_randomization_check.py | from typing import Any, Dict, Union
import numpy as np
from tqdm import tqdm
from ...base import Dataset, Explainer, Metric, Model
from ...utils import SimpleDataloader, batch_count_eq, minmax_normalize
class ParameterRandomizationCheck(Metric):
"""
Parameter randomization check is a sanity-check.
To ensure that the model influence explanations the
following is done. The model is changed and it is expected that
explanations should not stay the same is model changed.
This check uses random model baselines instead of same models
with randomized internal states.
Then the explanations on the original data are obtained.
They are compared with explanations done with the original model by
counting how many examples were the same for same data points.
The less the better.
**The less the better**
- **Worst case:** explanations are the same, so it is Constant explainer
- **Best case:** is reached when explanations are the opposite,
distance between them maximized.
The problem with this kind of metric is
with its maximization. It seems redundant to maximize it because more
different explanations on random states do not mean that the model is
more correct.
It is difficult to define best case explainer in this case - the metric has no maximum value.
"""
def __init__(
self, ds: Dataset, model: Model, noisy_model: Model, **kwargs: Any
) -> None:
super().__init__(ds, model, **kwargs)
self._noisy_model = noisy_model
self.name = "parameter_randomization_check"
self.direction = "down"
def compute(
self,
expl: Explainer,
batch_size: int = 1,
expl_kwargs: Union[Dict[Any, Any], None] = None,
expl_noisy_kwargs: Union[Dict[Any, Any], None] = None,
) -> float:
if expl_kwargs is None:
expl_kwargs = {}
if expl_noisy_kwargs is None:
expl_noisy_kwargs = {}
diffs_expl = []
for batch in tqdm(SimpleDataloader(self._ds, batch_size)):
item = batch["item"]
explanation_batch = expl.predict(item, self._model, **expl_kwargs)
noisy_explanation_batch = expl.predict(
item, self._noisy_model, **expl_noisy_kwargs
)
diffs_expl += batch_count_eq(explanation_batch, noisy_explanation_batch)
return sum(diffs_expl) / len(diffs_expl) | PypiClean |
/aspose-tasks-cloud-22.12.0.tar.gz/aspose-tasks-cloud-22.12.0/asposetaskscloud/models/week_day_type.py | import pprint
import re # noqa: F401
import six
class WeekDayType(object):
"""Represents a weekday of a project in the instance of RecurringInfo class.
"""
"""
allowed enum values
"""
NONE = "None"
SUNDAY = "Sunday"
MONDAY = "Monday"
TUESDAY = "Tuesday"
WEDNESDAY = "Wednesday"
THURSDAY = "Thursday"
FRIDAY = "Friday"
SATURDAY = "Saturday"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""WeekDayType - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WeekDayType):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | PypiClean |
/fake-bge-module-latest-20230415.tar.gz/fake-bge-module-latest-20230415/bl_operators/userpref.py | import sys
import typing
import bpy_types
class PREFERENCES_OT_addon_disable(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, _context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_addon_enable(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, _context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_addon_expand(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, _context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_addon_install(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def invoke(self, context, _event):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_addon_refresh(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, _context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_addon_remove(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def invoke(self, context, _event):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_addon(self, module):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_addon_show(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_app_template_install(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, _context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def invoke(self, context, _event):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_copy_prev(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, _context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, _context):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def previous_version(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_keyconfig_activate(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, _context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_keyconfig_export(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def invoke(self, context, _event):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_keyconfig_import(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, _context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def invoke(self, context, _event):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_keyconfig_remove(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_keyconfig_test(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_keyitem_add(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_keyitem_remove(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_keyitem_restore(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, context):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_keymap_restore(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_script_directory_new(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def invoke(self, context, _event):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_script_directory_remove(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_studiolight_copy_settings(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_studiolight_install(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def invoke(self, context, _event):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_studiolight_new(bpy_types.Operator):
ask_override = None
''' '''
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def invoke(self, context, _event):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_studiolight_show(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_options = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll(self, _context):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_studiolight_uninstall(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class PREFERENCES_OT_theme_install(bpy_types.Operator):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def as_keywords(self, ignore):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def execute(self, _context):
'''
'''
pass
def get(self):
'''
'''
pass
def id_properties_clear(self):
'''
'''
pass
def id_properties_ensure(self):
'''
'''
pass
def id_properties_ui(self):
'''
'''
pass
def invoke(self, context, _event):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def poll_message_set(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass | PypiClean |
/synth_forc-0.1.2-py3-none-any.whl/synth_forc/qt/save_dialog.py |
from PyQt6 import QtCore, QtGui, QtWidgets
class Ui_SaveDialog(object):
def setupUi(self, SaveDialog):
SaveDialog.setObjectName("SaveDialog")
SaveDialog.resize(773, 332)
self.gridLayout = QtWidgets.QGridLayout(SaveDialog)
self.gridLayout.setObjectName("gridLayout")
self.txt_aspect_ratio_distribution = QtWidgets.QLineEdit(SaveDialog)
self.txt_aspect_ratio_distribution.setObjectName("txt_aspect_ratio_distribution")
self.gridLayout.addWidget(self.txt_aspect_ratio_distribution, 4, 1, 1, 1)
self.lbl_aspect_ratio_distribution = QtWidgets.QLabel(SaveDialog)
self.lbl_aspect_ratio_distribution.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.lbl_aspect_ratio_distribution.setObjectName("lbl_aspect_ratio_distribution")
self.gridLayout.addWidget(self.lbl_aspect_ratio_distribution, 4, 0, 1, 1)
self.frm_save_exit = QtWidgets.QFrame(SaveDialog)
self.frm_save_exit.setFrameShape(QtWidgets.QFrame.Shape.NoFrame)
self.frm_save_exit.setFrameShadow(QtWidgets.QFrame.Shadow.Plain)
self.frm_save_exit.setObjectName("frm_save_exit")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.frm_save_exit)
self.horizontalLayout.setObjectName("horizontalLayout")
self.btn_save = QtWidgets.QPushButton(self.frm_save_exit)
self.btn_save.setMinimumSize(QtCore.QSize(60, 0))
self.btn_save.setMaximumSize(QtCore.QSize(60, 16777215))
self.btn_save.setObjectName("btn_save")
self.horizontalLayout.addWidget(self.btn_save)
self.btn_exit = QtWidgets.QPushButton(self.frm_save_exit)
self.btn_exit.setMinimumSize(QtCore.QSize(60, 0))
self.btn_exit.setMaximumSize(QtCore.QSize(60, 16777215))
self.btn_exit.setObjectName("btn_exit")
self.horizontalLayout.addWidget(self.btn_exit)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.gridLayout.addWidget(self.frm_save_exit, 5, 0, 1, 2)
self.btn_forc_file_dialog = QtWidgets.QPushButton(SaveDialog)
self.btn_forc_file_dialog.setObjectName("btn_forc_file_dialog")
self.gridLayout.addWidget(self.btn_forc_file_dialog, 1, 2, 1, 1)
self.txt_size_distribution = QtWidgets.QLineEdit(SaveDialog)
self.txt_size_distribution.setObjectName("txt_size_distribution")
self.gridLayout.addWidget(self.txt_size_distribution, 3, 1, 1, 1)
self.txt_forc = QtWidgets.QLineEdit(SaveDialog)
self.txt_forc.setObjectName("txt_forc")
self.gridLayout.addWidget(self.txt_forc, 1, 1, 1, 1)
self.lbl_forc = QtWidgets.QLabel(SaveDialog)
self.lbl_forc.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.lbl_forc.setObjectName("lbl_forc")
self.gridLayout.addWidget(self.lbl_forc, 1, 0, 1, 1)
self.lbl_forc_loops = QtWidgets.QLabel(SaveDialog)
self.lbl_forc_loops.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.lbl_forc_loops.setObjectName("lbl_forc_loops")
self.gridLayout.addWidget(self.lbl_forc_loops, 2, 0, 1, 1)
self.lbl_size_distribution = QtWidgets.QLabel(SaveDialog)
self.lbl_size_distribution.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
self.lbl_size_distribution.setObjectName("lbl_size_distribution")
self.gridLayout.addWidget(self.lbl_size_distribution, 3, 0, 1, 1)
self.txt_forc_loops = QtWidgets.QLineEdit(SaveDialog)
self.txt_forc_loops.setObjectName("txt_forc_loops")
self.gridLayout.addWidget(self.txt_forc_loops, 2, 1, 1, 1)
self.btn_clear_forc = QtWidgets.QPushButton(SaveDialog)
self.btn_clear_forc.setObjectName("btn_clear_forc")
self.gridLayout.addWidget(self.btn_clear_forc, 1, 3, 1, 1)
self.btn_forc_loops_file_dialog = QtWidgets.QPushButton(SaveDialog)
self.btn_forc_loops_file_dialog.setObjectName("btn_forc_loops_file_dialog")
self.gridLayout.addWidget(self.btn_forc_loops_file_dialog, 2, 2, 1, 1)
self.btn_size_distribution_file_dialog = QtWidgets.QPushButton(SaveDialog)
self.btn_size_distribution_file_dialog.setObjectName("btn_size_distribution_file_dialog")
self.gridLayout.addWidget(self.btn_size_distribution_file_dialog, 3, 2, 1, 1)
self.btn_aspect_ratio_distribution_file_dialog = QtWidgets.QPushButton(SaveDialog)
self.btn_aspect_ratio_distribution_file_dialog.setObjectName("btn_aspect_ratio_distribution_file_dialog")
self.gridLayout.addWidget(self.btn_aspect_ratio_distribution_file_dialog, 4, 2, 1, 1)
self.btn_clear_forc_loops = QtWidgets.QPushButton(SaveDialog)
self.btn_clear_forc_loops.setObjectName("btn_clear_forc_loops")
self.gridLayout.addWidget(self.btn_clear_forc_loops, 2, 3, 1, 1)
self.btn_clear_size_distribution = QtWidgets.QPushButton(SaveDialog)
self.btn_clear_size_distribution.setObjectName("btn_clear_size_distribution")
self.gridLayout.addWidget(self.btn_clear_size_distribution, 3, 3, 1, 1)
self.btn_clear_aspect_ratio_distribution = QtWidgets.QPushButton(SaveDialog)
self.btn_clear_aspect_ratio_distribution.setObjectName("btn_clear_aspect_ratio_distribution")
self.gridLayout.addWidget(self.btn_clear_aspect_ratio_distribution, 4, 3, 1, 1)
self.lbl_message = QtWidgets.QLabel(SaveDialog)
self.lbl_message.setWordWrap(True)
self.lbl_message.setObjectName("lbl_message")
self.gridLayout.addWidget(self.lbl_message, 0, 0, 1, 4)
self.retranslateUi(SaveDialog)
QtCore.QMetaObject.connectSlotsByName(SaveDialog)
SaveDialog.setTabOrder(self.txt_forc, self.txt_forc_loops)
SaveDialog.setTabOrder(self.txt_forc_loops, self.txt_size_distribution)
SaveDialog.setTabOrder(self.txt_size_distribution, self.txt_aspect_ratio_distribution)
SaveDialog.setTabOrder(self.txt_aspect_ratio_distribution, self.btn_save)
SaveDialog.setTabOrder(self.btn_save, self.btn_exit)
SaveDialog.setTabOrder(self.btn_exit, self.btn_forc_file_dialog)
SaveDialog.setTabOrder(self.btn_forc_file_dialog, self.btn_clear_forc)
SaveDialog.setTabOrder(self.btn_clear_forc, self.btn_forc_loops_file_dialog)
SaveDialog.setTabOrder(self.btn_forc_loops_file_dialog, self.btn_clear_forc_loops)
SaveDialog.setTabOrder(self.btn_clear_forc_loops, self.btn_size_distribution_file_dialog)
SaveDialog.setTabOrder(self.btn_size_distribution_file_dialog, self.btn_clear_size_distribution)
SaveDialog.setTabOrder(self.btn_clear_size_distribution, self.btn_aspect_ratio_distribution_file_dialog)
SaveDialog.setTabOrder(self.btn_aspect_ratio_distribution_file_dialog, self.btn_clear_aspect_ratio_distribution)
def retranslateUi(self, SaveDialog):
_translate = QtCore.QCoreApplication.translate
SaveDialog.setWindowTitle(_translate("SaveDialog", "Save"))
self.txt_aspect_ratio_distribution.setToolTip(_translate("SaveDialog", "<html><head/><body><p>Aspect ratio graph output file name.</p></body></html>"))
self.lbl_aspect_ratio_distribution.setText(_translate("SaveDialog", "Aspect ratio distribution:"))
self.btn_save.setToolTip(_translate("SaveDialog", "<html><head/><body><p>Save files.</p></body></html>"))
self.btn_save.setText(_translate("SaveDialog", "save"))
self.btn_exit.setToolTip(_translate("SaveDialog", "<html><head/><body><p>Exit without saving.</p></body></html>"))
self.btn_exit.setText(_translate("SaveDialog", "exit"))
self.btn_forc_file_dialog.setToolTip(_translate("SaveDialog", "<html><head/><body><p>Open FORC diagram file output dialog.</p></body></html>"))
self.btn_forc_file_dialog.setText(_translate("SaveDialog", "..."))
self.txt_size_distribution.setToolTip(_translate("SaveDialog", "<html><head/><body><p>Size distribution graph output file name.</p></body></html>"))
self.txt_forc.setToolTip(_translate("SaveDialog", "<html><head/><body><p>FORC diagram figure output file name.</p></body></html>"))
self.lbl_forc.setText(_translate("SaveDialog", "FORC:"))
self.lbl_forc_loops.setText(_translate("SaveDialog", "FORC loops:"))
self.lbl_size_distribution.setText(_translate("SaveDialog", "Size distribution:"))
self.txt_forc_loops.setToolTip(_translate("SaveDialog", "<html><head/><body><p>FORC loops figure output file name.</p></body></html>"))
self.btn_clear_forc.setToolTip(_translate("SaveDialog", "<html><head/><body><p>Clear FORC diagram file output.</p></body></html>"))
self.btn_clear_forc.setText(_translate("SaveDialog", "clear"))
self.btn_forc_loops_file_dialog.setToolTip(_translate("SaveDialog", "<html><head/><body><p>Open FORC loops diagram file output dialog.</p></body></html>"))
self.btn_forc_loops_file_dialog.setText(_translate("SaveDialog", "..."))
self.btn_size_distribution_file_dialog.setToolTip(_translate("SaveDialog", "<html><head/><body><p>Open size distribution graph file output dialog.</p></body></html>"))
self.btn_size_distribution_file_dialog.setText(_translate("SaveDialog", "..."))
self.btn_aspect_ratio_distribution_file_dialog.setToolTip(_translate("SaveDialog", "<html><head/><body><p>Open aspect ratio distribution graph file output dialog.</p></body></html>"))
self.btn_aspect_ratio_distribution_file_dialog.setText(_translate("SaveDialog", "..."))
self.btn_clear_forc_loops.setToolTip(_translate("SaveDialog", "<html><head/><body><p>Clear FORC loops diagram file output.</p></body></html>"))
self.btn_clear_forc_loops.setText(_translate("SaveDialog", "clear"))
self.btn_clear_size_distribution.setToolTip(_translate("SaveDialog", "<html><head/><body><p>Clear size distribution graph file output.</p></body></html>"))
self.btn_clear_size_distribution.setText(_translate("SaveDialog", "clear"))
self.btn_clear_aspect_ratio_distribution.setToolTip(_translate("SaveDialog", "<html><head/><body><p>Clear aspect ratio distribution graph file output.</p></body></html>"))
self.btn_clear_aspect_ratio_distribution.setText(_translate("SaveDialog", "clear"))
self.lbl_message.setText(_translate("SaveDialog", "Input file names for the images you\'d like to save. Please note that any existing file names will be overwritten.")) | PypiClean |
/gbptestneutron-0.9.0.tar.gz/gbptestneutron-0.9.0.dev194.g9d5bd52/neutron/plugins/cisco/l3/plugging_drivers/n1kv_trunking_driver.py |
import eventlet
from oslo.config import cfg
from sqlalchemy.orm import exc
from sqlalchemy.sql import expression as expr
from neutron.api.v2 import attributes
from neutron.common import exceptions as n_exc
from neutron import context as n_context
from neutron.db import models_v2
from neutron.extensions import providernet as pr_net
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.db.l3 import l3_models
from neutron.plugins.cisco.extensions import n1kv
import neutron.plugins.cisco.l3.plugging_drivers as plug
from neutron.plugins.cisco.l3.plugging_drivers import (n1kv_plugging_constants
as n1kv_const)
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
N1KV_TRUNKING_DRIVER_OPTS = [
cfg.StrOpt('management_port_profile', default='osn_mgmt_pp',
help=_("Name of N1kv port profile for management ports.")),
cfg.StrOpt('t1_port_profile', default='osn_t1_pp',
help=_("Name of N1kv port profile for T1 ports (i.e., ports "
"carrying traffic from VXLAN segmented networks).")),
cfg.StrOpt('t2_port_profile', default='osn_t2_pp',
help=_("Name of N1kv port profile for T2 ports (i.e., ports "
"carrying traffic from VLAN segmented networks).")),
cfg.StrOpt('t1_network_profile', default='osn_t1_np',
help=_("Name of N1kv network profile for T1 networks (i.e., "
"trunk networks for VXLAN segmented traffic).")),
cfg.StrOpt('t2_network_profile', default='osn_t2_np',
help=_("Name of N1kv network profile for T2 networks (i.e., "
"trunk networks for VLAN segmented traffic).")),
]
cfg.CONF.register_opts(N1KV_TRUNKING_DRIVER_OPTS, "n1kv")
MIN_LL_VLAN_TAG = 10
MAX_LL_VLAN_TAG = 200
FULL_VLAN_SET = set(range(MIN_LL_VLAN_TAG, MAX_LL_VLAN_TAG + 1))
DELETION_ATTEMPTS = 5
SECONDS_BETWEEN_DELETION_ATTEMPTS = 3
# Port lookups can fail so retries are needed
MAX_HOSTING_PORT_LOOKUP_ATTEMPTS = 10
SECONDS_BETWEEN_HOSTING_PORT_LOOKSUPS = 2
class N1kvTrunkingPlugDriver(plug.PluginSidePluggingDriver):
"""Driver class for service VMs used with the N1kv plugin.
    The driver makes use of the N1kv plugin's VLAN trunk feature.
"""
_mgmt_port_profile_id = None
_t1_port_profile_id = None
_t2_port_profile_id = None
_t1_network_profile_id = None
_t2_network_profile_id = None
@property
def _core_plugin(self):
return manager.NeutronManager.get_plugin()
@classmethod
def _get_profile_id(cls, p_type, resource, name):
try:
tenant_id = manager.NeutronManager.get_service_plugins()[
constants.L3_ROUTER_NAT].l3_tenant_id()
except AttributeError:
return
if tenant_id is None:
return
core_plugin = manager.NeutronManager.get_plugin()
if p_type == 'net_profile':
profiles = core_plugin.get_network_profiles(
n_context.get_admin_context(),
{'tenant_id': [tenant_id], 'name': [name]},
['id'])
else:
profiles = core_plugin.get_policy_profiles(
n_context.get_admin_context(),
{'tenant_id': [tenant_id], 'name': [name]},
['id'])
if len(profiles) == 1:
return profiles[0]['id']
elif len(profiles) > 1:
# Profile must have a unique name.
            LOG.error(_('The %(resource)s %(name)s does not have a unique '
                        'name. Please refer to the admin guide and create one.'),
{'resource': resource, 'name': name})
else:
# Profile has not been created.
            LOG.error(_('There is no %(resource)s %(name)s. Please refer to '
                        'the admin guide and create one.'),
{'resource': resource, 'name': name})
@classmethod
def mgmt_port_profile_id(cls):
if cls._mgmt_port_profile_id is None:
cls._mgmt_port_profile_id = cls._get_profile_id(
'port_profile', 'N1kv port profile',
cfg.CONF.n1kv.management_port_profile)
return cls._mgmt_port_profile_id
@classmethod
def t1_port_profile_id(cls):
if cls._t1_port_profile_id is None:
cls._t1_port_profile_id = cls._get_profile_id(
'port_profile', 'N1kv port profile',
cfg.CONF.n1kv.t1_port_profile)
return cls._t1_port_profile_id
@classmethod
def t2_port_profile_id(cls):
if cls._t2_port_profile_id is None:
cls._t2_port_profile_id = cls._get_profile_id(
'port_profile', 'N1kv port profile',
cfg.CONF.n1kv.t2_port_profile)
return cls._t2_port_profile_id
@classmethod
def t1_network_profile_id(cls):
if cls._t1_network_profile_id is None:
cls._t1_network_profile_id = cls._get_profile_id(
'net_profile', 'N1kv network profile',
cfg.CONF.n1kv.t1_network_profile)
return cls._t1_network_profile_id
@classmethod
def t2_network_profile_id(cls):
if cls._t2_network_profile_id is None:
cls._t2_network_profile_id = cls._get_profile_id(
'net_profile', 'N1kv network profile',
cfg.CONF.n1kv.t2_network_profile)
return cls._t2_network_profile_id
def create_hosting_device_resources(self, context, complementary_id,
tenant_id, mgmt_nw_id,
mgmt_sec_grp_id, max_hosted):
mgmt_port = None
t1_n, t1_sn, t2_n, t2_sn, t_p = [], [], [], [], []
if mgmt_nw_id is not None and tenant_id is not None:
# Create port for mgmt interface
p_spec = {'port': {
'tenant_id': tenant_id,
'admin_state_up': True,
'name': 'mgmt',
'network_id': mgmt_nw_id,
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
'n1kv:profile_id': self.mgmt_port_profile_id(),
'device_id': "",
# Use device_owner attribute to ensure we can query for these
# ports even before Nova has set device_id attribute.
'device_owner': complementary_id}}
try:
mgmt_port = self._core_plugin.create_port(context,
p_spec)
# The trunk networks
n_spec = {'network': {'tenant_id': tenant_id,
'admin_state_up': True,
'name': n1kv_const.T1_NETWORK_NAME,
'shared': False}}
# Until Nova allows spinning up VMs with VIFs on
# networks without subnet(s) we create "dummy" subnets
# for the trunk networks
s_spec = {'subnet': {
'tenant_id': tenant_id,
'admin_state_up': True,
'cidr': n1kv_const.SUBNET_PREFIX,
'enable_dhcp': False,
'gateway_ip': attributes.ATTR_NOT_SPECIFIED,
'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
'ip_version': 4,
'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
'host_routes': attributes.ATTR_NOT_SPECIFIED}}
for i in xrange(max_hosted):
# Create T1 trunk network for this router
self._create_resources(
context, "T1", i, n_spec, n1kv_const.T1_NETWORK_NAME,
self.t1_network_profile_id(), t1_n, s_spec,
n1kv_const.T1_SUBNET_NAME, t1_sn, p_spec,
n1kv_const.T1_PORT_NAME, self.t1_port_profile_id(),
t_p)
# Create T2 trunk network for this router
self._create_resources(
context, "T2", i, n_spec, n1kv_const.T2_NETWORK_NAME,
self.t2_network_profile_id(), t2_n, s_spec,
n1kv_const.T2_SUBNET_NAME, t2_sn, p_spec,
n1kv_const.T2_PORT_NAME, self.t2_port_profile_id(),
t_p)
except n_exc.NeutronException as e:
LOG.error(_('Error %s when creating service VM resources. '
'Cleaning up.'), e)
resources = {'ports': t_p, 'networks': t1_n + t2_n,
'subnets': t1_sn + t2_sn}
self.delete_hosting_device_resources(
context, tenant_id, mgmt_port, **resources)
mgmt_port = None
t1_n, t1_sn, t2_n, t2_sn, t_p = [], [], [], [], []
return {'mgmt_port': mgmt_port,
'ports': t_p,
'networks': t1_n + t2_n,
'subnets': t1_sn + t2_sn}
def _create_resources(self, context, type_name, resource_index,
n_spec, net_namebase, net_profile, t_n,
s_spec, subnet_namebase, t_sn,
p_spec, port_namebase, port_profile, t_p):
index = str(resource_index + 1)
# Create trunk network
n_spec['network'].update({'name': net_namebase + index,
'n1kv:profile_id': net_profile})
t_n.append(self._core_plugin.create_network(context, n_spec))
LOG.debug('Created %(t_n)s network with name %(name)s and id %(id)s',
{'t_n': type_name, 'name': n_spec['network']['name'],
'id': t_n[resource_index]['id']})
# Create dummy subnet for the trunk network
s_spec['subnet'].update({'name': subnet_namebase + index,
'network_id': t_n[resource_index]['id']})
t_sn.append(self._core_plugin.create_subnet(context, s_spec))
        # Create port on the trunk network
p_spec['port'].update({'name': port_namebase + index,
'network_id': t_n[resource_index]['id'],
'n1kv:profile_id': port_profile})
t_p.append(self._core_plugin.create_port(context, p_spec))
        LOG.debug('Created %(t_n)s port with name %(name)s, id %(id)s on '
                  'subnet %(subnet)s',
                  {'t_n': type_name, 'name': t_p[-1]['name'],
                   'id': t_p[-1]['id'],
                   'subnet': t_sn[resource_index]['id']})
def get_hosting_device_resources(self, context, id, complementary_id,
tenant_id, mgmt_nw_id):
ports, nets, subnets = [], [], []
mgmt_port = None
# Ports for hosting device may not yet have 'device_id' set to
# Nova assigned uuid of VM instance. However, those ports will still
# have 'device_owner' attribute set to complementary_id. Hence, we
# use both attributes in the query to ensure we find all ports.
query = context.session.query(models_v2.Port)
query = query.filter(expr.or_(
models_v2.Port.device_id == id,
models_v2.Port.device_owner == complementary_id))
for port in query:
if port['network_id'] != mgmt_nw_id:
ports.append(port)
nets.append({'id': port['network_id']})
subnets.append({'id': port['fixed_ips'][0]['subnet_id']})
else:
mgmt_port = port
return {'mgmt_port': mgmt_port,
'ports': ports, 'networks': nets, 'subnets': subnets}
def delete_hosting_device_resources(self, context, tenant_id, mgmt_port,
**kwargs):
attempts = 1
port_ids = set(p['id'] for p in kwargs['ports'])
subnet_ids = set(s['id'] for s in kwargs['subnets'])
net_ids = set(n['id'] for n in kwargs['networks'])
while mgmt_port is not None or port_ids or subnet_ids or net_ids:
if attempts == DELETION_ATTEMPTS:
LOG.warning(_('Aborting resource deletion after %d '
'unsuccessful attempts'), DELETION_ATTEMPTS)
return
else:
if attempts > 1:
eventlet.sleep(SECONDS_BETWEEN_DELETION_ATTEMPTS)
LOG.info(_('Resource deletion attempt %d starting'), attempts)
# Remove anything created.
if mgmt_port is not None:
ml = set([mgmt_port['id']])
self._delete_resources(context, "management port",
self._core_plugin.delete_port,
n_exc.PortNotFound, ml)
if not ml:
mgmt_port = None
self._delete_resources(context, "trunk port",
self._core_plugin.delete_port,
n_exc.PortNotFound, port_ids)
self._delete_resources(context, "subnet",
self._core_plugin.delete_subnet,
n_exc.SubnetNotFound, subnet_ids)
self._delete_resources(context, "trunk network",
self._core_plugin.delete_network,
n_exc.NetworkNotFound, net_ids)
attempts += 1
LOG.info(_('Resource deletion succeeded'))
def _delete_resources(self, context, name, deleter, exception_type,
resource_ids):
for item_id in resource_ids.copy():
try:
deleter(context, item_id)
resource_ids.remove(item_id)
except exception_type:
resource_ids.remove(item_id)
except n_exc.NeutronException as e:
                LOG.error(_('Failed to delete %(resource_name)s %(net_id)s '
                            'for service vm due to %(err)s'),
                          {'resource_name': name, 'net_id': item_id, 'err': e})
def setup_logical_port_connectivity(self, context, port_db):
# Add the VLAN to the VLANs that the hosting port trunks.
self._perform_logical_port_connectivity_action(
context, port_db, 'Adding', n1kv.SEGMENT_ADD)
def teardown_logical_port_connectivity(self, context, port_db):
# Remove the VLAN from the VLANs that the hosting port trunks.
self._perform_logical_port_connectivity_action(
context, port_db, 'Removing', n1kv.SEGMENT_DEL)
def extend_hosting_port_info(self, context, port_db, hosting_info):
hosting_info['segmentation_id'] = port_db.hosting_info.segmentation_id
def allocate_hosting_port(self, context, router_id, port_db, network_type,
hosting_device_id):
allocations = self._get_router_ports_with_hosting_info_qry(
context, router_id).all()
trunk_mappings = {}
if not allocations:
# Router has no ports with hosting port allocated to them yet
# whatsoever, so we select an unused port (that trunks networks
# of correct type) on the hosting device.
id_allocated_port = self._get_unused_service_vm_trunk_port(
context, hosting_device_id, network_type)
else:
# Router has at least one port with hosting port allocated to it.
# If there is only one allocated hosting port then it may be for
# the wrong network type. Iterate to determine the hosting port.
id_allocated_port = None
for item in allocations:
if item.hosting_info['network_type'] == network_type:
# For VXLAN we need to determine used link local tags.
# For VLAN we don't need to but the following lines will
# be performed once anyway since we break out of the
# loop later. That does not matter.
tag = item.hosting_info['segmentation_id']
trunk_mappings[item['network_id']] = tag
id_allocated_port = item.hosting_info['hosting_port_id']
else:
port_twin_id = item.hosting_info['hosting_port_id']
if network_type == 'vlan':
# For a router port belonging to a VLAN network we can
# break here since we now know (or have information to
# determine) hosting_port and the VLAN tag is provided by
# the core plugin.
break
if id_allocated_port is None:
# Router only had hosting port for wrong network
# type allocated yet. So get that port's sibling.
id_allocated_port = self._get_other_port_id_in_pair(
context, port_twin_id, hosting_device_id)
if id_allocated_port is None:
# Database must have been messed up if this happens ...
LOG.debug('n1kv_trunking_driver: Could not allocate hosting port')
return
if network_type == 'vxlan':
            # For VXLAN we choose the (link local) VLAN tag
used_tags = set(trunk_mappings.values())
allocated_vlan = min(sorted(FULL_VLAN_SET - used_tags))
else:
# For VLAN core plugin provides VLAN tag.
trunk_mappings[port_db['network_id']] = None
tags = self._core_plugin.get_networks(
context, {'id': [port_db['network_id']]},
[pr_net.SEGMENTATION_ID])
allocated_vlan = (None if tags == []
else tags[0].get(pr_net.SEGMENTATION_ID))
if allocated_vlan is None:
# Database must have been messed up if this happens ...
LOG.debug('n1kv_trunking_driver: Could not allocate VLAN')
return
return {'allocated_port_id': id_allocated_port,
'allocated_vlan': allocated_vlan}
def _perform_logical_port_connectivity_action(self, context, port_db,
action_str, action):
if (port_db is None or port_db.hosting_info is None or
port_db.hosting_info.hosting_port is None):
return
np_id_t_nw = self._core_plugin.get_network(
context, port_db.hosting_info.hosting_port['network_id'],
[n1kv.PROFILE_ID])
if np_id_t_nw.get(n1kv.PROFILE_ID) == self.t1_network_profile_id():
            # for VXLAN trunked segments, the id ends with ':<link local VLAN tag>'
trunk_spec = (port_db['network_id'] + ':' +
str(port_db.hosting_info.segmentation_id))
else:
trunk_spec = port_db['network_id']
LOG.info(_('Updating trunk: %(action)s VLAN %(tag)d for network_id '
'%(id)s'), {'action': action,
'tag': port_db.hosting_info.segmentation_id,
'id': port_db['network_id']})
#TODO(bobmel): enable statement below when N1kv does not trunk all
if False:
self._core_plugin.update_network(
context, port_db.hosting_info.hosting_port['network_id'],
{'network': {action: trunk_spec}})
def _get_trunk_mappings(self, context, hosting_port_id):
query = context.session.query(l3_models.HostedHostingPortBinding)
query = query.filter(
l3_models.HostedHostingPortBinding.hosting_port_id ==
hosting_port_id)
return dict((hhpb.logical_port['network_id'], hhpb.segmentation_id)
for hhpb in query)
def _get_unused_service_vm_trunk_port(self, context, hd_id, network_type):
name = (n1kv_const.T2_PORT_NAME if network_type == 'vlan'
else n1kv_const.T1_PORT_NAME)
attempts = 0
while True:
# mysql> SELECT * FROM ports WHERE device_id = 'hd_id1' AND
# id NOT IN (SELECT hosting_port_id FROM hostedhostingportbindings)
# AND
# name LIKE '%t1%'
# ORDER BY name;
stmt = context.session.query(
l3_models.HostedHostingPortBinding.hosting_port_id).subquery()
query = context.session.query(models_v2.Port.id)
query = query.filter(
expr.and_(models_v2.Port.device_id == hd_id,
~models_v2.Port.id.in_(stmt),
models_v2.Port.name.like('%' + name + '%')))
query = query.order_by(models_v2.Port.name)
res = query.first()
if res is None:
if attempts >= MAX_HOSTING_PORT_LOOKUP_ATTEMPTS:
# This should not happen ...
LOG.error(_('Hosting port DB inconsistency for '
'hosting device %s'), hd_id)
return
else:
# The service VM may not have plugged its VIF into the
# Neutron Port yet so we wait and make another lookup.
attempts += 1
LOG.info(_('Attempt %(attempt)d to find trunk ports for '
'hosting device %(hd_id)s failed. Trying '
'again in %(time)d seconds.'),
{'attempt': attempts, 'hd_id': hd_id,
'time': SECONDS_BETWEEN_HOSTING_PORT_LOOKSUPS})
eventlet.sleep(SECONDS_BETWEEN_HOSTING_PORT_LOOKSUPS)
else:
break
return res[0]
def _get_router_ports_with_hosting_info_qry(self, context, router_id,
device_owner=None,
hosting_port_id=None):
# Query for a router's ports that have trunking information
query = context.session.query(models_v2.Port)
query = query.join(
l3_models.HostedHostingPortBinding,
models_v2.Port.id ==
l3_models.HostedHostingPortBinding.logical_port_id)
query = query.filter(models_v2.Port.device_id == router_id)
if device_owner is not None:
query = query.filter(models_v2.Port.device_owner == device_owner)
if hosting_port_id is not None:
query = query.filter(
l3_models.HostedHostingPortBinding.hosting_port_id ==
hosting_port_id)
return query
def _get_other_port_id_in_pair(self, context, port_id, hosting_device_id):
query = context.session.query(models_v2.Port)
query = query.filter(models_v2.Port.id == port_id)
try:
port = query.one()
name, index = port['name'].split(':')
name += ':'
if name == n1kv_const.T1_PORT_NAME:
other_port_name = n1kv_const.T2_PORT_NAME + index
else:
other_port_name = n1kv_const.T1_PORT_NAME + index
query = context.session.query(models_v2.Port)
query = query.filter(models_v2.Port.device_id == hosting_device_id,
models_v2.Port.name == other_port_name)
other_port = query.one()
return other_port['id']
except (exc.NoResultFound, exc.MultipleResultsFound):
# This should not happen ...
LOG.error(_('Port trunk pair DB inconsistency for port %s'),
port_id)
return | PypiClean |
/tb-rest-client-3.5.tar.gz/tb-rest-client-3.5/tb_rest_client/models/models_ce/tenant_profile_data.py | # Copyright 2023. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pprint
import re # noqa: F401
import six
class TenantProfileData(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'configuration': 'TenantProfileConfiguration',
'queue_configuration': 'list[TenantProfileQueueConfiguration]'
}
attribute_map = {
'configuration': 'configuration',
'queue_configuration': 'queueConfiguration'
}
def __init__(self, configuration=None, queue_configuration=None): # noqa: E501
"""TenantProfileData - a model defined in Swagger""" # noqa: E501
self._configuration = None
self._queue_configuration = None
self.discriminator = None
if configuration is not None:
self.configuration = configuration
if queue_configuration is not None:
self.queue_configuration = queue_configuration
@property
def configuration(self):
"""Gets the configuration of this TenantProfileData. # noqa: E501
:return: The configuration of this TenantProfileData. # noqa: E501
:rtype: TenantProfileConfiguration
"""
return self._configuration
@configuration.setter
def configuration(self, configuration):
"""Sets the configuration of this TenantProfileData.
:param configuration: The configuration of this TenantProfileData. # noqa: E501
:type: TenantProfileConfiguration
"""
self._configuration = configuration
@property
def queue_configuration(self):
"""Gets the queue_configuration of this TenantProfileData. # noqa: E501
JSON array of queue configuration per tenant profile # noqa: E501
:return: The queue_configuration of this TenantProfileData. # noqa: E501
:rtype: list[TenantProfileQueueConfiguration]
"""
return self._queue_configuration
@queue_configuration.setter
def queue_configuration(self, queue_configuration):
"""Sets the queue_configuration of this TenantProfileData.
JSON array of queue configuration per tenant profile # noqa: E501
:param queue_configuration: The queue_configuration of this TenantProfileData. # noqa: E501
:type: list[TenantProfileQueueConfiguration]
"""
self._queue_configuration = queue_configuration
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TenantProfileData, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TenantProfileData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | PypiClean |
/hfcca-1.10.2.tar.gz/hfcca-1.10.2/lizard_languages/code_reader.py | import re
class CodeStateMachine(object):
''' the state machine '''
def __init__(self, context):
self.context = context
self._state = self._state_global
self.br_count = 0
self.rut_tokens = []
@staticmethod
def read_inside_brackets_then(brs, end_state=None):
def decorator(func):
def read_until_matching_brackets(self, token):
self.br_count += {brs[0]: 1, brs[1]: -1}.get(token, 0)
if self.br_count == 0 and end_state is not None:
self.next(getattr(self, end_state))
if self.br_count == 0 or end_state is not None:
func(self, token)
return read_until_matching_brackets
return decorator
@staticmethod
def read_until_then(tokens):
def decorator(func):
def read_until_then_token(self, token):
if token in tokens:
func(self, token, self.rut_tokens)
self.rut_tokens = []
else:
self.rut_tokens.append(token)
return read_until_then_token
return decorator
def __call__(self, tokens, reader):
self.context = reader.context
for token in tokens:
self._state(token)
yield token
self.eof()
def _state_global(self, token):
pass
def eof(self):
pass
def next(self, state, token=None):
self._state = state
if token is not None:
self._state(token)
class CodeReader(CodeStateMachine):
    ''' CodeReaders are used to parse function structures from code of different
    languages. Each language will need a subclass of CodeReader. '''
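    # Illustrative sketch only (not part of this module): a minimal subclass
    # for a hypothetical language could look like
    #
    #     class HypotheticalReader(CodeReader):
    #         ext = ['hyp']          # consumed by match_filename()
    #         def _state_global(self, token):
    #             ...                # inspect each token, update self.context
    #
    # 'ext' lists the handled file extensions; _state_global() is the initial
    # state of the per-token state machine inherited from CodeStateMachine.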
ext = []
languages = None
extra_subclasses = set()
def __init__(self, context):
super(CodeReader, self).__init__(context)
@classmethod
def match_filename(cls, filename):
def compile_file_extension_re(*exts):
return re.compile(r".*\.(" + r"|".join(exts) + r")$", re.I)
return compile_file_extension_re(*cls.ext).match(filename)
def eof(self):
pass
@staticmethod
def generate_tokens(source_code, addition=''):
def _generate_tokens(source_code, addition):
# DO NOT put any sub groups in the regex. Good for performance
_until_end = r"(?:\\\n|[^\n])*"
combined_symbols = ["||", "&&", "===", "!==", "==", "!=", "<=",
">=",
"++", "--", '+=', '-=',
'*=', '/=', '^=', '&=', '|=', "..."]
token_pattern = re.compile(
r"(?:" +
r"/\*.*?\*/" +
addition +
r"|\w+" +
r"|\"(?:\\.|[^\"\\])*\"" +
r"|\'(?:\\.|[^\'\\])*?\'" +
r"|//" + _until_end +
r"|\#" +
r"|:=|::|\*\*" +
r"|" + r"|".join(re.escape(s) for s in combined_symbols) +
r"|\\\n" +
r"|\n" +
r"|[^\S\n]+" +
r"|.)", re.M | re.S)
macro = ""
for token in token_pattern.findall(source_code):
if macro:
if "\\\n" in token or "\n" not in token:
macro += token
else:
yield macro
yield token
macro = ""
elif token == "#":
macro = token
else:
yield token
if macro:
yield macro
return [t for t in _generate_tokens(source_code, addition)] | PypiClean |
/pygameboycore-0.0.4.tar.gz/pygameboycore-0.0.4/lib/pybind11/docs/advanced/functions.rst | Functions
#########
Before proceeding with this section, make sure that you are already familiar
with the basics of binding functions and classes, as explained in :doc:`/basics`
and :doc:`/classes`. The following guide is applicable to both free and member
functions, i.e. *methods* in Python.
.. _return_value_policies:
Return value policies
=====================
Python and C++ use fundamentally different ways of managing the memory and
lifetime of objects managed by them. This can lead to issues when creating
bindings for functions that return a non-trivial type. Just by looking at the
type information, it is not clear whether Python should take charge of the
returned value and eventually free its resources, or if this is handled on the
C++ side. For this reason, pybind11 provides several *return value policy*
annotations that can be passed to the :func:`module::def` and
:func:`class_::def` functions. The default policy is
:enum:`return_value_policy::automatic`.
Return value policies are tricky, and it's very important to get them right.
Just to illustrate what can go wrong, consider the following simple example:
.. code-block:: cpp
/* Function declaration */
Data *get_data() { return _data; /* (pointer to a static data structure) */ }
...
/* Binding code */
m.def("get_data", &get_data); // <-- KABOOM, will cause crash when called from Python
What's going on here? When ``get_data()`` is called from Python, the return
value (a native C++ type) must be wrapped to turn it into a usable Python type.
In this case, the default return value policy (:enum:`return_value_policy::automatic`)
causes pybind11 to assume ownership of the static ``_data`` instance.
When Python's garbage collector eventually deletes the Python
wrapper, pybind11 will also attempt to delete the C++ instance (via ``operator
delete()``) due to the implied ownership. At this point, the entire application
will come crashing down, though errors could also be more subtle and involve
silent data corruption.
In the above example, the policy :enum:`return_value_policy::reference` should have
been specified so that the global data instance is only *referenced* without any
implied transfer of ownership, i.e.:
.. code-block:: cpp
m.def("get_data", &get_data, return_value_policy::reference);
On the other hand, this is not the right policy for many other situations,
where ignoring ownership could lead to resource leaks.
As a developer using pybind11, it's important to be familiar with the different
return value policies, including which situation calls for which one of them.
The following table provides an overview of available policies:
.. tabularcolumns:: |p{0.5\textwidth}|p{0.45\textwidth}|
+--------------------------------------------------+----------------------------------------------------------------------------+
| Return value policy | Description |
+==================================================+============================================================================+
| :enum:`return_value_policy::take_ownership` | Reference an existing object (i.e. do not create a new copy) and take |
| | ownership. Python will call the destructor and delete operator when the |
| | object's reference count reaches zero. Undefined behavior ensues when the |
| | C++ side does the same, or when the data was not dynamically allocated. |
+--------------------------------------------------+----------------------------------------------------------------------------+
| :enum:`return_value_policy::copy` | Create a new copy of the returned object, which will be owned by Python. |
| | This policy is comparably safe because the lifetimes of the two instances |
| | are decoupled. |
+--------------------------------------------------+----------------------------------------------------------------------------+
| :enum:`return_value_policy::move` | Use ``std::move`` to move the return value contents into a new instance |
| | that will be owned by Python. This policy is comparably safe because the |
| | lifetimes of the two instances (move source and destination) are decoupled.|
+--------------------------------------------------+----------------------------------------------------------------------------+
| :enum:`return_value_policy::reference` | Reference an existing object, but do not take ownership. The C++ side is |
| | responsible for managing the object's lifetime and deallocating it when |
| | it is no longer used. Warning: undefined behavior will ensue when the C++ |
| | side deletes an object that is still referenced and used by Python. |
+--------------------------------------------------+----------------------------------------------------------------------------+
| :enum:`return_value_policy::reference_internal` | Indicates that the lifetime of the return value is tied to the lifetime |
| | of a parent object, namely the implicit ``this``, or ``self`` argument of |
| | the called method or property. Internally, this policy works just like |
| | :enum:`return_value_policy::reference` but additionally applies a |
| | ``keep_alive<0, 1>`` *call policy* (described in the next section) that |
| | prevents the parent object from being garbage collected as long as the |
| | return value is referenced by Python. This is the default policy for |
| | property getters created via ``def_property``, ``def_readwrite``, etc. |
+--------------------------------------------------+----------------------------------------------------------------------------+
| :enum:`return_value_policy::automatic` | **Default policy.** This policy falls back to the policy |
| | :enum:`return_value_policy::take_ownership` when the return value is a |
| | pointer. Otherwise, it uses :enum:`return_value_policy::move` or |
| | :enum:`return_value_policy::copy` for rvalue and lvalue references, |
| | respectively. See above for a description of what all of these different |
| | policies do. |
+--------------------------------------------------+----------------------------------------------------------------------------+
| :enum:`return_value_policy::automatic_reference` | As above, but use policy :enum:`return_value_policy::reference` when the |
| | return value is a pointer. This is the default conversion policy for |
| | function arguments when calling Python functions manually from C++ code |
| | (i.e. via handle::operator()). You probably won't need to use this. |
+--------------------------------------------------+----------------------------------------------------------------------------+
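For example (a sketch with hypothetical ``Parent``/``Child`` types), a method
returning a reference to data owned by its parent object is typically bound
with ``reference_internal``:
.. code-block:: cpp
    /* The Child pointer must not outlive the owning Parent, so tie the
       lifetime of the return value to the implicit 'self' argument. */
    py::class_<Parent>(m, "Parent")
        .def("get_child", &Parent::get_child,
             py::return_value_policy::reference_internal);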
Return value policies can also be applied to properties:
.. code-block:: cpp
class_<MyClass>(m, "MyClass")
.def_property("data", &MyClass::getData, &MyClass::setData,
py::return_value_policy::copy);
Technically, the code above applies the policy to both the getter and the
setter function, however, the setter doesn't really care about *return*
value policies which makes this a convenient terse syntax. Alternatively,
targeted arguments can be passed through the :class:`cpp_function` constructor:
.. code-block:: cpp
class_<MyClass>(m, "MyClass")
.def_property("data"
py::cpp_function(&MyClass::getData, py::return_value_policy::copy),
py::cpp_function(&MyClass::setData)
);
.. warning::
Code with invalid return value policies might access uninitialized memory or
free data structures multiple times, which can lead to hard-to-debug
non-determinism and segmentation faults, hence it is worth spending the
time to understand all the different options in the table above.
.. note::
One important aspect of the above policies is that they only apply to
instances which pybind11 has *not* seen before, in which case the policy
clarifies essential questions about the return value's lifetime and
ownership. When pybind11 knows the instance already (as identified by its
type and address in memory), it will return the existing Python object
wrapper rather than creating a new copy.
.. note::
The next section on :ref:`call_policies` discusses *call policies* that can be
specified *in addition* to a return value policy from the list above. Call
policies indicate reference relationships that can involve both return values
and parameters of functions.
.. note::
As an alternative to elaborate call policies and lifetime management logic,
consider using smart pointers (see the section on :ref:`smart_pointers` for
details). Smart pointers can tell whether an object is still referenced from
C++ or Python, which generally eliminates the kinds of inconsistencies that
can lead to crashes or undefined behavior. For functions returning smart
pointers, it is not necessary to specify a return value policy.
.. _call_policies:
Additional call policies
========================
In addition to the above return value policies, further *call policies* can be
specified to indicate dependencies between parameters or ensure a certain state
for the function call.
Keep alive
----------
In general, this policy is required when the C++ object is any kind of container
and another object is being added to the container. ``keep_alive<Nurse, Patient>``
indicates that the argument with index ``Patient`` should be kept alive at least
until the argument with index ``Nurse`` is freed by the garbage collector. Argument
indices start at one, while zero refers to the return value. For methods, index
``1`` refers to the implicit ``this`` pointer, while regular arguments begin at
index ``2``. Arbitrarily many call policies can be specified. When a ``Nurse``
with value ``None`` is detected at runtime, the call policy does nothing.
When the nurse is not a pybind11-registered type, the implementation internally
relies on the ability to create a *weak reference* to the nurse object. When
the nurse object is not a pybind11-registered type and does not support weak
references, an exception will be thrown.
Consider the following example: here, the binding code for a list append
operation ties the lifetime of the newly added element to the underlying
container:
.. code-block:: cpp
py::class_<List>(m, "List")
.def("append", &List::append, py::keep_alive<1, 2>());
For consistency, the argument indexing is identical for constructors. Index
``1`` still refers to the implicit ``this`` pointer, i.e. the object which is
being constructed. Index ``0`` refers to the return type which is presumed to
be ``void`` when a constructor is viewed like a function. The following example
ties the lifetime of the constructor element to the constructed object:
.. code-block:: cpp
py::class_<Nurse>(m, "Nurse")
.def(py::init<Patient &>(), py::keep_alive<1, 2>());
.. note::
``keep_alive`` is analogous to the ``with_custodian_and_ward`` (if Nurse,
Patient != 0) and ``with_custodian_and_ward_postcall`` (if Nurse/Patient ==
0) policies from Boost.Python.
Call guard
----------
The ``call_guard<T>`` policy allows any scope guard type ``T`` to be placed
around the function call. For example, this definition:
.. code-block:: cpp
m.def("foo", foo, py::call_guard<T>());
is equivalent to the following pseudocode:
.. code-block:: cpp
m.def("foo", [](args...) {
T scope_guard;
return foo(args...); // forwarded arguments
});
The only requirement is that ``T`` is default-constructible, but otherwise any
scope guard will work. This is very useful in combination with `gil_scoped_release`.
See :ref:`gil`.
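For example, releasing the GIL for the duration of a call can be written as a
call guard (``compute_heavy`` below is just a placeholder for your own
function):
.. code-block:: cpp
    /* The GIL is released before compute_heavy() runs and re-acquired
       automatically when the guard is destroyed. */
    m.def("compute_heavy", &compute_heavy,
          py::call_guard<py::gil_scoped_release>());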
Multiple guards can also be specified as ``py::call_guard<T1, T2, T3...>``. The
constructor order is left to right and destruction happens in reverse.
.. seealso::
The file :file:`tests/test_call_policies.cpp` contains a complete example
that demonstrates using `keep_alive` and `call_guard` in more detail.
.. _python_objects_as_args:
Python objects as arguments
===========================
pybind11 exposes all major Python types using thin C++ wrapper classes. These
wrapper classes can also be used as parameters of functions in bindings, which
makes it possible to directly work with native Python types on the C++ side.
For instance, the following statement iterates over a Python ``dict``:
.. code-block:: cpp
void print_dict(py::dict dict) {
/* Easily interact with Python types */
for (auto item : dict)
std::cout << "key=" << std::string(py::str(item.first)) << ", "
<< "value=" << std::string(py::str(item.second)) << std::endl;
}
It can be exported:
.. code-block:: cpp
m.def("print_dict", &print_dict);
And used in Python as usual:
.. code-block:: pycon
>>> print_dict({'foo': 123, 'bar': 'hello'})
key=foo, value=123
key=bar, value=hello
For more information on using Python objects in C++, see :doc:`/advanced/pycpp/index`.
Accepting \*args and \*\*kwargs
===============================
Python provides a useful mechanism to define functions that accept arbitrary
numbers of arguments and keyword arguments:
.. code-block:: python
def generic(*args, **kwargs):
... # do something with args and kwargs
Such functions can also be created using pybind11:
.. code-block:: cpp
void generic(py::args args, py::kwargs kwargs) {
/// .. do something with args
if (kwargs)
/// .. do something with kwargs
}
/// Binding code
m.def("generic", &generic);
The class ``py::args`` derives from ``py::tuple`` and ``py::kwargs`` derives
from ``py::dict``.
You may also use just one or the other, and may combine these with other
arguments as long as the ``py::args`` and ``py::kwargs`` arguments are the last
arguments accepted by the function.
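For example, a binding with one required positional argument followed by
arbitrary extras might look like this (a sketch using a hypothetical lambda):
.. code-block:: cpp
    /* Roughly equivalent to the Python signature: def combined(index, *args, **kwargs) */
    m.def("combined", [](int index, py::args args, py::kwargs kwargs) {
        /// .. use index, then iterate over args and kwargs as needed
    });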
Please refer to the other examples for details on how to iterate over these,
and on how to cast their entries into C++ objects. A demonstration is also
available in ``tests/test_kwargs_and_defaults.cpp``.
.. note::
When combining \*args or \*\*kwargs with :ref:`keyword_args` you should
*not* include ``py::arg`` tags for the ``py::args`` and ``py::kwargs``
arguments.
Default arguments revisited
===========================
The section on :ref:`default_args` previously discussed basic usage of default
arguments using pybind11. One noteworthy aspect of their implementation is that
default arguments are converted to Python objects right at declaration time.
Consider the following example:
.. code-block:: cpp
py::class_<MyClass>("MyClass")
.def("myFunction", py::arg("arg") = SomeType(123));
In this case, pybind11 must already be set up to deal with values of the type
``SomeType`` (via a prior instantiation of ``py::class_<SomeType>``), or an
exception will be thrown.
Another aspect worth highlighting is that the "preview" of the default argument
in the function signature is generated using the object's ``__repr__`` method.
If not available, the signature may not be very helpful, e.g.:
.. code-block:: pycon
FUNCTIONS
...
| myFunction(...)
| Signature : (MyClass, arg : SomeType = <SomeType object at 0x101b7b080>) -> NoneType
...
The first way of addressing this is by defining ``SomeType.__repr__``.
Alternatively, it is possible to specify the human-readable preview of the
default argument manually using the ``arg_v`` notation:
.. code-block:: cpp
py::class_<MyClass>("MyClass")
.def("myFunction", py::arg_v("arg", SomeType(123), "SomeType(123)"));
Sometimes it may be necessary to pass a null pointer value as a default
argument. In this case, remember to cast it to the underlying type in question,
like so:
.. code-block:: cpp
py::class_<MyClass>("MyClass")
.def("myFunction", py::arg("arg") = (SomeType *) nullptr);
.. _nonconverting_arguments:
Non-converting arguments
========================
Certain argument types may support conversion from one type to another. Some
examples of conversions are:
* :ref:`implicit_conversions` declared using ``py::implicitly_convertible<A,B>()``
* Calling a method accepting a double with an integer argument
* Calling a ``std::complex<float>`` argument with a non-complex python type
(for example, with a float). (Requires the optional ``pybind11/complex.h``
header).
* Calling a function taking an Eigen matrix reference with a numpy array of the
wrong type or of an incompatible data layout. (Requires the optional
``pybind11/eigen.h`` header).
This behaviour is sometimes undesirable: the binding code may prefer to raise
an error rather than convert the argument. This behaviour can be obtained
through ``py::arg`` by calling the ``.noconvert()`` method of the ``py::arg``
object, such as:
.. code-block:: cpp
m.def("floats_only", [](double f) { return 0.5 * f; }, py::arg("f").noconvert());
m.def("floats_preferred", [](double f) { return 0.5 * f; }, py::arg("f"));
Attempting to call the second function (the one without ``.noconvert()``) with
an integer will succeed, but attempting to call the ``.noconvert()`` version
will fail with a ``TypeError``:
.. code-block:: pycon
>>> floats_preferred(4)
2.0
>>> floats_only(4)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: floats_only(): incompatible function arguments. The following argument types are supported:
1. (f: float) -> float
Invoked with: 4
You may, of course, combine this with the :var:`_a` shorthand notation (see
:ref:`keyword_args`) and/or :ref:`default_args`. It is also permitted to omit
the argument name by using the ``py::arg()`` constructor without an argument
name, i.e. by specifying ``py::arg().noconvert()``.
.. note::
When specifying ``py::arg`` options it is necessary to provide the same
number of options as the bound function has arguments. Thus if you want to
enable no-convert behaviour for just one of several arguments, you will
need to specify a ``py::arg()`` annotation for each argument with the
no-convert argument modified to ``py::arg().noconvert()``.
.. _none_arguments:
Allowing/Prohibiting None arguments
===================================
When a C++ type registered with :class:`py::class_` is passed as an argument to
a function taking the instance as pointer or shared holder (e.g. ``shared_ptr``
or a custom, copyable holder as described in :ref:`smart_pointers`), pybind
allows ``None`` to be passed from Python which results in calling the C++
function with ``nullptr`` (or an empty holder) for the argument.
To explicitly enable or disable this behaviour, using the
``.none`` method of the :class:`py::arg` object:
.. code-block:: cpp
py::class_<Dog>(m, "Dog").def(py::init<>());
py::class_<Cat>(m, "Cat").def(py::init<>());
m.def("bark", [](Dog *dog) -> std::string {
if (dog) return "woof!"; /* Called with a Dog instance */
else return "(no dog)"; /* Called with None, dog == nullptr */
}, py::arg("dog").none(true));
m.def("meow", [](Cat *cat) -> std::string {
// Can't be called with None argument
return "meow";
}, py::arg("cat").none(false));
With the above, the Python call ``bark(None)`` will return the string ``"(no
dog)"``, while attempting to call ``meow(None)`` will raise a ``TypeError``:
.. code-block:: pycon
>>> from animals import Dog, Cat, bark, meow
>>> bark(Dog())
'woof!'
>>> meow(Cat())
'meow'
>>> bark(None)
'(no dog)'
>>> meow(None)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: meow(): incompatible function arguments. The following argument types are supported:
1. (cat: animals.Cat) -> str
Invoked with: None
The default behaviour when the tag is unspecified is to allow ``None``.
Overload resolution order
=========================
When a function or method with multiple overloads is called from Python,
pybind11 determines which overload to call in two passes. The first pass
attempts to call each overload without allowing argument conversion (as if
every argument had been specified as ``py::arg().noconvert()`` as described
above).
If no overload succeeds in the no-conversion first pass, a second pass is
attempted in which argument conversion is allowed (except where prohibited via
an explicit ``py::arg().noconvert()`` attribute in the function definition).
If the second pass also fails a ``TypeError`` is raised.
Within each pass, overloads are tried in the order they were registered with
pybind11.
What this means in practice is that pybind11 will prefer any overload that does
not require conversion of arguments to an overload that does, but otherwise prefers
earlier-defined overloads to later-defined ones.
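As a small illustration (hypothetical bindings), consider two overloads
registered in this order:
.. code-block:: cpp
    m.def("f", [](int i)    { return "int overload"; });
    m.def("f", [](double d) { return "double overload"; });
Calling ``f(42)`` resolves to the ``int`` overload during the no-conversion
first pass, while ``f(4.5)`` fails to match the ``int`` overload without
conversion and is therefore dispatched to the ``double`` overload.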
.. note::
pybind11 does *not* further prioritize based on the number/pattern of
overloaded arguments. That is, pybind11 does not prioritize a function
requiring one conversion over one requiring three, but only prioritizes
overloads requiring no conversion at all to overloads that require
conversion of at least one argument.
| PypiClean |
/ansible-8.3.0-py3-none-any.whl/ansible_collections/community/vmware/plugins/modules/vcenter_extension.py |
# Copyright: (c) 2018, Michael Tipton <mike () ibeta.org>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: vcenter_extension
short_description: Register/deregister vCenter Extensions
description:
- This module can be used to register/deregister vCenter Extensions.
author:
- Michael Tipton (@castawayegr)
options:
extension_key:
description:
- The extension key of the extension to install or uninstall.
required: true
type: str
version:
description:
- The version of the extension you are installing or uninstalling.
required: true
type: str
name:
description:
- Required for C(state=present). The name of the extension you are installing.
type: str
company:
description:
- Required for C(state=present). The name of the company that makes the extension.
type: str
description:
description:
- Required for C(state=present). A short description of the extension.
type: str
email:
description:
- Required for C(state=present). Administrator email to use for extension.
type: str
url:
description:
- Required for C(state=present). Link to server hosting extension zip file to install.
type: str
ssl_thumbprint:
description:
- Required for C(state=present). SSL thumbprint of the extension hosting server.
type: str
server_type:
description:
- Required for C(state=present). Type of server being used to install the extension (SOAP, REST, HTTP, etc.).
default: vsphere-client-serenity
type: str
client_type:
description:
- Required for C(state=present). Type of client the extension is (win32, .net, linux, etc.).
default: vsphere-client-serenity
type: str
visible:
description:
- Show the extension in solution manager inside vCenter.
default: true
type: bool
state:
description:
- Add or remove vCenter Extension.
choices: [absent, present]
default: present
type: str
extends_documentation_fragment:
- community.vmware.vmware.documentation
'''
EXAMPLES = r'''
- name: Register vCenter Extension
community.vmware.vcenter_extension:
hostname: "{{ groups['vcsa'][0] }}"
username: "{{ vcenter_username }}"
password: "{{ site_password }}"
extension_key: "{{ extension_key }}"
version: "1.0"
company: "Acme"
name: "Acme Extension"
description: "acme management"
email: "user@example.com"
url: "https://10.0.0.1/ACME-vSphere-web-plugin-1.0.zip"
ssl_thumbprint: "{{ ssl_thumbprint }}"
state: present
delegate_to: localhost
register: register_extension
- name: Deregister vCenter Extension
community.vmware.vcenter_extension:
hostname: "{{ groups['vcsa'][0] }}"
username: "{{ vcenter_username }}"
password: "{{ site_password }}"
extension_key: "{{ extension_key }}"
version: "1.0"
state: absent
delegate_to: localhost
register: deregister_extension
'''
RETURN = r'''
result:
description: information about performed operation
returned: always
type: str
sample: "'com.acme.Extension' installed."
'''
try:
from pyVmomi import vim
except ImportError:
pass
import datetime
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.vmware.plugins.module_utils.vmware import connect_to_api, vmware_argument_spec
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(
extension_key=dict(type='str', required=True, no_log=False),
version=dict(type='str', required=True),
email=dict(type='str', required=False),
description=dict(type='str', required=False),
company=dict(type='str', required=False),
name=dict(type='str', required=False),
url=dict(type='str', required=False),
ssl_thumbprint=dict(type='str', required=False),
client_type=dict(type='str', default='vsphere-client-serenity', required=False),
server_type=dict(type='str', default='vsphere-client-serenity', required=False),
        visible=dict(type='bool', default=True, required=False),
state=dict(type='str', default='present', choices=['absent', 'present']),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False,
required_if=[
['state', 'present', ['email', 'description', 'company', 'name', 'url', 'ssl_thumbprint', 'server_type', 'client_type']]
]
)
state = module.params['state']
extension_key = module.params['extension_key']
version = module.params['version']
email = module.params['email']
desc = module.params['description']
name = module.params['name']
company = module.params['company']
client_type = module.params['client_type']
server_type = module.params['server_type']
url = module.params['url']
visible = module.params['visible']
thumbprint = module.params['ssl_thumbprint']
content = connect_to_api(module, False)
em = content.extensionManager
key_check = em.FindExtension(extension_key)
results = dict(changed=False, installed=dict())
if state == 'present' and key_check:
results['changed'] = False
results['installed'] = "'%s' is already installed" % (extension_key)
elif state == 'present' and not key_check:
extension = vim.Extension()
extension.key = extension_key
extension.company = company
extension.version = version
extension.lastHeartbeatTime = datetime.datetime.now()
description = vim.Description()
description.label = name
description.summary = desc
extension.description = description
extension.shownInSolutionManager = visible
client = vim.Extension.ClientInfo()
client.company = company
client.version = version
client.description = description
client.type = client_type
client.url = url
extension.client = [client]
server = vim.Extension.ServerInfo()
server.company = company
server.description = description
server.type = server_type
server.adminEmail = email
server.serverThumbprint = thumbprint
server.url = url
extension.server = [server]
em.RegisterExtension(extension)
results['changed'] = True
results['installed'] = "'%s' installed." % (extension_key)
elif state == 'absent' and key_check:
em.UnregisterExtension(extension_key)
results['changed'] = True
results['installed'] = "'%s' uninstalled." % (extension_key)
elif state == 'absent' and not key_check:
results['changed'] = False
results['installed'] = "'%s' is not installed." % (extension_key)
module.exit_json(**results)
if __name__ == '__main__':
    main()
/school_pubsub-0.7.0.tar.gz/school_pubsub-0.7.0/school_backends.py
from pubnub.pubnub import PubNub, SubscribeListener
from pubnub.pnconfiguration import PNConfiguration
from uuid import uuid4
from pubsub import (
get_secret,
call_mapped_method,
)
import importlib
import os
import time
import uuid
import redis
IS_VERBOSE = os.environ.get('VERBOSE', 'True') == 'True'
CHECKIN_INTERVAL = 60000  # ~1 minute, in milliseconds / 1 ms poll-loop iterations
KEY_EXPIRY = 70 # seconds
class RedisBackend:
"""
* list listening apps
* show recent events and their subscribers' acknowledgments
"""
    def __init__(self, channel, appname, instance_id=None):
self.channel = channel
self.appname = appname
self.instance_id = instance_id
if self.instance_id is None:
self.instance_id = str(uuid.uuid4())
self.redis = redis.StrictRedis(
host=get_secret('PUBSUB_HOST', 'redis'),
password=get_secret('PUBSUB_PASSWORD', None),
port=int(get_secret('PUBSUB_PORT', '6379')),
db=int(get_secret('PUBSUB_INDEX', '0'))
)
def __ack(self, event, event_id):
"""
Inform the publisher that we've received the message
"""
key = 'pubsub.events.actions.{}.{}.received'.format(event, event_id)
self.redis.sadd(key, self.appname)
if IS_VERBOSE:
print('<< - {} received by {}'.format(event, self.appname))
def health_check(self):
"""
Checks that the subscriber has checked in recently
"""
key = 'pubsub.subscribers.alive.{}.{}'.format(
self.appname,
self.get_subscriber_id()
)
alive = self.redis.get(key)
if not alive:
raise ConnectionError('Subscriber has failed to connect to pubsub')
def clean(self):
"""
Clean old subscribers from the registry
"""
pass
def register(self, events):
"""
        Register a subscriber: inform the pubsub of the appname and the
        events we're listening to. `events` is a list of events to which
        this app will listen.
"""
print('Registering: {}'.format(self.appname))
print('------------------------------------')
for event in events:
key = 'pubsub.events.{}.subscribers'.format(event)
            # for event, get subscribed apps
self.redis.sadd(key, self.appname)
# for app, get events
key = 'pubsub.applications.{}.events'.format(self.appname)
self.redis.sadd(key, event)
print(" - {}".format(event))
print('------------------------------------')
def de_register(self, events):
"""
If a subscriber disappears, it must de-register itself
TODO: make sure this is called on ctrl-C
"""
for event in events:
key = 'pubsub.events.{}.subscribers'.format(event)
            # for event, get subscribed apps
self.redis.srem(key, self.appname)
# for app, get events
key = 'pubsub.applications.{}.events'.format(self.appname)
self.redis.srem(key, event)
def check_in(self, events):
"""
A subscriber must check in periodically to let the system know that
it's still there and listening
"""
print('Checking in: ')
key = 'pubsub.subscribers.alive.{}.{}'.format(
self.appname,
self.instance_id
)
self.redis.incrby(key, CHECKIN_INTERVAL)
self.redis.expire(key, KEY_EXPIRY)
self.register(events)
def publish(self, key, payload):
event_id = str(uuid4())
data = {
"key": key,
"id": event_id,
"payload": payload
}
result = self.redis.publish(self.channel, data)
redis_key = 'pubsub.events.actions.{}.{}.published'.format(key,
event_id)
self.redis.sadd(redis_key, self.appname)
if IS_VERBOSE:
print('>> {} -> {}.{}'.format(self.appname, self.channel, key))
return result
@staticmethod
def set_subscriber_id(subscriber_id):
        # overwrite rather than append so the file always holds a single id
        with open("subscriber", "w") as f:
            f.write('{}'.format(subscriber_id))
@staticmethod
def get_subscriber_id():
        with open("subscriber", "r") as f:
            return f.read()
def subscribe(self, function_mapper):
p = self.redis.pubsub()
p.subscribe(self.channel)
events = [key for key, value in function_mapper.items()]
self.check_in(events)
self.set_subscriber_id(self.instance_id)
count = 0
while True:
message = p.get_message()
if message:
event, event_id = call_mapped_method(message, function_mapper)
if event is not None:
self.__ack(event, event_id)
count += 1
if count > CHECKIN_INTERVAL:
                self.check_in(events)
count = 0
time.sleep(0.001) # be nice to the system :)
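# A minimal usage sketch (assumption: an importable application module named
# `handlers` exposing `user_created(message)`; the channel, app and event
# names below are hypothetical). It mirrors how call_mapped_method dispatches
# events by key; it is illustrative only and not part of the library.
def _redis_backend_example():  # pragma: no cover - illustrative sketch
    backend = RedisBackend(channel='school', appname='registrations')
    # publisher side: emit an event with an arbitrary payload
    backend.publish('user.created', {'user_id': 42})
    # subscriber side: block forever, dispatching each recognised event key
    # to handlers.user_created(message)
    function_mapper = {
        'user.created': {'module': 'handlers', 'method': 'user_created'},
    }
    backend.subscribe(function_mapper)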
class PubNubBackend:
"""
Usage:
**Subscribe**
```
    pubsub = PubNubBackend(channel)
    pubsub.listen(function_mapper)
```
Requires environment variables:
* PUBNUB_PUBLISH_KEY
* PUBNUB_SUBSCRIBE_KEY
"""
def __init__(self, channel):
publish_key = get_secret('PUBNUB_PUBLISH_KEY', None)
subscribe_key = get_secret('PUBNUB_SUBSCRIBE_KEY', None)
if None in [subscribe_key, publish_key]:
            msg = ('Please make sure you\'ve set environment variables: '
'PUBNUB_PUBLISH_KEY and PUBNUB_SUBSCRIBE_KEY')
raise Exception(msg)
pnconfig = PNConfiguration()
pnconfig.subscribe_key = subscribe_key
pnconfig.publish_key = publish_key
pnconfig.ssl = False
self.channel = channel
self.pubnub = PubNub(pnconfig)
def publish(self, key, payload):
def publish_callback(result, status):
if result:
print(result)
if status.error is not None:
raise Exception('PubSub publish error: %s: %s' %
(status.error, status.error_data))
data = {
"key": key,
"payload": payload
}
        # `async` became a reserved word in Python 3.7; newer PubNub SDKs
        # expose the same call as `pn_async`.
        self.pubnub.publish() \
            .channel(self.channel) \
            .message(data) \
            .pn_async(publish_callback)
def listen(self, function_mapper):
"""
Implements a multicast pub/sub. It is the responsibility of the
subscriber determine if it needs to perform any actions based on
the message key
functionmapper is a dict that maps payload keys to methods to call
Methods will receive the payload as the first argument.
e.g.:
```
function_mapper = {
'test': {
'module': 'config',
'method': 'foo'
}
}
```
"""
my_listener = SubscribeListener()
self.pubnub.add_listener(my_listener)
self.pubnub.subscribe().channels(self.channel).execute()
# self.pubnub.add_channel_to_channel_group()\
# .channel_group("test")\
# .channels(channels)\
# .sync()
my_listener.wait_for_connect()
print('connected')
while True:
result = my_listener.wait_for_message_on(self.channel)
print(result.message)
event_key = result.message.get('key')
task_definition = function_mapper.get(event_key, None)
print('key: %s' % event_key)
print('task definition: %s' % task_definition)
if task_definition is not None:
mod = importlib.import_module(task_definition.get('module'))
method = task_definition.get('method')
                getattr(mod, method)(result.message)
/pulumi_nutanix-0.0.42.tar.gz/pulumi_nutanix-0.0.42/pulumi_nutanix/floating_ip.py
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['FloatingIpArgs', 'FloatingIp']
@pulumi.input_type
class FloatingIpArgs:
def __init__(__self__, *,
api_version: Optional[pulumi.Input[str]] = None,
external_subnet_reference_name: Optional[pulumi.Input[str]] = None,
external_subnet_reference_uuid: Optional[pulumi.Input[str]] = None,
private_ip: Optional[pulumi.Input[str]] = None,
vm_nic_reference_uuid: Optional[pulumi.Input[str]] = None,
vpc_reference_name: Optional[pulumi.Input[str]] = None,
vpc_reference_uuid: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a FloatingIp resource.
:param pulumi.Input[str] api_version: The version of the API.
:param pulumi.Input[str] external_subnet_reference_name: The reference to a subnet. Should not be used with
{external_subnet_reference_uuid} .
:param pulumi.Input[str] external_subnet_reference_uuid: The reference to a subnet. Should not be used with {external_subnet_reference_name} .
:param pulumi.Input[str] private_ip: Private IP with which floating IP is associated. Should be used with vpc_reference .
:param pulumi.Input[str] vm_nic_reference_uuid: The reference to a vm_nic .
:param pulumi.Input[str] vpc_reference_name: The reference to a vpc. Should not be used with {vpc_reference_uuid}.
:param pulumi.Input[str] vpc_reference_uuid: The reference to a vpc. Should not be used with {vpc_reference_name}.
"""
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
if external_subnet_reference_name is not None:
pulumi.set(__self__, "external_subnet_reference_name", external_subnet_reference_name)
if external_subnet_reference_uuid is not None:
pulumi.set(__self__, "external_subnet_reference_uuid", external_subnet_reference_uuid)
if private_ip is not None:
pulumi.set(__self__, "private_ip", private_ip)
if vm_nic_reference_uuid is not None:
pulumi.set(__self__, "vm_nic_reference_uuid", vm_nic_reference_uuid)
if vpc_reference_name is not None:
pulumi.set(__self__, "vpc_reference_name", vpc_reference_name)
if vpc_reference_uuid is not None:
pulumi.set(__self__, "vpc_reference_uuid", vpc_reference_uuid)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
The version of the API.
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter(name="externalSubnetReferenceName")
def external_subnet_reference_name(self) -> Optional[pulumi.Input[str]]:
"""
The reference to a subnet. Should not be used with
{external_subnet_reference_uuid} .
"""
return pulumi.get(self, "external_subnet_reference_name")
@external_subnet_reference_name.setter
def external_subnet_reference_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "external_subnet_reference_name", value)
@property
@pulumi.getter(name="externalSubnetReferenceUuid")
def external_subnet_reference_uuid(self) -> Optional[pulumi.Input[str]]:
"""
The reference to a subnet. Should not be used with {external_subnet_reference_name} .
"""
return pulumi.get(self, "external_subnet_reference_uuid")
@external_subnet_reference_uuid.setter
def external_subnet_reference_uuid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "external_subnet_reference_uuid", value)
@property
@pulumi.getter(name="privateIp")
def private_ip(self) -> Optional[pulumi.Input[str]]:
"""
Private IP with which floating IP is associated. Should be used with vpc_reference .
"""
return pulumi.get(self, "private_ip")
@private_ip.setter
def private_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_ip", value)
@property
@pulumi.getter(name="vmNicReferenceUuid")
def vm_nic_reference_uuid(self) -> Optional[pulumi.Input[str]]:
"""
The reference to a vm_nic .
"""
return pulumi.get(self, "vm_nic_reference_uuid")
@vm_nic_reference_uuid.setter
def vm_nic_reference_uuid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vm_nic_reference_uuid", value)
@property
@pulumi.getter(name="vpcReferenceName")
def vpc_reference_name(self) -> Optional[pulumi.Input[str]]:
"""
The reference to a vpc. Should not be used with {vpc_reference_uuid}.
"""
return pulumi.get(self, "vpc_reference_name")
@vpc_reference_name.setter
def vpc_reference_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vpc_reference_name", value)
@property
@pulumi.getter(name="vpcReferenceUuid")
def vpc_reference_uuid(self) -> Optional[pulumi.Input[str]]:
"""
The reference to a vpc. Should not be used with {vpc_reference_name}.
"""
return pulumi.get(self, "vpc_reference_uuid")
@vpc_reference_uuid.setter
def vpc_reference_uuid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vpc_reference_uuid", value)
@pulumi.input_type
class _FloatingIpState:
def __init__(__self__, *,
api_version: Optional[pulumi.Input[str]] = None,
external_subnet_reference_name: Optional[pulumi.Input[str]] = None,
external_subnet_reference_uuid: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
private_ip: Optional[pulumi.Input[str]] = None,
vm_nic_reference_uuid: Optional[pulumi.Input[str]] = None,
vpc_reference_name: Optional[pulumi.Input[str]] = None,
vpc_reference_uuid: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering FloatingIp resources.
:param pulumi.Input[str] api_version: The version of the API.
:param pulumi.Input[str] external_subnet_reference_name: The reference to a subnet. Should not be used with
{external_subnet_reference_uuid} .
:param pulumi.Input[str] external_subnet_reference_uuid: The reference to a subnet. Should not be used with {external_subnet_reference_name} .
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: The floating_ips kind metadata.
:param pulumi.Input[str] private_ip: Private IP with which floating IP is associated. Should be used with vpc_reference .
:param pulumi.Input[str] vm_nic_reference_uuid: The reference to a vm_nic .
:param pulumi.Input[str] vpc_reference_name: The reference to a vpc. Should not be used with {vpc_reference_uuid}.
:param pulumi.Input[str] vpc_reference_uuid: The reference to a vpc. Should not be used with {vpc_reference_name}.
"""
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
if external_subnet_reference_name is not None:
pulumi.set(__self__, "external_subnet_reference_name", external_subnet_reference_name)
if external_subnet_reference_uuid is not None:
pulumi.set(__self__, "external_subnet_reference_uuid", external_subnet_reference_uuid)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if private_ip is not None:
pulumi.set(__self__, "private_ip", private_ip)
if vm_nic_reference_uuid is not None:
pulumi.set(__self__, "vm_nic_reference_uuid", vm_nic_reference_uuid)
if vpc_reference_name is not None:
pulumi.set(__self__, "vpc_reference_name", vpc_reference_name)
if vpc_reference_uuid is not None:
pulumi.set(__self__, "vpc_reference_uuid", vpc_reference_uuid)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
The version of the API.
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter(name="externalSubnetReferenceName")
def external_subnet_reference_name(self) -> Optional[pulumi.Input[str]]:
"""
The reference to a subnet. Should not be used with
{external_subnet_reference_uuid} .
"""
return pulumi.get(self, "external_subnet_reference_name")
@external_subnet_reference_name.setter
def external_subnet_reference_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "external_subnet_reference_name", value)
@property
@pulumi.getter(name="externalSubnetReferenceUuid")
def external_subnet_reference_uuid(self) -> Optional[pulumi.Input[str]]:
"""
The reference to a subnet. Should not be used with {external_subnet_reference_name} .
"""
return pulumi.get(self, "external_subnet_reference_uuid")
@external_subnet_reference_uuid.setter
def external_subnet_reference_uuid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "external_subnet_reference_uuid", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The floating_ips kind metadata.
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter(name="privateIp")
def private_ip(self) -> Optional[pulumi.Input[str]]:
"""
Private IP with which floating IP is associated. Should be used with vpc_reference .
"""
return pulumi.get(self, "private_ip")
@private_ip.setter
def private_ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_ip", value)
@property
@pulumi.getter(name="vmNicReferenceUuid")
def vm_nic_reference_uuid(self) -> Optional[pulumi.Input[str]]:
"""
The reference to a vm_nic .
"""
return pulumi.get(self, "vm_nic_reference_uuid")
@vm_nic_reference_uuid.setter
def vm_nic_reference_uuid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vm_nic_reference_uuid", value)
@property
@pulumi.getter(name="vpcReferenceName")
def vpc_reference_name(self) -> Optional[pulumi.Input[str]]:
"""
The reference to a vpc. Should not be used with {vpc_reference_uuid}.
"""
return pulumi.get(self, "vpc_reference_name")
@vpc_reference_name.setter
def vpc_reference_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vpc_reference_name", value)
@property
@pulumi.getter(name="vpcReferenceUuid")
def vpc_reference_uuid(self) -> Optional[pulumi.Input[str]]:
"""
The reference to a vpc. Should not be used with {vpc_reference_name}.
"""
return pulumi.get(self, "vpc_reference_uuid")
@vpc_reference_uuid.setter
def vpc_reference_uuid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vpc_reference_uuid", value)
class FloatingIp(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_version: Optional[pulumi.Input[str]] = None,
external_subnet_reference_name: Optional[pulumi.Input[str]] = None,
external_subnet_reference_uuid: Optional[pulumi.Input[str]] = None,
private_ip: Optional[pulumi.Input[str]] = None,
vm_nic_reference_uuid: Optional[pulumi.Input[str]] = None,
vpc_reference_name: Optional[pulumi.Input[str]] = None,
vpc_reference_uuid: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides Nutanix resource to create Floating IPs.
## create Floating IP with External Subnet UUID
```python
import pulumi
import pulumi_nutanix as nutanix
fip1 = nutanix.FloatingIp("fip1", external_subnet_reference_uuid="{{ext_sub_uuid}}")
```
## create Floating IP with vpc name with external subnet name
```python
import pulumi
import pulumi_nutanix as nutanix
fip2 = nutanix.FloatingIp("fip2",
external_subnet_reference_name="{{ext_sub_name}}",
private_ip="{{ip_address}}",
vpc_reference_name="{{vpc_name}}")
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_version: The version of the API.
:param pulumi.Input[str] external_subnet_reference_name: The reference to a subnet. Should not be used with
{external_subnet_reference_uuid} .
:param pulumi.Input[str] external_subnet_reference_uuid: The reference to a subnet. Should not be used with {external_subnet_reference_name} .
:param pulumi.Input[str] private_ip: Private IP with which floating IP is associated. Should be used with vpc_reference .
:param pulumi.Input[str] vm_nic_reference_uuid: The reference to a vm_nic .
:param pulumi.Input[str] vpc_reference_name: The reference to a vpc. Should not be used with {vpc_reference_uuid}.
:param pulumi.Input[str] vpc_reference_uuid: The reference to a vpc. Should not be used with {vpc_reference_name}.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[FloatingIpArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides Nutanix resource to create Floating IPs.
## create Floating IP with External Subnet UUID
```python
import pulumi
import pulumi_nutanix as nutanix
fip1 = nutanix.FloatingIp("fip1", external_subnet_reference_uuid="{{ext_sub_uuid}}")
```
## create Floating IP with vpc name with external subnet name
```python
import pulumi
import pulumi_nutanix as nutanix
fip2 = nutanix.FloatingIp("fip2",
external_subnet_reference_name="{{ext_sub_name}}",
private_ip="{{ip_address}}",
vpc_reference_name="{{vpc_name}}")
```
:param str resource_name: The name of the resource.
:param FloatingIpArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FloatingIpArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_version: Optional[pulumi.Input[str]] = None,
external_subnet_reference_name: Optional[pulumi.Input[str]] = None,
external_subnet_reference_uuid: Optional[pulumi.Input[str]] = None,
private_ip: Optional[pulumi.Input[str]] = None,
vm_nic_reference_uuid: Optional[pulumi.Input[str]] = None,
vpc_reference_name: Optional[pulumi.Input[str]] = None,
vpc_reference_uuid: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.plugin_download_url is None:
opts.plugin_download_url = _utilities.get_plugin_download_url()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FloatingIpArgs.__new__(FloatingIpArgs)
__props__.__dict__["api_version"] = api_version
__props__.__dict__["external_subnet_reference_name"] = external_subnet_reference_name
__props__.__dict__["external_subnet_reference_uuid"] = external_subnet_reference_uuid
__props__.__dict__["private_ip"] = private_ip
__props__.__dict__["vm_nic_reference_uuid"] = vm_nic_reference_uuid
__props__.__dict__["vpc_reference_name"] = vpc_reference_name
__props__.__dict__["vpc_reference_uuid"] = vpc_reference_uuid
__props__.__dict__["metadata"] = None
super(FloatingIp, __self__).__init__(
'nutanix:index/floatingIp:FloatingIp',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
api_version: Optional[pulumi.Input[str]] = None,
external_subnet_reference_name: Optional[pulumi.Input[str]] = None,
external_subnet_reference_uuid: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
private_ip: Optional[pulumi.Input[str]] = None,
vm_nic_reference_uuid: Optional[pulumi.Input[str]] = None,
vpc_reference_name: Optional[pulumi.Input[str]] = None,
vpc_reference_uuid: Optional[pulumi.Input[str]] = None) -> 'FloatingIp':
"""
Get an existing FloatingIp resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_version: The version of the API.
:param pulumi.Input[str] external_subnet_reference_name: The reference to a subnet. Should not be used with
{external_subnet_reference_uuid} .
:param pulumi.Input[str] external_subnet_reference_uuid: The reference to a subnet. Should not be used with {external_subnet_reference_name} .
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: The floating_ips kind metadata.
:param pulumi.Input[str] private_ip: Private IP with which floating IP is associated. Should be used with vpc_reference .
:param pulumi.Input[str] vm_nic_reference_uuid: The reference to a vm_nic .
:param pulumi.Input[str] vpc_reference_name: The reference to a vpc. Should not be used with {vpc_reference_uuid}.
:param pulumi.Input[str] vpc_reference_uuid: The reference to a vpc. Should not be used with {vpc_reference_name}.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _FloatingIpState.__new__(_FloatingIpState)
__props__.__dict__["api_version"] = api_version
__props__.__dict__["external_subnet_reference_name"] = external_subnet_reference_name
__props__.__dict__["external_subnet_reference_uuid"] = external_subnet_reference_uuid
__props__.__dict__["metadata"] = metadata
__props__.__dict__["private_ip"] = private_ip
__props__.__dict__["vm_nic_reference_uuid"] = vm_nic_reference_uuid
__props__.__dict__["vpc_reference_name"] = vpc_reference_name
__props__.__dict__["vpc_reference_uuid"] = vpc_reference_uuid
return FloatingIp(resource_name, opts=opts, __props__=__props__)
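    # Usage sketch (hypothetical UUID value): adopt an already existing floating
    # IP into the Pulumi state so it can be managed going forward, e.g.
    #
    #   imported = FloatingIp.get("imported-fip", id="{{floating_ip_uuid}}")
    #
    # Its outputs (private_ip, vpc_reference_uuid, ...) then resolve from the
    # provider just like those of a newly created resource.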
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> pulumi.Output[str]:
"""
The version of the API.
"""
return pulumi.get(self, "api_version")
@property
@pulumi.getter(name="externalSubnetReferenceName")
def external_subnet_reference_name(self) -> pulumi.Output[Optional[str]]:
"""
The reference to a subnet. Should not be used with
{external_subnet_reference_uuid} .
"""
return pulumi.get(self, "external_subnet_reference_name")
@property
@pulumi.getter(name="externalSubnetReferenceUuid")
def external_subnet_reference_uuid(self) -> pulumi.Output[str]:
"""
The reference to a subnet. Should not be used with {external_subnet_reference_name} .
"""
return pulumi.get(self, "external_subnet_reference_uuid")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[Mapping[str, str]]:
"""
The floating_ips kind metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter(name="privateIp")
def private_ip(self) -> pulumi.Output[Optional[str]]:
"""
Private IP with which floating IP is associated. Should be used with vpc_reference .
"""
return pulumi.get(self, "private_ip")
@property
@pulumi.getter(name="vmNicReferenceUuid")
def vm_nic_reference_uuid(self) -> pulumi.Output[Optional[str]]:
"""
The reference to a vm_nic .
"""
return pulumi.get(self, "vm_nic_reference_uuid")
@property
@pulumi.getter(name="vpcReferenceName")
def vpc_reference_name(self) -> pulumi.Output[Optional[str]]:
"""
The reference to a vpc. Should not be used with {vpc_reference_uuid}.
"""
return pulumi.get(self, "vpc_reference_name")
@property
@pulumi.getter(name="vpcReferenceUuid")
def vpc_reference_uuid(self) -> pulumi.Output[str]:
"""
The reference to a vpc. Should not be used with {vpc_reference_name}.
"""
        return pulumi.get(self, "vpc_reference_uuid")
/django-cms-search-0.6.3.tar.gz/django-cms-search-0.6.3/docs/_build/html/_static/jquery.js | (function(A,w){function ma(){if(!c.isReady){try{s.documentElement.doScroll("left")}catch(a){setTimeout(ma,1);return}c.ready()}}function Qa(a,b){b.src?c.ajax({url:b.src,async:false,dataType:"script"}):c.globalEval(b.text||b.textContent||b.innerHTML||"");b.parentNode&&b.parentNode.removeChild(b)}function X(a,b,d,f,e,j){var i=a.length;if(typeof b==="object"){for(var o in b)X(a,o,b[o],f,e,d);return a}if(d!==w){f=!j&&f&&c.isFunction(d);for(o=0;o<i;o++)e(a[o],b,f?d.call(a[o],o,e(a[o],b)):d,j);return a}return i?
e(a[0],b):w}function J(){return(new Date).getTime()}function Y(){return false}function Z(){return true}function na(a,b,d){d[0].type=a;return c.event.handle.apply(b,d)}function oa(a){var b,d=[],f=[],e=arguments,j,i,o,k,n,r;i=c.data(this,"events");if(!(a.liveFired===this||!i||!i.live||a.button&&a.type==="click")){a.liveFired=this;var u=i.live.slice(0);for(k=0;k<u.length;k++){i=u[k];i.origType.replace(O,"")===a.type?f.push(i.selector):u.splice(k--,1)}j=c(a.target).closest(f,a.currentTarget);n=0;for(r=
j.length;n<r;n++)for(k=0;k<u.length;k++){i=u[k];if(j[n].selector===i.selector){o=j[n].elem;f=null;if(i.preType==="mouseenter"||i.preType==="mouseleave")f=c(a.relatedTarget).closest(i.selector)[0];if(!f||f!==o)d.push({elem:o,handleObj:i})}}n=0;for(r=d.length;n<r;n++){j=d[n];a.currentTarget=j.elem;a.data=j.handleObj.data;a.handleObj=j.handleObj;if(j.handleObj.origHandler.apply(j.elem,e)===false){b=false;break}}return b}}function pa(a,b){return"live."+(a&&a!=="*"?a+".":"")+b.replace(/\./g,"`").replace(/ /g,
"&")}function qa(a){return!a||!a.parentNode||a.parentNode.nodeType===11}function ra(a,b){var d=0;b.each(function(){if(this.nodeName===(a[d]&&a[d].nodeName)){var f=c.data(a[d++]),e=c.data(this,f);if(f=f&&f.events){delete e.handle;e.events={};for(var j in f)for(var i in f[j])c.event.add(this,j,f[j][i],f[j][i].data)}}})}function sa(a,b,d){var f,e,j;b=b&&b[0]?b[0].ownerDocument||b[0]:s;if(a.length===1&&typeof a[0]==="string"&&a[0].length<512&&b===s&&!ta.test(a[0])&&(c.support.checkClone||!ua.test(a[0]))){e=
true;if(j=c.fragments[a[0]])if(j!==1)f=j}if(!f){f=b.createDocumentFragment();c.clean(a,b,f,d)}if(e)c.fragments[a[0]]=j?f:1;return{fragment:f,cacheable:e}}function K(a,b){var d={};c.each(va.concat.apply([],va.slice(0,b)),function(){d[this]=a});return d}function wa(a){return"scrollTo"in a&&a.document?a:a.nodeType===9?a.defaultView||a.parentWindow:false}var c=function(a,b){return new c.fn.init(a,b)},Ra=A.jQuery,Sa=A.$,s=A.document,T,Ta=/^[^<]*(<[\w\W]+>)[^>]*$|^#([\w-]+)$/,Ua=/^.[^:#\[\.,]*$/,Va=/\S/,
Wa=/^(\s|\u00A0)+|(\s|\u00A0)+$/g,Xa=/^<(\w+)\s*\/?>(?:<\/\1>)?$/,P=navigator.userAgent,xa=false,Q=[],L,$=Object.prototype.toString,aa=Object.prototype.hasOwnProperty,ba=Array.prototype.push,R=Array.prototype.slice,ya=Array.prototype.indexOf;c.fn=c.prototype={init:function(a,b){var d,f;if(!a)return this;if(a.nodeType){this.context=this[0]=a;this.length=1;return this}if(a==="body"&&!b){this.context=s;this[0]=s.body;this.selector="body";this.length=1;return this}if(typeof a==="string")if((d=Ta.exec(a))&&
(d[1]||!b))if(d[1]){f=b?b.ownerDocument||b:s;if(a=Xa.exec(a))if(c.isPlainObject(b)){a=[s.createElement(a[1])];c.fn.attr.call(a,b,true)}else a=[f.createElement(a[1])];else{a=sa([d[1]],[f]);a=(a.cacheable?a.fragment.cloneNode(true):a.fragment).childNodes}return c.merge(this,a)}else{if(b=s.getElementById(d[2])){if(b.id!==d[2])return T.find(a);this.length=1;this[0]=b}this.context=s;this.selector=a;return this}else if(!b&&/^\w+$/.test(a)){this.selector=a;this.context=s;a=s.getElementsByTagName(a);return c.merge(this,
a)}else return!b||b.jquery?(b||T).find(a):c(b).find(a);else if(c.isFunction(a))return T.ready(a);if(a.selector!==w){this.selector=a.selector;this.context=a.context}return c.makeArray(a,this)},selector:"",jquery:"1.4.2",length:0,size:function(){return this.length},toArray:function(){return R.call(this,0)},get:function(a){return a==null?this.toArray():a<0?this.slice(a)[0]:this[a]},pushStack:function(a,b,d){var f=c();c.isArray(a)?ba.apply(f,a):c.merge(f,a);f.prevObject=this;f.context=this.context;if(b===
"find")f.selector=this.selector+(this.selector?" ":"")+d;else if(b)f.selector=this.selector+"."+b+"("+d+")";return f},each:function(a,b){return c.each(this,a,b)},ready:function(a){c.bindReady();if(c.isReady)a.call(s,c);else Q&&Q.push(a);return this},eq:function(a){return a===-1?this.slice(a):this.slice(a,+a+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(R.apply(this,arguments),"slice",R.call(arguments).join(","))},map:function(a){return this.pushStack(c.map(this,
function(b,d){return a.call(b,d,b)}))},end:function(){return this.prevObject||c(null)},push:ba,sort:[].sort,splice:[].splice};c.fn.init.prototype=c.fn;c.extend=c.fn.extend=function(){var a=arguments[0]||{},b=1,d=arguments.length,f=false,e,j,i,o;if(typeof a==="boolean"){f=a;a=arguments[1]||{};b=2}if(typeof a!=="object"&&!c.isFunction(a))a={};if(d===b){a=this;--b}for(;b<d;b++)if((e=arguments[b])!=null)for(j in e){i=a[j];o=e[j];if(a!==o)if(f&&o&&(c.isPlainObject(o)||c.isArray(o))){i=i&&(c.isPlainObject(i)||
c.isArray(i))?i:c.isArray(o)?[]:{};a[j]=c.extend(f,i,o)}else if(o!==w)a[j]=o}return a};c.extend({noConflict:function(a){A.$=Sa;if(a)A.jQuery=Ra;return c},isReady:false,ready:function(){if(!c.isReady){if(!s.body)return setTimeout(c.ready,13);c.isReady=true;if(Q){for(var a,b=0;a=Q[b++];)a.call(s,c);Q=null}c.fn.triggerHandler&&c(s).triggerHandler("ready")}},bindReady:function(){if(!xa){xa=true;if(s.readyState==="complete")return c.ready();if(s.addEventListener){s.addEventListener("DOMContentLoaded",
L,false);A.addEventListener("load",c.ready,false)}else if(s.attachEvent){s.attachEvent("onreadystatechange",L);A.attachEvent("onload",c.ready);var a=false;try{a=A.frameElement==null}catch(b){}s.documentElement.doScroll&&a&&ma()}}},isFunction:function(a){return $.call(a)==="[object Function]"},isArray:function(a){return $.call(a)==="[object Array]"},isPlainObject:function(a){if(!a||$.call(a)!=="[object Object]"||a.nodeType||a.setInterval)return false;if(a.constructor&&!aa.call(a,"constructor")&&!aa.call(a.constructor.prototype,
"isPrototypeOf"))return false;var b;for(b in a);return b===w||aa.call(a,b)},isEmptyObject:function(a){for(var b in a)return false;return true},error:function(a){throw a;},parseJSON:function(a){if(typeof a!=="string"||!a)return null;a=c.trim(a);if(/^[\],:{}\s]*$/.test(a.replace(/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,"@").replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,"]").replace(/(?:^|:|,)(?:\s*\[)+/g,"")))return A.JSON&&A.JSON.parse?A.JSON.parse(a):(new Function("return "+
a))();else c.error("Invalid JSON: "+a)},noop:function(){},globalEval:function(a){if(a&&Va.test(a)){var b=s.getElementsByTagName("head")[0]||s.documentElement,d=s.createElement("script");d.type="text/javascript";if(c.support.scriptEval)d.appendChild(s.createTextNode(a));else d.text=a;b.insertBefore(d,b.firstChild);b.removeChild(d)}},nodeName:function(a,b){return a.nodeName&&a.nodeName.toUpperCase()===b.toUpperCase()},each:function(a,b,d){var f,e=0,j=a.length,i=j===w||c.isFunction(a);if(d)if(i)for(f in a){if(b.apply(a[f],
d)===false)break}else for(;e<j;){if(b.apply(a[e++],d)===false)break}else if(i)for(f in a){if(b.call(a[f],f,a[f])===false)break}else for(d=a[0];e<j&&b.call(d,e,d)!==false;d=a[++e]);return a},trim:function(a){return(a||"").replace(Wa,"")},makeArray:function(a,b){b=b||[];if(a!=null)a.length==null||typeof a==="string"||c.isFunction(a)||typeof a!=="function"&&a.setInterval?ba.call(b,a):c.merge(b,a);return b},inArray:function(a,b){if(b.indexOf)return b.indexOf(a);for(var d=0,f=b.length;d<f;d++)if(b[d]===
a)return d;return-1},merge:function(a,b){var d=a.length,f=0;if(typeof b.length==="number")for(var e=b.length;f<e;f++)a[d++]=b[f];else for(;b[f]!==w;)a[d++]=b[f++];a.length=d;return a},grep:function(a,b,d){for(var f=[],e=0,j=a.length;e<j;e++)!d!==!b(a[e],e)&&f.push(a[e]);return f},map:function(a,b,d){for(var f=[],e,j=0,i=a.length;j<i;j++){e=b(a[j],j,d);if(e!=null)f[f.length]=e}return f.concat.apply([],f)},guid:1,proxy:function(a,b,d){if(arguments.length===2)if(typeof b==="string"){d=a;a=d[b];b=w}else if(b&&
!c.isFunction(b)){d=b;b=w}if(!b&&a)b=function(){return a.apply(d||this,arguments)};if(a)b.guid=a.guid=a.guid||b.guid||c.guid++;return b},uaMatch:function(a){a=a.toLowerCase();a=/(webkit)[ \/]([\w.]+)/.exec(a)||/(opera)(?:.*version)?[ \/]([\w.]+)/.exec(a)||/(msie) ([\w.]+)/.exec(a)||!/compatible/.test(a)&&/(mozilla)(?:.*? rv:([\w.]+))?/.exec(a)||[];return{browser:a[1]||"",version:a[2]||"0"}},browser:{}});P=c.uaMatch(P);if(P.browser){c.browser[P.browser]=true;c.browser.version=P.version}if(c.browser.webkit)c.browser.safari=
true;if(ya)c.inArray=function(a,b){return ya.call(b,a)};T=c(s);if(s.addEventListener)L=function(){s.removeEventListener("DOMContentLoaded",L,false);c.ready()};else if(s.attachEvent)L=function(){if(s.readyState==="complete"){s.detachEvent("onreadystatechange",L);c.ready()}};(function(){c.support={};var a=s.documentElement,b=s.createElement("script"),d=s.createElement("div"),f="script"+J();d.style.display="none";d.innerHTML=" <link/><table></table><a href='/a' style='color:red;float:left;opacity:.55;'>a</a><input type='checkbox'/>";
var e=d.getElementsByTagName("*"),j=d.getElementsByTagName("a")[0];if(!(!e||!e.length||!j)){c.support={leadingWhitespace:d.firstChild.nodeType===3,tbody:!d.getElementsByTagName("tbody").length,htmlSerialize:!!d.getElementsByTagName("link").length,style:/red/.test(j.getAttribute("style")),hrefNormalized:j.getAttribute("href")==="/a",opacity:/^0.55$/.test(j.style.opacity),cssFloat:!!j.style.cssFloat,checkOn:d.getElementsByTagName("input")[0].value==="on",optSelected:s.createElement("select").appendChild(s.createElement("option")).selected,
parentNode:d.removeChild(d.appendChild(s.createElement("div"))).parentNode===null,deleteExpando:true,checkClone:false,scriptEval:false,noCloneEvent:true,boxModel:null};b.type="text/javascript";try{b.appendChild(s.createTextNode("window."+f+"=1;"))}catch(i){}a.insertBefore(b,a.firstChild);if(A[f]){c.support.scriptEval=true;delete A[f]}try{delete b.test}catch(o){c.support.deleteExpando=false}a.removeChild(b);if(d.attachEvent&&d.fireEvent){d.attachEvent("onclick",function k(){c.support.noCloneEvent=
false;d.detachEvent("onclick",k)});d.cloneNode(true).fireEvent("onclick")}d=s.createElement("div");d.innerHTML="<input type='radio' name='radiotest' checked='checked'/>";a=s.createDocumentFragment();a.appendChild(d.firstChild);c.support.checkClone=a.cloneNode(true).cloneNode(true).lastChild.checked;c(function(){var k=s.createElement("div");k.style.width=k.style.paddingLeft="1px";s.body.appendChild(k);c.boxModel=c.support.boxModel=k.offsetWidth===2;s.body.removeChild(k).style.display="none"});a=function(k){var n=
s.createElement("div");k="on"+k;var r=k in n;if(!r){n.setAttribute(k,"return;");r=typeof n[k]==="function"}return r};c.support.submitBubbles=a("submit");c.support.changeBubbles=a("change");a=b=d=e=j=null}})();c.props={"for":"htmlFor","class":"className",readonly:"readOnly",maxlength:"maxLength",cellspacing:"cellSpacing",rowspan:"rowSpan",colspan:"colSpan",tabindex:"tabIndex",usemap:"useMap",frameborder:"frameBorder"};var G="jQuery"+J(),Ya=0,za={};c.extend({cache:{},expando:G,noData:{embed:true,object:true,
applet:true},data:function(a,b,d){if(!(a.nodeName&&c.noData[a.nodeName.toLowerCase()])){a=a==A?za:a;var f=a[G],e=c.cache;if(!f&&typeof b==="string"&&d===w)return null;f||(f=++Ya);if(typeof b==="object"){a[G]=f;e[f]=c.extend(true,{},b)}else if(!e[f]){a[G]=f;e[f]={}}a=e[f];if(d!==w)a[b]=d;return typeof b==="string"?a[b]:a}},removeData:function(a,b){if(!(a.nodeName&&c.noData[a.nodeName.toLowerCase()])){a=a==A?za:a;var d=a[G],f=c.cache,e=f[d];if(b){if(e){delete e[b];c.isEmptyObject(e)&&c.removeData(a)}}else{if(c.support.deleteExpando)delete a[c.expando];
else a.removeAttribute&&a.removeAttribute(c.expando);delete f[d]}}}});c.fn.extend({data:function(a,b){if(typeof a==="undefined"&&this.length)return c.data(this[0]);else if(typeof a==="object")return this.each(function(){c.data(this,a)});var d=a.split(".");d[1]=d[1]?"."+d[1]:"";if(b===w){var f=this.triggerHandler("getData"+d[1]+"!",[d[0]]);if(f===w&&this.length)f=c.data(this[0],a);return f===w&&d[1]?this.data(d[0]):f}else return this.trigger("setData"+d[1]+"!",[d[0],b]).each(function(){c.data(this,
a,b)})},removeData:function(a){return this.each(function(){c.removeData(this,a)})}});c.extend({queue:function(a,b,d){if(a){b=(b||"fx")+"queue";var f=c.data(a,b);if(!d)return f||[];if(!f||c.isArray(d))f=c.data(a,b,c.makeArray(d));else f.push(d);return f}},dequeue:function(a,b){b=b||"fx";var d=c.queue(a,b),f=d.shift();if(f==="inprogress")f=d.shift();if(f){b==="fx"&&d.unshift("inprogress");f.call(a,function(){c.dequeue(a,b)})}}});c.fn.extend({queue:function(a,b){if(typeof a!=="string"){b=a;a="fx"}if(b===
w)return c.queue(this[0],a);return this.each(function(){var d=c.queue(this,a,b);a==="fx"&&d[0]!=="inprogress"&&c.dequeue(this,a)})},dequeue:function(a){return this.each(function(){c.dequeue(this,a)})},delay:function(a,b){a=c.fx?c.fx.speeds[a]||a:a;b=b||"fx";return this.queue(b,function(){var d=this;setTimeout(function(){c.dequeue(d,b)},a)})},clearQueue:function(a){return this.queue(a||"fx",[])}});var Aa=/[\n\t]/g,ca=/\s+/,Za=/\r/g,$a=/href|src|style/,ab=/(button|input)/i,bb=/(button|input|object|select|textarea)/i,
cb=/^(a|area)$/i,Ba=/radio|checkbox/;c.fn.extend({attr:function(a,b){return X(this,a,b,true,c.attr)},removeAttr:function(a){return this.each(function(){c.attr(this,a,"");this.nodeType===1&&this.removeAttribute(a)})},addClass:function(a){if(c.isFunction(a))return this.each(function(n){var r=c(this);r.addClass(a.call(this,n,r.attr("class")))});if(a&&typeof a==="string")for(var b=(a||"").split(ca),d=0,f=this.length;d<f;d++){var e=this[d];if(e.nodeType===1)if(e.className){for(var j=" "+e.className+" ",
i=e.className,o=0,k=b.length;o<k;o++)if(j.indexOf(" "+b[o]+" ")<0)i+=" "+b[o];e.className=c.trim(i)}else e.className=a}return this},removeClass:function(a){if(c.isFunction(a))return this.each(function(k){var n=c(this);n.removeClass(a.call(this,k,n.attr("class")))});if(a&&typeof a==="string"||a===w)for(var b=(a||"").split(ca),d=0,f=this.length;d<f;d++){var e=this[d];if(e.nodeType===1&&e.className)if(a){for(var j=(" "+e.className+" ").replace(Aa," "),i=0,o=b.length;i<o;i++)j=j.replace(" "+b[i]+" ",
" ");e.className=c.trim(j)}else e.className=""}return this},toggleClass:function(a,b){var d=typeof a,f=typeof b==="boolean";if(c.isFunction(a))return this.each(function(e){var j=c(this);j.toggleClass(a.call(this,e,j.attr("class"),b),b)});return this.each(function(){if(d==="string")for(var e,j=0,i=c(this),o=b,k=a.split(ca);e=k[j++];){o=f?o:!i.hasClass(e);i[o?"addClass":"removeClass"](e)}else if(d==="undefined"||d==="boolean"){this.className&&c.data(this,"__className__",this.className);this.className=
this.className||a===false?"":c.data(this,"__className__")||""}})},hasClass:function(a){a=" "+a+" ";for(var b=0,d=this.length;b<d;b++)if((" "+this[b].className+" ").replace(Aa," ").indexOf(a)>-1)return true;return false},val:function(a){if(a===w){var b=this[0];if(b){if(c.nodeName(b,"option"))return(b.attributes.value||{}).specified?b.value:b.text;if(c.nodeName(b,"select")){var d=b.selectedIndex,f=[],e=b.options;b=b.type==="select-one";if(d<0)return null;var j=b?d:0;for(d=b?d+1:e.length;j<d;j++){var i=
e[j];if(i.selected){a=c(i).val();if(b)return a;f.push(a)}}return f}if(Ba.test(b.type)&&!c.support.checkOn)return b.getAttribute("value")===null?"on":b.value;return(b.value||"").replace(Za,"")}return w}var o=c.isFunction(a);return this.each(function(k){var n=c(this),r=a;if(this.nodeType===1){if(o)r=a.call(this,k,n.val());if(typeof r==="number")r+="";if(c.isArray(r)&&Ba.test(this.type))this.checked=c.inArray(n.val(),r)>=0;else if(c.nodeName(this,"select")){var u=c.makeArray(r);c("option",this).each(function(){this.selected=
c.inArray(c(this).val(),u)>=0});if(!u.length)this.selectedIndex=-1}else this.value=r}})}});c.extend({attrFn:{val:true,css:true,html:true,text:true,data:true,width:true,height:true,offset:true},attr:function(a,b,d,f){if(!a||a.nodeType===3||a.nodeType===8)return w;if(f&&b in c.attrFn)return c(a)[b](d);f=a.nodeType!==1||!c.isXMLDoc(a);var e=d!==w;b=f&&c.props[b]||b;if(a.nodeType===1){var j=$a.test(b);if(b in a&&f&&!j){if(e){b==="type"&&ab.test(a.nodeName)&&a.parentNode&&c.error("type property can't be changed");
a[b]=d}if(c.nodeName(a,"form")&&a.getAttributeNode(b))return a.getAttributeNode(b).nodeValue;if(b==="tabIndex")return(b=a.getAttributeNode("tabIndex"))&&b.specified?b.value:bb.test(a.nodeName)||cb.test(a.nodeName)&&a.href?0:w;return a[b]}if(!c.support.style&&f&&b==="style"){if(e)a.style.cssText=""+d;return a.style.cssText}e&&a.setAttribute(b,""+d);a=!c.support.hrefNormalized&&f&&j?a.getAttribute(b,2):a.getAttribute(b);return a===null?w:a}return c.style(a,b,d)}});var O=/\.(.*)$/,db=function(a){return a.replace(/[^\w\s\.\|`]/g,
function(b){return"\\"+b})};c.event={add:function(a,b,d,f){if(!(a.nodeType===3||a.nodeType===8)){if(a.setInterval&&a!==A&&!a.frameElement)a=A;var e,j;if(d.handler){e=d;d=e.handler}if(!d.guid)d.guid=c.guid++;if(j=c.data(a)){var i=j.events=j.events||{},o=j.handle;if(!o)j.handle=o=function(){return typeof c!=="undefined"&&!c.event.triggered?c.event.handle.apply(o.elem,arguments):w};o.elem=a;b=b.split(" ");for(var k,n=0,r;k=b[n++];){j=e?c.extend({},e):{handler:d,data:f};if(k.indexOf(".")>-1){r=k.split(".");
k=r.shift();j.namespace=r.slice(0).sort().join(".")}else{r=[];j.namespace=""}j.type=k;j.guid=d.guid;var u=i[k],z=c.event.special[k]||{};if(!u){u=i[k]=[];if(!z.setup||z.setup.call(a,f,r,o)===false)if(a.addEventListener)a.addEventListener(k,o,false);else a.attachEvent&&a.attachEvent("on"+k,o)}if(z.add){z.add.call(a,j);if(!j.handler.guid)j.handler.guid=d.guid}u.push(j);c.event.global[k]=true}a=null}}},global:{},remove:function(a,b,d,f){if(!(a.nodeType===3||a.nodeType===8)){var e,j=0,i,o,k,n,r,u,z=c.data(a),
C=z&&z.events;if(z&&C){if(b&&b.type){d=b.handler;b=b.type}if(!b||typeof b==="string"&&b.charAt(0)==="."){b=b||"";for(e in C)c.event.remove(a,e+b)}else{for(b=b.split(" ");e=b[j++];){n=e;i=e.indexOf(".")<0;o=[];if(!i){o=e.split(".");e=o.shift();k=new RegExp("(^|\\.)"+c.map(o.slice(0).sort(),db).join("\\.(?:.*\\.)?")+"(\\.|$)")}if(r=C[e])if(d){n=c.event.special[e]||{};for(B=f||0;B<r.length;B++){u=r[B];if(d.guid===u.guid){if(i||k.test(u.namespace)){f==null&&r.splice(B--,1);n.remove&&n.remove.call(a,u)}if(f!=
null)break}}if(r.length===0||f!=null&&r.length===1){if(!n.teardown||n.teardown.call(a,o)===false)Ca(a,e,z.handle);delete C[e]}}else for(var B=0;B<r.length;B++){u=r[B];if(i||k.test(u.namespace)){c.event.remove(a,n,u.handler,B);r.splice(B--,1)}}}if(c.isEmptyObject(C)){if(b=z.handle)b.elem=null;delete z.events;delete z.handle;c.isEmptyObject(z)&&c.removeData(a)}}}}},trigger:function(a,b,d,f){var e=a.type||a;if(!f){a=typeof a==="object"?a[G]?a:c.extend(c.Event(e),a):c.Event(e);if(e.indexOf("!")>=0){a.type=
e=e.slice(0,-1);a.exclusive=true}if(!d){a.stopPropagation();c.event.global[e]&&c.each(c.cache,function(){this.events&&this.events[e]&&c.event.trigger(a,b,this.handle.elem)})}if(!d||d.nodeType===3||d.nodeType===8)return w;a.result=w;a.target=d;b=c.makeArray(b);b.unshift(a)}a.currentTarget=d;(f=c.data(d,"handle"))&&f.apply(d,b);f=d.parentNode||d.ownerDocument;try{if(!(d&&d.nodeName&&c.noData[d.nodeName.toLowerCase()]))if(d["on"+e]&&d["on"+e].apply(d,b)===false)a.result=false}catch(j){}if(!a.isPropagationStopped()&&
f)c.event.trigger(a,b,f,true);else if(!a.isDefaultPrevented()){f=a.target;var i,o=c.nodeName(f,"a")&&e==="click",k=c.event.special[e]||{};if((!k._default||k._default.call(d,a)===false)&&!o&&!(f&&f.nodeName&&c.noData[f.nodeName.toLowerCase()])){try{if(f[e]){if(i=f["on"+e])f["on"+e]=null;c.event.triggered=true;f[e]()}}catch(n){}if(i)f["on"+e]=i;c.event.triggered=false}}},handle:function(a){var b,d,f,e;a=arguments[0]=c.event.fix(a||A.event);a.currentTarget=this;b=a.type.indexOf(".")<0&&!a.exclusive;
if(!b){d=a.type.split(".");a.type=d.shift();f=new RegExp("(^|\\.)"+d.slice(0).sort().join("\\.(?:.*\\.)?")+"(\\.|$)")}e=c.data(this,"events");d=e[a.type];if(e&&d){d=d.slice(0);e=0;for(var j=d.length;e<j;e++){var i=d[e];if(b||f.test(i.namespace)){a.handler=i.handler;a.data=i.data;a.handleObj=i;i=i.handler.apply(this,arguments);if(i!==w){a.result=i;if(i===false){a.preventDefault();a.stopPropagation()}}if(a.isImmediatePropagationStopped())break}}}return a.result},props:"altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode layerX layerY metaKey newValue offsetX offsetY originalTarget pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "),
fix:function(a){if(a[G])return a;var b=a;a=c.Event(b);for(var d=this.props.length,f;d;){f=this.props[--d];a[f]=b[f]}if(!a.target)a.target=a.srcElement||s;if(a.target.nodeType===3)a.target=a.target.parentNode;if(!a.relatedTarget&&a.fromElement)a.relatedTarget=a.fromElement===a.target?a.toElement:a.fromElement;if(a.pageX==null&&a.clientX!=null){b=s.documentElement;d=s.body;a.pageX=a.clientX+(b&&b.scrollLeft||d&&d.scrollLeft||0)-(b&&b.clientLeft||d&&d.clientLeft||0);a.pageY=a.clientY+(b&&b.scrollTop||
d&&d.scrollTop||0)-(b&&b.clientTop||d&&d.clientTop||0)}if(!a.which&&(a.charCode||a.charCode===0?a.charCode:a.keyCode))a.which=a.charCode||a.keyCode;if(!a.metaKey&&a.ctrlKey)a.metaKey=a.ctrlKey;if(!a.which&&a.button!==w)a.which=a.button&1?1:a.button&2?3:a.button&4?2:0;return a},guid:1E8,proxy:c.proxy,special:{ready:{setup:c.bindReady,teardown:c.noop},live:{add:function(a){c.event.add(this,a.origType,c.extend({},a,{handler:oa}))},remove:function(a){var b=true,d=a.origType.replace(O,"");c.each(c.data(this,
"events").live||[],function(){if(d===this.origType.replace(O,""))return b=false});b&&c.event.remove(this,a.origType,oa)}},beforeunload:{setup:function(a,b,d){if(this.setInterval)this.onbeforeunload=d;return false},teardown:function(a,b){if(this.onbeforeunload===b)this.onbeforeunload=null}}}};var Ca=s.removeEventListener?function(a,b,d){a.removeEventListener(b,d,false)}:function(a,b,d){a.detachEvent("on"+b,d)};c.Event=function(a){if(!this.preventDefault)return new c.Event(a);if(a&&a.type){this.originalEvent=
a;this.type=a.type}else this.type=a;this.timeStamp=J();this[G]=true};c.Event.prototype={preventDefault:function(){this.isDefaultPrevented=Z;var a=this.originalEvent;if(a){a.preventDefault&&a.preventDefault();a.returnValue=false}},stopPropagation:function(){this.isPropagationStopped=Z;var a=this.originalEvent;if(a){a.stopPropagation&&a.stopPropagation();a.cancelBubble=true}},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=Z;this.stopPropagation()},isDefaultPrevented:Y,isPropagationStopped:Y,
isImmediatePropagationStopped:Y};var Da=function(a){var b=a.relatedTarget;try{for(;b&&b!==this;)b=b.parentNode;if(b!==this){a.type=a.data;c.event.handle.apply(this,arguments)}}catch(d){}},Ea=function(a){a.type=a.data;c.event.handle.apply(this,arguments)};c.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(a,b){c.event.special[a]={setup:function(d){c.event.add(this,b,d&&d.selector?Ea:Da,a)},teardown:function(d){c.event.remove(this,b,d&&d.selector?Ea:Da)}}});if(!c.support.submitBubbles)c.event.special.submit=
{setup:function(){if(this.nodeName.toLowerCase()!=="form"){c.event.add(this,"click.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="submit"||d==="image")&&c(b).closest("form").length)return na("submit",this,arguments)});c.event.add(this,"keypress.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="text"||d==="password")&&c(b).closest("form").length&&a.keyCode===13)return na("submit",this,arguments)})}else return false},teardown:function(){c.event.remove(this,".specialSubmit")}};
if(!c.support.changeBubbles){var da=/textarea|input|select/i,ea,Fa=function(a){var b=a.type,d=a.value;if(b==="radio"||b==="checkbox")d=a.checked;else if(b==="select-multiple")d=a.selectedIndex>-1?c.map(a.options,function(f){return f.selected}).join("-"):"";else if(a.nodeName.toLowerCase()==="select")d=a.selectedIndex;return d},fa=function(a,b){var d=a.target,f,e;if(!(!da.test(d.nodeName)||d.readOnly)){f=c.data(d,"_change_data");e=Fa(d);if(a.type!=="focusout"||d.type!=="radio")c.data(d,"_change_data",
e);if(!(f===w||e===f))if(f!=null||e){a.type="change";return c.event.trigger(a,b,d)}}};c.event.special.change={filters:{focusout:fa,click:function(a){var b=a.target,d=b.type;if(d==="radio"||d==="checkbox"||b.nodeName.toLowerCase()==="select")return fa.call(this,a)},keydown:function(a){var b=a.target,d=b.type;if(a.keyCode===13&&b.nodeName.toLowerCase()!=="textarea"||a.keyCode===32&&(d==="checkbox"||d==="radio")||d==="select-multiple")return fa.call(this,a)},beforeactivate:function(a){a=a.target;c.data(a,
"_change_data",Fa(a))}},setup:function(){if(this.type==="file")return false;for(var a in ea)c.event.add(this,a+".specialChange",ea[a]);return da.test(this.nodeName)},teardown:function(){c.event.remove(this,".specialChange");return da.test(this.nodeName)}};ea=c.event.special.change.filters}s.addEventListener&&c.each({focus:"focusin",blur:"focusout"},function(a,b){function d(f){f=c.event.fix(f);f.type=b;return c.event.handle.call(this,f)}c.event.special[b]={setup:function(){this.addEventListener(a,
d,true)},teardown:function(){this.removeEventListener(a,d,true)}}});c.each(["bind","one"],function(a,b){c.fn[b]=function(d,f,e){if(typeof d==="object"){for(var j in d)this[b](j,f,d[j],e);return this}if(c.isFunction(f)){e=f;f=w}var i=b==="one"?c.proxy(e,function(k){c(this).unbind(k,i);return e.apply(this,arguments)}):e;if(d==="unload"&&b!=="one")this.one(d,f,e);else{j=0;for(var o=this.length;j<o;j++)c.event.add(this[j],d,i,f)}return this}});c.fn.extend({unbind:function(a,b){if(typeof a==="object"&&
!a.preventDefault)for(var d in a)this.unbind(d,a[d]);else{d=0;for(var f=this.length;d<f;d++)c.event.remove(this[d],a,b)}return this},delegate:function(a,b,d,f){return this.live(b,d,f,a)},undelegate:function(a,b,d){return arguments.length===0?this.unbind("live"):this.die(b,null,d,a)},trigger:function(a,b){return this.each(function(){c.event.trigger(a,b,this)})},triggerHandler:function(a,b){if(this[0]){a=c.Event(a);a.preventDefault();a.stopPropagation();c.event.trigger(a,b,this[0]);return a.result}},
toggle:function(a){for(var b=arguments,d=1;d<b.length;)c.proxy(a,b[d++]);return this.click(c.proxy(a,function(f){var e=(c.data(this,"lastToggle"+a.guid)||0)%d;c.data(this,"lastToggle"+a.guid,e+1);f.preventDefault();return b[e].apply(this,arguments)||false}))},hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}});var Ga={focus:"focusin",blur:"focusout",mouseenter:"mouseover",mouseleave:"mouseout"};c.each(["live","die"],function(a,b){c.fn[b]=function(d,f,e,j){var i,o=0,k,n,r=j||this.selector,
u=j?this:c(this.context);if(c.isFunction(f)){e=f;f=w}for(d=(d||"").split(" ");(i=d[o++])!=null;){j=O.exec(i);k="";if(j){k=j[0];i=i.replace(O,"")}if(i==="hover")d.push("mouseenter"+k,"mouseleave"+k);else{n=i;if(i==="focus"||i==="blur"){d.push(Ga[i]+k);i+=k}else i=(Ga[i]||i)+k;b==="live"?u.each(function(){c.event.add(this,pa(i,r),{data:f,selector:r,handler:e,origType:i,origHandler:e,preType:n})}):u.unbind(pa(i,r),e)}}return this}});c.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error".split(" "),
function(a,b){c.fn[b]=function(d){return d?this.bind(b,d):this.trigger(b)};if(c.attrFn)c.attrFn[b]=true});A.attachEvent&&!A.addEventListener&&A.attachEvent("onunload",function(){for(var a in c.cache)if(c.cache[a].handle)try{c.event.remove(c.cache[a].handle.elem)}catch(b){}});(function(){function a(g){for(var h="",l,m=0;g[m];m++){l=g[m];if(l.nodeType===3||l.nodeType===4)h+=l.nodeValue;else if(l.nodeType!==8)h+=a(l.childNodes)}return h}function b(g,h,l,m,q,p){q=0;for(var v=m.length;q<v;q++){var t=m[q];
if(t){t=t[g];for(var y=false;t;){if(t.sizcache===l){y=m[t.sizset];break}if(t.nodeType===1&&!p){t.sizcache=l;t.sizset=q}if(t.nodeName.toLowerCase()===h){y=t;break}t=t[g]}m[q]=y}}}function d(g,h,l,m,q,p){q=0;for(var v=m.length;q<v;q++){var t=m[q];if(t){t=t[g];for(var y=false;t;){if(t.sizcache===l){y=m[t.sizset];break}if(t.nodeType===1){if(!p){t.sizcache=l;t.sizset=q}if(typeof h!=="string"){if(t===h){y=true;break}}else if(k.filter(h,[t]).length>0){y=t;break}}t=t[g]}m[q]=y}}}var f=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^[\]]*\]|['"][^'"]*['"]|[^[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,
e=0,j=Object.prototype.toString,i=false,o=true;[0,0].sort(function(){o=false;return 0});var k=function(g,h,l,m){l=l||[];var q=h=h||s;if(h.nodeType!==1&&h.nodeType!==9)return[];if(!g||typeof g!=="string")return l;for(var p=[],v,t,y,S,H=true,M=x(h),I=g;(f.exec(""),v=f.exec(I))!==null;){I=v[3];p.push(v[1]);if(v[2]){S=v[3];break}}if(p.length>1&&r.exec(g))if(p.length===2&&n.relative[p[0]])t=ga(p[0]+p[1],h);else for(t=n.relative[p[0]]?[h]:k(p.shift(),h);p.length;){g=p.shift();if(n.relative[g])g+=p.shift();
t=ga(g,t)}else{if(!m&&p.length>1&&h.nodeType===9&&!M&&n.match.ID.test(p[0])&&!n.match.ID.test(p[p.length-1])){v=k.find(p.shift(),h,M);h=v.expr?k.filter(v.expr,v.set)[0]:v.set[0]}if(h){v=m?{expr:p.pop(),set:z(m)}:k.find(p.pop(),p.length===1&&(p[0]==="~"||p[0]==="+")&&h.parentNode?h.parentNode:h,M);t=v.expr?k.filter(v.expr,v.set):v.set;if(p.length>0)y=z(t);else H=false;for(;p.length;){var D=p.pop();v=D;if(n.relative[D])v=p.pop();else D="";if(v==null)v=h;n.relative[D](y,v,M)}}else y=[]}y||(y=t);y||k.error(D||
g);if(j.call(y)==="[object Array]")if(H)if(h&&h.nodeType===1)for(g=0;y[g]!=null;g++){if(y[g]&&(y[g]===true||y[g].nodeType===1&&E(h,y[g])))l.push(t[g])}else for(g=0;y[g]!=null;g++)y[g]&&y[g].nodeType===1&&l.push(t[g]);else l.push.apply(l,y);else z(y,l);if(S){k(S,q,l,m);k.uniqueSort(l)}return l};k.uniqueSort=function(g){if(B){i=o;g.sort(B);if(i)for(var h=1;h<g.length;h++)g[h]===g[h-1]&&g.splice(h--,1)}return g};k.matches=function(g,h){return k(g,null,null,h)};k.find=function(g,h,l){var m,q;if(!g)return[];
for(var p=0,v=n.order.length;p<v;p++){var t=n.order[p];if(q=n.leftMatch[t].exec(g)){var y=q[1];q.splice(1,1);if(y.substr(y.length-1)!=="\\"){q[1]=(q[1]||"").replace(/\\/g,"");m=n.find[t](q,h,l);if(m!=null){g=g.replace(n.match[t],"");break}}}}m||(m=h.getElementsByTagName("*"));return{set:m,expr:g}};k.filter=function(g,h,l,m){for(var q=g,p=[],v=h,t,y,S=h&&h[0]&&x(h[0]);g&&h.length;){for(var H in n.filter)if((t=n.leftMatch[H].exec(g))!=null&&t[2]){var M=n.filter[H],I,D;D=t[1];y=false;t.splice(1,1);if(D.substr(D.length-
1)!=="\\"){if(v===p)p=[];if(n.preFilter[H])if(t=n.preFilter[H](t,v,l,p,m,S)){if(t===true)continue}else y=I=true;if(t)for(var U=0;(D=v[U])!=null;U++)if(D){I=M(D,t,U,v);var Ha=m^!!I;if(l&&I!=null)if(Ha)y=true;else v[U]=false;else if(Ha){p.push(D);y=true}}if(I!==w){l||(v=p);g=g.replace(n.match[H],"");if(!y)return[];break}}}if(g===q)if(y==null)k.error(g);else break;q=g}return v};k.error=function(g){throw"Syntax error, unrecognized expression: "+g;};var n=k.selectors={order:["ID","NAME","TAG"],match:{ID:/#((?:[\w\u00c0-\uFFFF-]|\\.)+)/,
CLASS:/\.((?:[\w\u00c0-\uFFFF-]|\\.)+)/,NAME:/\[name=['"]*((?:[\w\u00c0-\uFFFF-]|\\.)+)['"]*\]/,ATTR:/\[\s*((?:[\w\u00c0-\uFFFF-]|\\.)+)\s*(?:(\S?=)\s*(['"]*)(.*?)\3|)\s*\]/,TAG:/^((?:[\w\u00c0-\uFFFF\*-]|\\.)+)/,CHILD:/:(only|nth|last|first)-child(?:\((even|odd|[\dn+-]*)\))?/,POS:/:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^-]|$)/,PSEUDO:/:((?:[\w\u00c0-\uFFFF-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/},leftMatch:{},attrMap:{"class":"className","for":"htmlFor"},attrHandle:{href:function(g){return g.getAttribute("href")}},
relative:{"+":function(g,h){var l=typeof h==="string",m=l&&!/\W/.test(h);l=l&&!m;if(m)h=h.toLowerCase();m=0;for(var q=g.length,p;m<q;m++)if(p=g[m]){for(;(p=p.previousSibling)&&p.nodeType!==1;);g[m]=l||p&&p.nodeName.toLowerCase()===h?p||false:p===h}l&&k.filter(h,g,true)},">":function(g,h){var l=typeof h==="string";if(l&&!/\W/.test(h)){h=h.toLowerCase();for(var m=0,q=g.length;m<q;m++){var p=g[m];if(p){l=p.parentNode;g[m]=l.nodeName.toLowerCase()===h?l:false}}}else{m=0;for(q=g.length;m<q;m++)if(p=g[m])g[m]=
l?p.parentNode:p.parentNode===h;l&&k.filter(h,g,true)}},"":function(g,h,l){var m=e++,q=d;if(typeof h==="string"&&!/\W/.test(h)){var p=h=h.toLowerCase();q=b}q("parentNode",h,m,g,p,l)},"~":function(g,h,l){var m=e++,q=d;if(typeof h==="string"&&!/\W/.test(h)){var p=h=h.toLowerCase();q=b}q("previousSibling",h,m,g,p,l)}},find:{ID:function(g,h,l){if(typeof h.getElementById!=="undefined"&&!l)return(g=h.getElementById(g[1]))?[g]:[]},NAME:function(g,h){if(typeof h.getElementsByName!=="undefined"){var l=[];
h=h.getElementsByName(g[1]);for(var m=0,q=h.length;m<q;m++)h[m].getAttribute("name")===g[1]&&l.push(h[m]);return l.length===0?null:l}},TAG:function(g,h){return h.getElementsByTagName(g[1])}},preFilter:{CLASS:function(g,h,l,m,q,p){g=" "+g[1].replace(/\\/g,"")+" ";if(p)return g;p=0;for(var v;(v=h[p])!=null;p++)if(v)if(q^(v.className&&(" "+v.className+" ").replace(/[\t\n]/g," ").indexOf(g)>=0))l||m.push(v);else if(l)h[p]=false;return false},ID:function(g){return g[1].replace(/\\/g,"")},TAG:function(g){return g[1].toLowerCase()},
CHILD:function(g){if(g[1]==="nth"){var h=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(g[2]==="even"&&"2n"||g[2]==="odd"&&"2n+1"||!/\D/.test(g[2])&&"0n+"+g[2]||g[2]);g[2]=h[1]+(h[2]||1)-0;g[3]=h[3]-0}g[0]=e++;return g},ATTR:function(g,h,l,m,q,p){h=g[1].replace(/\\/g,"");if(!p&&n.attrMap[h])g[1]=n.attrMap[h];if(g[2]==="~=")g[4]=" "+g[4]+" ";return g},PSEUDO:function(g,h,l,m,q){if(g[1]==="not")if((f.exec(g[3])||"").length>1||/^\w/.test(g[3]))g[3]=k(g[3],null,null,h);else{g=k.filter(g[3],h,l,true^q);l||m.push.apply(m,
g);return false}else if(n.match.POS.test(g[0])||n.match.CHILD.test(g[0]))return true;return g},POS:function(g){g.unshift(true);return g}},filters:{enabled:function(g){return g.disabled===false&&g.type!=="hidden"},disabled:function(g){return g.disabled===true},checked:function(g){return g.checked===true},selected:function(g){return g.selected===true},parent:function(g){return!!g.firstChild},empty:function(g){return!g.firstChild},has:function(g,h,l){return!!k(l[3],g).length},header:function(g){return/h\d/i.test(g.nodeName)},
text:function(g){return"text"===g.type},radio:function(g){return"radio"===g.type},checkbox:function(g){return"checkbox"===g.type},file:function(g){return"file"===g.type},password:function(g){return"password"===g.type},submit:function(g){return"submit"===g.type},image:function(g){return"image"===g.type},reset:function(g){return"reset"===g.type},button:function(g){return"button"===g.type||g.nodeName.toLowerCase()==="button"},input:function(g){return/input|select|textarea|button/i.test(g.nodeName)}},
setFilters:{first:function(g,h){return h===0},last:function(g,h,l,m){return h===m.length-1},even:function(g,h){return h%2===0},odd:function(g,h){return h%2===1},lt:function(g,h,l){return h<l[3]-0},gt:function(g,h,l){return h>l[3]-0},nth:function(g,h,l){return l[3]-0===h},eq:function(g,h,l){return l[3]-0===h}},filter:{PSEUDO:function(g,h,l,m){var q=h[1],p=n.filters[q];if(p)return p(g,l,h,m);else if(q==="contains")return(g.textContent||g.innerText||a([g])||"").indexOf(h[3])>=0;else if(q==="not"){h=
h[3];l=0;for(m=h.length;l<m;l++)if(h[l]===g)return false;return true}else k.error("Syntax error, unrecognized expression: "+q)},CHILD:function(g,h){var l=h[1],m=g;switch(l){case "only":case "first":for(;m=m.previousSibling;)if(m.nodeType===1)return false;if(l==="first")return true;m=g;case "last":for(;m=m.nextSibling;)if(m.nodeType===1)return false;return true;case "nth":l=h[2];var q=h[3];if(l===1&&q===0)return true;h=h[0];var p=g.parentNode;if(p&&(p.sizcache!==h||!g.nodeIndex)){var v=0;for(m=p.firstChild;m;m=
m.nextSibling)if(m.nodeType===1)m.nodeIndex=++v;p.sizcache=h}g=g.nodeIndex-q;return l===0?g===0:g%l===0&&g/l>=0}},ID:function(g,h){return g.nodeType===1&&g.getAttribute("id")===h},TAG:function(g,h){return h==="*"&&g.nodeType===1||g.nodeName.toLowerCase()===h},CLASS:function(g,h){return(" "+(g.className||g.getAttribute("class"))+" ").indexOf(h)>-1},ATTR:function(g,h){var l=h[1];g=n.attrHandle[l]?n.attrHandle[l](g):g[l]!=null?g[l]:g.getAttribute(l);l=g+"";var m=h[2];h=h[4];return g==null?m==="!=":m===
"="?l===h:m==="*="?l.indexOf(h)>=0:m==="~="?(" "+l+" ").indexOf(h)>=0:!h?l&&g!==false:m==="!="?l!==h:m==="^="?l.indexOf(h)===0:m==="$="?l.substr(l.length-h.length)===h:m==="|="?l===h||l.substr(0,h.length+1)===h+"-":false},POS:function(g,h,l,m){var q=n.setFilters[h[2]];if(q)return q(g,l,h,m)}}},r=n.match.POS;for(var u in n.match){n.match[u]=new RegExp(n.match[u].source+/(?![^\[]*\])(?![^\(]*\))/.source);n.leftMatch[u]=new RegExp(/(^(?:.|\r|\n)*?)/.source+n.match[u].source.replace(/\\(\d+)/g,function(g,
h){return"\\"+(h-0+1)}))}var z=function(g,h){g=Array.prototype.slice.call(g,0);if(h){h.push.apply(h,g);return h}return g};try{Array.prototype.slice.call(s.documentElement.childNodes,0)}catch(C){z=function(g,h){h=h||[];if(j.call(g)==="[object Array]")Array.prototype.push.apply(h,g);else if(typeof g.length==="number")for(var l=0,m=g.length;l<m;l++)h.push(g[l]);else for(l=0;g[l];l++)h.push(g[l]);return h}}var B;if(s.documentElement.compareDocumentPosition)B=function(g,h){if(!g.compareDocumentPosition||
!h.compareDocumentPosition){if(g==h)i=true;return g.compareDocumentPosition?-1:1}g=g.compareDocumentPosition(h)&4?-1:g===h?0:1;if(g===0)i=true;return g};else if("sourceIndex"in s.documentElement)B=function(g,h){if(!g.sourceIndex||!h.sourceIndex){if(g==h)i=true;return g.sourceIndex?-1:1}g=g.sourceIndex-h.sourceIndex;if(g===0)i=true;return g};else if(s.createRange)B=function(g,h){if(!g.ownerDocument||!h.ownerDocument){if(g==h)i=true;return g.ownerDocument?-1:1}var l=g.ownerDocument.createRange(),m=
h.ownerDocument.createRange();l.setStart(g,0);l.setEnd(g,0);m.setStart(h,0);m.setEnd(h,0);g=l.compareBoundaryPoints(Range.START_TO_END,m);if(g===0)i=true;return g};(function(){var g=s.createElement("div"),h="script"+(new Date).getTime();g.innerHTML="<a name='"+h+"'/>";var l=s.documentElement;l.insertBefore(g,l.firstChild);if(s.getElementById(h)){n.find.ID=function(m,q,p){if(typeof q.getElementById!=="undefined"&&!p)return(q=q.getElementById(m[1]))?q.id===m[1]||typeof q.getAttributeNode!=="undefined"&&
q.getAttributeNode("id").nodeValue===m[1]?[q]:w:[]};n.filter.ID=function(m,q){var p=typeof m.getAttributeNode!=="undefined"&&m.getAttributeNode("id");return m.nodeType===1&&p&&p.nodeValue===q}}l.removeChild(g);l=g=null})();(function(){var g=s.createElement("div");g.appendChild(s.createComment(""));if(g.getElementsByTagName("*").length>0)n.find.TAG=function(h,l){l=l.getElementsByTagName(h[1]);if(h[1]==="*"){h=[];for(var m=0;l[m];m++)l[m].nodeType===1&&h.push(l[m]);l=h}return l};g.innerHTML="<a href='#'></a>";
if(g.firstChild&&typeof g.firstChild.getAttribute!=="undefined"&&g.firstChild.getAttribute("href")!=="#")n.attrHandle.href=function(h){return h.getAttribute("href",2)};g=null})();s.querySelectorAll&&function(){var g=k,h=s.createElement("div");h.innerHTML="<p class='TEST'></p>";if(!(h.querySelectorAll&&h.querySelectorAll(".TEST").length===0)){k=function(m,q,p,v){q=q||s;if(!v&&q.nodeType===9&&!x(q))try{return z(q.querySelectorAll(m),p)}catch(t){}return g(m,q,p,v)};for(var l in g)k[l]=g[l];h=null}}();
(function(){var g=s.createElement("div");g.innerHTML="<div class='test e'></div><div class='test'></div>";if(!(!g.getElementsByClassName||g.getElementsByClassName("e").length===0)){g.lastChild.className="e";if(g.getElementsByClassName("e").length!==1){n.order.splice(1,0,"CLASS");n.find.CLASS=function(h,l,m){if(typeof l.getElementsByClassName!=="undefined"&&!m)return l.getElementsByClassName(h[1])};g=null}}})();var E=s.compareDocumentPosition?function(g,h){return!!(g.compareDocumentPosition(h)&16)}:
function(g,h){return g!==h&&(g.contains?g.contains(h):true)},x=function(g){return(g=(g?g.ownerDocument||g:0).documentElement)?g.nodeName!=="HTML":false},ga=function(g,h){var l=[],m="",q;for(h=h.nodeType?[h]:h;q=n.match.PSEUDO.exec(g);){m+=q[0];g=g.replace(n.match.PSEUDO,"")}g=n.relative[g]?g+"*":g;q=0;for(var p=h.length;q<p;q++)k(g,h[q],l);return k.filter(m,l)};c.find=k;c.expr=k.selectors;c.expr[":"]=c.expr.filters;c.unique=k.uniqueSort;c.text=a;c.isXMLDoc=x;c.contains=E})();var eb=/Until$/,fb=/^(?:parents|prevUntil|prevAll)/,
gb=/,/;R=Array.prototype.slice;var Ia=function(a,b,d){if(c.isFunction(b))return c.grep(a,function(e,j){return!!b.call(e,j,e)===d});else if(b.nodeType)return c.grep(a,function(e){return e===b===d});else if(typeof b==="string"){var f=c.grep(a,function(e){return e.nodeType===1});if(Ua.test(b))return c.filter(b,f,!d);else b=c.filter(b,f)}return c.grep(a,function(e){return c.inArray(e,b)>=0===d})};c.fn.extend({find:function(a){for(var b=this.pushStack("","find",a),d=0,f=0,e=this.length;f<e;f++){d=b.length;
c.find(a,this[f],b);if(f>0)for(var j=d;j<b.length;j++)for(var i=0;i<d;i++)if(b[i]===b[j]){b.splice(j--,1);break}}return b},has:function(a){var b=c(a);return this.filter(function(){for(var d=0,f=b.length;d<f;d++)if(c.contains(this,b[d]))return true})},not:function(a){return this.pushStack(Ia(this,a,false),"not",a)},filter:function(a){return this.pushStack(Ia(this,a,true),"filter",a)},is:function(a){return!!a&&c.filter(a,this).length>0},closest:function(a,b){if(c.isArray(a)){var d=[],f=this[0],e,j=
{},i;if(f&&a.length){e=0;for(var o=a.length;e<o;e++){i=a[e];j[i]||(j[i]=c.expr.match.POS.test(i)?c(i,b||this.context):i)}for(;f&&f.ownerDocument&&f!==b;){for(i in j){e=j[i];if(e.jquery?e.index(f)>-1:c(f).is(e)){d.push({selector:i,elem:f});delete j[i]}}f=f.parentNode}}return d}var k=c.expr.match.POS.test(a)?c(a,b||this.context):null;return this.map(function(n,r){for(;r&&r.ownerDocument&&r!==b;){if(k?k.index(r)>-1:c(r).is(a))return r;r=r.parentNode}return null})},index:function(a){if(!a||typeof a===
"string")return c.inArray(this[0],a?c(a):this.parent().children());return c.inArray(a.jquery?a[0]:a,this)},add:function(a,b){a=typeof a==="string"?c(a,b||this.context):c.makeArray(a);b=c.merge(this.get(),a);return this.pushStack(qa(a[0])||qa(b[0])?b:c.unique(b))},andSelf:function(){return this.add(this.prevObject)}});c.each({parent:function(a){return(a=a.parentNode)&&a.nodeType!==11?a:null},parents:function(a){return c.dir(a,"parentNode")},parentsUntil:function(a,b,d){return c.dir(a,"parentNode",
d)},next:function(a){return c.nth(a,2,"nextSibling")},prev:function(a){return c.nth(a,2,"previousSibling")},nextAll:function(a){return c.dir(a,"nextSibling")},prevAll:function(a){return c.dir(a,"previousSibling")},nextUntil:function(a,b,d){return c.dir(a,"nextSibling",d)},prevUntil:function(a,b,d){return c.dir(a,"previousSibling",d)},siblings:function(a){return c.sibling(a.parentNode.firstChild,a)},children:function(a){return c.sibling(a.firstChild)},contents:function(a){return c.nodeName(a,"iframe")?
a.contentDocument||a.contentWindow.document:c.makeArray(a.childNodes)}},function(a,b){c.fn[a]=function(d,f){var e=c.map(this,b,d);eb.test(a)||(f=d);if(f&&typeof f==="string")e=c.filter(f,e);e=this.length>1?c.unique(e):e;if((this.length>1||gb.test(f))&&fb.test(a))e=e.reverse();return this.pushStack(e,a,R.call(arguments).join(","))}});c.extend({filter:function(a,b,d){if(d)a=":not("+a+")";return c.find.matches(a,b)},dir:function(a,b,d){var f=[];for(a=a[b];a&&a.nodeType!==9&&(d===w||a.nodeType!==1||!c(a).is(d));){a.nodeType===
1&&f.push(a);a=a[b]}return f},nth:function(a,b,d){b=b||1;for(var f=0;a;a=a[d])if(a.nodeType===1&&++f===b)break;return a},sibling:function(a,b){for(var d=[];a;a=a.nextSibling)a.nodeType===1&&a!==b&&d.push(a);return d}});var Ja=/ jQuery\d+="(?:\d+|null)"/g,V=/^\s+/,Ka=/(<([\w:]+)[^>]*?)\/>/g,hb=/^(?:area|br|col|embed|hr|img|input|link|meta|param)$/i,La=/<([\w:]+)/,ib=/<tbody/i,jb=/<|&#?\w+;/,ta=/<script|<object|<embed|<option|<style/i,ua=/checked\s*(?:[^=]|=\s*.checked.)/i,Ma=function(a,b,d){return hb.test(d)?
a:b+"></"+d+">"},F={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],area:[1,"<map>","</map>"],_default:[0,"",""]};F.optgroup=F.option;F.tbody=F.tfoot=F.colgroup=F.caption=F.thead;F.th=F.td;if(!c.support.htmlSerialize)F._default=[1,"div<div>","</div>"];c.fn.extend({text:function(a){if(c.isFunction(a))return this.each(function(b){var d=
c(this);d.text(a.call(this,b,d.text()))});if(typeof a!=="object"&&a!==w)return this.empty().append((this[0]&&this[0].ownerDocument||s).createTextNode(a));return c.text(this)},wrapAll:function(a){if(c.isFunction(a))return this.each(function(d){c(this).wrapAll(a.call(this,d))});if(this[0]){var b=c(a,this[0].ownerDocument).eq(0).clone(true);this[0].parentNode&&b.insertBefore(this[0]);b.map(function(){for(var d=this;d.firstChild&&d.firstChild.nodeType===1;)d=d.firstChild;return d}).append(this)}return this},
wrapInner:function(a){if(c.isFunction(a))return this.each(function(b){c(this).wrapInner(a.call(this,b))});return this.each(function(){var b=c(this),d=b.contents();d.length?d.wrapAll(a):b.append(a)})},wrap:function(a){return this.each(function(){c(this).wrapAll(a)})},unwrap:function(){return this.parent().each(function(){c.nodeName(this,"body")||c(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.appendChild(a)})},
prepend:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,this)});else if(arguments.length){var a=c(arguments[0]);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,
this.nextSibling)});else if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,c(arguments[0]).toArray());return a}},remove:function(a,b){for(var d=0,f;(f=this[d])!=null;d++)if(!a||c.filter(a,[f]).length){if(!b&&f.nodeType===1){c.cleanData(f.getElementsByTagName("*"));c.cleanData([f])}f.parentNode&&f.parentNode.removeChild(f)}return this},empty:function(){for(var a=0,b;(b=this[a])!=null;a++)for(b.nodeType===1&&c.cleanData(b.getElementsByTagName("*"));b.firstChild;)b.removeChild(b.firstChild);
return this},clone:function(a){var b=this.map(function(){if(!c.support.noCloneEvent&&!c.isXMLDoc(this)){var d=this.outerHTML,f=this.ownerDocument;if(!d){d=f.createElement("div");d.appendChild(this.cloneNode(true));d=d.innerHTML}return c.clean([d.replace(Ja,"").replace(/=([^="'>\s]+\/)>/g,'="$1">').replace(V,"")],f)[0]}else return this.cloneNode(true)});if(a===true){ra(this,b);ra(this.find("*"),b.find("*"))}return b},html:function(a){if(a===w)return this[0]&&this[0].nodeType===1?this[0].innerHTML.replace(Ja,
""):null;else if(typeof a==="string"&&!ta.test(a)&&(c.support.leadingWhitespace||!V.test(a))&&!F[(La.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(Ka,Ma);try{for(var b=0,d=this.length;b<d;b++)if(this[b].nodeType===1){c.cleanData(this[b].getElementsByTagName("*"));this[b].innerHTML=a}}catch(f){this.empty().append(a)}}else c.isFunction(a)?this.each(function(e){var j=c(this),i=j.html();j.empty().append(function(){return a.call(this,e,i)})}):this.empty().append(a);return this},replaceWith:function(a){if(this[0]&&
this[0].parentNode){if(c.isFunction(a))return this.each(function(b){var d=c(this),f=d.html();d.replaceWith(a.call(this,b,f))});if(typeof a!=="string")a=c(a).detach();return this.each(function(){var b=this.nextSibling,d=this.parentNode;c(this).remove();b?c(b).before(a):c(d).append(a)})}else return this.pushStack(c(c.isFunction(a)?a():a),"replaceWith",a)},detach:function(a){return this.remove(a,true)},domManip:function(a,b,d){function f(u){return c.nodeName(u,"table")?u.getElementsByTagName("tbody")[0]||
u.appendChild(u.ownerDocument.createElement("tbody")):u}var e,j,i=a[0],o=[],k;if(!c.support.checkClone&&arguments.length===3&&typeof i==="string"&&ua.test(i))return this.each(function(){c(this).domManip(a,b,d,true)});if(c.isFunction(i))return this.each(function(u){var z=c(this);a[0]=i.call(this,u,b?z.html():w);z.domManip(a,b,d)});if(this[0]){e=i&&i.parentNode;e=c.support.parentNode&&e&&e.nodeType===11&&e.childNodes.length===this.length?{fragment:e}:sa(a,this,o);k=e.fragment;if(j=k.childNodes.length===
1?(k=k.firstChild):k.firstChild){b=b&&c.nodeName(j,"tr");for(var n=0,r=this.length;n<r;n++)d.call(b?f(this[n],j):this[n],n>0||e.cacheable||this.length>1?k.cloneNode(true):k)}o.length&&c.each(o,Qa)}return this}});c.fragments={};c.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){c.fn[a]=function(d){var f=[];d=c(d);var e=this.length===1&&this[0].parentNode;if(e&&e.nodeType===11&&e.childNodes.length===1&&d.length===1){d[b](this[0]);
return this}else{e=0;for(var j=d.length;e<j;e++){var i=(e>0?this.clone(true):this).get();c.fn[b].apply(c(d[e]),i);f=f.concat(i)}return this.pushStack(f,a,d.selector)}}});c.extend({clean:function(a,b,d,f){b=b||s;if(typeof b.createElement==="undefined")b=b.ownerDocument||b[0]&&b[0].ownerDocument||s;for(var e=[],j=0,i;(i=a[j])!=null;j++){if(typeof i==="number")i+="";if(i){if(typeof i==="string"&&!jb.test(i))i=b.createTextNode(i);else if(typeof i==="string"){i=i.replace(Ka,Ma);var o=(La.exec(i)||["",
""])[1].toLowerCase(),k=F[o]||F._default,n=k[0],r=b.createElement("div");for(r.innerHTML=k[1]+i+k[2];n--;)r=r.lastChild;if(!c.support.tbody){n=ib.test(i);o=o==="table"&&!n?r.firstChild&&r.firstChild.childNodes:k[1]==="<table>"&&!n?r.childNodes:[];for(k=o.length-1;k>=0;--k)c.nodeName(o[k],"tbody")&&!o[k].childNodes.length&&o[k].parentNode.removeChild(o[k])}!c.support.leadingWhitespace&&V.test(i)&&r.insertBefore(b.createTextNode(V.exec(i)[0]),r.firstChild);i=r.childNodes}if(i.nodeType)e.push(i);else e=
c.merge(e,i)}}if(d)for(j=0;e[j];j++)if(f&&c.nodeName(e[j],"script")&&(!e[j].type||e[j].type.toLowerCase()==="text/javascript"))f.push(e[j].parentNode?e[j].parentNode.removeChild(e[j]):e[j]);else{e[j].nodeType===1&&e.splice.apply(e,[j+1,0].concat(c.makeArray(e[j].getElementsByTagName("script"))));d.appendChild(e[j])}return e},cleanData:function(a){for(var b,d,f=c.cache,e=c.event.special,j=c.support.deleteExpando,i=0,o;(o=a[i])!=null;i++)if(d=o[c.expando]){b=f[d];if(b.events)for(var k in b.events)e[k]?
c.event.remove(o,k):Ca(o,k,b.handle);if(j)delete o[c.expando];else o.removeAttribute&&o.removeAttribute(c.expando);delete f[d]}}});var kb=/z-?index|font-?weight|opacity|zoom|line-?height/i,Na=/alpha\([^)]*\)/,Oa=/opacity=([^)]*)/,ha=/float/i,ia=/-([a-z])/ig,lb=/([A-Z])/g,mb=/^-?\d+(?:px)?$/i,nb=/^-?\d/,ob={position:"absolute",visibility:"hidden",display:"block"},pb=["Left","Right"],qb=["Top","Bottom"],rb=s.defaultView&&s.defaultView.getComputedStyle,Pa=c.support.cssFloat?"cssFloat":"styleFloat",ja=
function(a,b){return b.toUpperCase()};c.fn.css=function(a,b){return X(this,a,b,true,function(d,f,e){if(e===w)return c.curCSS(d,f);if(typeof e==="number"&&!kb.test(f))e+="px";c.style(d,f,e)})};c.extend({style:function(a,b,d){if(!a||a.nodeType===3||a.nodeType===8)return w;if((b==="width"||b==="height")&&parseFloat(d)<0)d=w;var f=a.style||a,e=d!==w;if(!c.support.opacity&&b==="opacity"){if(e){f.zoom=1;b=parseInt(d,10)+""==="NaN"?"":"alpha(opacity="+d*100+")";a=f.filter||c.curCSS(a,"filter")||"";f.filter=
Na.test(a)?a.replace(Na,b):b}return f.filter&&f.filter.indexOf("opacity=")>=0?parseFloat(Oa.exec(f.filter)[1])/100+"":""}if(ha.test(b))b=Pa;b=b.replace(ia,ja);if(e)f[b]=d;return f[b]},css:function(a,b,d,f){if(b==="width"||b==="height"){var e,j=b==="width"?pb:qb;function i(){e=b==="width"?a.offsetWidth:a.offsetHeight;f!=="border"&&c.each(j,function(){f||(e-=parseFloat(c.curCSS(a,"padding"+this,true))||0);if(f==="margin")e+=parseFloat(c.curCSS(a,"margin"+this,true))||0;else e-=parseFloat(c.curCSS(a,
"border"+this+"Width",true))||0})}a.offsetWidth!==0?i():c.swap(a,ob,i);return Math.max(0,Math.round(e))}return c.curCSS(a,b,d)},curCSS:function(a,b,d){var f,e=a.style;if(!c.support.opacity&&b==="opacity"&&a.currentStyle){f=Oa.test(a.currentStyle.filter||"")?parseFloat(RegExp.$1)/100+"":"";return f===""?"1":f}if(ha.test(b))b=Pa;if(!d&&e&&e[b])f=e[b];else if(rb){if(ha.test(b))b="float";b=b.replace(lb,"-$1").toLowerCase();e=a.ownerDocument.defaultView;if(!e)return null;if(a=e.getComputedStyle(a,null))f=
a.getPropertyValue(b);if(b==="opacity"&&f==="")f="1"}else if(a.currentStyle){d=b.replace(ia,ja);f=a.currentStyle[b]||a.currentStyle[d];if(!mb.test(f)&&nb.test(f)){b=e.left;var j=a.runtimeStyle.left;a.runtimeStyle.left=a.currentStyle.left;e.left=d==="fontSize"?"1em":f||0;f=e.pixelLeft+"px";e.left=b;a.runtimeStyle.left=j}}return f},swap:function(a,b,d){var f={};for(var e in b){f[e]=a.style[e];a.style[e]=b[e]}d.call(a);for(e in b)a.style[e]=f[e]}});if(c.expr&&c.expr.filters){c.expr.filters.hidden=function(a){var b=
a.offsetWidth,d=a.offsetHeight,f=a.nodeName.toLowerCase()==="tr";return b===0&&d===0&&!f?true:b>0&&d>0&&!f?false:c.curCSS(a,"display")==="none"};c.expr.filters.visible=function(a){return!c.expr.filters.hidden(a)}}var sb=J(),tb=/<script(.|\s)*?\/script>/gi,ub=/select|textarea/i,vb=/color|date|datetime|email|hidden|month|number|password|range|search|tel|text|time|url|week/i,N=/=\?(&|$)/,ka=/\?/,wb=/(\?|&)_=.*?(&|$)/,xb=/^(\w+:)?\/\/([^\/?#]+)/,yb=/%20/g,zb=c.fn.load;c.fn.extend({load:function(a,b,d){if(typeof a!==
"string")return zb.call(this,a);else if(!this.length)return this;var f=a.indexOf(" ");if(f>=0){var e=a.slice(f,a.length);a=a.slice(0,f)}f="GET";if(b)if(c.isFunction(b)){d=b;b=null}else if(typeof b==="object"){b=c.param(b,c.ajaxSettings.traditional);f="POST"}var j=this;c.ajax({url:a,type:f,dataType:"html",data:b,complete:function(i,o){if(o==="success"||o==="notmodified")j.html(e?c("<div />").append(i.responseText.replace(tb,"")).find(e):i.responseText);d&&j.each(d,[i.responseText,o,i])}});return this},
serialize:function(){return c.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?c.makeArray(this.elements):this}).filter(function(){return this.name&&!this.disabled&&(this.checked||ub.test(this.nodeName)||vb.test(this.type))}).map(function(a,b){a=c(this).val();return a==null?null:c.isArray(a)?c.map(a,function(d){return{name:b.name,value:d}}):{name:b.name,value:a}}).get()}});c.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),
function(a,b){c.fn[b]=function(d){return this.bind(b,d)}});c.extend({get:function(a,b,d,f){if(c.isFunction(b)){f=f||d;d=b;b=null}return c.ajax({type:"GET",url:a,data:b,success:d,dataType:f})},getScript:function(a,b){return c.get(a,null,b,"script")},getJSON:function(a,b,d){return c.get(a,b,d,"json")},post:function(a,b,d,f){if(c.isFunction(b)){f=f||d;d=b;b={}}return c.ajax({type:"POST",url:a,data:b,success:d,dataType:f})},ajaxSetup:function(a){c.extend(c.ajaxSettings,a)},ajaxSettings:{url:location.href,
global:true,type:"GET",contentType:"application/x-www-form-urlencoded",processData:true,async:true,xhr:A.XMLHttpRequest&&(A.location.protocol!=="file:"||!A.ActiveXObject)?function(){return new A.XMLHttpRequest}:function(){try{return new A.ActiveXObject("Microsoft.XMLHTTP")}catch(a){}},accepts:{xml:"application/xml, text/xml",html:"text/html",script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},lastModified:{},etag:{},ajax:function(a){function b(){e.success&&
e.success.call(k,o,i,x);e.global&&f("ajaxSuccess",[x,e])}function d(){e.complete&&e.complete.call(k,x,i);e.global&&f("ajaxComplete",[x,e]);e.global&&!--c.active&&c.event.trigger("ajaxStop")}function f(q,p){(e.context?c(e.context):c.event).trigger(q,p)}var e=c.extend(true,{},c.ajaxSettings,a),j,i,o,k=a&&a.context||e,n=e.type.toUpperCase();if(e.data&&e.processData&&typeof e.data!=="string")e.data=c.param(e.data,e.traditional);if(e.dataType==="jsonp"){if(n==="GET")N.test(e.url)||(e.url+=(ka.test(e.url)?
"&":"?")+(e.jsonp||"callback")+"=?");else if(!e.data||!N.test(e.data))e.data=(e.data?e.data+"&":"")+(e.jsonp||"callback")+"=?";e.dataType="json"}if(e.dataType==="json"&&(e.data&&N.test(e.data)||N.test(e.url))){j=e.jsonpCallback||"jsonp"+sb++;if(e.data)e.data=(e.data+"").replace(N,"="+j+"$1");e.url=e.url.replace(N,"="+j+"$1");e.dataType="script";A[j]=A[j]||function(q){o=q;b();d();A[j]=w;try{delete A[j]}catch(p){}z&&z.removeChild(C)}}if(e.dataType==="script"&&e.cache===null)e.cache=false;if(e.cache===
false&&n==="GET"){var r=J(),u=e.url.replace(wb,"$1_="+r+"$2");e.url=u+(u===e.url?(ka.test(e.url)?"&":"?")+"_="+r:"")}if(e.data&&n==="GET")e.url+=(ka.test(e.url)?"&":"?")+e.data;e.global&&!c.active++&&c.event.trigger("ajaxStart");r=(r=xb.exec(e.url))&&(r[1]&&r[1]!==location.protocol||r[2]!==location.host);if(e.dataType==="script"&&n==="GET"&&r){var z=s.getElementsByTagName("head")[0]||s.documentElement,C=s.createElement("script");C.src=e.url;if(e.scriptCharset)C.charset=e.scriptCharset;if(!j){var B=
false;C.onload=C.onreadystatechange=function(){if(!B&&(!this.readyState||this.readyState==="loaded"||this.readyState==="complete")){B=true;b();d();C.onload=C.onreadystatechange=null;z&&C.parentNode&&z.removeChild(C)}}}z.insertBefore(C,z.firstChild);return w}var E=false,x=e.xhr();if(x){e.username?x.open(n,e.url,e.async,e.username,e.password):x.open(n,e.url,e.async);try{if(e.data||a&&a.contentType)x.setRequestHeader("Content-Type",e.contentType);if(e.ifModified){c.lastModified[e.url]&&x.setRequestHeader("If-Modified-Since",
c.lastModified[e.url]);c.etag[e.url]&&x.setRequestHeader("If-None-Match",c.etag[e.url])}r||x.setRequestHeader("X-Requested-With","XMLHttpRequest");x.setRequestHeader("Accept",e.dataType&&e.accepts[e.dataType]?e.accepts[e.dataType]+", */*":e.accepts._default)}catch(ga){}if(e.beforeSend&&e.beforeSend.call(k,x,e)===false){e.global&&!--c.active&&c.event.trigger("ajaxStop");x.abort();return false}e.global&&f("ajaxSend",[x,e]);var g=x.onreadystatechange=function(q){if(!x||x.readyState===0||q==="abort"){E||
d();E=true;if(x)x.onreadystatechange=c.noop}else if(!E&&x&&(x.readyState===4||q==="timeout")){E=true;x.onreadystatechange=c.noop;i=q==="timeout"?"timeout":!c.httpSuccess(x)?"error":e.ifModified&&c.httpNotModified(x,e.url)?"notmodified":"success";var p;if(i==="success")try{o=c.httpData(x,e.dataType,e)}catch(v){i="parsererror";p=v}if(i==="success"||i==="notmodified")j||b();else c.handleError(e,x,i,p);d();q==="timeout"&&x.abort();if(e.async)x=null}};try{var h=x.abort;x.abort=function(){x&&h.call(x);
g("abort")}}catch(l){}e.async&&e.timeout>0&&setTimeout(function(){x&&!E&&g("timeout")},e.timeout);try{x.send(n==="POST"||n==="PUT"||n==="DELETE"?e.data:null)}catch(m){c.handleError(e,x,null,m);d()}e.async||g();return x}},handleError:function(a,b,d,f){if(a.error)a.error.call(a.context||a,b,d,f);if(a.global)(a.context?c(a.context):c.event).trigger("ajaxError",[b,a,f])},active:0,httpSuccess:function(a){try{return!a.status&&location.protocol==="file:"||a.status>=200&&a.status<300||a.status===304||a.status===
1223||a.status===0}catch(b){}return false},httpNotModified:function(a,b){var d=a.getResponseHeader("Last-Modified"),f=a.getResponseHeader("Etag");if(d)c.lastModified[b]=d;if(f)c.etag[b]=f;return a.status===304||a.status===0},httpData:function(a,b,d){var f=a.getResponseHeader("content-type")||"",e=b==="xml"||!b&&f.indexOf("xml")>=0;a=e?a.responseXML:a.responseText;e&&a.documentElement.nodeName==="parsererror"&&c.error("parsererror");if(d&&d.dataFilter)a=d.dataFilter(a,b);if(typeof a==="string")if(b===
"json"||!b&&f.indexOf("json")>=0)a=c.parseJSON(a);else if(b==="script"||!b&&f.indexOf("javascript")>=0)c.globalEval(a);return a},param:function(a,b){function d(i,o){if(c.isArray(o))c.each(o,function(k,n){b||/\[\]$/.test(i)?f(i,n):d(i+"["+(typeof n==="object"||c.isArray(n)?k:"")+"]",n)});else!b&&o!=null&&typeof o==="object"?c.each(o,function(k,n){d(i+"["+k+"]",n)}):f(i,o)}function f(i,o){o=c.isFunction(o)?o():o;e[e.length]=encodeURIComponent(i)+"="+encodeURIComponent(o)}var e=[];if(b===w)b=c.ajaxSettings.traditional;
if(c.isArray(a)||a.jquery)c.each(a,function(){f(this.name,this.value)});else for(var j in a)d(j,a[j]);return e.join("&").replace(yb,"+")}});var la={},Ab=/toggle|show|hide/,Bb=/^([+-]=)?([\d+-.]+)(.*)$/,W,va=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]];c.fn.extend({show:function(a,b){if(a||a===0)return this.animate(K("show",3),a,b);else{a=0;for(b=this.length;a<b;a++){var d=c.data(this[a],"olddisplay");
this[a].style.display=d||"";if(c.css(this[a],"display")==="none"){d=this[a].nodeName;var f;if(la[d])f=la[d];else{var e=c("<"+d+" />").appendTo("body");f=e.css("display");if(f==="none")f="block";e.remove();la[d]=f}c.data(this[a],"olddisplay",f)}}a=0;for(b=this.length;a<b;a++)this[a].style.display=c.data(this[a],"olddisplay")||"";return this}},hide:function(a,b){if(a||a===0)return this.animate(K("hide",3),a,b);else{a=0;for(b=this.length;a<b;a++){var d=c.data(this[a],"olddisplay");!d&&d!=="none"&&c.data(this[a],
"olddisplay",c.css(this[a],"display"))}a=0;for(b=this.length;a<b;a++)this[a].style.display="none";return this}},_toggle:c.fn.toggle,toggle:function(a,b){var d=typeof a==="boolean";if(c.isFunction(a)&&c.isFunction(b))this._toggle.apply(this,arguments);else a==null||d?this.each(function(){var f=d?a:c(this).is(":hidden");c(this)[f?"show":"hide"]()}):this.animate(K("toggle",3),a,b);return this},fadeTo:function(a,b,d){return this.filter(":hidden").css("opacity",0).show().end().animate({opacity:b},a,d)},
animate:function(a,b,d,f){var e=c.speed(b,d,f);if(c.isEmptyObject(a))return this.each(e.complete);return this[e.queue===false?"each":"queue"](function(){var j=c.extend({},e),i,o=this.nodeType===1&&c(this).is(":hidden"),k=this;for(i in a){var n=i.replace(ia,ja);if(i!==n){a[n]=a[i];delete a[i];i=n}if(a[i]==="hide"&&o||a[i]==="show"&&!o)return j.complete.call(this);if((i==="height"||i==="width")&&this.style){j.display=c.css(this,"display");j.overflow=this.style.overflow}if(c.isArray(a[i])){(j.specialEasing=
j.specialEasing||{})[i]=a[i][1];a[i]=a[i][0]}}if(j.overflow!=null)this.style.overflow="hidden";j.curAnim=c.extend({},a);c.each(a,function(r,u){var z=new c.fx(k,j,r);if(Ab.test(u))z[u==="toggle"?o?"show":"hide":u](a);else{var C=Bb.exec(u),B=z.cur(true)||0;if(C){u=parseFloat(C[2]);var E=C[3]||"px";if(E!=="px"){k.style[r]=(u||1)+E;B=(u||1)/z.cur(true)*B;k.style[r]=B+E}if(C[1])u=(C[1]==="-="?-1:1)*u+B;z.custom(B,u,E)}else z.custom(B,u,"")}});return true})},stop:function(a,b){var d=c.timers;a&&this.queue([]);
this.each(function(){for(var f=d.length-1;f>=0;f--)if(d[f].elem===this){b&&d[f](true);d.splice(f,1)}});b||this.dequeue();return this}});c.each({slideDown:K("show",1),slideUp:K("hide",1),slideToggle:K("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"}},function(a,b){c.fn[a]=function(d,f){return this.animate(b,d,f)}});c.extend({speed:function(a,b,d){var f=a&&typeof a==="object"?a:{complete:d||!d&&b||c.isFunction(a)&&a,duration:a,easing:d&&b||b&&!c.isFunction(b)&&b};f.duration=c.fx.off?0:typeof f.duration===
"number"?f.duration:c.fx.speeds[f.duration]||c.fx.speeds._default;f.old=f.complete;f.complete=function(){f.queue!==false&&c(this).dequeue();c.isFunction(f.old)&&f.old.call(this)};return f},easing:{linear:function(a,b,d,f){return d+f*a},swing:function(a,b,d,f){return(-Math.cos(a*Math.PI)/2+0.5)*f+d}},timers:[],fx:function(a,b,d){this.options=b;this.elem=a;this.prop=d;if(!b.orig)b.orig={}}});c.fx.prototype={update:function(){this.options.step&&this.options.step.call(this.elem,this.now,this);(c.fx.step[this.prop]||
c.fx.step._default)(this);if((this.prop==="height"||this.prop==="width")&&this.elem.style)this.elem.style.display="block"},cur:function(a){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null))return this.elem[this.prop];return(a=parseFloat(c.css(this.elem,this.prop,a)))&&a>-10000?a:parseFloat(c.curCSS(this.elem,this.prop))||0},custom:function(a,b,d){function f(j){return e.step(j)}this.startTime=J();this.start=a;this.end=b;this.unit=d||this.unit||"px";this.now=this.start;
this.pos=this.state=0;var e=this;f.elem=this.elem;if(f()&&c.timers.push(f)&&!W)W=setInterval(c.fx.tick,13)},show:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.show=true;this.custom(this.prop==="width"||this.prop==="height"?1:0,this.cur());c(this.elem).show()},hide:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.hide=true;this.custom(this.cur(),0)},step:function(a){var b=J(),d=true;if(a||b>=this.options.duration+this.startTime){this.now=
this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;for(var f in this.options.curAnim)if(this.options.curAnim[f]!==true)d=false;if(d){if(this.options.display!=null){this.elem.style.overflow=this.options.overflow;a=c.data(this.elem,"olddisplay");this.elem.style.display=a?a:this.options.display;if(c.css(this.elem,"display")==="none")this.elem.style.display="block"}this.options.hide&&c(this.elem).hide();if(this.options.hide||this.options.show)for(var e in this.options.curAnim)c.style(this.elem,
e,this.options.orig[e]);this.options.complete.call(this.elem)}return false}else{e=b-this.startTime;this.state=e/this.options.duration;a=this.options.easing||(c.easing.swing?"swing":"linear");this.pos=c.easing[this.options.specialEasing&&this.options.specialEasing[this.prop]||a](this.state,e,0,1,this.options.duration);this.now=this.start+(this.end-this.start)*this.pos;this.update()}return true}};c.extend(c.fx,{tick:function(){for(var a=c.timers,b=0;b<a.length;b++)a[b]()||a.splice(b--,1);a.length||
c.fx.stop()},stop:function(){clearInterval(W);W=null},speeds:{slow:600,fast:200,_default:400},step:{opacity:function(a){c.style(a.elem,"opacity",a.now)},_default:function(a){if(a.elem.style&&a.elem.style[a.prop]!=null)a.elem.style[a.prop]=(a.prop==="width"||a.prop==="height"?Math.max(0,a.now):a.now)+a.unit;else a.elem[a.prop]=a.now}}});if(c.expr&&c.expr.filters)c.expr.filters.animated=function(a){return c.grep(c.timers,function(b){return a===b.elem}).length};c.fn.offset="getBoundingClientRect"in s.documentElement?
function(a){var b=this[0];if(a)return this.each(function(e){c.offset.setOffset(this,a,e)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return c.offset.bodyOffset(b);var d=b.getBoundingClientRect(),f=b.ownerDocument;b=f.body;f=f.documentElement;return{top:d.top+(self.pageYOffset||c.support.boxModel&&f.scrollTop||b.scrollTop)-(f.clientTop||b.clientTop||0),left:d.left+(self.pageXOffset||c.support.boxModel&&f.scrollLeft||b.scrollLeft)-(f.clientLeft||b.clientLeft||0)}}:function(a){var b=
this[0];if(a)return this.each(function(r){c.offset.setOffset(this,a,r)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return c.offset.bodyOffset(b);c.offset.initialize();var d=b.offsetParent,f=b,e=b.ownerDocument,j,i=e.documentElement,o=e.body;f=(e=e.defaultView)?e.getComputedStyle(b,null):b.currentStyle;for(var k=b.offsetTop,n=b.offsetLeft;(b=b.parentNode)&&b!==o&&b!==i;){if(c.offset.supportsFixedPosition&&f.position==="fixed")break;j=e?e.getComputedStyle(b,null):b.currentStyle;
k-=b.scrollTop;n-=b.scrollLeft;if(b===d){k+=b.offsetTop;n+=b.offsetLeft;if(c.offset.doesNotAddBorder&&!(c.offset.doesAddBorderForTableAndCells&&/^t(able|d|h)$/i.test(b.nodeName))){k+=parseFloat(j.borderTopWidth)||0;n+=parseFloat(j.borderLeftWidth)||0}f=d;d=b.offsetParent}if(c.offset.subtractsBorderForOverflowNotVisible&&j.overflow!=="visible"){k+=parseFloat(j.borderTopWidth)||0;n+=parseFloat(j.borderLeftWidth)||0}f=j}if(f.position==="relative"||f.position==="static"){k+=o.offsetTop;n+=o.offsetLeft}if(c.offset.supportsFixedPosition&&
f.position==="fixed"){k+=Math.max(i.scrollTop,o.scrollTop);n+=Math.max(i.scrollLeft,o.scrollLeft)}return{top:k,left:n}};c.offset={initialize:function(){var a=s.body,b=s.createElement("div"),d,f,e,j=parseFloat(c.curCSS(a,"marginTop",true))||0;c.extend(b.style,{position:"absolute",top:0,left:0,margin:0,border:0,width:"1px",height:"1px",visibility:"hidden"});b.innerHTML="<div style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;'><div></div></div><table style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;' cellpadding='0' cellspacing='0'><tr><td></td></tr></table>";
a.insertBefore(b,a.firstChild);d=b.firstChild;f=d.firstChild;e=d.nextSibling.firstChild.firstChild;this.doesNotAddBorder=f.offsetTop!==5;this.doesAddBorderForTableAndCells=e.offsetTop===5;f.style.position="fixed";f.style.top="20px";this.supportsFixedPosition=f.offsetTop===20||f.offsetTop===15;f.style.position=f.style.top="";d.style.overflow="hidden";d.style.position="relative";this.subtractsBorderForOverflowNotVisible=f.offsetTop===-5;this.doesNotIncludeMarginInBodyOffset=a.offsetTop!==j;a.removeChild(b);
c.offset.initialize=c.noop},bodyOffset:function(a){var b=a.offsetTop,d=a.offsetLeft;c.offset.initialize();if(c.offset.doesNotIncludeMarginInBodyOffset){b+=parseFloat(c.curCSS(a,"marginTop",true))||0;d+=parseFloat(c.curCSS(a,"marginLeft",true))||0}return{top:b,left:d}},setOffset:function(a,b,d){if(/static/.test(c.curCSS(a,"position")))a.style.position="relative";var f=c(a),e=f.offset(),j=parseInt(c.curCSS(a,"top",true),10)||0,i=parseInt(c.curCSS(a,"left",true),10)||0;if(c.isFunction(b))b=b.call(a,
d,e);d={top:b.top-e.top+j,left:b.left-e.left+i};"using"in b?b.using.call(a,d):f.css(d)}};c.fn.extend({position:function(){if(!this[0])return null;var a=this[0],b=this.offsetParent(),d=this.offset(),f=/^body|html$/i.test(b[0].nodeName)?{top:0,left:0}:b.offset();d.top-=parseFloat(c.curCSS(a,"marginTop",true))||0;d.left-=parseFloat(c.curCSS(a,"marginLeft",true))||0;f.top+=parseFloat(c.curCSS(b[0],"borderTopWidth",true))||0;f.left+=parseFloat(c.curCSS(b[0],"borderLeftWidth",true))||0;return{top:d.top-
f.top,left:d.left-f.left}},offsetParent:function(){return this.map(function(){for(var a=this.offsetParent||s.body;a&&!/^body|html$/i.test(a.nodeName)&&c.css(a,"position")==="static";)a=a.offsetParent;return a})}});c.each(["Left","Top"],function(a,b){var d="scroll"+b;c.fn[d]=function(f){var e=this[0],j;if(!e)return null;if(f!==w)return this.each(function(){if(j=wa(this))j.scrollTo(!a?f:c(j).scrollLeft(),a?f:c(j).scrollTop());else this[d]=f});else return(j=wa(e))?"pageXOffset"in j?j[a?"pageYOffset":
"pageXOffset"]:c.support.boxModel&&j.document.documentElement[d]||j.document.body[d]:e[d]}});c.each(["Height","Width"],function(a,b){var d=b.toLowerCase();c.fn["inner"+b]=function(){return this[0]?c.css(this[0],d,false,"padding"):null};c.fn["outer"+b]=function(f){return this[0]?c.css(this[0],d,false,f?"margin":"border"):null};c.fn[d]=function(f){var e=this[0];if(!e)return f==null?null:this;if(c.isFunction(f))return this.each(function(j){var i=c(this);i[d](f.call(this,j,i[d]()))});return"scrollTo"in
e&&e.document?e.document.compatMode==="CSS1Compat"&&e.document.documentElement["client"+b]||e.document.body["client"+b]:e.nodeType===9?Math.max(e.documentElement["client"+b],e.body["scroll"+b],e.documentElement["scroll"+b],e.body["offset"+b],e.documentElement["offset"+b]):f===w?c.css(e,d):this.css(d,typeof f==="string"?f:f+"px")}});A.jQuery=A.$=c})(window); | PypiClean |
/flare-capa-6.1.0.tar.gz/flare-capa-6.1.0/scripts/capa2yara.py | import re
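# capa2yara.py: convert capa rules (https://github.com/mandiant/capa-rules) into YARA rules where possible.
# Rules using unsupported features are collected in unsupported_capa_rules.yml / unsupported_capa_rules.txt instead.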
import sys
import string
import logging
import argparse
import datetime
import itertools
from pathlib import Path
import capa.main
import capa.rules
import capa.engine
import capa.features
import capa.features.insn
logger = logging.getLogger("capa2yara")
today = str(datetime.date.today())
# create unique variable names for each rule in case somebody wants to move/copy stuff around later
var_names = ["".join(letters) for letters in itertools.product(string.ascii_lowercase, repeat=3)]
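# e.g. "aaa", "aab", "aac", ... (26**3 names); pop(0) hands out the next unused one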
# these have to be the internal names used by capa.py, which are sometimes different from the ones written in the rules, e.g. "2 or more" is "Some" and count is "Range"
unsupported = ["characteristic", "mnemonic", "offset", "subscope", "Range"]
# further ideas to shorten this list:
# - 2 or more strings, e.g.
# -- https://github.com/mandiant/capa-rules/blob/master/collection/file-managers/gather-direct-ftp-information.yml
# -- https://github.com/mandiant/capa-rules/blob/master/collection/browser/gather-firefox-profile-information.yml
# - count(string(...)) (only 1 rule: /executable/subfile/pe/contain-an-embedded-pe-file.yml)
# - count(match(...)) could be done by creating the referenced rule a 2nd time with the condition that it hits x times
#   (only 1 rule: ./anti-analysis/anti-disasm/contain-anti-disasm-techniques.yml)
# - it would be technically possible to get the "basic blocks" working, but those rules mostly contain other unsupported statements => not worth the effort.
# collect all converted rules to be able to check if we have needed sub rules for match:
converted_rules = []
default_tags = "CAPA "
# minimum number of rounds to be able to convert rules which depend on referenced rules several levels deep
min_rounds = 5
unsupported_capa_rules = Path("unsupported_capa_rules.yml").open("wb")
unsupported_capa_rules_names = Path("unsupported_capa_rules.txt").open("wb")
unsupported_capa_rules_list = []
condition_header = """
capa_pe_file and
"""
condition_rule = """
private rule capa_pe_file : CAPA {
meta:
description = "match in PE files. used by all further CAPA rules"
author = "Arnim Rupp"
condition:
uint16be(0) == 0x4d5a
or uint16be(0) == 0x558b
or uint16be(0) == 0x5649
}
"""
def check_feature(statement, rulename):
if statement in unsupported:
logger.info("unsupported: %s in rule: %s", statement, rulename)
return True
else:
return False
def get_rule_url(path):
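    """Build the github URL of a capa rule from its local path.

    Illustrative example (hypothetical path):
        get_rule_url("../capa-rules/nursery/foo.yml")
        -> "https://github.com/mandiant/capa-rules/blob/master/nursery/foo.yml"
    """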
path = re.sub(r"\.\.\/", "", path)
path = re.sub(r"capa-rules\/", "", path)
return "https://github.com/mandiant/capa-rules/blob/master/" + path
def convert_capa_number_to_yara_bytes(number):
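    """Turn a capa hex number into a little-endian YARA byte pattern (hex input only).

    Illustrative example:
        convert_capa_number_to_yara_bytes("0x4D5A") -> "5A 4D "
    """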
if not number.startswith("0x"):
print("TODO: fix decimal")
sys.exit()
number = re.sub(r"^0[xX]", "", number)
logger.info("number ok: %r", number)
# include spaces every 2 hex
bytesv = re.sub(r"(..)", r"\1 ", number)
# reverse order
bytesl = bytesv.split(" ")
bytesl.reverse()
bytesv = " ".join(bytesl)
    # drop the leading space left over from the reversal and keep a trailing one
bytesv = bytesv[1:] + " "
return bytesv
def convert_rule_name(rule_name):
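    """Turn a capa rule name into a valid YARA identifier by prefixing "capa_" and replacing forbidden characters.

    Illustrative example (hypothetical rule name):
        convert_rule_name("create TCP socket") -> "capa_create_TCP_socket"
    """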
# yara rule names: "Identifiers must follow the same lexical conventions of the C programming language, they can contain any alphanumeric character and the underscore character
# but the first character cannot be a digit. Rule identifiers are case sensitive and cannot exceed 128 characters." so we replace any non-alphanum with _
rule_name = re.sub(r"\W", "_", rule_name)
rule_name = "capa_" + rule_name
return rule_name
def convert_description(statement):
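    """Return the capa description of a statement as a YARA inline comment, or "" if there is none.

    Illustrative example: a feature described as "PEB access" yields ' // PEB access'.
    """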
try:
desc = statement.description
if desc:
yara_desc = " // " + desc
logger.info("using desc: %r", yara_desc)
return yara_desc
except Exception:
# no description
pass
return ""
def convert_rule(rule, rulename, cround, depth):
depth += 1
logger.info("recursion depth: %d", depth)
global var_names
def do_statement(s_type, kid):
yara_strings = ""
yara_condition = ""
if check_feature(s_type, rulename):
return "BREAK", s_type
elif s_type == "string":
string = kid.value
logger.info("doing string: %r", string)
string = string.replace("\\", "\\\\")
string = string.replace("\n", "\\n")
string = string.replace("\t", "\\t")
var_name = "str_" + var_names.pop(0)
yara_strings += "\t$" + var_name + ' = "' + string + '" ascii wide' + convert_description(kid) + "\n"
yara_condition += "\t$" + var_name + " "
elif s_type == "api" or s_type == "import":
            # research needed to decide if it's possible in YARA to make a difference between api & import
# https://github.com/mandiant/capa-rules/blob/master/doc/format.md#api
api = kid.value
logger.info("doing api: %r", api)
# e.g. kernel32.CreateNamedPipe => look for kernel32.dll and CreateNamedPipe
#
# note: the handling of .NET API calls could be improved here.
# once we have a motivation and some examples, lets do that.
if "::" in api:
mod, api = api.split("::")
var_name = "api_" + var_names.pop(0)
yara_strings += "\t$" + var_name + " = /\\b" + api + "(A|W)?\\b/ ascii wide\n"
yara_condition += "\t$" + var_name + " "
elif api.count(".") == 1:
dll, api = api.split(".")
# usage of regex is needed and /i because string search for "CreateMutex" in imports() doesn't look for e.g. CreateMutexA
yara_condition += "\tpe.imports(/" + dll + "/i, /" + api + "/) "
else:
# e.g. - api: 'CallNextHookEx'
# (from user32.dll)
# even looking for empty string in dll_regex doesn't work for some files (list below) with pe.imports so do just a string search
# yara_condition += '\tpe.imports(/.{0,30}/i, /' + api + '/) '
# 5fbbfeed28b258c42e0cfeb16718b31c, 2D3EDC218A90F03089CC01715A9F047F, 7EFF498DE13CC734262F87E6B3EF38AB,
# C91887D861D9BD4A5872249B641BC9F9, a70052c45e907820187c7e6bcdc7ecca, 0596C4EA5AA8DEF47F22C85D75AACA95
var_name = "api_" + var_names.pop(0)
# limit regex with word boundary \b but also search for appended A and W
# alternatively: use something like /(\\x00|\\x01|\\x02|\\x03|\\x04)' + api + '(A|W)?\\x00/ ???
yara_strings += "\t$" + var_name + " = /\\b" + api + "(A|W)?\\b/ ascii wide\n"
yara_condition += "\t$" + var_name + " "
elif s_type == "export":
export = kid.value
logger.info("doing export: %r", export)
yara_condition += '\tpe.exports("' + export + '") '
elif s_type == "section":
# https://github.com/mandiant/capa-rules/blob/master/doc/format.md#section
section = kid.value
logger.info("doing section: %r", section)
# e.g. - section: .rsrc
var_name_sec = var_names.pop(0)
# yeah, it would be better to make one loop out of multiple sections but we're in POC-land (and I guess it's not much of a performance hit, loop over short array?)
yara_condition += (
"\tfor any " + var_name_sec + " in pe.sections : ( " + var_name_sec + '.name == "' + section + '" ) '
)
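            # e.g. '- section: .rsrc' ends up as: for any <generated var> in pe.sections : ( <generated var>.name == ".rsrc" )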
elif s_type == "match":
# https://github.com/mandiant/capa-rules/blob/master/doc/format.md#matching-prior-rule-matches-and-namespaces
match = kid.value
logger.info("doing match: %r", match)
# e.g. - match: create process
# - match: host-interaction/file-system/write
match_rule_name = convert_rule_name(match)
if match.startswith(rulename + "/"):
logger.info("Depending on myself = basic block: %s", match)
return "BREAK", "Depending on myself = basic block"
if match_rule_name in converted_rules:
yara_condition += "\t" + match_rule_name + "\n"
else:
# don't complain in the early rounds as there should be 3+ rounds (if all rules are converted)
if cround > min_rounds - 2:
logger.info("needed sub-rule not converted (yet, maybe in next round): %r", match)
return "BREAK", "needed sub-rule not converted"
else:
return "BREAK", "NOLOG"
elif s_type == "bytes":
bytesv = kid.get_value_str()
logger.info("doing bytes: %r", bytesv)
var_name = var_names.pop(0)
yara_strings += "\t$" + var_name + " = { " + bytesv + " }" + convert_description(kid) + "\n"
yara_condition += "\t$" + var_name + " "
elif s_type == "number":
number = kid.get_value_str()
logger.info("doing number: %r", number)
if len(number) < 10:
logger.info("too short for byte search (until I figure out how to do it properly): %r", number)
return "BREAK", "Number too short"
            # there's just one rule which contains 0xFFFFFFFF but yara gives a warning if it's used
if number == "0xFFFFFFFF":
return "BREAK", "slow byte pattern for YARA search"
logger.info("number ok: %r", number)
number = convert_capa_number_to_yara_bytes(number)
logger.info("number ok: %r", number)
var_name = "num_" + var_names.pop(0)
yara_strings += "\t$" + var_name + " = { " + number + "}" + convert_description(kid) + "\n"
yara_condition += "$" + var_name + " "
elif s_type == "regex":
regex = kid.get_value_str()
logger.info("doing regex: %r", regex)
            # change capa's /xxx/i to yara's /xxx/ nocase; 'count' is used below to decide whether to append 'nocase'
regex, count = re.subn(r"/i$", "/", regex)
# remove / in the beginning and end
regex = regex[1:-1]
            # the .* in capa's regexes look like they should match at most ~100 chars, so cap them at .{,1000} to speed up scanning and avoid yara performance warnings
regex = regex.replace(".*", ".{,1000}")
            # strange: capa accepts regexes with unescaped /
            # e.g. - string: /com/exe4j/runtime/exe4jcontroller/i in capa-rules/compiler/exe4j/compiled-with-exe4j.yml => needs escaping for yara
            # (one would assume that get_value_str() gives the raw string)
regex = re.sub(r"(?<!\\)/", r"\/", regex)
# capa uses python regex which accepts /reg(|.exe)/ but yaras regex engine doesn't not => fix it
# /reg(|.exe)/ => /reg(.exe)?/
regex = re.sub(r"\(\|([^\)]+)\)", r"(\1)?", regex)
# change beginning of line to null byte, e.g. /^open => /\x00open
# (not word boundary because we're not looking for the beginning of a word in a text but usually a function name if there's ^ in a capa rule)
regex = re.sub(r"^\^", r"\\x00", regex)
# regex = re.sub(r"^\^", r"\\b", regex)
regex = "/" + regex + "/"
if count:
regex += " nocase"
# strange: if statement.name == "string", the string is as it is, if statement.name == "regex", the string has // around it, e.g. /regex/
var_name = "re_" + var_names.pop(0)
yara_strings += "\t" + "$" + var_name + " = " + regex + " ascii wide " + convert_description(kid) + "\n"
yara_condition += "\t" + "$" + var_name + " "
elif s_type == "Not" or s_type == "And" or s_type == "Or":
pass
else:
logger.info("something unhandled: %r", s_type)
sys.exit()
return yara_strings, yara_condition
# end: def do_statement
yara_strings_list = []
yara_condition_list = []
rule_comment = ""
incomplete = 0
statement = rule.name
logger.info("doing statement: %s", statement)
if check_feature(statement, rulename):
return "BREAK", statement, rule_comment, incomplete
if statement == "And" or statement == "Or":
desc = convert_description(rule)
if desc:
logger.info("description of bool statement: %r", desc)
yara_strings_list.append("\t" * depth + desc + "\n")
elif statement == "Not":
logger.info("one of those seldom nots: %s", rule.name)
# check for nested statements
try:
kids = rule.children
num_kids = len(kids)
logger.info("kids: %s", kids)
except Exception:
logger.info("no kids in rule: %s", rule.name)
try:
# maybe it's "Not" = only one child:
kid = rule.child
kids = [kid]
num_kids = 1
logger.info("kid: %s", kids)
except Exception:
logger.info("no kid in rule: %s", rule.name)
# just a single statement without 'and' or 'or' before it in this rule
if "kids" not in locals().keys():
logger.info("no kids: %s", rule.name)
yara_strings_sub, yara_condition_sub = do_statement(statement, rule)
if yara_strings_sub == "BREAK":
logger.info("Unknown feature at1: %s", rule.name)
return "BREAK", yara_condition_sub, rule_comment, incomplete
yara_strings_list.append(yara_strings_sub)
yara_condition_list.append(yara_condition_sub)
else:
x = 0
logger.info("doing kids: %r - len: %d", kids, num_kids)
for kid in kids:
s_type = kid.name
logger.info("doing type: %s kidnum: %d", s_type, x)
if s_type == "Some":
cmin = kid.count
logger.info("Some type with minimum: %d", cmin)
if not cmin:
logger.info("this is optional: which means, we can just ignore it")
x += 1
continue
elif statement == "Or":
logger.info("we're inside an OR, we can just ignore it")
x += 1
continue
else:
# this is "x or more". could be coded for strings TODO
return "BREAK", "Some aka x or more (TODO)", rule_comment, incomplete
if s_type == "And" or s_type == "Or" or s_type == "Not" and kid.name != "Some":
logger.info("doing bool with recursion: %r", kid)
logger.info("kid coming: %r", kid.name)
# logger.info("grandchildren: " + repr(kid.children))
#
# here we go into RECURSION
#
yara_strings_sub, yara_condition_sub, rule_comment_sub, incomplete_sub = convert_rule(
kid, rulename, cround, depth
)
logger.info("coming out of this recursion, depth: %d s_type: %s", depth, s_type)
if yara_strings_sub == "BREAK":
logger.info(
"Unknown feature at2: %s - s_type: %s - depth: %d",
rule.name,
s_type,
depth,
)
                    # luckily this is only a killer if we're inside an 'And'; inside an 'Or' we're just missing some coverage
                    # only accept incomplete rules in later rounds because the reason might be a reference to another rule not converted yet because of missing dependencies
logger.info("rule.name, depth, cround: %s, %d, %d", rule.name, depth, cround)
if rule.name == "Or" and depth == 1 and cround > min_rounds - 1:
logger.info(
"Unknown feature, just ignore this branch and keep the rest bec we're in Or (1): %s - depth: %s",
s_type,
depth,
)
# remove last 'or'
# yara_condition = re.sub(r'\sor $', ' ', yara_condition)
rule_comment += "This rule is incomplete because a branch inside an Or-statement had an unsupported feature and was skipped "
rule_comment += "=> coverage is reduced compared to the original capa rule. "
x += 1
incomplete = 1
continue
else:
return "BREAK", yara_condition_sub, rule_comment, incomplete
rule_comment += rule_comment_sub
yara_strings_list.append(yara_strings_sub)
yara_condition_list.append(yara_condition_sub)
incomplete = incomplete or incomplete_sub
yara_strings_sub, yara_condition_sub = do_statement(s_type, kid)
if yara_strings_sub == "BREAK":
logger.info("Unknown feature at3: %s", rule.name)
logger.info("rule.name, depth, cround: %s, %d, %d", rule.name, depth, cround)
if rule.name == "Or" and depth == 1 and cround > min_rounds - 1:
logger.info(
"Unknown feature, just ignore this branch and keep the rest bec we're in Or (2): %s - depth: %d",
s_type,
depth,
)
rule_comment += "This rule is incomplete because a branch inside an Or-statement had an unsupported feature and was skipped"
rule_comment += "=> coverage is reduced compared to the original capa rule. "
x += 1
incomplete = 1
continue
else:
return "BREAK", yara_condition_sub, rule_comment, incomplete
# don't append And or Or if we got no condition back from this kid from e.g. match in myself or unsupported feature inside 'Or'
if not yara_condition_sub:
continue
yara_strings_list.append(yara_strings_sub)
yara_condition_list.append(yara_condition_sub)
x += 1
# this might happen, if all conditions are inside "or" and none of them was supported
if not yara_condition_list:
return (
"BREAK",
'Multiple statements inside "- or:" were all unsupported, the last one was "' + s_type + '"',
rule_comment,
incomplete,
)
if statement == "And" or statement == "Or":
if yara_strings_list:
yara_strings = "".join(yara_strings_list)
else:
yara_strings = ""
yara_condition = " (\n\t\t" + ("\n\t\t" + statement.lower() + " ").join(yara_condition_list) + " \n\t) "
elif statement == "Some":
cmin = rule.count
logger.info("Some type with minimum at2: %d", cmin)
if not cmin:
logger.info("this is optional: which means, we can just ignore it")
else:
# this is "x or more". could be coded for strings TODO
return "BREAK", "Some aka x or more (TODO)", rule_comment, incomplete
elif statement == "Not":
logger.info("Not")
yara_strings = "".join(yara_strings_list)
yara_condition = "not " + "".join(yara_condition_list) + " "
else:
if len(yara_condition_list) != 1:
logger.info("something wrong around here %r - %s", yara_condition_list, statement)
sys.exit()
# strings might be empty with only conditions
if yara_strings_list:
yara_strings = "\n\t" + yara_strings_list[0]
yara_condition = "\n\t" + yara_condition_list[0]
logger.info(
"# end of convert_rule() #strings: %d #conditions: %d", len(yara_strings_list), len(yara_condition_list)
)
logger.info("strings: %s conditions: %s", yara_strings, yara_condition)
return yara_strings, yara_condition, rule_comment, incomplete
def output_yar(yara):
print(yara + "\n")
def output_unsupported_capa_rules(yaml, capa_rulename, url, reason):
if reason != "NOLOG":
if capa_rulename not in unsupported_capa_rules_list:
logger.info("unsupported: %s - reason: %s, - url: %s", capa_rulename, reason, url)
unsupported_capa_rules_list.append(capa_rulename)
unsupported_capa_rules.write(yaml.encode("utf-8") + b"\n")
unsupported_capa_rules.write(
(
"Reason: "
+ reason
+ " (there might be multiple unsupported things in this rule, this is the 1st one encountered)"
).encode("utf-8")
+ b"\n"
)
unsupported_capa_rules.write(url.encode("utf-8") + b"\n----------------------------------------------\n")
unsupported_capa_rules_names.write(capa_rulename.encode("utf-8") + b":")
unsupported_capa_rules_names.write(reason.encode("utf-8") + b":")
unsupported_capa_rules_names.write(url.encode("utf-8") + b"\n")
def convert_rules(rules, namespaces, cround, make_priv):
count_incomplete = 0
for rule in rules.rules.values():
rule_name = convert_rule_name(rule.name)
if rule.is_subscope_rule():
logger.info("skipping sub scope rule capa: %s", rule.name)
continue
if rule_name in converted_rules:
logger.info("skipping already converted rule capa: %s - yara rule: %s", rule.name, rule_name)
continue
logger.info("-------------------------- DOING RULE CAPA: %s - yara rule: ", rule.name, rule_name)
if "capa/path" in rule.meta:
url = get_rule_url(rule.meta["capa/path"])
else:
url = "no url"
logger.info("URL: %s", url)
logger.info("statements: %r", rule.statement)
# don't really know what that passed empty string is good for :)
dependencies = rule.get_dependencies(namespaces)
if len(dependencies):
logger.info("Dependencies at4: %s - dep: %s", rule.name, dependencies)
for dep in dependencies:
logger.info("Dependencies at44: %s", dep)
if not dep.startswith(rule.name + "/"):
logger.info("Depending on another rule: %s", dep)
continue
yara_strings, yara_condition, rule_comment, incomplete = convert_rule(rule.statement, rule.name, cround, 0)
if yara_strings == "BREAK":
# only give up if in final extra round #9000
if cround == 9000:
output_unsupported_capa_rules(rule.to_yaml(), rule.name, url, yara_condition)
logger.info("Unknown feature at5: %s", rule.name)
else:
yara_meta = ""
metas = rule.meta
rule_tags = ""
for meta in metas:
meta_name = meta
# e.g. 'examples:' can be a list
seen_hashes = []
if isinstance(metas[meta], list):
if meta_name == "examples":
meta_name = "hash"
if meta_name == "att&ck":
meta_name = "attack"
for attack in list(metas[meta]):
logger.info("attack: %s", attack)
# cut out tag in square brackets, e.g. Defense Evasion::Obfuscated Files or Information [T1027] => T1027
r = re.search(r"\[(T[^\]]*)", attack)
if r:
tag = r.group(1)
logger.info("attack tag: %s", tag)
tag = re.sub(r"\W", "_", tag)
rule_tags += tag + " "
# also add a line "attack = ..." to yaras 'meta:' to keep the long description:
yara_meta += '\tattack = "' + attack + '"\n'
elif meta_name == "mbc":
for mbc in list(metas[meta]):
logger.info("mbc: %s", mbc)
# cut out tag in square brackets, e.g. Cryptography::Encrypt Data::RC6 [C0027.010] => C0027.010
r = re.search(r"\[(.[^\]]*)", mbc)
if r:
tag = r.group(1)
logger.info("mbc tag: %s", tag)
tag = re.sub(r"\W", "_", tag)
rule_tags += tag + " "
# also add a line "mbc = ..." to yaras 'meta:' to keep the long description:
yara_meta += '\tmbc = "' + mbc + '"\n'
for value in metas[meta]:
if meta_name == "hash":
value = re.sub(r"^([0-9a-f]{20,64}):0x[0-9a-f]{1,10}$", r"\1", value, flags=re.IGNORECASE)
# examples in capa can contain the same hash several times with different offset, so check if it's already there:
# (keeping the offset might be interesting for some but breaks yara-ci for checking of the final rules
if value not in seen_hashes:
yara_meta += "\t" + meta_name + ' = "' + value + '"\n'
seen_hashes.append(value)
else:
# no list:
if meta == "capa/path":
url = get_rule_url(metas[meta])
meta_name = "reference"
meta_value = "This YARA rule converted from capa rule: " + url
else:
meta_value = metas[meta]
if meta_name == "name":
meta_name = "description"
meta_value += " (converted from capa rule)"
elif meta_name == "lib":
meta_value = str(meta_value)
elif meta_name == "capa/nursery":
meta_name = "capa_nursery"
meta_value = str(meta_value)
# for the rest of the maec/malware-category names:
meta_name = re.sub(r"\W", "_", meta_name)
if meta_name and meta_value:
yara_meta += "\t" + meta_name + ' = "' + meta_value + '"\n'
if rule_comment:
yara_meta += '\tcomment = "' + rule_comment + '"\n'
yara_meta += '\tdate = "' + today + '"\n'
yara_meta += '\tminimum_yara = "3.8"\n'
yara_meta += '\tlicense = "Apache-2.0 License"\n'
# check if there's some beef in condition:
tmp_yc = re.sub(r"(and|or|not)", "", yara_condition)
if re.search(r"\w", tmp_yc):
yara = ""
if make_priv:
yara = "private "
# put yara rule tags here:
rule_tags = default_tags + rule_tags
yara += "rule " + rule_name + " : " + rule_tags + " { \n meta: \n " + yara_meta + "\n"
if "$" in yara_strings:
yara += " strings: \n " + yara_strings + " \n"
yara += " condition:" + condition_header + yara_condition + "\n}"
output_yar(yara)
converted_rules.append(rule_name)
count_incomplete += incomplete
else:
output_unsupported_capa_rules(rule.to_yaml(), rule.name, url, yara_condition)
pass
return count_incomplete
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(description="Capa to YARA rule converter")
parser.add_argument("rules", type=str, help="Path to rules")
parser.add_argument("--private", "-p", action="store_true", help="Create private rules", default=False)
capa.main.install_common_args(parser, wanted={"tag"})
args = parser.parse_args(args=argv)
make_priv = args.private
if args.verbose:
level = logging.DEBUG
elif args.quiet:
level = logging.ERROR
else:
level = logging.INFO
logging.basicConfig(level=level)
logging.getLogger("capa2yara").setLevel(level)
try:
rules = capa.main.get_rules([Path(args.rules)])
namespaces = capa.rules.index_rules_by_namespace(list(rules.rules.values()))
logger.info("successfully loaded %d rules (including subscope rules which will be ignored)", len(rules))
if args.tag:
rules = rules.filter_rules_by_meta(args.tag)
logger.debug("selected %d rules", len(rules))
for i, r in enumerate(rules.rules, 1):
logger.debug(" %d. %s", i, r)
except (IOError, capa.rules.InvalidRule, capa.rules.InvalidRuleSet) as e:
logger.error("%s", str(e))
return -1
output_yar(
"// Rules from Mandiant's https://github.com/mandiant/capa-rules converted to YARA using https://github.com/mandiant/capa/blob/master/scripts/capa2yara.py by Arnim Rupp"
)
output_yar(
"// Beware: These are less rules than capa (because not all fit into YARA, stats at EOF) and is less precise e.g. capas function scopes are applied to the whole file"
)
output_yar(
'// Beware: Some rules are incomplete because an optional branch was not supported by YARA. These rules are marked in a comment in meta: (search for "incomplete")'
)
output_yar("// Rule authors and license stay the same")
output_yar(
'// att&ck and MBC tags are put into YARA rule tags. All rules are tagged with "CAPA" for easy filtering'
)
output_yar("// The date = in meta: is the date of converting (there is no date in capa rules)")
output_yar("// Minimum YARA version is 3.8.0 plus PE module")
output_yar('\nimport "pe"')
output_yar(condition_rule)
# do several rounds of converting rules because some rules for match: might not be converted in the 1st run
num_rules = 9999999
cround = 0
count_incomplete = 0
while num_rules != len(converted_rules) or cround < min_rounds:
cround += 1
logger.info("doing convert_rules(), round: %d", cround)
num_rules = len(converted_rules)
count_incomplete += convert_rules(rules, namespaces, cround, make_priv)
# one last round to collect all unconverted rules
count_incomplete += convert_rules(rules, namespaces, 9000, make_priv)
stats = "\n// converted rules : " + str(len(converted_rules))
stats += "\n// among those are incomplete : " + str(count_incomplete)
stats += "\n// unconverted rules : " + str(len(unsupported_capa_rules_list)) + "\n"
logger.info("%s", stats)
output_yar(stats)
return 0
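# Illustrative usage (an assumption for demonstration, not part of the script):
# convert a local checkout of the capa-rules repository and redirect the
# generated YARA rules to a file, e.g.
#
#   python capa2yara.py ./capa-rules > capa_rules.yar
#   python capa2yara.py --private ./capa-rules > capa_rules_private.yar
#
# Unsupported rules are recorded in the unsupported_capa_rules* files opened at
# the top of the script, and the conversion statistics are appended as YARA
# comments at the end of stdout.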
if __name__ == "__main__":
sys.exit(main()) | PypiClean |
/echo-server-0.2.0.tar.gz/echo-server-0.2.0/echo_server.py | import os
import time
import click
import threading
try:
from socketserver import BaseRequestHandler
from socketserver import ThreadingMixIn
from socketserver import TCPServer
from socketserver import UDPServer
except ImportError:
from SocketServer import BaseRequestHandler
from SocketServer import ThreadingMixIn
from SocketServer import TCPServer
from SocketServer import UDPServer
BUFFER_SIZE = 1024
class ThreadedTCPRequestHandler(BaseRequestHandler):
def handle(self):
print("CLIENT CONNECTED: ", self.request)
while True:
data = self.request.recv(BUFFER_SIZE)
print(self.request, data)
if not data:
break
else:
self.request.send(data)
print("CLIENT SHUTDOWN: ", self.request)
class ThreadedTCPServer(ThreadingMixIn, TCPServer):
pass
@click.command()
@click.option("-i", "--ignore-failed-ports", is_flag=True)
@click.option("-b", "--binding", default="0.0.0.0", help="Default to 0.0.0.0.")
@click.argument("port", nargs=-1)
def run(ignore_failed_ports, binding, port):
"""Start echo server on given ports. Press CTRL+C to stop.
The default listening port is 3682. You can listen on many ports at the same time.
Example:
echo-server 8001 8002 8003
"""
ports = port
if not ports:
ports = [3682]
servers = []
server_threads = []
for port in ports:
try:
server = ThreadedTCPServer((binding, int(port)), ThreadedTCPRequestHandler)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
servers.append(server)
server_threads.append(server_thread)
print("Echo server running on: {0}:{1}".format(binding, port))
except Exception:
print("Listen on: {0}:{1} failed.".format(binding, port))
if not ignore_failed_ports:
break
if server_threads:
while True:
try:
time.sleep(60)
except KeyboardInterrupt:
print("Interrupted, shutdown servers...")
for server in servers:
server.shutdown()
print("done.")
break
else:
print("None port listened.")
os.sys.exit(1)
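# Illustrative helper (added for demonstration only, not used by the CLI): a
# minimal client showing how to talk to the echo server, assuming it listens on
# the default port 3682 on localhost.
def _example_echo_client(host="127.0.0.1", port=3682, payload=b"hello"):
    import socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    try:
        sock.sendall(payload)
        # the handler above echoes back exactly the bytes it received
        return sock.recv(BUFFER_SIZE)
    finally:
        sock.close()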
if __name__ == "__main__":
run() | PypiClean |
/mitosheet3-0.3.281.tar.gz/mitosheet3-0.3.281/mitosheet/public/v3/sheet_functions/bool_functions.py |
# Copyright (c) Saga Inc.
# Distributed under the terms of the GPL License.
"""
Contains all functions that are useful for control flow. For now, this
is just IF statements.
All functions describe their behavior with a function documentation object
in the function docstring. Function documentation objects are described
in more detail in docs/README.md.
NOTE: This file is in alphabetical order!
"""
from typing import Optional
import pandas as pd
from mitosheet.public.v3.errors import handle_sheet_function_errors
from mitosheet.public.v3.sheet_functions.utils import get_final_result_series_or_primitive, get_series_from_primitive_or_series
from mitosheet.public.v3.types.decorators import cast_values_in_all_args_to_type, cast_values_in_arg_to_type
from mitosheet.public.v3.types.sheet_function_types import AnyPrimitiveOrSeriesInputType, BoolFunctionReturnType, BoolInputType, BoolRestrictedInputType
@cast_values_in_all_args_to_type('bool')
@handle_sheet_function_errors
def AND(*argv: Optional[BoolInputType]) -> BoolFunctionReturnType:
"""
{
"function": "AND",
"description": "Returns True if all of the provided arguments are True, and False if any of the provided arguments are False.",
"search_terms": ["and", "&", "if", "conditional"],
"examples": [
"AND(True, False)",
"AND(Nums > 100, Nums < 200)",
"AND(Pay > 10, Pay < 20, Status == 'active')"
],
"syntax": "AND(boolean_condition1, [boolean_condition2, ...])",
"syntax_elements": [{
"element": "boolean_condition1",
"description": "An expression or series that returns True or False values. See IF documentation for a list of conditons."
},
{
"element": "boolean_condition2 ... [OPTIONAL]",
"description": "An expression or series that returns True or False values. See IF documentation for a list of conditons."
}
]
}
"""
# If we don't find any arguments, we default to True. Excel in practice
# defaults to a value error if there are no args
return get_final_result_series_or_primitive(
True,
argv,
lambda df: df.all().all(),
lambda previous_value, new_value: previous_value and new_value,
lambda previous_series, new_series: previous_series & new_series
)
@cast_values_in_arg_to_type('series', 'bool')
@handle_sheet_function_errors
def BOOL(series: BoolRestrictedInputType) -> BoolFunctionReturnType:
"""
{
"function": "BOOL",
"description": "Converts the passed arguments to boolean values, either True or False. For numberic values, 0 converts to False while all other values convert to True.",
"search_terms": ["bool", "boolean", "true", "false", "dtype", "convert"],
"examples": [
"BOOL(Amount_Payed)",
"AND(BOOL(Amount_Payed), Is_Paying)"
],
"syntax": "BOOL(series)",
"syntax_elements": [{
"element": "series",
"description": "An series to convert to boolean values, either True or False."
}
]
}
"""
if isinstance(series, bool):
return series
return series.fillna(False)
@cast_values_in_arg_to_type('condition', 'bool')
@handle_sheet_function_errors
def IF(condition: pd.Series, true_series: AnyPrimitiveOrSeriesInputType, false_series: AnyPrimitiveOrSeriesInputType) -> pd.Series:
"""
{
"function": "IF",
"description": "Returns one value if the condition is True. Returns the other value if the conditon is False.",
"search_terms": ["if", "conditional", "and", "or"],
"examples": [
"IF(Status == 'success', 1, 0)",
"IF(Nums > 100, 100, Nums)",
"IF(AND(Grade >= .6, Status == 'active'), 'pass', 'fail')"
],
"syntax": "IF(boolean_condition, value_if_true, value_if_false)",
"syntax_elements": [{
"element": "boolean_condition",
"description": "An expression or series that returns True or False values. Valid conditions for comparison include ==, !=, >, <, >=, <=."
},
{
"element": "value_if_true",
"description": "The value the function returns if condition is True."
},
{
"element": "value_if_false",
"description": "The value the function returns if condition is False."
}
]
}
"""
true_series = get_series_from_primitive_or_series(true_series, condition.index)
false_series = get_series_from_primitive_or_series(false_series, condition.index)
return pd.Series(
data=[true_series.loc[i] if c else false_series.loc[i] for i, c in condition.items()],
index=condition.index
)
@cast_values_in_all_args_to_type('bool')
@handle_sheet_function_errors
def OR(*argv: Optional[BoolInputType]) -> BoolFunctionReturnType:
"""
{
"function": "OR",
"description": "Returns True if any of the provided arguments are True, and False if all of the provided arguments are False.",
"search_terms": ["or", "if", "conditional"],
"examples": [
"OR(True, False)",
"OR(Status == 'success', Status == 'pass', Status == 'passed')"
],
"syntax": "OR(boolean_condition1, [boolean_condition2, ...])",
"syntax_elements": [{
"element": "boolean_condition1",
"description": "An expression or series that returns True or False values. See IF documentation for a list of conditons."
},
{
"element": "boolean_condition2 ... [OPTIONAL]",
"description": "An expression or series that returns True or False values. See IF documentation for a list of conditons."
}
]
}
"""
# If we don't find any arguments, we default to False. Excel in practice
# defaults to a value error if there are no args
return get_final_result_series_or_primitive(
False,
argv,
lambda df: df.any().any(),
lambda previous_value, new_value: previous_value or new_value,
lambda previous_series, new_series: previous_series | new_series
)
# TODO: we should see if we can list these automatically!
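# One possible approach for the TODO above (illustrative sketch only, not wired
# in): collect every sheet function in this module by keeping the callables
# whose names are all uppercase, e.g.
#
#   CONTROL_FUNCTIONS = {
#       name: func
#       for name, func in globals().items()
#       if callable(func) and name.isupper()
#   }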
CONTROL_FUNCTIONS = {
'AND': AND,
'BOOL': BOOL,
'IF': IF,
'OR': OR,
} | PypiClean |
/IsoCor-2.2.1-py3-none-any.whl/isocor/ui/isocorcli.py | import argparse
import isocor as hr
import isocor.ui.isocordb
import numpy as np
import pandas as pd
import io
import logging
from pathlib import Path
import sys
def process(args):
# create logger (should be root to catch all 'mscorrectors' loggers)
logger = logging.getLogger()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s', "%Y-%m-%d %H:%M:%S")
# sends logging output to sys.stderr
strm_hdlr = logging.StreamHandler()
strm_hdlr.setFormatter(formatter)
logger.addHandler(strm_hdlr)
if hasattr(args, 'verbose'):
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# create environment
baseenv = isocor.ui.isocordb.EnvComputing()
if hasattr(args, 'I'):
baseenv.registerIsopotes(Path(args.I))
else:
baseenv.registerIsopotes()
if hasattr(args, 'D'):
baseenv.registerDerivativesDB(Path(args.D))
else:
baseenv.registerDerivativesDB()
if hasattr(args, 'M'):
baseenv.registerMetabolitesDB(Path(args.M))
else:
baseenv.registerMetabolitesDB()
try:
# get correction parameters
data_isotopes = baseenv.dictIsotopes
tracer = args.tracer
if tracer not in baseenv.dfIsotopes['name'].unique():
raise ValueError(
"Can't find tracer named '{}'. Eventually check the case in your Isotopes file".format(tracer))
tracer_purity = getattr(args, 'tracer_purity', None)
if tracer_purity:
if any(i < 0 for i in tracer_purity) or any(i > 1 for i in tracer_purity) or sum(tracer_purity) != 1:
raise ValueError(
"Purity values ({}) should be within the range [0, 1], and their sum should be 1.".format(tracer_purity))
correct_NA_tracer = True if hasattr(
args, 'correct_NA_tracer') else False
resolution = getattr(args, 'resolution', None)
mz_of_resolution = getattr(args, 'mz_of_resolution', None)
resolution_formula_code = getattr(
args, 'resolution_formula_code', None)
if resolution_formula_code == 'datafile':
useformula = False
else:
useformula = True
HRmode = resolution or mz_of_resolution or resolution_formula_code
if HRmode:
if not resolution:
raise ValueError(
"Applying correction to high-resolution data: 'resolution' should be provided.")
if not mz_of_resolution:
raise ValueError(
"Applying correction to high-resolution data: 'mz_of_resolution' should be provided.")
if not resolution_formula_code:
raise ValueError(
"Applying correction to high-resolution data: 'resolution_formula' should be provided.")
if resolution <= 0:
raise ValueError(
"Resolution '{}' should be a positive number.".format(resolution))
if mz_of_resolution <= 0:
raise ValueError(
"mz at which resolution is measured '{}' should be a positive number.".format(mz_of_resolution))
baseenv.registerDatafile(Path(args.inputdata), useformula)
except Exception as err:
logger.error(
"wrong parameters. Check for errors above. {}".format(err))
raise
# log general information on the process
logger.info('------------------------------------------------')
logger.info("Correction process")
logger.info('------------------------------------------------')
logger.info(" data files")
logger.info(" data file: {}".format(args.inputdata))
logger.info(" derivatives database: {}".format(
getattr(args, 'D', 'Derivatives.dat')))
logger.info(" metabolites database: {}".format(
getattr(args, 'M', 'Metabolites.dat')))
logger.info(" isotopes database: {}".format(
getattr(args, 'I', 'Isotopes.dat')))
logger.info(" correction parameters")
logger.info(" isotopic tracer: {}".format(tracer))
logger.info(" correct natural abundance of the tracer element: {}".format(
correct_NA_tracer))
logger.info(" isotopic purity of the tracer: {}".format(tracer_purity))
if HRmode:
logger.info(" mode: high-resolution")
logger.info(" formula code: {}".format(
resolution_formula_code))
if useformula:
logger.info(
" instrument resolution: {}".format(resolution))
if resolution_formula_code != 'constant':
logger.info(" at mz: {}".format(mz_of_resolution))
else:
logger.info(" mode: low-resolution")
logger.info(" natural abundance of isotopes")
logger.info(" {}".format(data_isotopes))
logger.info(" IsoCor version: {}".format(hr.__version__))
# initialize error dict
errors = {'labels': [], 'measurements': []}
labels = baseenv.getLabelsList(useformula)
logger.info('------------------------------------------------')
logger.info('Constructing correctors for all (metabolite, derivative)...')
logger.info('------------------------------------------------')
dictMetabolites = {}
for label in labels:
try:
logger.debug("constructing {}...".format(label))
if HRmode:
if not useformula:
resolution = label[2]
resolution_formula_code = 'constant'
dictMetabolites[label] = hr.MetaboliteCorrectorFactory(
formula=baseenv.getMetaboliteFormula(label[0]), tracer=tracer, resolution=resolution, label=label[0],
data_isotopes=data_isotopes, mz_of_resolution=mz_of_resolution,
derivative_formula=baseenv.getDerivativeFormula(label[1]), tracer_purity=tracer_purity,
correct_NA_tracer=correct_NA_tracer, resolution_formula_code=resolution_formula_code,
charge=baseenv.getMetaboliteCharge(label[0]))
else:
dictMetabolites[label] = hr.MetaboliteCorrectorFactory(
formula=baseenv.getMetaboliteFormula(label[0]), tracer=tracer, label=label[0],
data_isotopes=data_isotopes,
derivative_formula=baseenv.getDerivativeFormula(label[1]), tracer_purity=tracer_purity,
correct_NA_tracer=correct_NA_tracer)
logger.info("{} successfully constructed.".format(label))
except Exception as err:
dictMetabolites[label] = None
errors['labels'] = errors['labels'] + [label]
logger.error("cannot construct {}: {}".format(label, err))
sys.exit(2)
logger.info('------------------------------------------------')
logger.info('Correcting raw MS data...')
logger.info('------------------------------------------------')
df = pd.DataFrame()
for label in labels:
metabo = dictMetabolites[label]
series, series_err = baseenv.getDataSerie(label, useformula)
for s_err in series_err:
errors['measurements'] = errors['measurements'] + \
["{} - {}".format(s_err, label)]
logger.error(
"{} - {}: Measurement vector is incomplete, some isotopologues are not provided.".format(s_err, label))
for serie in series:
if metabo:
try:
isotopic_inchi = metabo.isotopic_inchi
valuesCorrected = metabo.correct(serie[1])
logger.info("{} - {}: processed".format(serie[0], label))
except Exception as err:
isotopic_inchi = ['']*len(serie[1])
valuesCorrected = ([np.nan] * len(serie[1]), [np.nan] * len(serie[1]), [np.nan] * len(serie[1]), np.nan)
logger.error("{} - {}: {}".format(serie[0], label, err))
errors['measurements'] = errors['measurements'] + \
["{} - {}".format(serie[0], label)]
else:
isotopic_inchi = ['']*len(serie[1])
valuesCorrected = ([np.nan] * len(serie[1]), [np.nan] * len(serie[1]), [np.nan] * len(serie[1]), np.nan)
errors['measurements'] = errors['measurements'] + \
["{} - {}".format(serie[0], label)]
logger.error(
"{} - {}: (metabolite, derivative) corrector could not be constructed.".format(serie[0], label))
for i, line in enumerate(zip(*(serie[1], valuesCorrected[0], valuesCorrected[1], valuesCorrected[2], [valuesCorrected[3]]*len(valuesCorrected[0])))):
df = pd.concat((df, pd.DataFrame([line], index=pd.MultiIndex.from_tuples([[serie[0], label[0], label[1], i, isotopic_inchi[i]]], names=[
'sample', 'metabolite', 'derivative', 'isotopologue', 'isotopic_inchi']), columns=['area', 'corrected_area', 'isotopologue_fraction', 'residuum', 'mean_enrichment'])))
# summary results for logs
logger.info('------------------------------------------------')
logger.info("Correction process summary")
logger.info('------------------------------------------------')
logger.info(" number of samples: {}".format(
len(baseenv.getSamplesList())))
if useformula:
logger.info(
" number of (metabolite, derivative): {}".format(len(labels)))
else:
logger.info(
" number of (metabolite, derivative, resolution): {}".format(len(labels)))
nb_errors = len(errors['labels']) + len(errors['measurements'])
logger.info(" errors: {}".format(nb_errors))
if nb_errors:
logger.info(" {} errors during construction of (metabolite, derivative) correctors".format(
len(errors['labels'])))
logger.info(" {} errors during correction of measurements".format(
len(errors['measurements'])))
logger.info(" detailed information on errors are provided above.")
output = io.StringIO()
df.to_csv(output, sep='\t')
output.seek(0)
print(output.read())
def parseArgs():
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS,
description='correction of MS data for naturally occurring isotopes')
parser.add_argument("inputdata", help="measurements file to process")
parser.add_argument("-M", type=str, help="path to metabolites database")
parser.add_argument("-D", type=str, help="path to derivatives database")
parser.add_argument("-I", type=str, help="path to isotopes database")
parser.add_argument("-t", "--tracer", type=str, required=True,
help='the isotopic tracer (e.g. "13C")')
parser.add_argument("-r", "--resolution", type=float,
help='HR only: resolution of the mass spectrometer (e.g. "1e4")')
parser.add_argument("-m", "--mz_of_resolution", type=float,
help='HR only: mz at which resolution is given (e.g. "400")')
parser.add_argument("-f", "--resolution_formula_code", type=str,
choices=hr.HighResMetaboliteCorrector.RES_FORMULAS, help="HR only: spectrometer formula code")
parser.add_argument("-p", "--tracer_purity", type=lambda s: [float(item) for item in s.split(',')],
help="purity vector of the tracer")
parser.add_argument("-n", "--correct_NA_tracer",
help="flag to correct tracer natural abundance", action='store_true')
parser.add_argument("-v", "--verbose",
help="flag to enable verbose logs", action='store_true')
return parser
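# Illustrative invocations (assumptions for demonstration only; paths and values
# are placeholders):
#
#   python isocorcli.py measurements.tsv -t 13C
#   python isocorcli.py measurements.tsv -t 13C -r 1e4 -m 400 -f constant -v
#
# The first call corrects low-resolution data for a 13C tracer; the second uses
# the high-resolution mode with the 'constant' resolution formula referenced in
# process(). The corrected table is printed to stdout as tab-separated values.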
def start_cli():
parser = parseArgs()
args = parser.parse_args()
process(args) | PypiClean |
/smart_home_tng-2023.1.3.tar.gz/smart_home_tng-2023.1.3/smart_home_tng/core/fan.py |
import dataclasses
import datetime as dt
import enum
import functools as ft
import math
import typing
from . import helpers
from .toggle import Toggle
_SCAN_INTERVAL: typing.Final = dt.timedelta(seconds=30)
class _EntityFeature(enum.IntEnum):
"""Supported features of the fan entity."""
SET_SPEED = 1
OSCILLATE = 2
DIRECTION = 4
PRESET_MODE = 8
_SERVICE_INCREASE_SPEED: typing.Final = "increase_speed"
_SERVICE_DECREASE_SPEED: typing.Final = "decrease_speed"
_SERVICE_OSCILLATE: typing.Final = "oscillate"
_SERVICE_SET_DIRECTION: typing.Final = "set_direction"
_SERVICE_SET_PERCENTAGE: typing.Final = "set_percentage"
_SERVICE_SET_PRESET_MODE: typing.Final = "set_preset_mode"
_DIRECTION_FORWARD: typing.Final = "forward"
_DIRECTION_REVERSE: typing.Final = "reverse"
_ATTR_PERCENTAGE: typing.Final = "percentage"
_ATTR_PERCENTAGE_STEP: typing.Final = "percentage_step"
_ATTR_OSCILLATING: typing.Final = "oscillating"
_ATTR_DIRECTION: typing.Final = "direction"
_ATTR_PRESET_MODE: typing.Final = "preset_mode"
_ATTR_PRESET_MODES: typing.Final = "preset_modes"
class _NotValidPresetModeError(ValueError):
"""Exception class when the preset_mode in not in the preset_modes list."""
@dataclasses.dataclass
class _EntityDescription(Toggle.EntityDescription):
"""A class that describes fan entities."""
class _Entity(Toggle.Entity):
"""Base class for fan entities."""
_entity_description: _EntityDescription
_attr_current_direction: str | None = None
_attr_oscillating: bool | None = None
_attr_percentage: int | None
_attr_preset_mode: str | None
_attr_preset_modes: list[str] | None
_attr_speed_count: int
_attr_supported_features: int = 0
@property
def entity_description(self) -> _EntityDescription:
return super().entity_description
def set_percentage(self, percentage: int) -> None:
"""Set the speed of the fan, as a percentage."""
raise NotImplementedError()
async def async_set_percentage(self, percentage: int) -> None:
"""Set the speed of the fan, as a percentage."""
if percentage == 0:
await self.async_turn_off()
await self._shc.async_add_executor_job(self.set_percentage, percentage)
async def async_increase_speed(self, percentage_step: int | None = None) -> None:
"""Increase the speed of the fan."""
await self._async_adjust_speed(1, percentage_step)
async def async_decrease_speed(self, percentage_step: int | None = None) -> None:
"""Decrease the speed of the fan."""
await self._async_adjust_speed(-1, percentage_step)
async def _async_adjust_speed(self, modifier: int, percentage_step: int | None) -> None:
"""Increase or decrease the speed of the fan."""
current_percentage = self.percentage or 0
if percentage_step is not None:
new_percentage = current_percentage + (percentage_step * modifier)
else:
speed_range = (1, self.speed_count)
speed_index = math.ceil(
helpers.percentage_to_ranged_value(speed_range, current_percentage)
)
new_percentage = helpers.ranged_value_to_percentage(
speed_range, speed_index + modifier
)
new_percentage = max(0, min(100, new_percentage))
await self.async_set_percentage(new_percentage)
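# Worked example (illustrative, assuming the percentage/ranged-value helpers
# follow Home Assistant's semantics): with speed_count == 3, a fan at 66% maps
# to speed index ceil(1.98) == 2; increase_speed moves to index 3, which
# converts back to 100%, while decrease_speed moves to index 1, i.e. about 33%.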
def set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
raise NotImplementedError()
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
await self._shc.async_add_executor_job(self.set_preset_mode, preset_mode)
def _valid_preset_mode_or_raise(self, preset_mode: str) -> None:
"""Raise NotValidPresetModeError on invalid preset_mode."""
preset_modes = self.preset_modes
if not preset_modes or preset_mode not in preset_modes:
raise _NotValidPresetModeError(
f"The preset_mode {preset_mode} is not a valid preset_mode: {preset_modes}"
)
def set_direction(self, direction: str) -> None:
"""Set the direction of the fan."""
raise NotImplementedError()
async def async_set_direction(self, direction: str) -> None:
"""Set the direction of the fan."""
await self._shc.async_add_executor_job(self.set_direction, direction)
def turn_on(
self,
percentage: int | None = None,
preset_mode: str | None = None,
**kwargs: typing.Any,
) -> None:
"""Turn on the fan."""
raise NotImplementedError()
async def async_turn_on(
self,
percentage: int | None = None,
preset_mode: str | None = None,
**kwargs: typing.Any,
) -> None:
"""Turn on the fan."""
await self._shc.async_add_executor_job(
ft.partial(
self.turn_on,
percentage=percentage,
preset_mode=preset_mode,
**kwargs,
)
)
def oscillate(self, oscillating: bool) -> None:
"""Oscillate the fan."""
raise NotImplementedError()
async def async_oscillate(self, oscillating: bool) -> None:
"""Oscillate the fan."""
await self._shc.async_add_executor_job(self.oscillate, oscillating)
@property
def is_on(self) -> bool:
"""Return true if the entity is on."""
return (
self.percentage is not None and self.percentage > 0
) or self.preset_mode is not None
@property
def percentage(self) -> int:
"""Return the current speed as a percentage."""
if hasattr(self, "_attr_percentage"):
return self._attr_percentage
return 0
@property
def speed_count(self) -> int:
"""Return the number of speeds the fan supports."""
if hasattr(self, "_attr_speed_count"):
return self._attr_speed_count
return 100
@property
def percentage_step(self) -> float:
"""Return the step size for percentage."""
return 100 / self.speed_count
@property
def current_direction(self) -> str:
"""Return the current direction of the fan."""
return self._attr_current_direction
@property
def oscillating(self) -> bool:
"""Return whether or not the fan is currently oscillating."""
return self._attr_oscillating
@property
def capability_attributes(self) -> dict[str, list[str]]:
"""Return capability attributes."""
attrs = {}
if (
self.supported_features & _EntityFeature.SET_SPEED
or self.supported_features & _EntityFeature.PRESET_MODE
):
attrs[_ATTR_PRESET_MODES] = self.preset_modes
return attrs
@typing.final
@property
def state_attributes(self) -> dict[str, float | str]:
"""Return optional state attributes."""
data: dict[str, float | str] = {}
supported_features = self.supported_features
if supported_features & _EntityFeature.DIRECTION:
data[_ATTR_DIRECTION] = self.current_direction
if supported_features & _EntityFeature.OSCILLATE:
data[_ATTR_OSCILLATING] = self.oscillating
if supported_features & _EntityFeature.SET_SPEED:
data[_ATTR_PERCENTAGE] = self.percentage
data[_ATTR_PERCENTAGE_STEP] = self.percentage_step
if (
supported_features & _EntityFeature.PRESET_MODE
or supported_features & _EntityFeature.SET_SPEED
):
data[_ATTR_PRESET_MODE] = self.preset_mode
return data
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._attr_supported_features
@property
def preset_mode(self) -> str:
"""Return the current preset mode, e.g., auto, smart, interval, favorite.
Requires FanEntityFeature.SET_SPEED.
"""
if hasattr(self, "_attr_preset_mode"):
return self._attr_preset_mode
return None
@property
def preset_modes(self) -> list[str]:
"""Return a list of available preset modes.
Requires FanEntityFeature.SET_SPEED.
"""
if hasattr(self, "_attr_preset_modes"):
return self._attr_preset_modes
return None
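# Illustrative sketch (not part of the package API): a minimal speed-only fan
# entity built on the base class above. A real integration would also provide
# an EntityDescription and talk to an actual device.
class _ExampleSpeedOnlyFan(_Entity):
    """Example fan that only supports setting a speed percentage."""
    _attr_supported_features = _EntityFeature.SET_SPEED
    _attr_speed_count = 4  # four speeds -> 25% steps
    def set_percentage(self, percentage: int) -> None:
        # a real implementation would send the new speed to the device
        self._attr_percentage = percentage
    def turn_on(
        self,
        percentage: int | None = None,
        preset_mode: str | None = None,
        **kwargs: typing.Any,
    ) -> None:
        self.set_percentage(percentage if percentage is not None else 100)
    def turn_off(self, **kwargs: typing.Any) -> None:
        self.set_percentage(0)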
# pylint: disable=unused-variable, invalid-name
class Fan:
"""Fan namespace."""
Entity: typing.TypeAlias = _Entity
EntityDescription: typing.TypeAlias = _EntityDescription
EntityFeature: typing.TypeAlias = _EntityFeature
NotValidPresetModeError: typing.TypeAlias = _NotValidPresetModeError
SERVICE_INCREASE_SPEED: typing.Final = _SERVICE_INCREASE_SPEED
SERVICE_DECREASE_SPEED: typing.Final = _SERVICE_DECREASE_SPEED
SERVICE_OSCILLATE: typing.Final = _SERVICE_OSCILLATE
SERVICE_SET_DIRECTION: typing.Final = _SERVICE_SET_DIRECTION
SERVICE_SET_PERCENTAGE: typing.Final = _SERVICE_SET_PERCENTAGE
SERVICE_SET_PRESET_MODE: typing.Final = _SERVICE_SET_PRESET_MODE
DIRECTION_FORWARD: typing.Final = _DIRECTION_FORWARD
DIRECTION_REVERSE: typing.Final = _DIRECTION_REVERSE
ATTR_PERCENTAGE: typing.Final = _ATTR_PERCENTAGE
ATTR_PERCENTAGE_STEP: typing.Final = _ATTR_PERCENTAGE_STEP
ATTR_OSCILLATING: typing.Final = _ATTR_OSCILLATING
ATTR_DIRECTION: typing.Final = _ATTR_DIRECTION
ATTR_PRESET_MODE: typing.Final = _ATTR_PRESET_MODE
ATTR_PRESET_MODES: typing.Final = _ATTR_PRESET_MODES
SCAN_INTERVAL: typing.Final = _SCAN_INTERVAL | PypiClean |
/tinned_python-0.1.2.tar.gz/tinned_python-0.1.2/README.rst | =============
Tinned Python
=============
.. image:: https://img.shields.io/pypi/v/tinned_python.svg
:target: https://pypi.python.org/pypi/tinned_python
.. image:: https://github.com/tmfnll/tinned_python/workflows/Python%20package/badge.svg
:target: https://github.com/tmfnll/tinned_python/actions?query=workflow%3A%22Python+package%22
.. image:: https://readthedocs.org/projects/tinned-python/badge/?version=latest
:target: https://tinned-python.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://pyup.io/repos/github/tmfnll/tinned_python/shield.svg
:target: https://pyup.io/repos/github/tmfnll/tinned_python/
:alt: Updates
Python data, in a tin
* Free software: MIT license
* Documentation: https://tinned-python.readthedocs.io.
Features
--------
* TODO
Credits
-------
This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.
.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
| PypiClean |
/testcase_automaker-1.0.8-py3-none-any.whl/testcase_automaker/interface/http_params_generator.py | import sys
sys.path.append("../..")
from testcase_automaker.Utils.amazingutils import randoms
from allpairspy import AllPairs
import random
import copy
class http_params_generator(object):
'''
>>> params_generator = http_params_generator(parameters_structure={'name': {'type': 'string', 'value': '','range':['张三','李四'], 'iscompulsory': True},\
'phone': {'type': 'number', 'value': '', 'iscompulsory': True},\
'claimant': {'type': 'object', 'value': {'name': {'type': 'string', 'value': '', 'iscompulsory': True}\
,'phone': {'type': 'number', 'value': '', 'iscompulsory': True}}, 'iscompulsory': True},\
'informations': {'type': 'array', 'value': [{'claimant': {'type': 'object', 'value': {'name': {'type': 'string', 'value': '', 'iscompulsory': True}\
,'phone': {'type': 'number', 'value': '', 'iscompulsory': True}}, 'iscompulsory': True}},\
{'name': {'type': 'string', 'value': '', 'iscompulsory': True}}], 'iscompulsory': True}})
>>> params_generator.get_params_num()
7
>>> params_generator.generate_params_list()
>>> type(params_generator.generated_params_list)
<class 'list'>
>>> type(random.choice(params_generator.generated_params_list))
<class 'dict'>
>>> len(params_generator.generated_params_list)
7
>>> params_generator = http_params_generator(parameters_structure={})
>>> params_generator.get_params_num()
0
>>> params_generator.generate_params_list()
>>> type(params_generator.generated_params_list)
<class 'list'>
>>> len(params_generator.generated_params_list)
0
'''
def __init__(self, parameters_structure, generated_params_list=None):
self.parameters_structure = parameters_structure
self.generated_params_list = generated_params_list
# Generate the list of parameter combinations
def generate_params_list(self):
parameters = http_params_generator.get_pairwise_list(self.get_params_num())
params_usage_2d_list = []
params_combo_list = []
if len(parameters) > 1:
for pairs in AllPairs(parameters):
params_usage_2d_list.append(pairs)
for params_usage_list in params_usage_2d_list:
yield_params_usage_list = http_params_generator.yield_list(params_usage_list)
raw_params_list = self.generate_params(parameters_usage_list=yield_params_usage_list)
prepared_params_list = http_params_generator.get_value_dic(raw_params_list)
params_combo_list.append(prepared_params_list)
elif len(parameters) == 1:  # the third-party AllPairs library does not support the single-parameter case
yield_params_usage_list_true = http_params_generator.yield_list([True])
yield_params_usage_list_false = http_params_generator.yield_list([False])
raw_params_list_true = self.generate_params(parameters_usage_list=yield_params_usage_list_true)
prepared_params_list_true = http_params_generator.get_value_dic(raw_params_list_true)
raw_params_list_false = self.generate_params(parameters_usage_list=yield_params_usage_list_false)
prepared_params_list_false = http_params_generator.get_value_dic(raw_params_list_false)
params_combo_list.append(prepared_params_list_true)
params_combo_list.append(prepared_params_list_false)
self.generated_params_list = params_combo_list
# Generate parameters
def generate_params(self, parameters_usage_list, parameters_structure=None):
if parameters_structure is None:
parameters_structure = copy.deepcopy(self.parameters_structure)
for key, attribute in parameters_structure.items():
type_name = attribute['type']
if type_name.lower() == 'object':
self.generate_params(parameters_structure=attribute['value'],
parameters_usage_list=parameters_usage_list)
continue
if type_name.lower() == 'array':
for value in attribute['value']:
self.generate_params(parameters_structure=value,
parameters_usage_list=parameters_usage_list)
continue
type_category = self.get_type_category(key)
if 'range' in attribute and attribute['range']:
generated_value = random.choice(attribute['range'])
else:
generated_value = self.get_parameter_random_value(type_name, type_category)
if next(parameters_usage_list) or ('range' in attribute and attribute['range']):
parameters_structure[key]['value'] = generated_value
else:
parameters_structure[key]['value'] = None
return parameters_structure
def get_params_num(self, parameters_structure=None, num=0):
if parameters_structure is None:
parameters_structure = copy.deepcopy(self.parameters_structure)
for key, attribute in parameters_structure.items():
type_name = attribute['type']
if type_name.lower() == 'object':
num += self.get_params_num(attribute['value'])
continue
if type_name.lower() == 'array':
for value in attribute['value']:
num += self.get_params_num(value)
continue
else:
num += 1
return num
# Compare whether two dicts are partially equal (placeholder, not implemented yet)
@staticmethod
def remove_duplicated_dict_in_list(dic_list):
for dic in dic_list:
pass
@staticmethod
def yield_list(input_list):
for i in input_list:
yield i
@staticmethod
def get_pairwise_list(params_num):
parameters = []
for i in range(params_num):
parameters.append([True, False])
return parameters
# Return a dict that keeps only each entry's 'value' field as the value for the corresponding key
@staticmethod
def get_value_dic(dic):
new_dic = dict()
for key, attribute in dic.items():
if attribute['type'].lower() == 'object':
new_dic[key] = http_params_generator.get_value_dic(attribute['value'])
continue
if attribute['type'].lower() == 'array':
new_dic[key] = []
for value in attribute['value']:
new_dic[key].append(http_params_generator.get_value_dic(value))
continue
new_dic[key] = attribute['value']
return new_dic
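# Example (illustrative): get_value_dic flattens the type/value wrappers, e.g.
#   {'name': {'type': 'string', 'value': 'foo', 'iscompulsory': True}}
# becomes
#   {'name': 'foo'}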
# Derive the data category from the key name
@staticmethod
def get_type_category(key):
if key == 'phone':
return 'chinese_mobile_phone'
if key == 'name':
return 'chinese_name'
else:
return 'default'
# Generate random data based on the data type and its category
@staticmethod
def get_parameter_random_value(type_name, type_category='default'):
if type_name.lower() == 'boolean':
return randoms.get_random_boolean()
if type_name.lower() == 'number':
return randoms.get_random_num(num_type=type_category)
if type_name.lower() == 'string':
return randoms.get_random_str(str_type=type_category)
if type_name.lower() == 'date':
return randoms.get_random_num(length=9)
else:
return None
if __name__ == '__main__':
import doctest
doctest.testmod() | PypiClean |
/masonite-inertia-4.2.4.tar.gz/masonite-inertia-4.2.4/README.md | <p align="center">
<img src="https://banners.beyondco.de/Masonite%20Inertia.png?theme=light&packageManager=pip+install&packageName=masonite-inertia&pattern=topography&style=style_1&description=Masonite%20Inertia%20server-side%20adapter&md=1&showWatermark=1&fontSize=100px&images=https%3A%2F%2Fgblobscdn.gitbook.com%2Fspaces%2F-L9uc-9XAlqhXkBwrLMA%2Favatar.png">
</p>
<p align="center">
<a href="https://docs.masoniteproject.com">
<img alt="Masonite Package" src="https://img.shields.io/static/v1?label=Masonite&message=package&labelColor=grey&color=blue&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAYAAAAfSC3RAAAAAXNSR0IArs4c6QAAAIRlWElmTU0AKgAAAAgABQESAAMAAAABAAEAAAEaAAUAAAABAAAASgEbAAUAAAABAAAAUgEoAAMAAAABAAIAAIdpAAQAAAABAAAAWgAAAAAAAABIAAAAAQAAAEgAAAABAAOgAQADAAAAAQABAACgAgAEAAAAAQAAAA6gAwAEAAAAAQAAAA4AAAAATspU+QAAAAlwSFlzAAALEwAACxMBAJqcGAAAAVlpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90aWZmLzEuMC8iPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KTMInWQAAAnxJREFUKBVNUl1IVEEUPjPObdd1VdxWM0rMIl3bzbVWLSofVm3th0AhMakHHyqRiNSHEAq5b2HSVvoQRUiEECQUQkkPbRslRGigG8auoon2oPSjpev+3PWeZq7eaC5nDt93vplz5txDQJYpNxX4st4JFiwj9aCqmswUFQNS/A2YskrZJPYefkECC2GhQwAqvLYybwXrwBvq8HSNOXRO92+aH7nW8vc/wS2Z9TqneYt2KHjlf9Iv+43wFJMExzO0YE5OKe60N+AOW6OmE+WJTBrg23jjzWxMBauOlfyycsV24F+cH+zAXYUOGl+DaiDxfl245/W9OnVrSY+O2eqPkyz4sVvHoKp9gOihf5KoAVv3hkQgbj/ihG9fI3RixKcUVx7lJVaEc0vnyf2FFll+ny80ZHZiGhIKowWJBCEAKr+FSuNDLt+lxybSF51lo74arqs113dOZqwsptxNs5bwi7Q3q8npSC2AWmvjTncZf1l61e5DEizNn5mtufpsqk5+CZTuq00sP1wkNPv8jeEikVVlJso+GEwRtNs3QeBt2YP2V2ZI3Tx0e+7T89zK5tNASOLEytJAryGtkLc2PcBM5byyUWYkMQpMioYcDcchC6xN220Iv36Ot8pV0454RHLEwmmD7UWfIdX0zq3GjMPG5NKBtv5qiPEPekK2U51j1451BZoc3i+1ohSQ/UzzG5uYFFn2mwVUnO4O3JblXA91T51l3pB3QweDl7sNXMyEjbguSjrPcQNmwDkNc8CbCvDd0+xCC7RFi9wFulD3mJeXqxQevB4prrqgc0TmQ85NG/K43e2UwnMVAJIEBNfWRYR3HfnvivrIzMyo4Hgy+hfscvLo53jItAAAAABJRU5ErkJggg==">
</a>
<img alt="GitHub Workflow Status (branch)" src="https://img.shields.io/github/workflow/status/girardinsamuel/masonite-inertia/Test%20Application/2.X">
<img src="https://codecov.io/gh/girardinsamuel/masonite-inertia/branch/2.X/graph/badge.svg?token=7W8Y6UVUAT"/>
<img src="https://img.shields.io/badge/python-3.6+-blue.svg" alt="Python Version">
<img alt="PyPI" src="https://img.shields.io/pypi/v/masonite-inertia">
<img alt="License" src="https://img.shields.io/github/license/girardinsamuel/masonite-inertia">
<a href="https://github.com/psf/black"><img alt="Code style: black" src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
</p>
## Introduction
Inertia is a new approach to building classic server-driven web apps. From their own web page:
> Inertia allows you to create fully client-side rendered, single-page apps, without much of the complexity that comes with modern SPAs. It does this by leveraging existing server-side frameworks.
Inertia requires an adapter for each backend framework. This repo contains the Masonite server-side adapter for [Inertia.js](https://inertiajs.com/).
You can find the legacy Inertia PingCRM demo with Masonite here [demo (WIP)](https://github.com/girardinsamuel/pingcrm-masonite).
[Documentation 📚](https://samuelgirardin.gitbook.io/masonite-inertia)
[Quick Start ⚡️](#installation)
## Features
Almost all features of the official server-side adapters are present 😃
- Shared data
- Partial reloads
- Lazy loaded props
- Set root view in a provider
- Set root view per view
- Enable sharing Masonite routes (prefer using [masonite-js-routes](https://github.com/girardinsamuel/masonite-js-routes))
- Enable sharing Masonite flash messages
## Official Masonite Documentation
New to Masonite ? Please first read the [Official Documentation](https://docs.masoniteproject.com/).
Masonite strives to have extremely comprehensive documentation 😃. It would be wise to go through the tutorials there.
If you find any discrepancies or anything that doesn't make sense, be sure to comment directly on the documentation to start a discussion!
Hop on [Masonite Discord Community](https://discord.gg/TwKeFahmPZ) to ask any questions you need!
## Installation
**Requirements**
To get started you will need the following:
- Masonite 4.X (use `masonite-inertia>=4.0`) / Masonite 3.X(use `masonite-inertia>=3.0`) / Masonite 2.3 (use `masonite-inertia>=2.X<3.0`)
- Laravel Mix installed (new Masonite projects come with this installed already)
- a Node.js environment (npm or yarn)
```bash
pip install masonite-inertia
```
**Install NPM dependencies**
First we'll need to install some NPM packages (we are using Vue 3 here as frontend framework and `inertia-vue3` as Inertia.js client-side adapter). You can find more info on this on [Inertia.js documentation](https://inertiajs.com/client-side-setup).
```
npm install vue @inertiajs/inertia @inertiajs/inertia-vue3
```
## Configuration
Add InertiaProvider to your project in `config/providers.py`:
```python
# config/providers.py
# ...
from masonite.inertia import InertiaProvider
# ...
PROVIDERS = [
# ...
# Third Party Providers
InertiaProvider,
]
```
Inertia adapter comes with a middleware that will control some of the flow of data. Add InertiaMiddleware to your project in `HttpKernel`:
```python
# AppHttpKernel.py
from masonite.inertia import InertiaMiddleware
class AppHttpKernel(HttpKernel):
http_middleware = [InertiaMiddleware, EncryptCookies]
```
Make sure that this middleware is added before the EncryptCookies middleware, otherwise you will get
issues with CSRF token validation because the `XSRF-TOKEN` value won't be encrypted.
Finally if you want to change some parameters you can publish the package configuration file in your project:
```bash
python craft package:publish inertia
```
Congratulations! You have now set up Inertia in your project! For more information on how to use Inertia.js, go to its [documentation](https://inertiajs.com/installation).
## Getting started
This section quickly explains how to use Inertia.js with Masonite. For more details please [read the documentation 📚](https://samuelgirardin.gitbook.io/masonite-inertia).
### How to use Inertia.js with Masonite adapter
We will create two routes and a controller which will load the two components scaffolded with the previous command, so we can see Inertia.js behaviour. In order to create an Inertia response in our controller, we are going to use the newly available `Inertia` response. And that's it!
If you scaffolded the Inertia demo you will already have the files; otherwise:
```
python craft controller WelcomeController
```
This will create a controller `WelcomeController` but you can name it whatever you like. It would be good to keep the standard of whatever setup you have now for your home page. Then create two routes to that controller if you don't have them already:
```python
ROUTES = [
Route.get('/', 'WelcomeController@index'),
Route.get('/helloworld', 'WelcomeController@helloworld')
]
```
And finally create the controller methods. We just need to use the new `Inertia` to render our controller.
```python
# app/controllers/WelcomeController.py
from masonite.inertia import Inertia
## ..
def index(self, view: Inertia):
return view.render('Index')
def helloworld(self, view: Inertia):
return view.render('HelloWorld')
## ..
```
This controller will render the view based on template `templates/app.html` and will load the Vue components into it depending on the route.
Note that instead of specifying a Jinja template like we normally do we can just specify a page here. So since we have `../pages/Index.vue` we specify to render `Index` here.
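If a page needs data, Inertia adapters pass it as props alongside the page name. The snippet below is a hedged sketch (the props dictionary and the `users` key are made up for illustration; check the documentation linked above for the exact signature in your version):
```python
def index(self, view: Inertia):
    return view.render('Index', {'users': ['Sam', 'Joe']})
```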
### Test it !
OK, now we need to run two more commands. The first is `npm run dev` (at the project root), which compiles all of this with Laravel Mix:
```
npm run dev
```
Now we can run the server like we normally do:
```
python craft serve
```
When we go to our homepage we will see the `Index.vue` component:
```
Home Page
```
Click on the link and you can now see `HelloWorld` without a page refresh!
## Contributing
Please read the [Contributing Documentation](CONTRIBUTING.md) here.
## Maintainers
- [Samuel Girardin](https://www.github.com/girardinsamuel)
Thanks to [Joseph Mancuso](https://github.com/josephmancuso) for making the PoC to couple Masonite and Inertia.js !
## License
Masonite Inertia is open-sourced software licensed under the [MIT license](LICENSE).
| PypiClean |
/elyra-3.15.0.tar.gz/elyra-3.15.0/build/labextensions/@elyra/script-debugger-extension/static/remoteEntry.c3b9ed19d9943f6d8db7.js | var _JUPYTERLAB;
/******/ (() => { // webpackBootstrap
/******/ "use strict";
/******/ var __webpack_modules__ = ({
/***/ "webpack/container/entry/@elyra/script-debugger-extension":
/*!***********************!*\
!*** container entry ***!
\***********************/
/***/ ((__unused_webpack_module, exports, __webpack_require__) => {
var moduleMap = {
"./index": () => {
return Promise.all([__webpack_require__.e("webpack_sharing_consume_default_jupyterlab_fileeditor-webpack_sharing_consume_default_jupyter-5528f8"), __webpack_require__.e("lib_index_js")]).then(() => (() => ((__webpack_require__(/*! ./lib/index.js */ "./lib/index.js")))));
},
"./extension": () => {
return Promise.all([__webpack_require__.e("webpack_sharing_consume_default_jupyterlab_fileeditor-webpack_sharing_consume_default_jupyter-5528f8"), __webpack_require__.e("lib_index_js")]).then(() => (() => ((__webpack_require__(/*! ./lib/index.js */ "./lib/index.js")))));
},
"./style": () => {
return Promise.all([__webpack_require__.e("vendors-node_modules_css-loader_dist_runtime_api_js-node_modules_css-loader_dist_runtime_cssW-72eba1"), __webpack_require__.e("style_index_css")]).then(() => (() => ((__webpack_require__(/*! ./style/index.css */ "./style/index.css")))));
}
};
var get = (module, getScope) => {
__webpack_require__.R = getScope;
getScope = (
__webpack_require__.o(moduleMap, module)
? moduleMap[module]()
: Promise.resolve().then(() => {
throw new Error('Module "' + module + '" does not exist in container.');
})
);
__webpack_require__.R = undefined;
return getScope;
};
var init = (shareScope, initScope) => {
if (!__webpack_require__.S) return;
var name = "default"
var oldScope = __webpack_require__.S[name];
if(oldScope && oldScope !== shareScope) throw new Error("Container initialization failed as it has already been initialized with a different share scope");
__webpack_require__.S[name] = shareScope;
return __webpack_require__.I(name, initScope);
};
// This exports getters to disallow modifications
__webpack_require__.d(exports, {
get: () => (get),
init: () => (init)
});
/***/ })
/******/ });
/************************************************************************/
/******/ // The module cache
/******/ var __webpack_module_cache__ = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/ // Check if module is in cache
/******/ var cachedModule = __webpack_module_cache__[moduleId];
/******/ if (cachedModule !== undefined) {
/******/ return cachedModule.exports;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = __webpack_module_cache__[moduleId] = {
/******/ id: moduleId,
/******/ // no module.loaded needed
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ __webpack_modules__[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/******/ // expose the modules object (__webpack_modules__)
/******/ __webpack_require__.m = __webpack_modules__;
/******/
/******/ // expose the module cache
/******/ __webpack_require__.c = __webpack_module_cache__;
/******/
/************************************************************************/
/******/ /* webpack/runtime/compat get default export */
/******/ (() => {
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = (module) => {
/******/ var getter = module && module.__esModule ?
/******/ () => (module['default']) :
/******/ () => (module);
/******/ __webpack_require__.d(getter, { a: getter });
/******/ return getter;
/******/ };
/******/ })();
/******/
/******/ /* webpack/runtime/define property getters */
/******/ (() => {
/******/ // define getter functions for harmony exports
/******/ __webpack_require__.d = (exports, definition) => {
/******/ for(var key in definition) {
/******/ if(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {
/******/ Object.defineProperty(exports, key, { enumerable: true, get: definition[key] });
/******/ }
/******/ }
/******/ };
/******/ })();
/******/
/******/ /* webpack/runtime/ensure chunk */
/******/ (() => {
/******/ __webpack_require__.f = {};
/******/ // This file contains only the entry chunk.
/******/ // The chunk loading function for additional chunks
/******/ __webpack_require__.e = (chunkId) => {
/******/ return Promise.all(Object.keys(__webpack_require__.f).reduce((promises, key) => {
/******/ __webpack_require__.f[key](chunkId, promises);
/******/ return promises;
/******/ }, []));
/******/ };
/******/ })();
/******/
/******/ /* webpack/runtime/get javascript chunk filename */
/******/ (() => {
/******/ // This function allow to reference async chunks
/******/ __webpack_require__.u = (chunkId) => {
/******/ // return url for filenames based on template
/******/ return "" + chunkId + "." + {"webpack_sharing_consume_default_jupyterlab_fileeditor-webpack_sharing_consume_default_jupyter-5528f8":"f90e9f1e3f496362bc99","lib_index_js":"174fd9e87edf705b825d","vendors-node_modules_css-loader_dist_runtime_api_js-node_modules_css-loader_dist_runtime_cssW-72eba1":"70155eb9c2f87967c6b6","style_index_css":"4c569e1d9d970f7e4331","script-editor_lib_index_js":"5f6647119af7c8a94c32"}[chunkId] + ".js";
/******/ };
/******/ })();
/******/
/******/ /* webpack/runtime/global */
/******/ (() => {
/******/ __webpack_require__.g = (function() {
/******/ if (typeof globalThis === 'object') return globalThis;
/******/ try {
/******/ return this || new Function('return this')();
/******/ } catch (e) {
/******/ if (typeof window === 'object') return window;
/******/ }
/******/ })();
/******/ })();
/******/
/******/ /* webpack/runtime/hasOwnProperty shorthand */
/******/ (() => {
/******/ __webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))
/******/ })();
/******/
/******/ /* webpack/runtime/load script */
/******/ (() => {
/******/ var inProgress = {};
/******/ var dataWebpackPrefix = "@elyra/script-debugger-extension:";
/******/ // loadScript function to load a script via script tag
/******/ __webpack_require__.l = (url, done, key, chunkId) => {
/******/ if(inProgress[url]) { inProgress[url].push(done); return; }
/******/ var script, needAttach;
/******/ if(key !== undefined) {
/******/ var scripts = document.getElementsByTagName("script");
/******/ for(var i = 0; i < scripts.length; i++) {
/******/ var s = scripts[i];
/******/ if(s.getAttribute("src") == url || s.getAttribute("data-webpack") == dataWebpackPrefix + key) { script = s; break; }
/******/ }
/******/ }
/******/ if(!script) {
/******/ needAttach = true;
/******/ script = document.createElement('script');
/******/
/******/ script.charset = 'utf-8';
/******/ script.timeout = 120;
/******/ if (__webpack_require__.nc) {
/******/ script.setAttribute("nonce", __webpack_require__.nc);
/******/ }
/******/ script.setAttribute("data-webpack", dataWebpackPrefix + key);
/******/ script.src = url;
/******/ }
/******/ inProgress[url] = [done];
/******/ var onScriptComplete = (prev, event) => {
/******/ // avoid mem leaks in IE.
/******/ script.onerror = script.onload = null;
/******/ clearTimeout(timeout);
/******/ var doneFns = inProgress[url];
/******/ delete inProgress[url];
/******/ script.parentNode && script.parentNode.removeChild(script);
/******/ doneFns && doneFns.forEach((fn) => (fn(event)));
/******/ if(prev) return prev(event);
/******/ };
/******/ var timeout = setTimeout(onScriptComplete.bind(null, undefined, { type: 'timeout', target: script }), 120000);
/******/ script.onerror = onScriptComplete.bind(null, script.onerror);
/******/ script.onload = onScriptComplete.bind(null, script.onload);
/******/ needAttach && document.head.appendChild(script);
/******/ };
/******/ })();
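/******/ 	// Note: requests for the same URL are coalesced via `inProgress`, an existing
/******/ 	// <script> tag with a matching src or data-webpack attribute is reused, and a
/******/ 	// 120s timeout delivers a { type: 'timeout' } event to every pending callback.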
/******/
/******/ /* webpack/runtime/make namespace object */
/******/ (() => {
/******/ // define __esModule on exports
/******/ __webpack_require__.r = (exports) => {
/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) {
/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
/******/ }
/******/ Object.defineProperty(exports, '__esModule', { value: true });
/******/ };
/******/ })();
/******/
/******/ /* webpack/runtime/sharing */
/******/ (() => {
/******/ __webpack_require__.S = {};
/******/ var initPromises = {};
/******/ var initTokens = {};
/******/ __webpack_require__.I = (name, initScope) => {
/******/ if(!initScope) initScope = [];
/******/ // handling circular init calls
/******/ var initToken = initTokens[name];
/******/ if(!initToken) initToken = initTokens[name] = {};
/******/ if(initScope.indexOf(initToken) >= 0) return;
/******/ initScope.push(initToken);
/******/ // only runs once
/******/ if(initPromises[name]) return initPromises[name];
/******/ // creates a new share scope if needed
/******/ if(!__webpack_require__.o(__webpack_require__.S, name)) __webpack_require__.S[name] = {};
/******/ // runs all init snippets from all modules reachable
/******/ var scope = __webpack_require__.S[name];
/******/ var warn = (msg) => (typeof console !== "undefined" && console.warn && console.warn(msg));
/******/ var uniqueName = "@elyra/script-debugger-extension";
/******/ var register = (name, version, factory, eager) => {
/******/ var versions = scope[name] = scope[name] || {};
/******/ var activeVersion = versions[version];
/******/ if(!activeVersion || (!activeVersion.loaded && (!eager != !activeVersion.eager ? eager : uniqueName > activeVersion.from))) versions[version] = { get: factory, from: uniqueName, eager: !!eager };
/******/ };
/******/ var initExternal = (id) => {
/******/ var handleError = (err) => (warn("Initialization of sharing external failed: " + err));
/******/ try {
/******/ var module = __webpack_require__(id);
/******/ if(!module) return;
/******/ var initFn = (module) => (module && module.init && module.init(__webpack_require__.S[name], initScope))
/******/ if(module.then) return promises.push(module.then(initFn, handleError));
/******/ var initResult = initFn(module);
/******/ if(initResult && initResult.then) return promises.push(initResult['catch'](handleError));
/******/ } catch(err) { handleError(err); }
/******/ }
/******/ var promises = [];
/******/ switch(name) {
/******/ case "default": {
/******/ register("@elyra/script-debugger-extension", "3.15.0", () => (Promise.all([__webpack_require__.e("webpack_sharing_consume_default_jupyterlab_fileeditor-webpack_sharing_consume_default_jupyter-5528f8"), __webpack_require__.e("lib_index_js")]).then(() => (() => (__webpack_require__(/*! ./lib/index.js */ "./lib/index.js"))))));
/******/ register("@elyra/script-editor", "3.15.0", () => (Promise.all([__webpack_require__.e("script-editor_lib_index_js"), __webpack_require__.e("webpack_sharing_consume_default_jupyterlab_fileeditor-webpack_sharing_consume_default_jupyter-5528f8")]).then(() => (() => (__webpack_require__(/*! ../script-editor/lib/index.js */ "../script-editor/lib/index.js"))))));
/******/ }
/******/ break;
/******/ }
/******/ if(!promises.length) return initPromises[name] = 1;
/******/ return initPromises[name] = Promise.all(promises).then(() => (initPromises[name] = 1));
/******/ };
/******/ })();
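/******/ 	// Note: after initialization the "default" share scope has roughly the shape
/******/ 	//   scope[packageName][version] = { get: factory, from: uniqueName, eager: boolean }
/******/ 	// and `register` only replaces an entry that is not loaded yet, preferring eager
/******/ 	// providers and otherwise breaking ties on the `from` name.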
/******/
/******/ /* webpack/runtime/publicPath */
/******/ (() => {
/******/ var scriptUrl;
/******/ if (__webpack_require__.g.importScripts) scriptUrl = __webpack_require__.g.location + "";
/******/ var document = __webpack_require__.g.document;
/******/ if (!scriptUrl && document) {
/******/ if (document.currentScript)
/******/ scriptUrl = document.currentScript.src
/******/ if (!scriptUrl) {
/******/ var scripts = document.getElementsByTagName("script");
/******/ if(scripts.length) scriptUrl = scripts[scripts.length - 1].src
/******/ }
/******/ }
/******/ // When supporting browsers where an automatic publicPath is not supported you must specify an output.publicPath manually via configuration
/******/ // or pass an empty string ("") and set the __webpack_public_path__ variable from your code to use your own logic.
/******/ if (!scriptUrl) throw new Error("Automatic publicPath is not supported in this browser");
/******/ scriptUrl = scriptUrl.replace(/#.*$/, "").replace(/\?.*$/, "").replace(/\/[^\/]+$/, "/");
/******/ __webpack_require__.p = scriptUrl;
/******/ })();
/******/
/******/ /* webpack/runtime/consumes */
/******/ (() => {
/******/ var parseVersion = (str) => {
/******/ // see webpack/lib/util/semver.js for original code
/******/ var p=p=>{return p.split(".").map((p=>{return+p==p?+p:p}))},n=/^([^-+]+)?(?:-([^+]+))?(?:\+(.+))?$/.exec(str),r=n[1]?p(n[1]):[];return n[2]&&(r.length++,r.push.apply(r,p(n[2]))),n[3]&&(r.push([]),r.push.apply(r,p(n[3]))),r;
/******/ }
/******/ var versionLt = (a, b) => {
/******/ // see webpack/lib/util/semver.js for original code
/******/ a=parseVersion(a),b=parseVersion(b);for(var r=0;;){if(r>=a.length)return r<b.length&&"u"!=(typeof b[r])[0];var e=a[r],n=(typeof e)[0];if(r>=b.length)return"u"==n;var t=b[r],f=(typeof t)[0];if(n!=f)return"o"==n&&"n"==f||("s"==f||"u"==n);if("o"!=n&&"u"!=n&&e!=t)return e<t;r++}
/******/ }
/******/ var rangeToString = (range) => {
/******/ // see webpack/lib/util/semver.js for original code
/******/ var r=range[0],n="";if(1===range.length)return"*";if(r+.5){n+=0==r?">=":-1==r?"<":1==r?"^":2==r?"~":r>0?"=":"!=";for(var e=1,a=1;a<range.length;a++){e--,n+="u"==(typeof(t=range[a]))[0]?"-":(e>0?".":"")+(e=2,t)}return n}var g=[];for(a=1;a<range.length;a++){var t=range[a];g.push(0===t?"not("+o()+")":1===t?"("+o()+" || "+o()+")":2===t?g.pop()+" "+g.pop():rangeToString(t))}return o();function o(){return g.pop().replace(/^\((.+)\)$/,"$1")}
/******/ }
/******/ var satisfy = (range, version) => {
/******/ // see webpack/lib/util/semver.js for original code
/******/ if(0 in range){version=parseVersion(version);var e=range[0],r=e<0;r&&(e=-e-1);for(var n=0,i=1,a=!0;;i++,n++){var f,s,g=i<range.length?(typeof range[i])[0]:"";if(n>=version.length||"o"==(s=(typeof(f=version[n]))[0]))return!a||("u"==g?i>e&&!r:""==g!=r);if("u"==s){if(!a||"u"!=g)return!1}else if(a)if(g==s)if(i<=e){if(f!=range[i])return!1}else{if(r?f>range[i]:f<range[i])return!1;f!=range[i]&&(a=!1)}else if("s"!=g&&"n"!=g){if(r||i<=e)return!1;a=!1,i--}else{if(i<=e||s<g!=r)return!1;a=!1}else"s"!=g&&"n"!=g&&(a=!1,i--)}}var t=[],o=t.pop.bind(t);for(n=1;n<range.length;n++){var u=range[n];t.push(1==u?o()|o():2==u?o()&o():u?satisfy(u,version):!o())}return!!o();
/******/ }
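/******/ 	// Example (derived from the encoding above): a required range such as [1,3,5,2]
/******/ 	// prints via rangeToString as "^3.5.2", so satisfy([1,3,5,2], "3.5.4") holds
/******/ 	// while satisfy([1,3,5,2], "4.0.0") does not.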
/******/ var ensureExistence = (scopeName, key) => {
/******/ var scope = __webpack_require__.S[scopeName];
/******/ if(!scope || !__webpack_require__.o(scope, key)) throw new Error("Shared module " + key + " doesn't exist in shared scope " + scopeName);
/******/ return scope;
/******/ };
/******/ var findVersion = (scope, key) => {
/******/ var versions = scope[key];
/******/ var key = Object.keys(versions).reduce((a, b) => {
/******/ return !a || versionLt(a, b) ? b : a;
/******/ }, 0);
/******/ return key && versions[key]
/******/ };
/******/ var findSingletonVersionKey = (scope, key) => {
/******/ var versions = scope[key];
/******/ return Object.keys(versions).reduce((a, b) => {
/******/ return !a || (!versions[a].loaded && versionLt(a, b)) ? b : a;
/******/ }, 0);
/******/ };
/******/ var getInvalidSingletonVersionMessage = (scope, key, version, requiredVersion) => {
/******/ return "Unsatisfied version " + version + " from " + (version && scope[key][version].from) + " of shared singleton module " + key + " (required " + rangeToString(requiredVersion) + ")"
/******/ };
/******/ var getSingleton = (scope, scopeName, key, requiredVersion) => {
/******/ var version = findSingletonVersionKey(scope, key);
/******/ return get(scope[key][version]);
/******/ };
/******/ var getSingletonVersion = (scope, scopeName, key, requiredVersion) => {
/******/ var version = findSingletonVersionKey(scope, key);
/******/ if (!satisfy(requiredVersion, version)) typeof console !== "undefined" && console.warn && console.warn(getInvalidSingletonVersionMessage(scope, key, version, requiredVersion));
/******/ return get(scope[key][version]);
/******/ };
/******/ var getStrictSingletonVersion = (scope, scopeName, key, requiredVersion) => {
/******/ var version = findSingletonVersionKey(scope, key);
/******/ if (!satisfy(requiredVersion, version)) throw new Error(getInvalidSingletonVersionMessage(scope, key, version, requiredVersion));
/******/ return get(scope[key][version]);
/******/ };
/******/ var findValidVersion = (scope, key, requiredVersion) => {
/******/ var versions = scope[key];
/******/ var key = Object.keys(versions).reduce((a, b) => {
/******/ if (!satisfy(requiredVersion, b)) return a;
/******/ return !a || versionLt(a, b) ? b : a;
/******/ }, 0);
/******/ return key && versions[key]
/******/ };
/******/ var getInvalidVersionMessage = (scope, scopeName, key, requiredVersion) => {
/******/ var versions = scope[key];
/******/ return "No satisfying version (" + rangeToString(requiredVersion) + ") of shared module " + key + " found in shared scope " + scopeName + ".\n" +
/******/ "Available versions: " + Object.keys(versions).map((key) => {
/******/ return key + " from " + versions[key].from;
/******/ }).join(", ");
/******/ };
/******/ var getValidVersion = (scope, scopeName, key, requiredVersion) => {
/******/ var entry = findValidVersion(scope, key, requiredVersion);
/******/ if(entry) return get(entry);
/******/ throw new Error(getInvalidVersionMessage(scope, scopeName, key, requiredVersion));
/******/ };
/******/ var warnInvalidVersion = (scope, scopeName, key, requiredVersion) => {
/******/ typeof console !== "undefined" && console.warn && console.warn(getInvalidVersionMessage(scope, scopeName, key, requiredVersion));
/******/ };
/******/ var get = (entry) => {
/******/ entry.loaded = 1;
/******/ return entry.get()
/******/ };
/******/ var init = (fn) => (function(scopeName, a, b, c) {
/******/ var promise = __webpack_require__.I(scopeName);
/******/ if (promise && promise.then) return promise.then(fn.bind(fn, scopeName, __webpack_require__.S[scopeName], a, b, c));
/******/ return fn(scopeName, __webpack_require__.S[scopeName], a, b, c);
/******/ });
/******/
/******/ var load = /*#__PURE__*/ init((scopeName, scope, key) => {
/******/ ensureExistence(scopeName, key);
/******/ return get(findVersion(scope, key));
/******/ });
/******/ var loadFallback = /*#__PURE__*/ init((scopeName, scope, key, fallback) => {
/******/ return scope && __webpack_require__.o(scope, key) ? get(findVersion(scope, key)) : fallback();
/******/ });
/******/ var loadVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => {
/******/ ensureExistence(scopeName, key);
/******/ return get(findValidVersion(scope, key, version) || warnInvalidVersion(scope, scopeName, key, version) || findVersion(scope, key));
/******/ });
/******/ var loadSingleton = /*#__PURE__*/ init((scopeName, scope, key) => {
/******/ ensureExistence(scopeName, key);
/******/ return getSingleton(scope, scopeName, key);
/******/ });
/******/ var loadSingletonVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => {
/******/ ensureExistence(scopeName, key);
/******/ return getSingletonVersion(scope, scopeName, key, version);
/******/ });
/******/ var loadStrictVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => {
/******/ ensureExistence(scopeName, key);
/******/ return getValidVersion(scope, scopeName, key, version);
/******/ });
/******/ var loadStrictSingletonVersionCheck = /*#__PURE__*/ init((scopeName, scope, key, version) => {
/******/ ensureExistence(scopeName, key);
/******/ return getStrictSingletonVersion(scope, scopeName, key, version);
/******/ });
/******/ var loadVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => {
/******/ if(!scope || !__webpack_require__.o(scope, key)) return fallback();
/******/ return get(findValidVersion(scope, key, version) || warnInvalidVersion(scope, scopeName, key, version) || findVersion(scope, key));
/******/ });
/******/ var loadSingletonFallback = /*#__PURE__*/ init((scopeName, scope, key, fallback) => {
/******/ if(!scope || !__webpack_require__.o(scope, key)) return fallback();
/******/ return getSingleton(scope, scopeName, key);
/******/ });
/******/ var loadSingletonVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => {
/******/ if(!scope || !__webpack_require__.o(scope, key)) return fallback();
/******/ return getSingletonVersion(scope, scopeName, key, version);
/******/ });
/******/ var loadStrictVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => {
/******/ var entry = scope && __webpack_require__.o(scope, key) && findValidVersion(scope, key, version);
/******/ return entry ? get(entry) : fallback();
/******/ });
/******/ var loadStrictSingletonVersionCheckFallback = /*#__PURE__*/ init((scopeName, scope, key, version, fallback) => {
/******/ if(!scope || !__webpack_require__.o(scope, key)) return fallback();
/******/ return getStrictSingletonVersion(scope, scopeName, key, version);
/******/ });
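/******/ 	// The load* helpers above combine four orthogonal behaviours: plain lookup of the
/******/ 	// highest registered version, VersionCheck (warn, or throw in the Strict variants,
/******/ 	// when the range is unsatisfied), Singleton (reuse the one shared instance), and
/******/ 	// Fallback (use the locally bundled copy when the share scope has no entry).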
/******/ var installedModules = {};
/******/ var moduleToHandlerMapping = {
/******/ "webpack/sharing/consume/default/@jupyterlab/services": () => (loadSingletonVersionCheck("default", "@jupyterlab/services", [1,6,5,2])),
/******/ "webpack/sharing/consume/default/@jupyterlab/fileeditor": () => (loadSingletonVersionCheck("default", "@jupyterlab/fileeditor", [1,3,5,2])),
/******/ "webpack/sharing/consume/default/@elyra/script-editor/@elyra/script-editor": () => (loadStrictVersionCheckFallback("default", "@elyra/script-editor", [4,3,15,0], () => (__webpack_require__.e("script-editor_lib_index_js").then(() => (() => (__webpack_require__(/*! @elyra/script-editor */ "../script-editor/lib/index.js"))))))),
/******/ "webpack/sharing/consume/default/@jupyterlab/application": () => (loadSingletonVersionCheck("default", "@jupyterlab/application", [1,3,5,2])),
/******/ "webpack/sharing/consume/default/@jupyterlab/debugger": () => (loadSingletonVersionCheck("default", "@jupyterlab/debugger", [1,3,5,2])),
/******/ "webpack/sharing/consume/default/@jupyterlab/apputils": () => (loadSingletonVersionCheck("default", "@jupyterlab/apputils", [1,3,5,2])),
/******/ "webpack/sharing/consume/default/react": () => (loadSingletonVersionCheck("default", "react", [1,17,0,1])),
/******/ "webpack/sharing/consume/default/@jupyterlab/docregistry": () => (loadVersionCheck("default", "@jupyterlab/docregistry", [1,3,5,2])),
/******/ "webpack/sharing/consume/default/@jupyterlab/logconsole": () => (loadSingletonVersionCheck("default", "@jupyterlab/logconsole", [1,3,5,2])),
/******/ "webpack/sharing/consume/default/@jupyterlab/outputarea": () => (loadVersionCheck("default", "@jupyterlab/outputarea", [1,3,5,2])),
/******/ "webpack/sharing/consume/default/@jupyterlab/rendermime": () => (loadSingletonVersionCheck("default", "@jupyterlab/rendermime", [1,3,5,2])),
/******/ "webpack/sharing/consume/default/@jupyterlab/ui-components": () => (loadSingletonVersionCheck("default", "@jupyterlab/ui-components", [1,3,5,2])),
/******/ "webpack/sharing/consume/default/@lumino/signaling": () => (loadSingletonVersionCheck("default", "@lumino/signaling", [1,1,10,0])),
/******/ "webpack/sharing/consume/default/@lumino/widgets": () => (loadSingletonVersionCheck("default", "@lumino/widgets", [1,1,33,0]))
/******/ };
/******/ // no consumes in initial chunks
/******/ var chunkMapping = {
/******/ "webpack_sharing_consume_default_jupyterlab_fileeditor-webpack_sharing_consume_default_jupyter-5528f8": [
/******/ "webpack/sharing/consume/default/@jupyterlab/services",
/******/ "webpack/sharing/consume/default/@jupyterlab/fileeditor"
/******/ ],
/******/ "lib_index_js": [
/******/ "webpack/sharing/consume/default/@elyra/script-editor/@elyra/script-editor",
/******/ "webpack/sharing/consume/default/@jupyterlab/application",
/******/ "webpack/sharing/consume/default/@jupyterlab/debugger"
/******/ ],
/******/ "script-editor_lib_index_js": [
/******/ "webpack/sharing/consume/default/@jupyterlab/apputils",
/******/ "webpack/sharing/consume/default/react",
/******/ "webpack/sharing/consume/default/@jupyterlab/docregistry",
/******/ "webpack/sharing/consume/default/@jupyterlab/logconsole",
/******/ "webpack/sharing/consume/default/@jupyterlab/outputarea",
/******/ "webpack/sharing/consume/default/@jupyterlab/rendermime",
/******/ "webpack/sharing/consume/default/@jupyterlab/ui-components",
/******/ "webpack/sharing/consume/default/@lumino/signaling",
/******/ "webpack/sharing/consume/default/@lumino/widgets"
/******/ ]
/******/ };
/******/ __webpack_require__.f.consumes = (chunkId, promises) => {
/******/ if(__webpack_require__.o(chunkMapping, chunkId)) {
/******/ chunkMapping[chunkId].forEach((id) => {
/******/ if(__webpack_require__.o(installedModules, id)) return promises.push(installedModules[id]);
/******/ var onFactory = (factory) => {
/******/ installedModules[id] = 0;
/******/ __webpack_require__.m[id] = (module) => {
/******/ delete __webpack_require__.c[id];
/******/ module.exports = factory();
/******/ }
/******/ };
/******/ var onError = (error) => {
/******/ delete installedModules[id];
/******/ __webpack_require__.m[id] = (module) => {
/******/ delete __webpack_require__.c[id];
/******/ throw error;
/******/ }
/******/ };
/******/ try {
/******/ var promise = moduleToHandlerMapping[id]();
/******/ if(promise.then) {
/******/ promises.push(installedModules[id] = promise.then(onFactory)['catch'](onError));
/******/ } else onFactory(promise);
/******/ } catch(e) { onError(e); }
/******/ });
/******/ }
/******/ }
/******/ })();
/******/
/******/ /* webpack/runtime/jsonp chunk loading */
/******/ (() => {
/******/ // no baseURI
/******/
/******/ // object to store loaded and loading chunks
/******/ // undefined = chunk not loaded, null = chunk preloaded/prefetched
/******/ // [resolve, reject, Promise] = chunk loading, 0 = chunk loaded
/******/ var installedChunks = {
/******/ "@elyra/script-debugger-extension": 0
/******/ };
/******/
/******/ __webpack_require__.f.j = (chunkId, promises) => {
/******/ // JSONP chunk loading for javascript
/******/ var installedChunkData = __webpack_require__.o(installedChunks, chunkId) ? installedChunks[chunkId] : undefined;
/******/ if(installedChunkData !== 0) { // 0 means "already installed".
/******/
/******/ // a Promise means "currently loading".
/******/ if(installedChunkData) {
/******/ promises.push(installedChunkData[2]);
/******/ } else {
/******/ if("webpack_sharing_consume_default_jupyterlab_fileeditor-webpack_sharing_consume_default_jupyter-5528f8" != chunkId) {
/******/ // setup Promise in chunk cache
/******/ var promise = new Promise((resolve, reject) => (installedChunkData = installedChunks[chunkId] = [resolve, reject]));
/******/ promises.push(installedChunkData[2] = promise);
/******/
/******/ // start chunk loading
/******/ var url = __webpack_require__.p + __webpack_require__.u(chunkId);
/******/ // create error before stack unwound to get useful stacktrace later
/******/ var error = new Error();
/******/ var loadingEnded = (event) => {
/******/ if(__webpack_require__.o(installedChunks, chunkId)) {
/******/ installedChunkData = installedChunks[chunkId];
/******/ if(installedChunkData !== 0) installedChunks[chunkId] = undefined;
/******/ if(installedChunkData) {
/******/ var errorType = event && (event.type === 'load' ? 'missing' : event.type);
/******/ var realSrc = event && event.target && event.target.src;
/******/ error.message = 'Loading chunk ' + chunkId + ' failed.\n(' + errorType + ': ' + realSrc + ')';
/******/ error.name = 'ChunkLoadError';
/******/ error.type = errorType;
/******/ error.request = realSrc;
/******/ installedChunkData[1](error);
/******/ }
/******/ }
/******/ };
/******/ __webpack_require__.l(url, loadingEnded, "chunk-" + chunkId, chunkId);
/******/ } else installedChunks[chunkId] = 0;
/******/ }
/******/ }
/******/ };
/******/
/******/ // no prefetching
/******/
/******/ // no preloaded
/******/
/******/ // no HMR
/******/
/******/ // no HMR manifest
/******/
/******/ // no on chunks loaded
/******/
/******/ // install a JSONP callback for chunk loading
/******/ var webpackJsonpCallback = (parentChunkLoadingFunction, data) => {
/******/ var [chunkIds, moreModules, runtime] = data;
/******/ // add "moreModules" to the modules object,
/******/ // then flag all "chunkIds" as loaded and fire callback
/******/ var moduleId, chunkId, i = 0;
/******/ if(chunkIds.some((id) => (installedChunks[id] !== 0))) {
/******/ for(moduleId in moreModules) {
/******/ if(__webpack_require__.o(moreModules, moduleId)) {
/******/ __webpack_require__.m[moduleId] = moreModules[moduleId];
/******/ }
/******/ }
/******/ if(runtime) var result = runtime(__webpack_require__);
/******/ }
/******/ if(parentChunkLoadingFunction) parentChunkLoadingFunction(data);
/******/ for(;i < chunkIds.length; i++) {
/******/ chunkId = chunkIds[i];
/******/ if(__webpack_require__.o(installedChunks, chunkId) && installedChunks[chunkId]) {
/******/ installedChunks[chunkId][0]();
/******/ }
/******/ installedChunks[chunkId] = 0;
/******/ }
/******/
/******/ }
/******/
/******/ var chunkLoadingGlobal = self["webpackChunk_elyra_script_debugger_extension"] = self["webpackChunk_elyra_script_debugger_extension"] || [];
/******/ chunkLoadingGlobal.forEach(webpackJsonpCallback.bind(null, 0));
/******/ chunkLoadingGlobal.push = webpackJsonpCallback.bind(null, chunkLoadingGlobal.push.bind(chunkLoadingGlobal));
/******/ })();
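/******/ 	// Sketch of the contract (the factory body is elided): each async chunk file pushes
/******/ 	// a [chunkIds, moreModules, runtime?] tuple onto the global array, e.g.
/******/ 	//   (self["webpackChunk_elyra_script_debugger_extension"] =
/******/ 	//     self["webpackChunk_elyra_script_debugger_extension"] || []).push([
/******/ 	//       ["lib_index_js"],
/******/ 	//       { "./lib/index.js": (module, exports, __webpack_require__) => { /* ... */ } }
/******/ 	//     ]);
/******/ 	// webpackJsonpCallback then registers the modules and resolves the waiting chunk promise.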
/******/
/******/ /* webpack/runtime/nonce */
/******/ (() => {
/******/ __webpack_require__.nc = undefined;
/******/ })();
/******/
/************************************************************************/
/******/
/******/ 	// the module cache is used, so entry inlining is disabled
/******/ // startup
/******/ // Load entry module and return exports
/******/ var __webpack_exports__ = __webpack_require__("webpack/container/entry/@elyra/script-debugger-extension");
/******/ (_JUPYTERLAB = typeof _JUPYTERLAB === "undefined" ? {} : _JUPYTERLAB)["@elyra/script-debugger-extension"] = __webpack_exports__;
/******/
/******/ })()
;
//# sourceMappingURL=remoteEntry.c3b9ed19d9943f6d8db7.js.map
// /jupyter_admin-0.0.2.tar.gz/jupyter_admin-0.0.2/jupyter_admin/labextension/static/vendors-node_modules_primer_react_lib-esm_TreeView_TreeView_js-node_modules_primer_react_lib--75b6cc.f37e1d649cb354894b98.js
"use strict";
(self["webpackChunk_datalayer_datalayer"] = self["webpackChunk_datalayer_datalayer"] || []).push([["vendors-node_modules_primer_react_lib-esm_TreeView_TreeView_js-node_modules_primer_react_lib--75b6cc"],{
/***/ "../../node_modules/@primer/react/lib-esm/TreeView/TreeView.js":
/*!*********************************************************************!*\
!*** ../../node_modules/@primer/react/lib-esm/TreeView/TreeView.js ***!
\*********************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "TreeView": () => (/* binding */ TreeView)
/* harmony export */ });
/* harmony import */ var _primer_octicons_react__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! @primer/octicons-react */ "webpack/sharing/consume/default/@primer/octicons-react/@primer/octicons-react?372a");
/* harmony import */ var _primer_octicons_react__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(_primer_octicons_react__WEBPACK_IMPORTED_MODULE_0__);
/* harmony import */ var _react_aria_ssr__WEBPACK_IMPORTED_MODULE_10__ = __webpack_require__(/*! @react-aria/ssr */ "../../node_modules/@react-aria/ssr/dist/module.js");
/* harmony import */ var classnames__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! classnames */ "../../node_modules/classnames/index.js");
/* harmony import */ var classnames__WEBPACK_IMPORTED_MODULE_1___default = /*#__PURE__*/__webpack_require__.n(classnames__WEBPACK_IMPORTED_MODULE_1__);
/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! react */ "webpack/sharing/consume/default/react");
/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_2___default = /*#__PURE__*/__webpack_require__.n(react__WEBPACK_IMPORTED_MODULE_2__);
/* harmony import */ var styled_components__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! styled-components */ "webpack/sharing/consume/default/styled-components/styled-components");
/* harmony import */ var styled_components__WEBPACK_IMPORTED_MODULE_3___default = /*#__PURE__*/__webpack_require__.n(styled_components__WEBPACK_IMPORTED_MODULE_3__);
/* harmony import */ var _constants_js__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! ../constants.js */ "../../node_modules/@primer/react/lib-esm/constants.js");
/* harmony import */ var _Dialog_ConfirmationDialog_js__WEBPACK_IMPORTED_MODULE_16__ = __webpack_require__(/*! ../Dialog/ConfirmationDialog.js */ "../../node_modules/@primer/react/lib-esm/Dialog/ConfirmationDialog.js");
/* harmony import */ var _hooks_useControllableState_js__WEBPACK_IMPORTED_MODULE_11__ = __webpack_require__(/*! ../hooks/useControllableState.js */ "../../node_modules/@primer/react/lib-esm/hooks/useControllableState.js");
/* harmony import */ var _hooks_useSafeTimeout_js__WEBPACK_IMPORTED_MODULE_12__ = __webpack_require__(/*! ../hooks/useSafeTimeout.js */ "../../node_modules/@primer/react/lib-esm/hooks/useSafeTimeout.js");
/* harmony import */ var _Spinner_js__WEBPACK_IMPORTED_MODULE_14__ = __webpack_require__(/*! ../Spinner.js */ "../../node_modules/@primer/react/lib-esm/Spinner.js");
/* harmony import */ var _sx_js__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(/*! ../sx.js */ "../../node_modules/@primer/react/lib-esm/sx.js");
/* harmony import */ var _Text_js__WEBPACK_IMPORTED_MODULE_15__ = __webpack_require__(/*! ../Text.js */ "../../node_modules/@primer/react/lib-esm/Text.js");
/* harmony import */ var _utils_create_slots_js__WEBPACK_IMPORTED_MODULE_9__ = __webpack_require__(/*! ../utils/create-slots.js */ "../../node_modules/@primer/react/lib-esm/utils/create-slots.js");
/* harmony import */ var _VisuallyHidden_js__WEBPACK_IMPORTED_MODULE_8__ = __webpack_require__(/*! ../_VisuallyHidden.js */ "../../node_modules/@primer/react/lib-esm/_VisuallyHidden.js");
/* harmony import */ var _shared_js__WEBPACK_IMPORTED_MODULE_13__ = __webpack_require__(/*! ./shared.js */ "../../node_modules/@primer/react/lib-esm/TreeView/shared.js");
/* harmony import */ var _useRovingTabIndex_js__WEBPACK_IMPORTED_MODULE_6__ = __webpack_require__(/*! ./useRovingTabIndex.js */ "../../node_modules/@primer/react/lib-esm/TreeView/useRovingTabIndex.js");
/* harmony import */ var _useTypeahead_js__WEBPACK_IMPORTED_MODULE_7__ = __webpack_require__(/*! ./useTypeahead.js */ "../../node_modules/@primer/react/lib-esm/TreeView/useTypeahead.js");
// ----------------------------------------------------------------------------
// Context
const RootContext = /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createContext({
announceUpdate: () => {},
expandedStateCache: {
current: new Map()
}
});
const ItemContext = /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createContext({
itemId: '',
level: 1,
isSubTreeEmpty: false,
setIsSubTreeEmpty: () => {},
isExpanded: false,
setIsExpanded: () => {},
leadingVisualId: '',
trailingVisualId: ''
});
// ----------------------------------------------------------------------------
// TreeView
const UlBox = styled_components__WEBPACK_IMPORTED_MODULE_3___default().ul.withConfig({
displayName: "TreeView__UlBox",
componentId: "sc-4ex6b6-0"
})(["list-style:none;padding:0;margin:0;.PRIVATE_TreeView-item{outline:none;&:focus-visible > div{box-shadow:inset 0 0 0 2px ", ";@media (forced-colors:active){outline:2px solid HighlightText;outline-offset:-2;}}}.PRIVATE_TreeView-item-container{--level:1;--toggle-width:1rem;position:relative;display:grid;grid-template-columns:calc(calc(var(--level) - 1) * (var(--toggle-width) / 2)) var(--toggle-width) 1fr;grid-template-areas:'spacer toggle content';width:100%;min-height:2rem;font-size:", ";color:", ";border-radius:", ";cursor:pointer;&:hover{background-color:", ";@media (forced-colors:active){outline:2px solid transparent;outline-offset:-2px;}}@media (pointer:coarse){--toggle-width:1.5rem;min-height:2.75rem;}&:has(.PRIVATE_TreeView-item-skeleton):hover{background-color:transparent;cursor:default;@media (forced-colors:active){outline:none;}}}.PRIVATE_TreeView-item[aria-current='true'] > .PRIVATE_TreeView-item-container{background-color:", ";&::after{content:'';position:absolute;top:calc(50% - 0.75rem);left:-", ";width:0.25rem;height:1.5rem;background-color:", ";border-radius:", ";@media (forced-colors:active){background-color:HighlightText;}}}.PRIVATE_TreeView-item-toggle{grid-area:toggle;display:flex;align-items:center;justify-content:center;height:100%;color:", ";}.PRIVATE_TreeView-item-toggle--hover:hover{background-color:", ";}.PRIVATE_TreeView-item-toggle--end{border-top-left-radius:", ";border-bottom-left-radius:", ";}.PRIVATE_TreeView-item-content{grid-area:content;display:flex;align-items:center;height:100%;padding:0 ", ";gap:", ";}.PRIVATE_TreeView-item-content-text{flex:1 1 auto;width:0;overflow:hidden;white-space:nowrap;text-overflow:ellipsis;}.PRIVATE_TreeView-item-visual{display:flex;color:", ";}.PRIVATE_TreeView-item-level-line{width:100%;height:100%;border-right:1px solid;border-color:", ";}@media (hover:hover){.PRIVATE_TreeView-item-level-line{border-color:transparent;}&:hover .PRIVATE_TreeView-item-level-line,&:focus-within .PRIVATE_TreeView-item-level-line{border-color:", ";}}.PRIVATE_TreeView-directory-icon{display:grid;color:", ";}.PRIVATE_VisuallyHidden{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border-width:0;}", ""], (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)(`colors.accent.fg`), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('fontSizes.1'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('colors.fg.default'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('radii.2'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('colors.actionListItem.default.hoverBg'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('colors.actionListItem.default.selectedBg'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('space.2'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('colors.accent.fg'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('radii.2'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('colors.fg.muted'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('colors.treeViewItem.chevron.hoverBg'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('radii.2'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('radii.2'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('space.2'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('space.2'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('colors.fg.muted'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('colors.border.subtle'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('colors.border.subtle'), 
(0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('colors.treeViewItem.directory.fill'), _sx_js__WEBPACK_IMPORTED_MODULE_5__["default"]);
const Root = ({
'aria-label': ariaLabel,
'aria-labelledby': ariaLabelledby,
children
}) => {
const containerRef = react__WEBPACK_IMPORTED_MODULE_2___default().useRef(null);
const [ariaLiveMessage, setAriaLiveMessage] = react__WEBPACK_IMPORTED_MODULE_2___default().useState('');
const announceUpdate = react__WEBPACK_IMPORTED_MODULE_2___default().useCallback(message => {
setAriaLiveMessage(message);
}, []);
(0,_useRovingTabIndex_js__WEBPACK_IMPORTED_MODULE_6__.useRovingTabIndex)({
containerRef
});
(0,_useTypeahead_js__WEBPACK_IMPORTED_MODULE_7__.useTypeahead)({
containerRef,
onFocusChange: element => {
if (element instanceof HTMLElement) {
element.focus();
}
}
});
const expandedStateCache = react__WEBPACK_IMPORTED_MODULE_2___default().useRef(null);
if (expandedStateCache.current === null) {
expandedStateCache.current = new Map();
}
return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(RootContext.Provider, {
value: {
announceUpdate,
expandedStateCache
}
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement((react__WEBPACK_IMPORTED_MODULE_2___default().Fragment), null, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(_VisuallyHidden_js__WEBPACK_IMPORTED_MODULE_8__["default"], {
role: "status",
"aria-live": "polite",
"aria-atomic": "true"
}, ariaLiveMessage), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(UlBox, {
ref: containerRef,
role: "tree",
"aria-label": ariaLabel,
"aria-labelledby": ariaLabelledby
}, children)));
};
Root.displayName = "Root";
Root.displayName = 'TreeView';
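// Illustrative usage of the exported API (not part of this module); the ids and
// labels below are made up:
//
//   <TreeView aria-label="Files">
//     <TreeView.Item id="src" defaultExpanded>
//       <TreeView.LeadingVisual label="Directory"><TreeView.DirectoryIcon /></TreeView.LeadingVisual>
//       src
//       <TreeView.SubTree>
//         <TreeView.Item id="src/index.ts" current>index.ts</TreeView.Item>
//       </TreeView.SubTree>
//     </TreeView.Item>
//   </TreeView>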
// ----------------------------------------------------------------------------
// TreeView.Item
const {
Slots,
Slot
} = (0,_utils_create_slots_js__WEBPACK_IMPORTED_MODULE_9__["default"])(['LeadingVisual', 'TrailingVisual']);
const Item = /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().forwardRef(({
id: itemId,
current: isCurrentItem = false,
defaultExpanded,
expanded,
onExpandedChange,
onSelect,
children
}, ref) => {
const {
expandedStateCache
} = react__WEBPACK_IMPORTED_MODULE_2___default().useContext(RootContext);
const labelId = (0,_react_aria_ssr__WEBPACK_IMPORTED_MODULE_10__.useSSRSafeId)();
const leadingVisualId = (0,_react_aria_ssr__WEBPACK_IMPORTED_MODULE_10__.useSSRSafeId)();
const trailingVisualId = (0,_react_aria_ssr__WEBPACK_IMPORTED_MODULE_10__.useSSRSafeId)();
const [isExpanded, setIsExpanded] = (0,_hooks_useControllableState_js__WEBPACK_IMPORTED_MODULE_11__.useControllableState)({
name: itemId,
  // If the item was previously mounted, its expanded state might be cached.
// We check the cache first, and then fall back to the defaultExpanded prop.
// If defaultExpanded is not provided, we default to false unless the item
// is the current item, in which case we default to true.
defaultValue: () => {
var _ref, _expandedStateCache$c, _expandedStateCache$c2;
return (_ref = (_expandedStateCache$c = (_expandedStateCache$c2 = expandedStateCache.current) === null || _expandedStateCache$c2 === void 0 ? void 0 : _expandedStateCache$c2.get(itemId)) !== null && _expandedStateCache$c !== void 0 ? _expandedStateCache$c : defaultExpanded) !== null && _ref !== void 0 ? _ref : isCurrentItem;
},
value: expanded,
onChange: onExpandedChange
});
const {
level
} = react__WEBPACK_IMPORTED_MODULE_2___default().useContext(ItemContext);
const {
hasSubTree,
subTree,
childrenWithoutSubTree
} = useSubTree(children);
const [isSubTreeEmpty, setIsSubTreeEmpty] = react__WEBPACK_IMPORTED_MODULE_2___default().useState(!hasSubTree);
// Set the expanded state and cache it
const setIsExpandedWithCache = react__WEBPACK_IMPORTED_MODULE_2___default().useCallback(newIsExpanded => {
var _expandedStateCache$c3;
setIsExpanded(newIsExpanded);
(_expandedStateCache$c3 = expandedStateCache.current) === null || _expandedStateCache$c3 === void 0 ? void 0 : _expandedStateCache$c3.set(itemId, newIsExpanded);
}, [itemId, setIsExpanded, expandedStateCache]);
// Expand or collapse the subtree
const toggle = react__WEBPACK_IMPORTED_MODULE_2___default().useCallback(event => {
setIsExpandedWithCache(!isExpanded);
event === null || event === void 0 ? void 0 : event.stopPropagation();
}, [isExpanded, setIsExpandedWithCache]);
const handleKeyDown = react__WEBPACK_IMPORTED_MODULE_2___default().useCallback(event => {
switch (event.key) {
case 'Enter':
if (onSelect) {
onSelect(event);
} else {
toggle(event);
}
break;
case 'ArrowRight':
event.preventDefault();
event.stopPropagation();
setIsExpandedWithCache(true);
break;
case 'ArrowLeft':
event.preventDefault();
event.stopPropagation();
setIsExpandedWithCache(false);
break;
}
}, [onSelect, setIsExpandedWithCache, toggle]);
return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(ItemContext.Provider, {
value: {
itemId,
level: level + 1,
isSubTreeEmpty,
setIsSubTreeEmpty,
isExpanded,
setIsExpanded: setIsExpandedWithCache,
leadingVisualId,
trailingVisualId
}
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement("li", {
className: "PRIVATE_TreeView-item",
ref: ref,
tabIndex: 0,
id: itemId,
role: "treeitem",
"aria-labelledby": labelId,
"aria-describedby": `${leadingVisualId} ${trailingVisualId}`,
"aria-level": level,
"aria-expanded": isSubTreeEmpty ? undefined : isExpanded,
"aria-current": isCurrentItem ? 'true' : undefined,
onKeyDown: handleKeyDown,
onFocus: event => {
var _event$currentTarget$;
// Scroll the first child into view when the item receives focus
(_event$currentTarget$ = event.currentTarget.firstElementChild) === null || _event$currentTarget$ === void 0 ? void 0 : _event$currentTarget$.scrollIntoView({
block: 'nearest',
inline: 'nearest'
});
// Prevent focus event from bubbling up to parent items
event.stopPropagation();
}
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement("div", {
className: "PRIVATE_TreeView-item-container",
style: {
// @ts-ignore CSS custom property
'--level': level
},
onClick: event => {
if (onSelect) {
onSelect(event);
} else {
toggle(event);
}
}
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement("div", {
style: {
gridArea: 'spacer',
display: 'flex'
}
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(LevelIndicatorLines, {
level: level
})), hasSubTree ?
/*#__PURE__*/
// eslint-disable-next-line jsx-a11y/click-events-have-key-events, jsx-a11y/no-static-element-interactions
react__WEBPACK_IMPORTED_MODULE_2___default().createElement("div", {
className: classnames__WEBPACK_IMPORTED_MODULE_1___default()('PRIVATE_TreeView-item-toggle', onSelect && 'PRIVATE_TreeView-item-toggle--hover', level === 1 && 'PRIVATE_TreeView-item-toggle--end'),
onClick: event => {
if (onSelect) {
toggle(event);
}
}
}, isExpanded ? /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(_primer_octicons_react__WEBPACK_IMPORTED_MODULE_0__.ChevronDownIcon, {
size: 12
}) : /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(_primer_octicons_react__WEBPACK_IMPORTED_MODULE_0__.ChevronRightIcon, {
size: 12
})) : null, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement("div", {
id: labelId,
className: "PRIVATE_TreeView-item-content"
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(Slots, null, slots => /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement((react__WEBPACK_IMPORTED_MODULE_2___default().Fragment), null, slots.LeadingVisual, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement("span", {
className: "PRIVATE_TreeView-item-content-text"
}, childrenWithoutSubTree), slots.TrailingVisual)))), subTree));
});
/** Lines to indicate the depth of an item in a TreeView */
const LevelIndicatorLines = ({
level
}) => {
return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement("div", {
style: {
width: '100%',
display: 'flex'
}
}, Array.from({
length: level - 1
}).map((_, index) => /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement("div", {
key: index,
className: "PRIVATE_TreeView-item-level-line"
})));
};
LevelIndicatorLines.displayName = "LevelIndicatorLines";
Item.displayName = 'TreeView.Item';
// ----------------------------------------------------------------------------
// TreeView.SubTree
const SubTree = ({
count,
state,
children
}) => {
const {
announceUpdate
} = react__WEBPACK_IMPORTED_MODULE_2___default().useContext(RootContext);
const {
itemId,
isExpanded,
isSubTreeEmpty,
setIsSubTreeEmpty
} = react__WEBPACK_IMPORTED_MODULE_2___default().useContext(ItemContext);
const [isLoadingItemVisible, setIsLoadingItemVisible] = react__WEBPACK_IMPORTED_MODULE_2___default().useState(false);
const {
safeSetTimeout
} = (0,_hooks_useSafeTimeout_js__WEBPACK_IMPORTED_MODULE_12__["default"])();
const loadingItemRef = react__WEBPACK_IMPORTED_MODULE_2___default().useRef(null);
const ref = react__WEBPACK_IMPORTED_MODULE_2___default().useRef(null);
react__WEBPACK_IMPORTED_MODULE_2___default().useEffect(() => {
// If `state` is undefined, we're working in a synchronous context and need
    // to detect if the sub-tree has content. If `state === 'done'` then we're
// working in an asynchronous context and need to see if there is content
// that has been loaded in.
if (state === undefined || state === 'done') {
if (!isSubTreeEmpty && !children) {
setIsSubTreeEmpty(true);
} else if (isSubTreeEmpty && children) {
setIsSubTreeEmpty(false);
}
}
}, [state, isSubTreeEmpty, setIsSubTreeEmpty, children]);
// Announce when content has loaded
react__WEBPACK_IMPORTED_MODULE_2___default().useEffect(() => {
if (state === 'done') {
const parentItem = document.getElementById(itemId);
if (!parentItem) return;
const {
current: node
} = ref;
const parentName = (0,_shared_js__WEBPACK_IMPORTED_MODULE_13__.getAccessibleName)(parentItem);
safeSetTimeout(() => {
if (node && node.childElementCount > 0) {
announceUpdate(`${parentName} content loaded`);
} else {
announceUpdate(`${parentName} is empty`);
}
});
}
}, [state, itemId, announceUpdate, safeSetTimeout]);
// Manage loading indicator state
react__WEBPACK_IMPORTED_MODULE_2___default().useEffect(() => {
// If we're in the loading state, but not showing the loading indicator yet,
// show the loading indicator
if (state === 'loading' && !isLoadingItemVisible) {
setIsLoadingItemVisible(true);
}
// If we're not in the loading state, but we're still showing a loading indicator,
// hide the loading indicator and move focus if necessary
if (state !== 'loading' && isLoadingItemVisible) {
const isLoadingItemFocused = document.activeElement === loadingItemRef.current;
setIsLoadingItemVisible(false);
if (isLoadingItemFocused) {
safeSetTimeout(() => {
const parentElement = document.getElementById(itemId);
if (!parentElement) return;
const firstChild = (0,_useRovingTabIndex_js__WEBPACK_IMPORTED_MODULE_6__.getFirstChildElement)(parentElement);
if (firstChild) {
firstChild.focus();
} else {
parentElement.focus();
}
});
}
}
}, [state, safeSetTimeout, isLoadingItemVisible, itemId]);
if (!isExpanded) {
return null;
}
return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement("ul", {
role: "group",
style: {
listStyle: 'none',
padding: 0,
margin: 0
}
// @ts-ignore Box doesn't have type support for `ref` used in combination with `as`
,
ref: ref
}, isLoadingItemVisible ? /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(LoadingItem, {
ref: loadingItemRef,
count: count
}) : children);
};
SubTree.displayName = "SubTree";
SubTree.displayName = 'TreeView.SubTree';
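// Sketch of asynchronous loading (values are illustrative): while data is being fetched,
// pass state="loading" together with an optional `count` to render that many skeleton rows;
// switch to state="done" once children are available so the loaded/empty announcement fires.
//
//   <TreeView.SubTree state={isLoading ? 'loading' : 'done'} count={3}>
//     {items.map(item => <TreeView.Item id={item.id} key={item.id}>{item.name}</TreeView.Item>)}
//   </TreeView.SubTree>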
const shimmer = (0,styled_components__WEBPACK_IMPORTED_MODULE_3__.keyframes)(["from{mask-position:200%;}to{mask-position:0%;}"]);
const SkeletonItem = styled_components__WEBPACK_IMPORTED_MODULE_3___default().span.attrs({
className: 'PRIVATE_TreeView-item-skeleton'
}).withConfig({
displayName: "TreeView__SkeletonItem",
componentId: "sc-4ex6b6-1"
})(["display:flex;align-items:center;column-gap:0.5rem;height:2rem;@media (pointer:coarse){height:2.75rem;}@media (prefers-reduced-motion:no-preference){mask-image:linear-gradient(75deg,#000 30%,rgba(0,0,0,0.65) 80%);mask-size:200%;animation:", ";animation-duration:1s;animation-iteration-count:infinite;}&::before{content:'';display:block;width:1rem;height:1rem;background-color:", ";border-radius:3px;@media (forced-colors:active){outline:1px solid transparent;outline-offset:-1px;}}&::after{content:'';display:block;width:var(--tree-item-loading-width,67%);height:1rem;background-color:", ";border-radius:3px;@media (forced-colors:active){outline:1px solid transparent;outline-offset:-1px;}}&:nth-of-type(5n + 1){--tree-item-loading-width:67%;}&:nth-of-type(5n + 2){--tree-item-loading-width:47%;}&:nth-of-type(5n + 3){--tree-item-loading-width:73%;}&:nth-of-type(5n + 4){--tree-item-loading-width:64%;}&:nth-of-type(5n + 5){--tree-item-loading-width:50%;}"], shimmer, (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('colors.neutral.subtle'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_4__.get)('colors.neutral.subtle'));
const LoadingItem = /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().forwardRef(({
count
}, ref) => {
const itemId = (0,_react_aria_ssr__WEBPACK_IMPORTED_MODULE_10__.useSSRSafeId)();
if (count) {
return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(Item, {
id: itemId,
ref: ref
}, Array.from({
length: count
}).map((_, i) => {
return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(SkeletonItem, {
"aria-hidden": true,
key: i
});
}), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement("div", {
className: "PRIVATE_VisuallyHidden"
}, "Loading ", count, " items"));
}
return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(Item, {
id: itemId,
ref: ref
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(LeadingVisual, null, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(_Spinner_js__WEBPACK_IMPORTED_MODULE_14__["default"], {
size: "small"
})), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(_Text_js__WEBPACK_IMPORTED_MODULE_15__["default"], {
sx: {
color: 'fg.muted'
}
}, "Loading..."));
});
function useSubTree(children) {
return react__WEBPACK_IMPORTED_MODULE_2___default().useMemo(() => {
const subTree = react__WEBPACK_IMPORTED_MODULE_2___default().Children.toArray(children).find(child => /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().isValidElement(child) && child.type === SubTree);
const childrenWithoutSubTree = react__WEBPACK_IMPORTED_MODULE_2___default().Children.toArray(children).filter(child => !( /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().isValidElement(child) && child.type === SubTree));
return {
subTree,
childrenWithoutSubTree,
hasSubTree: Boolean(subTree)
};
}, [children]);
}
// ----------------------------------------------------------------------------
// TreeView.LeadingVisual and TreeView.TrailingVisual
const LeadingVisual = props => {
const {
isExpanded,
leadingVisualId
} = react__WEBPACK_IMPORTED_MODULE_2___default().useContext(ItemContext);
const children = typeof props.children === 'function' ? props.children({
isExpanded
}) : props.children;
return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(Slot, {
name: "LeadingVisual"
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement("div", {
className: "PRIVATE_VisuallyHidden",
"aria-hidden": true,
id: leadingVisualId
}, props.label), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement("div", {
className: "PRIVATE_TreeView-item-visual",
"aria-hidden": true
}, children));
};
LeadingVisual.displayName = "LeadingVisual";
LeadingVisual.displayName = 'TreeView.LeadingVisual';
const TrailingVisual = props => {
const {
isExpanded,
trailingVisualId
} = react__WEBPACK_IMPORTED_MODULE_2___default().useContext(ItemContext);
const children = typeof props.children === 'function' ? props.children({
isExpanded
}) : props.children;
return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(Slot, {
name: "TrailingVisual"
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement("div", {
className: "PRIVATE_VisuallyHidden",
"aria-hidden": true,
id: trailingVisualId
}, props.label), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement("div", {
className: "PRIVATE_TreeView-item-visual",
"aria-hidden": true
}, children));
};
TrailingVisual.displayName = "TrailingVisual";
TrailingVisual.displayName = 'TreeView.TrailingVisual';
// ----------------------------------------------------------------------------
// TreeView.DirectoryIcon
const DirectoryIcon = () => {
const {
isExpanded
} = react__WEBPACK_IMPORTED_MODULE_2___default().useContext(ItemContext);
const Icon = isExpanded ? _primer_octicons_react__WEBPACK_IMPORTED_MODULE_0__.FileDirectoryOpenFillIcon : _primer_octicons_react__WEBPACK_IMPORTED_MODULE_0__.FileDirectoryFillIcon;
return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement("div", {
className: "PRIVATE_TreeView-directory-icon"
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(Icon, null));
};
DirectoryIcon.displayName = "DirectoryIcon";
const ErrorDialog = ({
title = 'Error',
children,
onRetry,
onDismiss
}) => {
const {
itemId,
setIsExpanded
} = react__WEBPACK_IMPORTED_MODULE_2___default().useContext(ItemContext);
return (
/*#__PURE__*/
// eslint-disable-next-line jsx-a11y/no-static-element-interactions
react__WEBPACK_IMPORTED_MODULE_2___default().createElement("div", {
onKeyDown: event => {
if (['Backspace', 'ArrowLeft', 'ArrowRight', 'ArrowUp', 'ArrowDown', 'Enter'].includes(event.key)) {
// Prevent keyboard events from bubbling up to the TreeView
// and interfering with keyboard navigation
event.stopPropagation();
}
}
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_2___default().createElement(_Dialog_ConfirmationDialog_js__WEBPACK_IMPORTED_MODULE_16__.ConfirmationDialog, {
title: title,
onClose: gesture => {
// Focus parent item after the dialog is closed
setTimeout(() => {
const parentElement = document.getElementById(itemId);
parentElement === null || parentElement === void 0 ? void 0 : parentElement.focus();
});
if (gesture === 'confirm') {
onRetry === null || onRetry === void 0 ? void 0 : onRetry();
} else {
setIsExpanded(false);
onDismiss === null || onDismiss === void 0 ? void 0 : onDismiss();
}
},
confirmButtonContent: "Retry",
cancelButtonContent: "Dismiss"
}, children))
);
};
ErrorDialog.displayName = "ErrorDialog";
ErrorDialog.displayName = 'TreeView.ErrorDialog';
// ----------------------------------------------------------------------------
// Export
const TreeView = Object.assign(Root, {
Item,
SubTree,
LeadingVisual,
TrailingVisual,
DirectoryIcon,
ErrorDialog
});
/***/ }),
/***/ "../../node_modules/@primer/react/lib-esm/TreeView/shared.js":
/*!*******************************************************************!*\
!*** ../../node_modules/@primer/react/lib-esm/TreeView/shared.js ***!
\*******************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "getAccessibleName": () => (/* binding */ getAccessibleName)
/* harmony export */ });
/**
* Returns the accessible name of an element
*/
function getAccessibleName(element) {
var _document$getElementB, _document$getElementB2, _element$textContent;
const label = element.getAttribute('aria-label');
const labelledby = element.getAttribute('aria-labelledby');
if (label) return label;
if (labelledby) return (_document$getElementB = (_document$getElementB2 = document.getElementById(labelledby)) === null || _document$getElementB2 === void 0 ? void 0 : _document$getElementB2.textContent) !== null && _document$getElementB !== void 0 ? _document$getElementB : '';
return (_element$textContent = element.textContent) !== null && _element$textContent !== void 0 ? _element$textContent : '';
}
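// Resolution order, e.g. <div role="treeitem" aria-label="src">src folder</div> yields "src";
// with only aria-labelledby it yields the referenced element's textContent; otherwise the
// element's own textContent ("src folder") is returned.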
/***/ }),
/***/ "../../node_modules/@primer/react/lib-esm/TreeView/useRovingTabIndex.js":
/*!******************************************************************************!*\
!*** ../../node_modules/@primer/react/lib-esm/TreeView/useRovingTabIndex.js ***!
\******************************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "getElementState": () => (/* binding */ getElementState),
/* harmony export */ "getFirstChildElement": () => (/* binding */ getFirstChildElement),
/* harmony export */ "getFirstElement": () => (/* binding */ getFirstElement),
/* harmony export */ "getLastElement": () => (/* binding */ getLastElement),
/* harmony export */ "getNextFocusableElement": () => (/* binding */ getNextFocusableElement),
/* harmony export */ "getParentElement": () => (/* binding */ getParentElement),
/* harmony export */ "getVisibleElement": () => (/* binding */ getVisibleElement),
/* harmony export */ "useRovingTabIndex": () => (/* binding */ useRovingTabIndex)
/* harmony export */ });
/* harmony import */ var _hooks_useFocusZone_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ../hooks/useFocusZone.js */ "../../node_modules/@primer/react/lib-esm/hooks/useFocusZone.js");
/* harmony import */ var _utils_scroll_js__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! ../utils/scroll.js */ "../../node_modules/@primer/react/lib-esm/utils/scroll.js");
/* harmony import */ var _primer_behaviors__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! @primer/behaviors */ "../../node_modules/@primer/behaviors/dist/esm/focus-zone.js");
function useRovingTabIndex({
containerRef
}) {
// TODO: Initialize focus to the aria-current item if it exists
(0,_hooks_useFocusZone_js__WEBPACK_IMPORTED_MODULE_0__.useFocusZone)({
containerRef,
bindKeys: _primer_behaviors__WEBPACK_IMPORTED_MODULE_1__.FocusKeys.ArrowVertical | _primer_behaviors__WEBPACK_IMPORTED_MODULE_1__.FocusKeys.ArrowHorizontal | _primer_behaviors__WEBPACK_IMPORTED_MODULE_1__.FocusKeys.HomeAndEnd | _primer_behaviors__WEBPACK_IMPORTED_MODULE_1__.FocusKeys.Backspace | _primer_behaviors__WEBPACK_IMPORTED_MODULE_1__.FocusKeys.PageUpDown,
preventScroll: true,
getNextFocusable: (direction, from, event) => {
var _getNextFocusableElem;
if (!(from instanceof HTMLElement)) return;
return (_getNextFocusableElem = getNextFocusableElement(from, event)) !== null && _getNextFocusableElem !== void 0 ? _getNextFocusableElem : from;
}
});
}
// DOM utilities used for focus management
function getNextFocusableElement(activeElement, event) {
const elementState = getElementState(activeElement);
// Reference: https://www.w3.org/WAI/ARIA/apg/patterns/treeview/#keyboard-interaction-24
switch (`${elementState} ${event.key}`) {
case 'open ArrowRight':
// Focus first child node
return getFirstChildElement(activeElement);
case 'open ArrowLeft':
// Close node; don't change focus
return;
case 'closed ArrowRight':
// Open node; don't change focus
return;
case 'closed ArrowLeft':
// Focus parent element
return getParentElement(activeElement);
case 'end ArrowRight':
// Do nothing
return;
case 'end ArrowLeft':
// Focus parent element
return getParentElement(activeElement);
}
  // ArrowUp, ArrowDown, Home, and End behavior is the same regardless of element state
switch (event.key) {
case 'ArrowUp':
// Focus previous visible element
return getVisibleElement(activeElement, 'previous');
case 'ArrowDown':
// Focus next visible element
return getVisibleElement(activeElement, 'next');
case 'Backspace':
return getParentElement(activeElement);
case 'Home':
// Focus first visible element
return getFirstElement(activeElement);
case 'End':
// Focus last visible element
return getLastElement(activeElement);
case 'PageUp':
return getPreviousPageElement(activeElement);
case 'PageDown':
return getNextPageElement(activeElement);
}
}
function getElementState(element) {
if (element.getAttribute('role') !== 'treeitem') {
throw new Error('Element is not a treeitem');
}
switch (element.getAttribute('aria-expanded')) {
case 'true':
return 'open';
case 'false':
return 'closed';
default:
return 'end';
}
}
function getVisibleElement(element, direction) {
const root = element.closest('[role=tree]');
if (!root) return;
const walker = document.createTreeWalker(root, NodeFilter.SHOW_ELEMENT, node => {
if (!(node instanceof HTMLElement)) return NodeFilter.FILTER_SKIP;
return node.getAttribute('role') === 'treeitem' ? NodeFilter.FILTER_ACCEPT : NodeFilter.FILTER_SKIP;
});
let current = walker.firstChild();
while (current !== element) {
current = walker.nextNode();
}
let next = direction === 'next' ? walker.nextNode() : walker.previousNode();
// If next element is nested inside a collapsed subtree, continue iterating
while (next instanceof HTMLElement && (_next$parentElement = next.parentElement) !== null && _next$parentElement !== void 0 && _next$parentElement.closest('[role=treeitem][aria-expanded=false]')) {
var _next$parentElement;
next = direction === 'next' ? walker.nextNode() : walker.previousNode();
}
return next instanceof HTMLElement ? next : undefined;
}
function getFirstChildElement(element) {
const firstChild = element.querySelector('[role=treeitem]');
return firstChild instanceof HTMLElement ? firstChild : undefined;
}
function getParentElement(element) {
const group = element.closest('[role=group]');
const parent = group === null || group === void 0 ? void 0 : group.closest('[role=treeitem]');
return parent instanceof HTMLElement ? parent : undefined;
}
function getFirstElement(element) {
const root = element.closest('[role=tree]');
const first = root === null || root === void 0 ? void 0 : root.querySelector('[role=treeitem]');
return first instanceof HTMLElement ? first : undefined;
}
function getLastElement(element) {
const root = element.closest('[role=tree]');
const items = Array.from((root === null || root === void 0 ? void 0 : root.querySelectorAll('[role=treeitem]')) || []);
// If there are no items, return undefined
if (items.length === 0) return;
let index = items.length - 1;
let last = items[index];
// If last element is nested inside a collapsed subtree, continue iterating
while (index > 0 && last instanceof HTMLElement && (_last$parentElement = last.parentElement) !== null && _last$parentElement !== void 0 && _last$parentElement.closest('[role=treeitem][aria-expanded=false]')) {
var _last$parentElement;
index -= 1;
last = items[index];
}
return last instanceof HTMLElement ? last : undefined;
}
const defaultSize = {
height: 32
};
/**
* Determine the page size for the given tree based on an item in the tree. We
* estimate this size by trying to see how many items will fit in the given
* tree. If the tree is within a scroll container, we will use the height of
* that container. Otherwise, we'll use the current window height
*/
function getPageSize(root, item) {
var _item$getBoundingClie, _scrollContainer$clie;
const scrollContainer = (0,_utils_scroll_js__WEBPACK_IMPORTED_MODULE_2__.getScrollContainer)(root);
const {
height: itemHeight
} = (_item$getBoundingClie = item === null || item === void 0 ? void 0 : item.getBoundingClientRect()) !== null && _item$getBoundingClie !== void 0 ? _item$getBoundingClie : defaultSize;
const availableHeight = (_scrollContainer$clie = scrollContainer === null || scrollContainer === void 0 ? void 0 : scrollContainer.clientHeight) !== null && _scrollContainer$clie !== void 0 ? _scrollContainer$clie : window.innerHeight;
return Math.floor(availableHeight / itemHeight);
}
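// Returns the treeitem roughly one page below the given element (PageDown behavior), clamped to the last item.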
function getNextPageElement(element) {
const root = element.closest('[role="tree"]');
if (!root) {
return;
}
const items = Array.from(root.querySelectorAll('[role="treeitem"]'));
if (items.length === 0) {
return;
}
const itemLabel = items[0].firstElementChild;
const pageSize = getPageSize(root, itemLabel);
const page = Math.floor(items.indexOf(element) / pageSize);
const offset = items.indexOf(element) - pageSize * page;
return items[Math.min(items.length - 1, (page + 1) * pageSize + offset)];
}
function getPreviousPageElement(element) {
const root = element.closest('[role="tree"]');
if (!root) {
return;
}
const items = Array.from(root.querySelectorAll('[role="treeitem"]'));
if (items.length === 0) {
return;
}
const itemLabel = items[0].firstElementChild;
const pageSize = getPageSize(root, itemLabel);
const page = Math.floor(items.indexOf(element) / pageSize);
const offset = items.indexOf(element) - pageSize * page;
return items[Math.max(0, (page - 1) * pageSize + offset)];
}
/***/ }),
/***/ "../../node_modules/@primer/react/lib-esm/TreeView/useTypeahead.js":
/*!*************************************************************************!*\
!*** ../../node_modules/@primer/react/lib-esm/TreeView/useTypeahead.js ***!
\*************************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "useTypeahead": () => (/* binding */ useTypeahead)
/* harmony export */ });
/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react */ "webpack/sharing/consume/default/react");
/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(react__WEBPACK_IMPORTED_MODULE_0__);
/* harmony import */ var _hooks_useSafeTimeout_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../hooks/useSafeTimeout.js */ "../../node_modules/@primer/react/lib-esm/hooks/useSafeTimeout.js");
/* harmony import */ var _shared_js__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! ./shared.js */ "../../node_modules/@primer/react/lib-esm/TreeView/shared.js");
function useTypeahead({
containerRef,
onFocusChange
}) {
const [searchValue, setSearchValue] = react__WEBPACK_IMPORTED_MODULE_0___default().useState('');
const timeoutRef = react__WEBPACK_IMPORTED_MODULE_0___default().useRef(0);
const onFocusChangeRef = react__WEBPACK_IMPORTED_MODULE_0___default().useRef(onFocusChange);
const {
safeSetTimeout,
safeClearTimeout
} = (0,_hooks_useSafeTimeout_js__WEBPACK_IMPORTED_MODULE_1__["default"])();
// Update the ref when the callback changes
react__WEBPACK_IMPORTED_MODULE_0___default().useEffect(() => {
onFocusChangeRef.current = onFocusChange;
}, [onFocusChange]);
// Update the search value when the user types
react__WEBPACK_IMPORTED_MODULE_0___default().useEffect(() => {
if (!containerRef.current) return;
const container = containerRef.current;
function onKeyDown(event) {
// Ignore key presses that don't produce a character value
if (!event.key || event.key.length > 1) return;
// Ignore key presses that occur with a modifier
if (event.ctrlKey || event.altKey || event.metaKey) return;
// Update the existing search value with the new key press
setSearchValue(value => value + event.key);
// Reset the timeout
safeClearTimeout(timeoutRef.current);
timeoutRef.current = safeSetTimeout(() => setSearchValue(''), 300);
// Prevent default behavior
event.preventDefault();
event.stopPropagation();
}
container.addEventListener('keydown', onKeyDown);
return () => container.removeEventListener('keydown', onKeyDown);
}, [containerRef, safeClearTimeout, safeSetTimeout]);
// Update focus when the search value changes
react__WEBPACK_IMPORTED_MODULE_0___default().useEffect(() => {
// Don't change focus if the search value is empty
if (!searchValue) return;
if (!containerRef.current) return;
const container = containerRef.current;
// Get focusable elements
const elements = Array.from(container.querySelectorAll('[role="treeitem"]'))
// Filter out collapsed items
.filter(element => {
var _element$parentElemen;
return !((_element$parentElemen = element.parentElement) !== null && _element$parentElemen !== void 0 && _element$parentElemen.closest('[role=treeitem][aria-expanded=false]'));
});
// Get the index of active element
const activeIndex = elements.findIndex(element => element === document.activeElement);
// Wrap the array elements such that the active descendant is at the beginning
let sortedElements = wrapArray(elements, activeIndex);
// Remove the active descendant from the beginning of the array
// when the user initiates a new search
if (searchValue.length === 1) {
sortedElements = sortedElements.slice(1);
}
// Find the first element that matches the search value
const nextElement = sortedElements.find(element => {
const name = (0,_shared_js__WEBPACK_IMPORTED_MODULE_2__.getAccessibleName)(element).toLowerCase();
return name.startsWith(searchValue.toLowerCase());
});
// If a match is found, focus it
if (nextElement) {
onFocusChangeRef.current(nextElement);
}
}, [searchValue, containerRef]);
}
/**
* Wraps an array around itself at a given start index
*
* @example
* wrapArray(['a', 'b', 'c', 'd'], 2) // ['c', 'd', 'a', 'b']
*/
function wrapArray(array, startIndex) {
return array.map((_, index) => array[(startIndex + index) % array.length]);
}
/***/ }),
/***/ "../../node_modules/@primer/react/lib-esm/UnderlineNav2/LoadingCounter.js":
/*!********************************************************************************!*\
!*** ../../node_modules/@primer/react/lib-esm/UnderlineNav2/LoadingCounter.js ***!
\********************************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "LoadingCounter": () => (/* binding */ LoadingCounter)
/* harmony export */ });
/* harmony import */ var styled_components__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! styled-components */ "webpack/sharing/consume/default/styled-components/styled-components");
/* harmony import */ var styled_components__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(styled_components__WEBPACK_IMPORTED_MODULE_0__);
/* harmony import */ var _constants_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ../constants.js */ "../../node_modules/@primer/react/lib-esm/constants.js");
const loading = (0,styled_components__WEBPACK_IMPORTED_MODULE_0__.keyframes)(["from{opacity:1;}to{opacity:0.2;}"]);
const LoadingCounter = styled_components__WEBPACK_IMPORTED_MODULE_0___default().span.withConfig({
displayName: "LoadingCounter",
componentId: "sc-ouonic-0"
})(["animation:", " 1.2s ease-in-out infinite alternate;background-color:", ";border-color:", ";width:1.5rem;height:1rem;display:inline-block;border-radius:20px;"], loading, (0,_constants_js__WEBPACK_IMPORTED_MODULE_1__.get)('colors.neutral.muted'), (0,_constants_js__WEBPACK_IMPORTED_MODULE_1__.get)('colors.border.default'));
/***/ }),
/***/ "../../node_modules/@primer/react/lib-esm/UnderlineNav2/UnderlineNav.js":
/*!******************************************************************************!*\
!*** ../../node_modules/@primer/react/lib-esm/UnderlineNav2/UnderlineNav.js ***!
\******************************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "UnderlineNav": () => (/* binding */ UnderlineNav)
/* harmony export */ });
/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react */ "webpack/sharing/consume/default/react");
/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(react__WEBPACK_IMPORTED_MODULE_0__);
/* harmony import */ var _Box_js__WEBPACK_IMPORTED_MODULE_13__ = __webpack_require__(/*! ../Box.js */ "../../node_modules/@primer/react/lib-esm/Box.js");
/* harmony import */ var _sx_js__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! ../sx.js */ "../../node_modules/@primer/react/lib-esm/sx.js");
/* harmony import */ var _UnderlineNavContext_js__WEBPACK_IMPORTED_MODULE_11__ = __webpack_require__(/*! ./UnderlineNavContext.js */ "../../node_modules/@primer/react/lib-esm/UnderlineNav2/UnderlineNavContext.js");
/* harmony import */ var _hooks_useResizeObserver_js__WEBPACK_IMPORTED_MODULE_8__ = __webpack_require__(/*! ../hooks/useResizeObserver.js */ "../../node_modules/@primer/react/lib-esm/hooks/useResizeObserver.js");
/* harmony import */ var _CounterLabel_js__WEBPACK_IMPORTED_MODULE_17__ = __webpack_require__(/*! ../CounterLabel.js */ "../../node_modules/@primer/react/lib-esm/CounterLabel.js");
/* harmony import */ var _ThemeProvider_js__WEBPACK_IMPORTED_MODULE_7__ = __webpack_require__(/*! ../ThemeProvider.js */ "../../node_modules/@primer/react/lib-esm/ThemeProvider.js");
/* harmony import */ var _VisuallyHidden_js__WEBPACK_IMPORTED_MODULE_12__ = __webpack_require__(/*! ../_VisuallyHidden.js */ "../../node_modules/@primer/react/lib-esm/_VisuallyHidden.js");
/* harmony import */ var _styles_js__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(/*! ./styles.js */ "../../node_modules/@primer/react/lib-esm/UnderlineNav2/styles.js");
/* harmony import */ var styled_components__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! styled-components */ "webpack/sharing/consume/default/styled-components/styled-components");
/* harmony import */ var styled_components__WEBPACK_IMPORTED_MODULE_1___default = /*#__PURE__*/__webpack_require__.n(styled_components__WEBPACK_IMPORTED_MODULE_1__);
/* harmony import */ var _LoadingCounter_js__WEBPACK_IMPORTED_MODULE_16__ = __webpack_require__(/*! ./LoadingCounter.js */ "../../node_modules/@primer/react/lib-esm/UnderlineNav2/LoadingCounter.js");
/* harmony import */ var _Button_index_js__WEBPACK_IMPORTED_MODULE_14__ = __webpack_require__(/*! ../Button/index.js */ "../../node_modules/@primer/react/lib-esm/Button/index.js");
/* harmony import */ var _primer_octicons_react__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! @primer/octicons-react */ "webpack/sharing/consume/default/@primer/octicons-react/@primer/octicons-react?372a");
/* harmony import */ var _primer_octicons_react__WEBPACK_IMPORTED_MODULE_2___default = /*#__PURE__*/__webpack_require__.n(_primer_octicons_react__WEBPACK_IMPORTED_MODULE_2__);
/* harmony import */ var _hooks_useOnEscapePress_js__WEBPACK_IMPORTED_MODULE_9__ = __webpack_require__(/*! ../hooks/useOnEscapePress.js */ "../../node_modules/@primer/react/lib-esm/hooks/useOnEscapePress.js");
/* harmony import */ var _hooks_useOnOutsideClick_js__WEBPACK_IMPORTED_MODULE_10__ = __webpack_require__(/*! ../hooks/useOnOutsideClick.js */ "../../node_modules/@primer/react/lib-esm/hooks/useOnOutsideClick.js");
/* harmony import */ var _ActionList_index_js__WEBPACK_IMPORTED_MODULE_15__ = __webpack_require__(/*! ../ActionList/index.js */ "../../node_modules/@primer/react/lib-esm/ActionList/index.js");
/* harmony import */ var _react_aria_ssr__WEBPACK_IMPORTED_MODULE_6__ = __webpack_require__(/*! @react-aria/ssr */ "../../node_modules/@react-aria/ssr/dist/module.js");
/* harmony import */ var deepmerge__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! deepmerge */ "../../node_modules/deepmerge/dist/cjs.js");
/* harmony import */ var deepmerge__WEBPACK_IMPORTED_MODULE_3___default = /*#__PURE__*/__webpack_require__.n(deepmerge__WEBPACK_IMPORTED_MODULE_3__);
function _extends() { _extends = Object.assign ? Object.assign.bind() : function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); }
// When the page is loaded, we don't have a ref for the more button as it is not in the DOM yet.
// However, we still need to calculate the number of possible items when the more button is present, so we use the width of the more button as a constant.
const MORE_BTN_WIDTH = 86;
// Needed because passing a ref typed as HTMLUListElement to `Box` causes a type error
const NavigationList = styled_components__WEBPACK_IMPORTED_MODULE_1___default().ul.withConfig({
displayName: "UnderlineNav__NavigationList",
componentId: "sc-3wwkh2-0"
})(["", ";"], _sx_js__WEBPACK_IMPORTED_MODULE_4__["default"]);
const MoreMenuListItem = styled_components__WEBPACK_IMPORTED_MODULE_1___default().li.withConfig({
displayName: "UnderlineNav__MoreMenuListItem",
componentId: "sc-3wwkh2-1"
})(["display:flex;"]);
const overflowEffect = (navWidth, moreMenuWidth, childArray, childWidthArray, noIconChildWidthArray, updateListAndMenu) => {
let iconsVisible = true;
if (childWidthArray.length === 0) {
updateListAndMenu({
items: childArray,
actions: []
}, iconsVisible);
}
const numberOfItemsPossible = calculatePossibleItems(childWidthArray, navWidth);
const numberOfItemsWithoutIconPossible = calculatePossibleItems(noIconChildWidthArray, navWidth);
  // We need to take the more-menu width into account when calculating the number of items possible
const numberOfItemsPossibleWithMoreMenu = calculatePossibleItems(noIconChildWidthArray, navWidth, moreMenuWidth || MORE_BTN_WIDTH);
const items = [];
const actions = [];
// First, we check if we can fit all the items with their icons
if (childArray.length <= numberOfItemsPossible) {
items.push(...childArray);
} else if (childArray.length <= numberOfItemsWithoutIconPossible) {
// if we can't fit all the items with their icons, we check if we can fit all the items without their icons
iconsVisible = false;
items.push(...childArray);
} else {
    // if we can't fit all the items without their icons, we keep the icons hidden and show the ones that don't fit into the list in the overflow menu
iconsVisible = false;
    /* Below is an accessibility requirement. Never show only one item in the overflow menu.
* If there is only one item left to display in the overflow menu according to the calculation,
* we need to pull another item from the list into the overflow menu.
*/
const numberOfItemsInMenu = childArray.length - numberOfItemsPossibleWithMoreMenu;
const numberOfListItems = numberOfItemsInMenu === 1 ? numberOfItemsPossibleWithMoreMenu - 1 : numberOfItemsPossibleWithMoreMenu;
for (const [index, child] of childArray.entries()) {
if (index < numberOfListItems) {
items.push(child);
} else {
const ariaCurrent = child.props['aria-current'];
const isCurrent = Boolean(ariaCurrent) && ariaCurrent !== 'false';
// We need to make sure to keep the selected item always visible.
if (isCurrent) {
          // If the selected item couldn't make it into the list, we swap it with the last item in the list.
          const indexToReplaceAt = numberOfListItems - 1; // because we are replacing the last item in the list
          // splice modifies the array by removing 1 item at the given index, replacing it with the "child" element, and returns the removed item.
          const prospectiveAction = items.splice(indexToReplaceAt, 1, child)[0];
          actions.push(prospectiveAction);
} else {
actions.push(child);
}
}
}
}
updateListAndMenu({
items,
actions
}, iconsVisible);
};
const getValidChildren = children => {
return react__WEBPACK_IMPORTED_MODULE_0___default().Children.toArray(children).filter(child => /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().isValidElement(child));
};
const calculatePossibleItems = (childWidthArray, navWidth, moreMenuWidth = 0) => {
const widthToFit = navWidth - moreMenuWidth;
let breakpoint = childWidthArray.length - 1;
let sumsOfChildWidth = 0;
for (const [index, childWidth] of childWidthArray.entries()) {
if (sumsOfChildWidth > widthToFit) {
breakpoint = index - 1;
break;
} else {
      // Take the gap between items into account when calculating the number of items possible
sumsOfChildWidth = sumsOfChildWidth + childWidth.width + _styles_js__WEBPACK_IMPORTED_MODULE_5__.GAP;
}
}
return breakpoint;
};
const UnderlineNav = /*#__PURE__*/(0,react__WEBPACK_IMPORTED_MODULE_0__.forwardRef)(({
as = 'nav',
align,
'aria-label': ariaLabel,
sx: sxProp = {},
afterSelect,
variant = 'default',
loadingCounters = false,
children
}, forwardedRef) => {
const backupRef = (0,react__WEBPACK_IMPORTED_MODULE_0__.useRef)(null);
const navRef = forwardedRef !== null && forwardedRef !== void 0 ? forwardedRef : backupRef;
const listRef = (0,react__WEBPACK_IMPORTED_MODULE_0__.useRef)(null);
const moreMenuRef = (0,react__WEBPACK_IMPORTED_MODULE_0__.useRef)(null);
const moreMenuBtnRef = (0,react__WEBPACK_IMPORTED_MODULE_0__.useRef)(null);
const containerRef = react__WEBPACK_IMPORTED_MODULE_0___default().useRef(null);
const disclosureWidgetId = (0,_react_aria_ssr__WEBPACK_IMPORTED_MODULE_6__.useSSRSafeId)();
const {
theme
} = (0,_ThemeProvider_js__WEBPACK_IMPORTED_MODULE_7__.useTheme)();
function getItemsWidth(itemText) {
var _noIconChildWidthArra;
return ((_noIconChildWidthArra = noIconChildWidthArray.find(item => item.text === itemText)) === null || _noIconChildWidthArra === void 0 ? void 0 : _noIconChildWidthArra.width) || 0;
}
const swapMenuItemWithListItem = (prospectiveListItem, indexOfProspectiveListItem, event, callback) => {
var _listRef$current;
// get the selected menu item's width
const widthToFitIntoList = getItemsWidth(prospectiveListItem.props.children);
// Check if there is any empty space on the right side of the list
const availableSpace = navRef.current.getBoundingClientRect().width - (((_listRef$current = listRef.current) === null || _listRef$current === void 0 ? void 0 : _listRef$current.getBoundingClientRect().width) || 0);
// Calculate how many items need to be pulled in to the menu to make room for the selected menu item
// I.e. if we need to pull 2 items in (index 0 and index 1), breakpoint (index) will return 1.
const index = getBreakpointForItemSwapping(widthToFitIntoList, availableSpace);
const indexToSliceAt = responsiveProps.items.length - 1 - index;
// Form the new list of items
const itemsLeftInList = [...responsiveProps.items].slice(0, indexToSliceAt);
const updatedItemList = [...itemsLeftInList, prospectiveListItem];
// Form the new menu items
const itemsToAddToMenu = [...responsiveProps.items].slice(indexToSliceAt);
const updatedMenuItems = [...actions];
    // Insert the items of itemsToAddToMenu into the menu at the index of prospectiveListItem, removing that one item (prospectiveListItem)
updatedMenuItems.splice(indexOfProspectiveListItem, 1, ...itemsToAddToMenu);
setSelectedLinkText(prospectiveListItem.props.children);
callback({
items: updatedItemList,
actions: updatedMenuItems
}, false);
};
// How many items do we need to pull in to the menu to make room for the selected menu item.
function getBreakpointForItemSwapping(widthToFitIntoList, availableSpace) {
let widthToSwap = 0;
let breakpoint = 0;
for (const [index, item] of [...responsiveProps.items].reverse().entries()) {
widthToSwap += getItemsWidth(item.props.children);
if (widthToFitIntoList < widthToSwap + availableSpace) {
breakpoint = index;
break;
}
}
return breakpoint;
}
const [selectedLink, setSelectedLink] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(undefined);
  // selectedLinkText is needed to be able to set the selected menu item as selectedLink.
  // This is needed because setSelectedLink only accepts a ref, but at the time the selected menu item is set as selectedLink, its ref as a list item is not yet available
const [selectedLinkText, setSelectedLinkText] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)('');
// Capture the mouse/keyboard event when a menu item is selected so that we can use it to fire the onSelect callback after the menu item is swapped with the list item
const [selectEvent, setSelectEvent] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(null);
const [iconsVisible, setIconsVisible] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(true);
const afterSelectHandler = event => {
if (!event.defaultPrevented) {
if (typeof afterSelect === 'function') afterSelect(event);
closeOverlay();
}
};
const [responsiveProps, setResponsiveProps] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)({
items: getValidChildren(children),
actions: []
});
/*
   * This is needed to make sure responsiveProps.items and responsiveProps.actions are updated when children are changed.
   * Particularly when an item is selected, an 'aria-current="page"' attribute is added to the child and we need to make sure
   * responsiveProps.items and responsiveProps.actions are updated with that attribute
*/
(0,react__WEBPACK_IMPORTED_MODULE_0__.useEffect)(() => {
const childArray = getValidChildren(children);
const updatedItems = responsiveProps.items.map(item => {
return childArray.find(child => child.key === item.key) || item;
});
const updatedActions = responsiveProps.actions.map(action => {
return childArray.find(child => child.key === action.key) || action;
});
setResponsiveProps({
items: updatedItems,
actions: updatedActions
});
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [children]);
const updateListAndMenu = (0,react__WEBPACK_IMPORTED_MODULE_0__.useCallback)((props, displayIcons) => {
setResponsiveProps(props);
setIconsVisible(displayIcons);
}, []);
const actions = responsiveProps.actions;
const [childWidthArray, setChildWidthArray] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)([]);
const setChildrenWidth = (0,react__WEBPACK_IMPORTED_MODULE_0__.useCallback)(size => {
setChildWidthArray(arr => {
const newArr = [...arr, size];
return newArr;
});
}, []);
const [noIconChildWidthArray, setNoIconChildWidthArray] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)([]);
const setNoIconChildrenWidth = (0,react__WEBPACK_IMPORTED_MODULE_0__.useCallback)(size => {
setNoIconChildWidthArray(arr => {
const newArr = [...arr, size];
return newArr;
});
}, []);
(0,_hooks_useResizeObserver_js__WEBPACK_IMPORTED_MODULE_8__.useResizeObserver)(resizeObserverEntries => {
var _moreMenuRef$current$, _moreMenuRef$current;
const childArray = getValidChildren(children);
const navWidth = resizeObserverEntries[0].contentRect.width;
const moreMenuWidth = (_moreMenuRef$current$ = (_moreMenuRef$current = moreMenuRef.current) === null || _moreMenuRef$current === void 0 ? void 0 : _moreMenuRef$current.getBoundingClientRect().width) !== null && _moreMenuRef$current$ !== void 0 ? _moreMenuRef$current$ : 0;
navWidth !== 0 && overflowEffect(navWidth, moreMenuWidth, childArray, childWidthArray, noIconChildWidthArray, updateListAndMenu);
}, navRef);
if (!ariaLabel) {
// eslint-disable-next-line no-console
console.warn('Use the `aria-label` prop to provide an accessible label for assistive technology');
}
const [isWidgetOpen, setIsWidgetOpen] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(false);
const closeOverlay = react__WEBPACK_IMPORTED_MODULE_0___default().useCallback(() => {
setIsWidgetOpen(false);
}, [setIsWidgetOpen]);
const focusOnMoreMenuBtn = react__WEBPACK_IMPORTED_MODULE_0___default().useCallback(() => {
var _moreMenuBtnRef$curre;
(_moreMenuBtnRef$curre = moreMenuBtnRef.current) === null || _moreMenuBtnRef$curre === void 0 ? void 0 : _moreMenuBtnRef$curre.focus();
}, []);
(0,_hooks_useOnEscapePress_js__WEBPACK_IMPORTED_MODULE_9__.useOnEscapePress)(event => {
if (isWidgetOpen) {
event.preventDefault();
closeOverlay();
focusOnMoreMenuBtn();
}
}, [isWidgetOpen]);
(0,_hooks_useOnOutsideClick_js__WEBPACK_IMPORTED_MODULE_10__.useOnOutsideClick)({
onClickOutside: closeOverlay,
containerRef,
ignoreClickRefs: [moreMenuBtnRef]
});
const onAnchorClick = (0,react__WEBPACK_IMPORTED_MODULE_0__.useCallback)(event => {
if (event.defaultPrevented || event.button !== 0) {
return;
}
setIsWidgetOpen(isWidgetOpen => !isWidgetOpen);
}, []);
return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_UnderlineNavContext_js__WEBPACK_IMPORTED_MODULE_11__.UnderlineNavContext.Provider, {
value: {
theme,
setChildrenWidth,
setNoIconChildrenWidth,
selectedLink,
setSelectedLink,
selectedLinkText,
setSelectedLinkText,
selectEvent,
afterSelect: afterSelectHandler,
variant,
loadingCounters,
iconsVisible
}
}, ariaLabel && /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_VisuallyHidden_js__WEBPACK_IMPORTED_MODULE_12__["default"], {
as: "h2"
}, `${ariaLabel} navigation`), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_Box_js__WEBPACK_IMPORTED_MODULE_13__["default"], {
as: as,
sx: deepmerge__WEBPACK_IMPORTED_MODULE_3___default()((0,_styles_js__WEBPACK_IMPORTED_MODULE_5__.getNavStyles)(theme, {
align
}), sxProp),
"aria-label": ariaLabel,
ref: navRef
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(NavigationList, {
sx: _styles_js__WEBPACK_IMPORTED_MODULE_5__.ulStyles,
ref: listRef,
role: "list"
}, responsiveProps.items, actions.length > 0 && /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(MoreMenuListItem, {
ref: moreMenuRef
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_Box_js__WEBPACK_IMPORTED_MODULE_13__["default"], {
sx: (0,_styles_js__WEBPACK_IMPORTED_MODULE_5__.getDividerStyle)(theme)
}), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_Button_index_js__WEBPACK_IMPORTED_MODULE_14__.Button, {
ref: moreMenuBtnRef,
sx: _styles_js__WEBPACK_IMPORTED_MODULE_5__.moreBtnStyles,
"aria-controls": disclosureWidgetId,
"aria-expanded": isWidgetOpen,
onClick: onAnchorClick,
trailingIcon: _primer_octicons_react__WEBPACK_IMPORTED_MODULE_2__.TriangleDownIcon
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_Box_js__WEBPACK_IMPORTED_MODULE_13__["default"], {
as: "span"
}, "More", /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_VisuallyHidden_js__WEBPACK_IMPORTED_MODULE_12__["default"], {
as: "span"
}, "\xA0", `${ariaLabel} items`))), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_ActionList_index_js__WEBPACK_IMPORTED_MODULE_15__.ActionList, {
selectionVariant: "single",
ref: containerRef,
id: disclosureWidgetId,
sx: _styles_js__WEBPACK_IMPORTED_MODULE_5__.menuStyles,
style: {
display: isWidgetOpen ? 'block' : 'none'
}
}, actions.map((action, index) => {
const {
children: actionElementChildren,
...actionElementProps
} = action.props;
return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_Box_js__WEBPACK_IMPORTED_MODULE_13__["default"], {
key: actionElementChildren,
as: "li"
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_ActionList_index_js__WEBPACK_IMPORTED_MODULE_15__.ActionList.Item, _extends({}, actionElementProps, {
as: action.props.as || 'a',
sx: _styles_js__WEBPACK_IMPORTED_MODULE_5__.menuItemStyles,
onSelect: event => {
swapMenuItemWithListItem(action, index, event, updateListAndMenu);
setSelectEvent(event);
closeOverlay();
focusOnMoreMenuBtn();
}
}), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_Box_js__WEBPACK_IMPORTED_MODULE_13__["default"], {
as: "span",
sx: {
display: 'flex',
alignItems: 'center',
justifyContent: 'space-between'
}
}, actionElementChildren, loadingCounters ? /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_LoadingCounter_js__WEBPACK_IMPORTED_MODULE_16__.LoadingCounter, null) : actionElementProps.counter !== undefined && /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_Box_js__WEBPACK_IMPORTED_MODULE_13__["default"], {
as: "span",
"data-component": "counter"
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_CounterLabel_js__WEBPACK_IMPORTED_MODULE_17__["default"], null, actionElementProps.counter)))));
}))))));
});
UnderlineNav.displayName = 'UnderlineNav';
/***/ }),
/***/ "../../node_modules/@primer/react/lib-esm/UnderlineNav2/UnderlineNavContext.js":
/*!*************************************************************************************!*\
!*** ../../node_modules/@primer/react/lib-esm/UnderlineNav2/UnderlineNavContext.js ***!
\*************************************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "UnderlineNavContext": () => (/* binding */ UnderlineNavContext)
/* harmony export */ });
/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react */ "webpack/sharing/consume/default/react");
/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(react__WEBPACK_IMPORTED_MODULE_0__);
const UnderlineNavContext = /*#__PURE__*/(0,react__WEBPACK_IMPORTED_MODULE_0__.createContext)({
theme: {},
setChildrenWidth: () => null,
setNoIconChildrenWidth: () => null,
selectedLink: undefined,
setSelectedLink: () => null,
selectedLinkText: '',
setSelectedLinkText: () => null,
selectEvent: null,
variant: 'default',
loadingCounters: false,
iconsVisible: true
});
/***/ }),
/***/ "../../node_modules/@primer/react/lib-esm/UnderlineNav2/UnderlineNavItem.js":
/*!**********************************************************************************!*\
!*** ../../node_modules/@primer/react/lib-esm/UnderlineNav2/UnderlineNavItem.js ***!
\**********************************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "UnderlineNavItem": () => (/* binding */ UnderlineNavItem)
/* harmony export */ });
/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react */ "webpack/sharing/consume/default/react");
/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(react__WEBPACK_IMPORTED_MODULE_0__);
/* harmony import */ var _Box_js__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! ../Box.js */ "../../node_modules/@primer/react/lib-esm/Box.js");
/* harmony import */ var _UnderlineNavContext_js__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! ./UnderlineNavContext.js */ "../../node_modules/@primer/react/lib-esm/UnderlineNav2/UnderlineNavContext.js");
/* harmony import */ var _CounterLabel_js__WEBPACK_IMPORTED_MODULE_7__ = __webpack_require__(/*! ../CounterLabel.js */ "../../node_modules/@primer/react/lib-esm/CounterLabel.js");
/* harmony import */ var _styles_js__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(/*! ./styles.js */ "../../node_modules/@primer/react/lib-esm/UnderlineNav2/styles.js");
/* harmony import */ var _LoadingCounter_js__WEBPACK_IMPORTED_MODULE_6__ = __webpack_require__(/*! ./LoadingCounter.js */ "../../node_modules/@primer/react/lib-esm/UnderlineNav2/LoadingCounter.js");
/* harmony import */ var _utils_useIsomorphicLayoutEffect_js__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! ../utils/useIsomorphicLayoutEffect.js */ "../../node_modules/@primer/react/lib-esm/utils/useIsomorphicLayoutEffect.js");
/* harmony import */ var deepmerge__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! deepmerge */ "../../node_modules/deepmerge/dist/cjs.js");
/* harmony import */ var deepmerge__WEBPACK_IMPORTED_MODULE_1___default = /*#__PURE__*/__webpack_require__.n(deepmerge__WEBPACK_IMPORTED_MODULE_1__);
function _extends() { _extends = Object.assign ? Object.assign.bind() : function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); }
// adopted from React.AnchorHTMLAttributes
const UnderlineNavItem = /*#__PURE__*/(0,react__WEBPACK_IMPORTED_MODULE_0__.forwardRef)(({
sx: sxProp = {},
as: Component = 'a',
href = '#',
children,
counter,
onSelect,
'aria-current': ariaCurrent,
icon: Icon,
...props
}, forwardedRef) => {
const backupRef = (0,react__WEBPACK_IMPORTED_MODULE_0__.useRef)(null);
const ref = forwardedRef !== null && forwardedRef !== void 0 ? forwardedRef : backupRef;
const {
theme,
setChildrenWidth,
setNoIconChildrenWidth,
selectedLink,
setSelectedLink,
selectedLinkText,
setSelectedLinkText,
selectEvent,
afterSelect,
variant,
loadingCounters,
iconsVisible
} = (0,react__WEBPACK_IMPORTED_MODULE_0__.useContext)(_UnderlineNavContext_js__WEBPACK_IMPORTED_MODULE_2__.UnderlineNavContext);
(0,_utils_useIsomorphicLayoutEffect_js__WEBPACK_IMPORTED_MODULE_3__["default"])(() => {
if (ref.current) {
const domRect = ref.current.getBoundingClientRect();
const icon = Array.from(ref.current.children[0].children).find(child => child.getAttribute('data-component') === 'icon');
const content = Array.from(ref.current.children[0].children).find(child => child.getAttribute('data-component') === 'text');
const text = content.textContent;
const iconWidthWithMargin = icon ? icon.getBoundingClientRect().width + Number(getComputedStyle(icon).marginRight.slice(0, -2)) + Number(getComputedStyle(icon).marginLeft.slice(0, -2)) : 0;
setChildrenWidth({
text,
width: domRect.width
});
setNoIconChildrenWidth({
text,
width: domRect.width - iconWidthWithMargin
});
if (selectedLink === undefined && Boolean(ariaCurrent) && ariaCurrent !== 'false') {
setSelectedLink(ref);
}
// Only runs when a menu item is selected (swapping the menu item with the list item to keep it visible)
if (selectedLinkText === text) {
setSelectedLink(ref);
if (typeof onSelect === 'function' && selectEvent !== null) onSelect(selectEvent);
setSelectedLinkText('');
}
}
}, [ref, ariaCurrent, selectedLink, selectedLinkText, setSelectedLinkText, setSelectedLink, setChildrenWidth, setNoIconChildrenWidth, onSelect, selectEvent]);
const keyPressHandler = react__WEBPACK_IMPORTED_MODULE_0___default().useCallback(event => {
if (event.key === ' ' || event.key === 'Enter') {
if (!event.defaultPrevented && typeof onSelect === 'function') onSelect(event);
if (!event.defaultPrevented && typeof afterSelect === 'function') afterSelect(event);
setSelectedLink(ref);
}
}, [onSelect, afterSelect, ref, setSelectedLink]);
const clickHandler = react__WEBPACK_IMPORTED_MODULE_0___default().useCallback(event => {
if (!event.defaultPrevented) {
if (typeof onSelect === 'function') onSelect(event);
if (typeof afterSelect === 'function') afterSelect(event);
}
setSelectedLink(ref);
}, [onSelect, afterSelect, ref, setSelectedLink]);
return /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_Box_js__WEBPACK_IMPORTED_MODULE_4__["default"], {
as: "li",
sx: {
display: 'flex',
flexDirection: 'column',
alignItems: 'center'
}
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_Box_js__WEBPACK_IMPORTED_MODULE_4__["default"], _extends({
as: Component,
href: href,
onKeyPress: keyPressHandler,
onClick: clickHandler,
"aria-current": ariaCurrent,
sx: deepmerge__WEBPACK_IMPORTED_MODULE_1___default()((0,_styles_js__WEBPACK_IMPORTED_MODULE_5__.getLinkStyles)(theme, {
variant
}, selectedLink, ref), sxProp)
}, props, {
ref: ref
}), /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_Box_js__WEBPACK_IMPORTED_MODULE_4__["default"], {
as: "div",
"data-component": "wrapper",
sx: _styles_js__WEBPACK_IMPORTED_MODULE_5__.wrapperStyles
}, iconsVisible && Icon && /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_Box_js__WEBPACK_IMPORTED_MODULE_4__["default"], {
as: "span",
"data-component": "icon",
sx: _styles_js__WEBPACK_IMPORTED_MODULE_5__.iconWrapStyles
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(Icon, null)), children && /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_Box_js__WEBPACK_IMPORTED_MODULE_4__["default"], {
as: "span",
"data-component": "text",
"data-content": children,
sx: selectedLink === ref ? {
fontWeight: 600
} : {}
}, children), loadingCounters ? /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_Box_js__WEBPACK_IMPORTED_MODULE_4__["default"], {
as: "span",
"data-component": "counter",
sx: _styles_js__WEBPACK_IMPORTED_MODULE_5__.counterStyles
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_LoadingCounter_js__WEBPACK_IMPORTED_MODULE_6__.LoadingCounter, null)) : counter !== undefined && /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_Box_js__WEBPACK_IMPORTED_MODULE_4__["default"], {
as: "span",
"data-component": "counter",
sx: _styles_js__WEBPACK_IMPORTED_MODULE_5__.counterStyles
}, /*#__PURE__*/react__WEBPACK_IMPORTED_MODULE_0___default().createElement(_CounterLabel_js__WEBPACK_IMPORTED_MODULE_7__["default"], null, counter)))));
});
UnderlineNavItem.displayName = 'UnderlineNavItem';
/***/ }),
/***/ "../../node_modules/@primer/react/lib-esm/UnderlineNav2/index.js":
/*!***********************************************************************!*\
!*** ../../node_modules/@primer/react/lib-esm/UnderlineNav2/index.js ***!
\***********************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "UnderlineNav": () => (/* binding */ UnderlineNav)
/* harmony export */ });
/* harmony import */ var _UnderlineNav_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ./UnderlineNav.js */ "../../node_modules/@primer/react/lib-esm/UnderlineNav2/UnderlineNav.js");
/* harmony import */ var _UnderlineNavItem_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./UnderlineNavItem.js */ "../../node_modules/@primer/react/lib-esm/UnderlineNav2/UnderlineNavItem.js");
const UnderlineNav = Object.assign(_UnderlineNav_js__WEBPACK_IMPORTED_MODULE_0__.UnderlineNav, {
Item: _UnderlineNavItem_js__WEBPACK_IMPORTED_MODULE_1__.UnderlineNavItem
});
/***/ }),
/***/ "../../node_modules/@primer/react/lib-esm/UnderlineNav2/styles.js":
/*!************************************************************************!*\
!*** ../../node_modules/@primer/react/lib-esm/UnderlineNav2/styles.js ***!
\************************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "GAP": () => (/* binding */ GAP),
/* harmony export */ "counterStyles": () => (/* binding */ counterStyles),
/* harmony export */ "getDividerStyle": () => (/* binding */ getDividerStyle),
/* harmony export */ "getLinkStyles": () => (/* binding */ getLinkStyles),
/* harmony export */ "getNavStyles": () => (/* binding */ getNavStyles),
/* harmony export */ "iconWrapStyles": () => (/* binding */ iconWrapStyles),
/* harmony export */ "menuItemStyles": () => (/* binding */ menuItemStyles),
/* harmony export */ "menuStyles": () => (/* binding */ menuStyles),
/* harmony export */ "moreBtnStyles": () => (/* binding */ moreBtnStyles),
/* harmony export */ "ulStyles": () => (/* binding */ ulStyles),
/* harmony export */ "wrapperStyles": () => (/* binding */ wrapperStyles)
/* harmony export */ });
// The gap between the list items. It is a constant because the gap is used to calculate the possible number of items that can fit in the container.
const GAP = 8;
const iconWrapStyles = {
alignItems: 'center',
display: 'inline-flex',
marginRight: 2
};
const wrapperStyles = {
display: 'inline-flex',
paddingY: 1,
paddingX: 2,
borderRadius: 2
};
const smallVariantLinkStyles = {
paddingY: 1,
fontSize: 0
};
const defaultVariantLinkStyles = {
paddingY: 2,
fontSize: 1
};
const counterStyles = {
marginLeft: 2,
display: 'flex',
alignItems: 'center'
};
const getNavStyles = (theme, props) => ({
display: 'flex',
paddingX: 3,
justifyContent: (props === null || props === void 0 ? void 0 : props.align) === 'right' ? 'flex-end' : 'flex-start',
borderBottom: '1px solid',
borderBottomColor: `${theme === null || theme === void 0 ? void 0 : theme.colors.border.muted}`,
align: 'row',
alignItems: 'center'
});
const ulStyles = {
display: 'flex',
listStyle: 'none',
whiteSpace: 'nowrap',
paddingY: 0,
paddingX: 0,
margin: 0,
marginBottom: '-1px',
alignItems: 'center',
gap: `${GAP}px`,
position: 'relative'
};
const getDividerStyle = theme => ({
display: 'inline-block',
borderLeft: '1px solid',
width: '1px',
borderLeftColor: `${theme === null || theme === void 0 ? void 0 : theme.colors.border.muted}`,
marginRight: 1
});
const moreBtnStyles = {
  // Set margin to 0 here because Safari puts extra margin around the button; the rest resets the style to make it look like a list element
margin: 0,
border: 0,
background: 'transparent',
fontWeight: 'normal',
boxShadow: 'none',
paddingY: 1,
paddingX: 2,
'& > span[data-component="trailingIcon"]': {
marginLeft: 0
}
};
const getLinkStyles = (theme, props, selectedLink, ref) => ({
position: 'relative',
display: 'inline-flex',
color: 'fg.default',
textAlign: 'center',
textDecoration: 'none',
...((props === null || props === void 0 ? void 0 : props.variant) === 'small' ? smallVariantLinkStyles : defaultVariantLinkStyles),
'@media (hover:hover)': {
'&:hover > div[data-component="wrapper"] ': {
backgroundColor: theme === null || theme === void 0 ? void 0 : theme.colors.neutral.muted,
transition: 'background .12s ease-out'
}
},
'&:focus': {
outline: 0,
'& > div[data-component="wrapper"]': {
boxShadow: `inset 0 0 0 2px ${theme === null || theme === void 0 ? void 0 : theme.colors.accent.fg}`
},
// where focus-visible is supported, remove the focus box-shadow
'&:not(:focus-visible) > div[data-component="wrapper"]': {
boxShadow: 'none'
}
},
'&:focus-visible > div[data-component="wrapper"]': {
boxShadow: `inset 0 0 0 2px ${theme === null || theme === void 0 ? void 0 : theme.colors.accent.fg}`
},
  // renders a visibly hidden "copy" of the label in bold, reserving box space for when the label becomes bold on selection
'& span[data-content]::before': {
content: 'attr(data-content)',
display: 'block',
height: 0,
fontWeight: '600',
visibility: 'hidden',
whiteSpace: 'nowrap'
},
// selected state styles
'&::after': {
position: 'absolute',
right: '50%',
bottom: 0,
width: '100%',
height: 2,
content: '""',
bg: selectedLink === ref ? theme === null || theme === void 0 ? void 0 : theme.colors.primer.border.active : 'transparent',
borderRadius: 0,
transform: 'translate(50%, -50%)'
},
'@media (forced-colors: active)': {
'::after': {
      // Support for Windows forced colors mode https://learn.microsoft.com/en-us/fluent-ui/web-components/design-system/high-contrast
bg: selectedLink === ref ? 'LinkText' : 'transparent'
}
}
});
const menuItemStyles = {
// This is needed to hide the selected check icon on the menu item. https://github.com/primer/react/blob/main/src/ActionList/Selection.tsx#L32
'& > span': {
display: 'none'
},
// To reset the style when the menu items are rendered as react router links
textDecoration: 'none'
};
const menuStyles = {
position: 'absolute',
top: '90%',
right: '0',
boxShadow: '0 1px 3px rgba(0, 0, 0, 0.12), 0 1px 2px rgba(0, 0, 0, 0.24)',
borderRadius: '12px',
backgroundColor: 'canvas.overlay',
listStyle: 'none',
// Values are from ActionMenu
minWidth: '192px',
maxWidth: '640px'
};
/***/ }),
/***/ "../../node_modules/@primer/react/lib-esm/hooks/useControllableState.js":
/*!******************************************************************************!*\
!*** ../../node_modules/@primer/react/lib-esm/hooks/useControllableState.js ***!
\******************************************************************************/
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => {
__webpack_require__.r(__webpack_exports__);
/* harmony export */ __webpack_require__.d(__webpack_exports__, {
/* harmony export */ "useControllableState": () => (/* binding */ useControllableState)
/* harmony export */ });
/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react */ "webpack/sharing/consume/default/react");
/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(react__WEBPACK_IMPORTED_MODULE_0__);
/**
* This custom hook simplifies the behavior of a component if it has state that
 * can be both controlled and uncontrolled. It functions identically to a
* useState() hook and provides [state, setState] for you to use. You can use
* the `onChange` argument to allow updates to the `state` to be communicated to
* owners of controlled components.
*
* Note: This hook will warn if a component is switching from controlled to
* uncontrolled, or vice-versa.
*/
function useControllableState({
name = 'custom',
defaultValue,
value,
onChange
}) {
const [state, internalSetState] = react__WEBPACK_IMPORTED_MODULE_0___default().useState(value !== null && value !== void 0 ? value : defaultValue);
const controlled = react__WEBPACK_IMPORTED_MODULE_0___default().useRef(null);
const stableOnChange = react__WEBPACK_IMPORTED_MODULE_0___default().useRef(onChange);
react__WEBPACK_IMPORTED_MODULE_0___default().useEffect(() => {
stableOnChange.current = onChange;
});
if (controlled.current === null) {
controlled.current = value !== undefined;
}
const setState = react__WEBPACK_IMPORTED_MODULE_0___default().useCallback(stateOrUpdater => {
var _stableOnChange$curre;
const value = typeof stateOrUpdater === 'function' ?
// @ts-ignore stateOrUpdater is a function
stateOrUpdater(state) : stateOrUpdater;
if (controlled.current === false) {
internalSetState(value);
}
(_stableOnChange$curre = stableOnChange.current) === null || _stableOnChange$curre === void 0 ? void 0 : _stableOnChange$curre.call(stableOnChange, value);
}, [state]);
react__WEBPACK_IMPORTED_MODULE_0___default().useEffect(() => {
const controlledValue = value !== undefined;
// Uncontrolled -> Controlled
// If the component prop is uncontrolled, the prop value should be undefined
if (controlled.current === false && controlledValue) {
warn('A component is changing an uncontrolled %s component to be controlled. ' + 'This is likely caused by the value changing to a defined value ' + 'from undefined. Decide between using a controlled or uncontrolled ' + 'value for the lifetime of the component. ' + 'More info: https://reactjs.org/link/controlled-components', name);
}
// Controlled -> Uncontrolled
// If the component prop is controlled, the prop value should be defined
if (controlled.current === true && !controlledValue) {
warn('A component is changing a controlled %s component to be uncontrolled. ' + 'This is likely caused by the value changing to an undefined value ' + 'from a defined one. Decide between using a controlled or ' + 'uncontrolled value for the lifetime of the component. ' + 'More info: https://reactjs.org/link/controlled-components', name);
}
}, [name, value]);
if (controlled.current === true) {
return [value, setState];
}
return [state, setState];
}
/** Warn when running in a development environment */
const warn = true ?
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function warn(format, ...args) {
let index = 0;
const message = format.replace(/%s/g, () => {
return args[index++];
});
// eslint-disable-next-line no-console
console.warn(`Warning: ${message}`);
} : 0;
/***/ })
}]);
//# sourceMappingURL=vendors-node_modules_primer_react_lib-esm_TreeView_TreeView_js-node_modules_primer_react_lib--75b6cc.f37e1d649cb354894b98.js.map | PypiClean |
/nni-3.0rc1-py3-none-macosx_10_9_x86_64.whl/nni_node/node_modules/moment/locale/ru.js |
;(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined'
&& typeof require === 'function' ? factory(require('../moment')) :
typeof define === 'function' && define.amd ? define(['../moment'], factory) :
factory(global.moment)
}(this, (function (moment) { 'use strict';
//! moment.js locale configuration
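    // Russian has three plural forms; `word` is passed as 'one_few_many' and the
    // matching form is chosen for the number (e.g. 1 минута, 2 минуты, 5 минут).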
function plural(word, num) {
var forms = word.split('_');
return num % 10 === 1 && num % 100 !== 11
? forms[0]
: num % 10 >= 2 && num % 10 <= 4 && (num % 100 < 10 || num % 100 >= 20)
? forms[1]
: forms[2];
}
function relativeTimeWithPlural(number, withoutSuffix, key) {
var format = {
ss: withoutSuffix ? 'секунда_секунды_секунд' : 'секунду_секунды_секунд',
mm: withoutSuffix ? 'минута_минуты_минут' : 'минуту_минуты_минут',
hh: 'час_часа_часов',
dd: 'день_дня_дней',
ww: 'неделя_недели_недель',
MM: 'месяц_месяца_месяцев',
yy: 'год_года_лет',
};
if (key === 'm') {
return withoutSuffix ? 'минута' : 'минуту';
} else {
return number + ' ' + plural(format[key], +number);
}
}
var monthsParse = [
/^янв/i,
/^фев/i,
/^мар/i,
/^апр/i,
/^ма[йя]/i,
/^июн/i,
/^июл/i,
/^авг/i,
/^сен/i,
/^окт/i,
/^ноя/i,
/^дек/i,
];
    // http://new.gramota.ru/spravka/rules/139-prop : § 103
    // Month abbreviations: http://new.gramota.ru/spravka/buro/search-answer?s=242637
    // CLDR data: http://www.unicode.org/cldr/charts/28/summary/ru.html#1753
var ru = moment.defineLocale('ru', {
months: {
format: 'января_февраля_марта_апреля_мая_июня_июля_августа_сентября_октября_ноября_декабря'.split(
'_'
),
standalone:
'январь_февраль_март_апрель_май_июнь_июль_август_сентябрь_октябрь_ноябрь_декабрь'.split(
'_'
),
},
monthsShort: {
        // Per CLDR it is "июл." and "июн.", but what is the point of changing a letter to a period?
format: 'янв._февр._мар._апр._мая_июня_июля_авг._сент._окт._нояб._дек.'.split(
'_'
),
standalone:
'янв._февр._март_апр._май_июнь_июль_авг._сент._окт._нояб._дек.'.split(
'_'
),
},
weekdays: {
standalone:
'воскресенье_понедельник_вторник_среда_четверг_пятница_суббота'.split(
'_'
),
format: 'воскресенье_понедельник_вторник_среду_четверг_пятницу_субботу'.split(
'_'
),
isFormat: /\[ ?[Вв] ?(?:прошлую|следующую|эту)? ?] ?dddd/,
},
weekdaysShort: 'вс_пн_вт_ср_чт_пт_сб'.split('_'),
weekdaysMin: 'вс_пн_вт_ср_чт_пт_сб'.split('_'),
monthsParse: monthsParse,
longMonthsParse: monthsParse,
shortMonthsParse: monthsParse,
    // Full month names with case endings, three-letter (for some, four-letter) abbreviations, with and without a trailing period
monthsRegex:
/^(январ[ья]|янв\.?|феврал[ья]|февр?\.?|марта?|мар\.?|апрел[ья]|апр\.?|ма[йя]|июн[ья]|июн\.?|июл[ья]|июл\.?|августа?|авг\.?|сентябр[ья]|сент?\.?|октябр[ья]|окт\.?|ноябр[ья]|нояб?\.?|декабр[ья]|дек\.?)/i,
    // copy of the previous regex
monthsShortRegex:
/^(январ[ья]|янв\.?|феврал[ья]|февр?\.?|марта?|мар\.?|апрел[ья]|апр\.?|ма[йя]|июн[ья]|июн\.?|июл[ья]|июл\.?|августа?|авг\.?|сентябр[ья]|сент?\.?|октябр[ья]|окт\.?|ноябр[ья]|нояб?\.?|декабр[ья]|дек\.?)/i,
    // full names with case endings
monthsStrictRegex:
/^(январ[яь]|феврал[яь]|марта?|апрел[яь]|ма[яй]|июн[яь]|июл[яь]|августа?|сентябр[яь]|октябр[яь]|ноябр[яь]|декабр[яь])/i,
    // A regex that matches only the abbreviated forms
monthsShortStrictRegex:
/^(янв\.|февр?\.|мар[т.]|апр\.|ма[яй]|июн[ья.]|июл[ья.]|авг\.|сент?\.|окт\.|нояб?\.|дек\.)/i,
longDateFormat: {
LT: 'H:mm',
LTS: 'H:mm:ss',
L: 'DD.MM.YYYY',
LL: 'D MMMM YYYY г.',
LLL: 'D MMMM YYYY г., H:mm',
LLLL: 'dddd, D MMMM YYYY г., H:mm',
},
calendar: {
sameDay: '[Сегодня, в] LT',
nextDay: '[Завтра, в] LT',
lastDay: '[Вчера, в] LT',
nextWeek: function (now) {
if (now.week() !== this.week()) {
switch (this.day()) {
case 0:
return '[В следующее] dddd, [в] LT';
case 1:
case 2:
case 4:
return '[В следующий] dddd, [в] LT';
case 3:
case 5:
case 6:
return '[В следующую] dddd, [в] LT';
}
} else {
if (this.day() === 2) {
return '[Во] dddd, [в] LT';
} else {
return '[В] dddd, [в] LT';
}
}
},
lastWeek: function (now) {
if (now.week() !== this.week()) {
switch (this.day()) {
case 0:
return '[В прошлое] dddd, [в] LT';
case 1:
case 2:
case 4:
return '[В прошлый] dddd, [в] LT';
case 3:
case 5:
case 6:
return '[В прошлую] dddd, [в] LT';
}
} else {
if (this.day() === 2) {
return '[Во] dddd, [в] LT';
} else {
return '[В] dddd, [в] LT';
}
}
},
sameElse: 'L',
},
relativeTime: {
future: 'через %s',
past: '%s назад',
s: 'несколько секунд',
ss: relativeTimeWithPlural,
m: relativeTimeWithPlural,
mm: relativeTimeWithPlural,
h: 'час',
hh: relativeTimeWithPlural,
d: 'день',
dd: relativeTimeWithPlural,
w: 'неделя',
ww: relativeTimeWithPlural,
M: 'месяц',
MM: relativeTimeWithPlural,
y: 'год',
yy: relativeTimeWithPlural,
},
meridiemParse: /ночи|утра|дня|вечера/i,
isPM: function (input) {
return /^(дня|вечера)$/.test(input);
},
meridiem: function (hour, minute, isLower) {
if (hour < 4) {
return 'ночи';
} else if (hour < 12) {
return 'утра';
} else if (hour < 17) {
return 'дня';
} else {
return 'вечера';
}
},
dayOfMonthOrdinalParse: /\d{1,2}-(й|го|я)/,
ordinal: function (number, period) {
switch (period) {
case 'M':
case 'd':
case 'DDD':
return number + '-й';
case 'D':
return number + '-го';
case 'w':
case 'W':
return number + '-я';
default:
return number;
}
},
week: {
dow: 1, // Monday is the first day of the week.
doy: 4, // The week that contains Jan 4th is the first week of the year.
},
});
return ru;
}))); | PypiClean |
/tacker-9.0.0.0rc1.tar.gz/tacker-9.0.0.0rc1/doc/source/contributor/vagrant_devstack.rst | ..
Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
Devstack Installation with Vagrant
==================================
This documentation introduces a deployment tool for Tacker.
You can find a :doc:`basic installation </install/devstack>` for deploying
OpenStack environment using ``devstack`` as a part of
:doc:`Tacker Installation Guide</install/index>`.
This guide expects that you have already set up your VMs and installed all
packages required to run OpenStack.
However, that is tedious for beginners and for developers who frequently
clean up their environment. You may want to use a tool to shorten such
tasks. This tool enables you to deploy several use cases with minimal effort.
How to Use
----------
Install Required Tools
~~~~~~~~~~~~~~~~~~~~~~
This installer consists of ``vagrant`` and ``ansible``.
Please follow instructions on official sites for installation.
* `vagrant <https://learn.hashicorp.com/tutorials/vagrant/getting-started-install>`_
* `ansible <https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html>`_
.. note::
    In this documentation, it is assumed that you use
`VirtualBox <https://www.virtualbox.org/>`_, but you can use any other
hypervisor supported by ``vagrant``.
.. figure:: ../_images/vagrant-devstack.svg
:scale: 55
You should install the ``vagrant-disksize`` plugin before launching your VMs
to enable expanding the size of the VM volumes. This is because the default
size of the box is fixed and not large enough for deploying Tacker.
.. code-block:: console
$ vagrant plugin install vagrant-disksize
Setup Configuration File
~~~~~~~~~~~~~~~~~~~~~~~~
.. note::
    Create your ssh key before running this tool to enable direct login
    with the auto-generated ssh config, although you can still do a two-step
    login starting from ``vagrant ssh``. You can log in to the ``controller``
    host with the auto-generated config ``ssh_config`` as below.
.. code-block:: console
$ ssh -F /path/to/tacker/vagrant/devstack/ssh_config controller
Set up ``machines.yml``, a config file that defines the parameters of each
VM you deploy.
You can find some templates of ``machines.yml`` in the ``samples`` directory.
This config file must be placed at ``/path/to/tacker/vagrant/devstack``
when running ``vagrant up``, otherwise the command fails.
.. code-block:: console
$ cd /path/to/tacker/vagrant/devstack
$ cp samples/machines.yml ./
$ YOUR_FAVORITE_EDITOR machines.yml
As its name suggests, ``machines.yml`` defines the parameters of each VM.
There are two top-level sections in the file, ``global`` and ``machines``.
The former defines parameters common to all VMs, and the latter defines
parameters for each individual VM.
.. note::
``global`` is optional currently and only one parameter under the section
is ``ssh_pub_key`` for specifying its location explicitly. You don't need
to use it if your public key is ``$HOME/.ssh/id_rsa.pub``.
Here is an example of ``machines.yml``. It is for the single-node use case,
so ``machines`` has only one entry.
.. literalinclude:: ../../../vagrant/devstack/samples/machines.yml
:language: yaml
There are several parameters for each VM supported in this tool.
.. list-table::
:widths: 30 125
:header-rows: 1
* - Attribute
- Description
* - hostname
- Any hostname for convenience, such as ``controller`` or ``compute``.
* - provider
- Vagrant box provider.
* - box
- Name of the box.
* - nof_cpus
- The number of CPUs assigned to the VM.
* - mem_size
- The size of memory assigned to the VM.
* - disk_size
- The size of disk assigned to the VM.
* - private_ips
- Series of private IPs.
* - public_ips
- Series of public IPs.
* - fwd_port_list
- Series of combination of ``guest`` and ``host`` ports for port
forwarding.
You should also update the IP address entries in the inventory file
``hosts`` to match the ``private_ips`` you defined in ``machines.yml``.
Now, you are ready to fire up the VMs and deploy OpenStack with
``ansible``.
Deploy OpenStack with Devstack
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Run ``vagrant up`` to launch the VMs and create the ``stack`` user on them.
.. code-block:: console
$ vagrant up
If ``vagrant up`` completes successfully, you can log in to the VMs as the
``stack`` user with your ssh public key.
This tool provides ``ansible`` playbooks for setting up the ``devstack``
installation. You usually don't need to modify the playbooks; they are
configurable through ``group_vars/all.yml``. See :ref:`optional_config`
for how to configure the file.
.. code-block:: console
$ ansible-playbook -i hosts site.yaml
After all tasks have finished, you can log in to the launched VMs. Log in to
the controller node and run ``stack.sh`` to install OpenStack.
You will find that ``local.conf`` is already prepared for your
environment.
See the instructions on configuring ``local.conf`` in the
`DevStack Quick Start <https://docs.openstack.org/devstack/latest/>`_
if you want to customize it further yourself.
.. code-block:: console
$ ssh stack@192.168.56.11
$ cd devstack
$ YOUR_FAVORITE_EDITOR local.conf
$ ./stack.sh
.. _optional_config:
Options Configuration
~~~~~~~~~~~~~~~~~~~~~
There are some parameters in ``group_vars/all.yml``, such as the devstack
password and optional configurations. You usually don't need to update them.
.. literalinclude:: ../../../vagrant/devstack/group_vars/all.yml
:language: yaml
Developer Tools
---------------
In the playbooks, installation of vim and some extra packages is included
for developers. To exclude such developer tools, modify
``group_vars/all.yml`` before running the ``ansible-playbook`` command.
.. list-table::
:widths: 30 125
:header-rows: 1
* - Parameter
- Description
* - use_vim_latest
- (Only for ubuntu) ``true`` or ``false`` for using the latest vim.
* - use_vim_extra_plugins
- ``true`` or ``false`` for installing vim packages including
language servers for python and bash.
* - use_extra_tools
      - | ``true`` or ``false`` for using the extra packages below.
| - jq
| - htop (Ubuntu only)
| - lnav (Ubuntu only)
| PypiClean |
/RsCMPX_LteMeas-4.0.185.tar.gz/RsCMPX_LteMeas-4.0.185/RsCMPX_LteMeas/Implementations/LteMeas/MultiEval/ListPy/Segment/Power/Cc/Average.py | from ........Internal.Core import Core
from ........Internal.CommandsGroup import CommandsGroup
from ........Internal.StructBase import StructBase
from ........Internal.ArgStruct import ArgStruct
from ........ import repcap
# noinspection PyPep8Naming,PyAttributeOutsideInit,SpellCheckingInspection
class AverageCls:
"""Average commands group definition. 2 total commands, 0 Subgroups, 2 group commands"""
def __init__(self, core: Core, parent):
self._core = core
self._cmd_group = CommandsGroup("average", core, parent)
# noinspection PyTypeChecker
class FetchStruct(StructBase):
"""Response structure. Fields: \n
- Reliability: int: 'Reliability indicator'
- Seg_Reliability: int: Reliability indicator for the segment
- Statist_Expired: int: Reached statistical length in subframes
- Out_Of_Tolerance: int: Percentage of measured subframes with failed limit check
- Tx_Power: float: TX power of the component carrier"""
__meta_args_list = [
ArgStruct.scalar_int('Reliability', 'Reliability'),
ArgStruct.scalar_int('Seg_Reliability'),
ArgStruct.scalar_int('Statist_Expired'),
ArgStruct.scalar_int('Out_Of_Tolerance'),
ArgStruct.scalar_float('Tx_Power')]
def __init__(self):
StructBase.__init__(self, self)
self.Reliability: int = None
self.Seg_Reliability: int = None
self.Statist_Expired: int = None
self.Out_Of_Tolerance: int = None
self.Tx_Power: float = None
def fetch(self, segment=repcap.Segment.Default, carrierComponentB=repcap.CarrierComponentB.Default) -> FetchStruct:
"""SCPI: FETCh:LTE:MEASurement<Instance>:MEValuation:LIST:SEGMent<nr>:POWer:CC<no>:AVERage \n
Snippet: value: FetchStruct = driver.lteMeas.multiEval.listPy.segment.power.cc.average.fetch(segment = repcap.Segment.Default, carrierComponentB = repcap.CarrierComponentB.Default) \n
Return TX power results for component carrier CC<no> and a single segment in list mode. \n
:param segment: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Segment')
:param carrierComponentB: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Cc')
:return: structure: for return value, see the help for FetchStruct structure arguments."""
segment_cmd_val = self._cmd_group.get_repcap_cmd_value(segment, repcap.Segment)
carrierComponentB_cmd_val = self._cmd_group.get_repcap_cmd_value(carrierComponentB, repcap.CarrierComponentB)
return self._core.io.query_struct(f'FETCh:LTE:MEASurement<Instance>:MEValuation:LIST:SEGMent{segment_cmd_val}:POWer:CC{carrierComponentB_cmd_val}:AVERage?', self.__class__.FetchStruct())
# noinspection PyTypeChecker
class CalculateStruct(StructBase):
"""Response structure. Fields: \n
- Reliability: int: No parameter help available
- Seg_Reliability: int: No parameter help available
- Statist_Expired: int: No parameter help available
- Out_Of_Tolerance: int: No parameter help available
- Tx_Power: float or bool: No parameter help available"""
__meta_args_list = [
ArgStruct.scalar_int('Reliability', 'Reliability'),
ArgStruct.scalar_int('Seg_Reliability'),
ArgStruct.scalar_int('Statist_Expired'),
ArgStruct.scalar_int('Out_Of_Tolerance'),
ArgStruct.scalar_float_ext('Tx_Power')]
def __init__(self):
StructBase.__init__(self, self)
self.Reliability: int = None
self.Seg_Reliability: int = None
self.Statist_Expired: int = None
self.Out_Of_Tolerance: int = None
self.Tx_Power: float or bool = None
def calculate(self, segment=repcap.Segment.Default, carrierComponentB=repcap.CarrierComponentB.Default) -> CalculateStruct:
"""SCPI: CALCulate:LTE:MEASurement<Instance>:MEValuation:LIST:SEGMent<nr>:POWer:CC<no>:AVERage \n
Snippet: value: CalculateStruct = driver.lteMeas.multiEval.listPy.segment.power.cc.average.calculate(segment = repcap.Segment.Default, carrierComponentB = repcap.CarrierComponentB.Default) \n
No command help available \n
:param segment: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Segment')
:param carrierComponentB: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Cc')
:return: structure: for return value, see the help for CalculateStruct structure arguments."""
segment_cmd_val = self._cmd_group.get_repcap_cmd_value(segment, repcap.Segment)
carrierComponentB_cmd_val = self._cmd_group.get_repcap_cmd_value(carrierComponentB, repcap.CarrierComponentB)
return self._core.io.query_struct(f'CALCulate:LTE:MEASurement<Instance>:MEValuation:LIST:SEGMent{segment_cmd_val}:POWer:CC{carrierComponentB_cmd_val}:AVERage?', self.__class__.CalculateStruct()) | PypiClean |
/tf1_tensorflow_object_detection_api-1.15-cp37-cp37m-win_amd64.whl/tf1_tensorflow_object_detection_api-1.15.0.data/purelib/object_detection/models/faster_rcnn_resnet_keras_feature_extractor.py | import tensorflow.compat.v1 as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.models.keras_models import resnet_v1
from object_detection.utils import model_util
_RESNET_MODEL_CONV4_LAST_LAYERS = {
'resnet_v1_50': 'conv4_block6_out',
'resnet_v1_101': 'conv4_block23_out',
'resnet_v1_152': 'conv4_block36_out',
}
class FasterRCNNResnetKerasFeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNKerasFeatureExtractor):
"""Faster R-CNN with Resnet feature extractor implementation."""
def __init__(self,
is_training,
resnet_v1_base_model,
resnet_v1_base_model_name,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
resnet_v1_base_model: base resnet v1 network to use. One of
the resnet_v1.resnet_v1_{50,101,152} models.
resnet_v1_base_model_name: model name under which to construct resnet v1.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
Raises:
      ValueError: If `first_stage_features_stride` is not 16.
"""
if first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 16.')
super(FasterRCNNResnetKerasFeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
weight_decay)
self.classification_backbone = None
self._variable_dict = {}
self._resnet_v1_base_model = resnet_v1_base_model
self._resnet_v1_base_model_name = resnet_v1_base_model_name
def preprocess(self, resized_inputs):
"""Faster R-CNN Resnet V1 preprocessing.
VGG style channel mean subtraction as described here:
https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
Note that if the number of channels is not equal to 3, the mean subtraction
will be skipped and the original resized_inputs will be returned.
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
"""
if resized_inputs.shape.as_list()[3] == 3:
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
else:
return resized_inputs
def get_proposal_feature_extractor_model(self, name=None):
"""Returns a model that extracts first stage RPN features.
Extracts features using the first half of the Resnet v1 network.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes preprocessed_inputs:
A [batch, height, width, channels] float32 tensor
representing a batch of images.
And returns rpn_feature_map:
A tensor with shape [batch, height, width, depth]
"""
if not self.classification_backbone:
self.classification_backbone = self._resnet_v1_base_model(
batchnorm_training=self._train_batch_norm,
conv_hyperparams=None,
weight_decay=self._weight_decay,
classes=None,
weights=None,
include_top=False
)
with tf.name_scope(name):
with tf.name_scope('ResnetV1'):
conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[
self._resnet_v1_base_model_name]
proposal_features = self.classification_backbone.get_layer(
name=conv4_last_layer).output
keras_model = tf.keras.Model(
inputs=self.classification_backbone.inputs,
outputs=proposal_features)
for variable in keras_model.variables:
self._variable_dict[variable.name[:-2]] = variable
return keras_model
def get_box_classifier_feature_extractor_model(self, name=None):
"""Returns a model that extracts second stage box classifier features.
This function reconstructs the "second half" of the ResNet v1
network after the part defined in `get_proposal_feature_extractor_model`.
Args:
name: A scope name to construct all variables within.
Returns:
A Keras model that takes proposal_feature_maps:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
And returns proposal_classifier_features:
A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
if not self.classification_backbone:
self.classification_backbone = self._resnet_v1_base_model(
batchnorm_training=self._train_batch_norm,
conv_hyperparams=None,
weight_decay=self._weight_decay,
classes=None,
weights=None,
include_top=False
)
with tf.name_scope(name):
with tf.name_scope('ResnetV1'):
conv4_last_layer = _RESNET_MODEL_CONV4_LAST_LAYERS[
self._resnet_v1_base_model_name]
proposal_feature_maps = self.classification_backbone.get_layer(
name=conv4_last_layer).output
proposal_classifier_features = self.classification_backbone.get_layer(
name='conv5_block3_out').output
keras_model = model_util.extract_submodel(
model=self.classification_backbone,
inputs=proposal_feature_maps,
outputs=proposal_classifier_features)
for variable in keras_model.variables:
self._variable_dict[variable.name[:-2]] = variable
return keras_model
class FasterRCNNResnet50KerasFeatureExtractor(
FasterRCNNResnetKerasFeatureExtractor):
"""Faster R-CNN with Resnet50 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
"""
super(FasterRCNNResnet50KerasFeatureExtractor, self).__init__(
is_training=is_training,
resnet_v1_base_model=resnet_v1.resnet_v1_50,
resnet_v1_base_model_name='resnet_v1_50',
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay)
class FasterRCNNResnet101KerasFeatureExtractor(
FasterRCNNResnetKerasFeatureExtractor):
"""Faster R-CNN with Resnet101 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
"""
super(FasterRCNNResnet101KerasFeatureExtractor, self).__init__(
is_training=is_training,
resnet_v1_base_model=resnet_v1.resnet_v1_101,
resnet_v1_base_model_name='resnet_v1_101',
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay)
class FasterRCNNResnet152KerasFeatureExtractor(
FasterRCNNResnetKerasFeatureExtractor):
"""Faster R-CNN with Resnet152 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride=16,
batch_norm_trainable=False,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
weight_decay: See base class.
"""
super(FasterRCNNResnet152KerasFeatureExtractor, self).__init__(
is_training=is_training,
resnet_v1_base_model=resnet_v1.resnet_v1_152,
resnet_v1_base_model_name='resnet_v1_152',
first_stage_features_stride=first_stage_features_stride,
batch_norm_trainable=batch_norm_trainable,
weight_decay=weight_decay) | PypiClean |
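# Illustrative usage sketch appended for clarity (not part of the upstream
# object_detection module): it builds the Resnet50 variant defined above and
# runs preprocessing plus the first-stage RPN feature model on a dummy image
# batch. The image size, scope name and variable names are assumptions made
# for this example; only the constructor and method signatures come from the
# classes in this file.
if __name__ == '__main__':
  _demo_extractor = FasterRCNNResnet50KerasFeatureExtractor(is_training=False)
  _demo_images = tf.random.uniform([1, 640, 640, 3], maxval=255.0)
  # VGG-style channel mean subtraction, as implemented in preprocess() above.
  _demo_preprocessed = _demo_extractor.preprocess(_demo_images)
  _demo_rpn = _demo_extractor.get_proposal_feature_extractor_model(
      name='FirstStageFeatureExtractor')
  _demo_rpn_features = _demo_rpn(_demo_preprocessed)
  print(_demo_rpn_features.shape)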
/accelbyte_py_sdk-0.48.0.tar.gz/accelbyte_py_sdk-0.48.0/accelbyte_py_sdk/api/platform/models/payment_order_refund_result.py |
# template file: ags_py_codegen
# AccelByte Gaming Services Platform Service (4.34.0)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
from ....core import StrEnum
class StatusEnum(StrEnum):
AUTHORISED = "AUTHORISED"
AUTHORISE_FAILED = "AUTHORISE_FAILED"
CHARGEBACK = "CHARGEBACK"
CHARGEBACK_REVERSED = "CHARGEBACK_REVERSED"
CHARGED = "CHARGED"
CHARGE_FAILED = "CHARGE_FAILED"
DELETED = "DELETED"
INIT = "INIT"
NOTIFICATION_OF_CHARGEBACK = "NOTIFICATION_OF_CHARGEBACK"
REFUNDED = "REFUNDED"
REFUNDING = "REFUNDING"
REFUND_FAILED = "REFUND_FAILED"
REQUEST_FOR_INFORMATION = "REQUEST_FOR_INFORMATION"
class PaymentOrderRefundResult(Model):
"""Payment order refund result (PaymentOrderRefundResult)
Properties:
created_time: (createdTime) REQUIRED str
namespace: (namespace) REQUIRED str
payment_order_no: (paymentOrderNo) REQUIRED str
status: (status) REQUIRED Union[str, StatusEnum]
refunded_time: (refundedTime) OPTIONAL str
target_namespace: (targetNamespace) OPTIONAL str
target_user_id: (targetUserId) OPTIONAL str
"""
# region fields
created_time: str # REQUIRED
namespace: str # REQUIRED
payment_order_no: str # REQUIRED
status: Union[str, StatusEnum] # REQUIRED
refunded_time: str # OPTIONAL
target_namespace: str # OPTIONAL
target_user_id: str # OPTIONAL
# endregion fields
# region with_x methods
def with_created_time(self, value: str) -> PaymentOrderRefundResult:
self.created_time = value
return self
def with_namespace(self, value: str) -> PaymentOrderRefundResult:
self.namespace = value
return self
def with_payment_order_no(self, value: str) -> PaymentOrderRefundResult:
self.payment_order_no = value
return self
def with_status(self, value: Union[str, StatusEnum]) -> PaymentOrderRefundResult:
self.status = value
return self
def with_refunded_time(self, value: str) -> PaymentOrderRefundResult:
self.refunded_time = value
return self
def with_target_namespace(self, value: str) -> PaymentOrderRefundResult:
self.target_namespace = value
return self
def with_target_user_id(self, value: str) -> PaymentOrderRefundResult:
self.target_user_id = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "created_time"):
result["createdTime"] = str(self.created_time)
elif include_empty:
result["createdTime"] = ""
if hasattr(self, "namespace"):
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
if hasattr(self, "payment_order_no"):
result["paymentOrderNo"] = str(self.payment_order_no)
elif include_empty:
result["paymentOrderNo"] = ""
if hasattr(self, "status"):
result["status"] = str(self.status)
elif include_empty:
result["status"] = Union[str, StatusEnum]()
if hasattr(self, "refunded_time"):
result["refundedTime"] = str(self.refunded_time)
elif include_empty:
result["refundedTime"] = ""
if hasattr(self, "target_namespace"):
result["targetNamespace"] = str(self.target_namespace)
elif include_empty:
result["targetNamespace"] = ""
if hasattr(self, "target_user_id"):
result["targetUserId"] = str(self.target_user_id)
elif include_empty:
result["targetUserId"] = ""
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
created_time: str,
namespace: str,
payment_order_no: str,
status: Union[str, StatusEnum],
refunded_time: Optional[str] = None,
target_namespace: Optional[str] = None,
target_user_id: Optional[str] = None,
**kwargs,
) -> PaymentOrderRefundResult:
instance = cls()
instance.created_time = created_time
instance.namespace = namespace
instance.payment_order_no = payment_order_no
instance.status = status
if refunded_time is not None:
instance.refunded_time = refunded_time
if target_namespace is not None:
instance.target_namespace = target_namespace
if target_user_id is not None:
instance.target_user_id = target_user_id
return instance
@classmethod
def create_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> PaymentOrderRefundResult:
instance = cls()
if not dict_:
return instance
if "createdTime" in dict_ and dict_["createdTime"] is not None:
instance.created_time = str(dict_["createdTime"])
elif include_empty:
instance.created_time = ""
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
if "paymentOrderNo" in dict_ and dict_["paymentOrderNo"] is not None:
instance.payment_order_no = str(dict_["paymentOrderNo"])
elif include_empty:
instance.payment_order_no = ""
if "status" in dict_ and dict_["status"] is not None:
instance.status = str(dict_["status"])
elif include_empty:
instance.status = Union[str, StatusEnum]()
if "refundedTime" in dict_ and dict_["refundedTime"] is not None:
instance.refunded_time = str(dict_["refundedTime"])
elif include_empty:
instance.refunded_time = ""
if "targetNamespace" in dict_ and dict_["targetNamespace"] is not None:
instance.target_namespace = str(dict_["targetNamespace"])
elif include_empty:
instance.target_namespace = ""
if "targetUserId" in dict_ and dict_["targetUserId"] is not None:
instance.target_user_id = str(dict_["targetUserId"])
elif include_empty:
instance.target_user_id = ""
return instance
@classmethod
def create_many_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> Dict[str, PaymentOrderRefundResult]:
return (
{k: cls.create_from_dict(v, include_empty=include_empty) for k, v in dict_}
if dict_
else {}
)
@classmethod
def create_many_from_list(
cls, list_: list, include_empty: bool = False
) -> List[PaymentOrderRefundResult]:
return (
[cls.create_from_dict(i, include_empty=include_empty) for i in list_]
if list_
else []
)
@classmethod
def create_from_any(
cls, any_: any, include_empty: bool = False, many: bool = False
) -> Union[
PaymentOrderRefundResult,
List[PaymentOrderRefundResult],
Dict[Any, PaymentOrderRefundResult],
]:
if many:
if isinstance(any_, dict):
return cls.create_many_from_dict(any_, include_empty=include_empty)
elif isinstance(any_, list):
return cls.create_many_from_list(any_, include_empty=include_empty)
else:
raise ValueError()
else:
return cls.create_from_dict(any_, include_empty=include_empty)
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"createdTime": "created_time",
"namespace": "namespace",
"paymentOrderNo": "payment_order_no",
"status": "status",
"refundedTime": "refunded_time",
"targetNamespace": "target_namespace",
"targetUserId": "target_user_id",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"createdTime": True,
"namespace": True,
"paymentOrderNo": True,
"status": True,
"refundedTime": False,
"targetNamespace": False,
"targetUserId": False,
}
@staticmethod
def get_enum_map() -> Dict[str, List[Any]]:
return {
"status": [
"AUTHORISED",
"AUTHORISE_FAILED",
"CHARGEBACK",
"CHARGEBACK_REVERSED",
"CHARGED",
"CHARGE_FAILED",
"DELETED",
"INIT",
"NOTIFICATION_OF_CHARGEBACK",
"REFUNDED",
"REFUNDING",
"REFUND_FAILED",
"REQUEST_FOR_INFORMATION",
],
}
# endregion static methods | PypiClean |
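# Illustrative round-trip sketch (not part of the generated SDK file). Because
# this module uses relative imports it is not meant to be run directly; in
# application code the class would be imported roughly as below (import path
# assumed from the package layout) and used like this:
#
#     from accelbyte_py_sdk.api.platform.models import PaymentOrderRefundResult
#
#     demo = PaymentOrderRefundResult.create(
#         created_time="2022-01-01T00:00:00Z",
#         namespace="demo-namespace",
#         payment_order_no="O123456789",
#         status="REFUNDED",
#     )
#     as_dict = demo.to_dict()
#     rebuilt = PaymentOrderRefundResult.create_from_dict(as_dict)
#     assert rebuilt.payment_order_no == demo.payment_order_no
#
# All field values above are made up for the example; only the method and
# field names come from this class.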
/aws_cost_optimization_1-0.1.1.tar.gz/aws_cost_optimization_1-0.1.1/aws_cost_optimization_1/__init__.py | import time
from boto3 import session
import logging
from aws_cost_optimization_1.utils import *
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
__author__ = "Dheeraj Banodha"
__version__ = '0.1.1'
class aws_client:
def __init__(self, **kwargs):
if 'aws_access_key_id' in kwargs.keys() and 'aws_secret_access_key' in kwargs.keys():
self.session = session.Session(
aws_access_key_id=kwargs['aws_access_key_id'],
aws_secret_access_key=kwargs['aws_secret_access_key'],
)
elif 'profile_name' in kwargs.keys():
self.session = session.Session(profile_name=kwargs['profile_name'])
self.regions = get_regions(self.session)
self.aws_region_map = {
'ca-central-1': 'Canada (Central)',
'ap-northeast-3': 'Asia Pacific (Osaka-Local)',
'us-east-1': 'US East (N. Virginia)',
'ap-northeast-2': 'Asia Pacific (Seoul)',
'us-gov-west-1': 'AWS GovCloud (US)',
'us-east-2': 'US East (Ohio)',
'ap-northeast-1': 'Asia Pacific (Tokyo)',
'ap-south-1': 'Asia Pacific (Mumbai)',
'ap-southeast-2': 'Asia Pacific (Sydney)',
'ap-southeast-1': 'Asia Pacific (Singapore)',
'sa-east-1': 'South America (Sao Paulo)',
'us-west-2': 'US West (Oregon)',
'eu-west-1': 'EU (Ireland)',
'eu-west-3': 'EU (Paris)',
'eu-west-2': 'EU (London)',
'us-west-1': 'US West (N. California)',
'eu-central-1': 'EU (Frankfurt)',
'eu-north-1': 'EU (Stockholm)'
}
def gp2_to_gp3(self) -> list:
"""
:return: list of cost saving recommendations
"""
logger.info(" ---Inside aws_client :: gp2_to_gp3()--- ")
recommendations = []
volumes = list_volumes(self.session, self.regions)
for region, volume_list in volumes.items():
resolved_region = self.aws_region_map[region]
for volume in volume_list:
if volume['VolumeType'] == 'gp2':
Filters = [
{'Type': 'TERM_MATCH', 'Field': 'volumeType', 'Value': 'General Purpose'},
{'Type': 'TERM_MATCH', 'Field': 'location', 'Value': resolved_region}
]
price = get_pricing(self.session, region, 'AmazonEC2', Filters=Filters)
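                    # price holds the per-GB-month rate for each volume type returned by get_pricing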
current_cost = float(price['gp2']) * float(volume['Size'])
effective_cost = float(price['gp3']) * float(volume['Size'])
recommendation = {
'Region': region,
'Volume Id': volume['VolumeId'],
'Current Cost': current_cost,
'Effective Cost': effective_cost,
'Savings': current_cost - effective_cost,
'Savings %': ((current_cost - effective_cost)/current_cost)*100
}
recommendations.append(recommendation)
return recommendations
def rds_upgrades(self) -> list:
"""
:return: list of cost saving recommendations
"""
logger.info(" ---Inside aws_client :: rds_upgrades()--- ")
recommendations = []
rds_instances = list_rds_instances(self.session, self.regions)
for region, rds_list in rds_instances.items():
resolved_region = self.aws_region_map[region]
for instance in rds_list:
instance_type = instance['DBInstanceClass']
instance_family = instance_type.split('.')[1]
Filters = [
{'Type': 'TERM_MATCH', 'Field': 'instanceType', 'Value': instance_type},
{'Type': 'TERM_MATCH', 'Field': 'databaseEngine', 'Value': instance['Engine']},
                {'Type': 'TERM_MATCH', 'Field': 'deploymentOption', 'Value': 'Multi-AZ' if instance['MultiAZ'] else 'Single-AZ'},
{'Type': 'TERM_MATCH', 'Field': 'productFamily', 'Value': 'Database Instance'},
{'Type': 'TERM_MATCH', 'Field': 'location', 'Value': resolved_region}
]
def evaluate(frm: str, to: str):
price_from = get_pricing(
self.session, region, 'AmazonRDS',
Filters
)
print(price_from)
Filters[0]['Value'] = instance_type.replace(frm, to)
price_to = get_pricing(
self.session, region, 'AmazonRDS', Filters
)
print(price_to)
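                    # 730 ≈ hours in a month: converts the hourly on-demand rate to an estimated monthly cost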
current_cost = float(price_from[instance_type]) * 730
effective_cost = float(price_to[instance_type.replace(frm, to)]) * 730
recommendation = {
'Region': region,
'Instance Id': instance['DBInstanceIdentifier'],
'Instance Type': instance_type,
'Upgrade To': instance_type.replace(frm, to),
'Current Cost': current_cost,
'Effective Cost': effective_cost,
'Savings': current_cost - effective_cost,
'Savings %': ((current_cost - effective_cost) / current_cost) * 100
}
return recommendation
match instance_family:
case 'm3':
recommendations.append(evaluate('m3', 'm5'))
case 'r3':
recommendations.append(evaluate('r3', 'r5'))
case 'm1':
recommendations.append(evaluate('m1', 't2'))
return recommendations | PypiClean |
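# Illustrative usage sketch (not part of the original module): it creates the
# client from a named AWS profile and prints the gp2 -> gp3 recommendations.
# The profile name is an assumption for the example; valid AWS credentials and
# access to the EC2 and Pricing APIs are required for this to actually run.
if __name__ == "__main__":
    _client = aws_client(profile_name="default")
    for _rec in _client.gp2_to_gp3():
        print(_rec["Volume Id"], round(_rec["Savings"], 2), round(_rec["Savings %"], 1))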
/in2xl-0.2.3.tar.gz/in2xl-0.2.3/docs/build/html/_static/js/html5shiv-printshiv.min.js | !function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x<style>"+b+"</style>",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="<xyz></xyz>",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){p=!0,q=!0}}();var 
y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); | PypiClean |
/plonesocial.activitystream-0.5.6.zip/plonesocial.activitystream-0.5.6/plonesocial/activitystream/browser/stream_provider.py | import itertools
from zope.interface import Interface
from zope.interface import implements
from zope.component import adapts
from zope.component import getMultiAdapter
from zope.component.hooks import getSite
from Acquisition import aq_inner
from AccessControl import Unauthorized
from AccessControl import getSecurityManager
from zExceptions import NotFound
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plonesocial.activitystream.interfaces import IActivity
from .interfaces import IPlonesocialActivitystreamLayer
from .interfaces import IStreamProvider
from .interfaces import IActivityProvider
from plonesocial.activitystream.integration import PLONESOCIAL
import logging
logger = logging.getLogger(__name__)
def date_key(item):
if hasattr(item, 'effective'):
# catalog brain
return max(item.effective, item.created)
# Activity
return item.date
class StreamProvider(object):
"""Render activitystreams
This is the core rendering logic that powers
@@stream and @@activitystream_portal, and also
plonesocial.networking @@profile
"""
implements(IStreamProvider)
adapts(Interface, IPlonesocialActivitystreamLayer, Interface)
index = ViewPageTemplateFile("templates/stream_provider.pt")
def __init__(self, context, request, view):
self.context = context
self.request = request
self.view = self.__parent__ = view
# @@activitystream_portal renders this as a portlet
self.portlet_data = None
# @@stream renders this optionally with a tag filter
self.tag = None
# @@stream and plonesocial.network:@@profile
# render this optionally with a users filter
self.users = None
self.microblog_context = PLONESOCIAL.context(context)
def update(self):
pass
def render(self):
return self.index()
__call__ = render
def activities(self):
brains = self._activities_brains()
statuses = self._activities_statuses()
items = itertools.chain(brains, statuses)
# see date_key sorting function above
items = sorted(items, key=date_key, reverse=True)
i = 0
for item in items:
if i >= self.count:
break
try:
activity = IActivity(item)
except Unauthorized:
continue
except NotFound:
logger.exception("NotFound: %s" % item.getURL())
continue
if self._activity_visible(activity):
yield activity
i += 1
def _activity_visible(self, activity):
if activity.is_status and self.show_microblog:
return True
if activity.is_content and self.show_content:
return True
if activity.is_discussion and self.show_discussion:
return True
return False
def _activities_brains(self):
if not self.show_content and not self.show_discussion:
return []
catalog = getToolByName(self.context, 'portal_catalog')
# fetch more than we need because of later filtering
contentfilter = dict(sort_on='Date',
sort_order='reverse',
sort_limit=self.count * 10)
if self.tag:
contentfilter["Subject"] = self.tag
# filter on users OR context, not both
if self.users:
contentfilter["Creator"] = self.users
elif self.microblog_context:
contentfilter['path'] = \
'/'.join(self.microblog_context.getPhysicalPath())
return catalog.searchResults(**contentfilter)
def _activities_statuses(self):
if not self.show_microblog:
return []
container = PLONESOCIAL.microblog
# show_microblog yet no container can happen on microblog uninstall
if not container:
return []
try:
# filter on users OR context, not both
if self.users:
# support plonesocial.network integration
return container.user_values(self.users,
limit=self.count,
tag=self.tag)
elif self.microblog_context:
# support collective.local integration
return container.context_values(self.microblog_context,
limit=self.count,
tag=self.tag)
else:
# default implementation
return container.values(limit=self.count,
tag=self.tag)
except Unauthorized:
return []
def activity_providers(self):
for activity in self.activities():
if not self.can_view(activity):
# discussion parent inaccessible
continue
yield getMultiAdapter(
(activity, self.request, self.view),
IActivityProvider,
name="plonesocial.activitystream.activity_provider")
def can_view(self, activity):
"""Returns true if current user has the 'View' permission.
"""
sm = getSecurityManager()
if activity.is_status:
permission = "Plone Social: View Microblog Status Update"
return sm.checkPermission(permission, self.context)
elif activity.is_discussion:
# check both the activity itself and it's page context
return sm.checkPermission(
'View', aq_inner(activity.context)) \
and sm.checkPermission(
'View',
aq_inner(activity.context).__parent__.__parent__)
elif activity.is_content:
return sm.checkPermission('View',
aq_inner(activity.context))
def is_anonymous(self):
portal_membership = getToolByName(getSite(),
'portal_membership',
None)
return portal_membership.isAnonymousUser()
@property
def count(self):
if self.portlet_data:
return self.portlet_data.count
return 15
@property
def show_microblog(self):
if self.portlet_data:
return self.portlet_data.show_microblog
return True
@property
def show_content(self):
if self.portlet_data:
return self.portlet_data.show_content
return True
@property
def show_discussion(self):
if self.portlet_data:
return self.portlet_data.show_discussion
return True | PypiClean |
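# Illustrative sketch (not part of the original module): it demonstrates the
# merge-and-sort pattern used by StreamProvider.activities() above, where
# catalog brains and microblog statuses are chained together and ordered
# reverse-chronologically with date_key. The SimpleNamespace stand-ins are
# fabricated for the example; in Plone the real inputs are catalog brains and
# status objects, and the Plone/Zope imports at the top of this module must be
# installed for the module itself to import.
if __name__ == "__main__":
    from datetime import datetime, timedelta
    from types import SimpleNamespace
    _now = datetime.now()
    _brain = SimpleNamespace(effective=_now - timedelta(days=2),
                             created=_now - timedelta(days=3))
    _status = SimpleNamespace(date=_now - timedelta(days=1))
    _merged = sorted(itertools.chain([_brain], [_status]),
                     key=date_key, reverse=True)
    # The newer status comes first, followed by the older brain.
    print(_merged)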
/repl_openbb-2.1.7-py3-none-any.whl/openbb_terminal/forecast/trans_view.py | __docformat__ = "numpy"
import logging
from typing import Union, Optional, List
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
from openbb_terminal.forecast import trans_model
from openbb_terminal.decorators import log_start_end
from openbb_terminal.forecast import helpers
logger = logging.getLogger(__name__)
# pylint: disable=too-many-arguments
@log_start_end(log=logger)
def display_trans_forecast(
data: Union[pd.Series, pd.DataFrame],
target_column: str = "close",
dataset_name: str = "",
n_predict: int = 5,
past_covariates: str = None,
train_split: float = 0.85,
forecast_horizon: int = 5,
input_chunk_length: int = 14,
output_chunk_length: int = 5,
d_model: int = 64,
nhead: int = 4,
num_encoder_layers: int = 3,
num_decoder_layers: int = 3,
dim_feedforward: int = 512,
activation: str = "relu",
dropout: float = 0.0,
batch_size: int = 32,
n_epochs: int = 300,
learning_rate: float = 1e-3,
model_save_name: str = "trans_model",
force_reset: bool = True,
save_checkpoints: bool = True,
export: str = "",
residuals: bool = False,
forecast_only: bool = False,
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None,
naive: bool = False,
export_pred_raw: bool = False,
external_axes: Optional[List[plt.axes]] = None,
):
"""Display Transformer forecast
Parameters
----------
data: Union[pd.Series, pd.DataFrame]
Input Data
dataset_name: str
The name of the ticker to be predicted
n_predict: int
Days to predict. Defaults to 5.
target_column: str
Target column to forecast. Defaults to "close".
train_split: float
Train/val split. Defaults to 0.85.
past_covariates: str
Multiple secondary columns to factor in when forecasting. Defaults to None.
forecast_horizon: int
Forecast horizon when performing historical forecasting. Defaults to 5.
input_chunk_length: int
Number of past time steps that are fed to the forecasting module at prediction time. Defaults to 14.
output_chunk_length: int
The length of the forecast of the model. Defaults to 5.
d_model: int
The number of expected features in the encoder/decoder inputs. Defaults to 64.
nhead: int
The number of heads in the multi-head attention mechanism. Defaults to 4.
num_encoder_layers: int
The number of encoder layers in the encoder. Defaults to 3.
num_decoder_layers: int
The number of decoder layers in the encoder. Defaults to 3.
dim_feedforward: int
The dimension of the feedforward network model. Defaults to 512.
activation: str
The activation function of encoder/decoder intermediate layer, ‘relu’ or ‘gelu’. Defaults to 'relu'.
    dropout: float
        Fraction of neurons affected by dropout. Defaults to 0.0.
    batch_size: int
        Number of time series (input and output sequences) used in each training pass. Defaults to 32.
    n_epochs: int
        Number of epochs over which to train the model. Defaults to 300.
    learning_rate: float
        Learning rate used during training. Defaults to 1e-3.
    model_save_name: str
        Name for model. Defaults to "trans_model".
force_reset: bool
If set to True, any previously-existing model with the same name will be reset
(all checkpoints will be discarded). Defaults to True.
save_checkpoints: bool
Whether or not to automatically save the untrained model and checkpoints from training.
Defaults to True.
export: str
Format to export data
residuals: bool
Whether to show residuals for the model. Defaults to False.
forecast_only: bool
Whether to only show dates in the forecasting range. Defaults to False.
start_date: Optional[datetime]
The starting date to perform analysis, data before this is trimmed. Defaults to None.
end_date: Optional[datetime]
The ending date to perform analysis, data after this is trimmed. Defaults to None.
naive: bool
Whether to show the naive baseline. This just assumes the closing price will be the same
as the previous day's closing price. Defaults to False.
external_axes: Optional[List[plt.axes]]
External axes to plot on
"""
data = helpers.clean_data(
data, start_date, end_date, target_column, past_covariates
)
if not helpers.check_data(data, target_column, past_covariates):
return
output_chunk_length = helpers.check_output(
output_chunk_length, n_predict, bool(past_covariates)
)
(
ticker_series,
historical_fcast,
predicted_values,
precision,
_model,
) = trans_model.get_trans_data(
data=data,
n_predict=n_predict,
target_column=target_column,
past_covariates=past_covariates,
train_split=train_split,
forecast_horizon=forecast_horizon,
input_chunk_length=input_chunk_length,
output_chunk_length=output_chunk_length,
d_model=d_model,
nhead=nhead,
num_encoder_layers=num_encoder_layers,
num_decoder_layers=num_decoder_layers,
dim_feedforward=dim_feedforward,
activation=activation,
dropout=dropout,
batch_size=batch_size,
n_epochs=n_epochs,
learning_rate=learning_rate,
model_save_name=model_save_name,
force_reset=force_reset,
save_checkpoints=save_checkpoints,
)
if ticker_series == []:
return
probabilistic = False
helpers.plot_forecast(
name="TRANS",
target_col=target_column,
historical_fcast=historical_fcast,
predicted_values=predicted_values,
ticker_series=ticker_series,
ticker_name=dataset_name,
data=data,
n_predict=n_predict,
forecast_horizon=forecast_horizon,
past_covariates=past_covariates,
precision=precision,
probabilistic=probabilistic,
export=export,
forecast_only=forecast_only,
naive=naive,
export_pred_raw=export_pred_raw,
external_axes=external_axes,
)
if residuals:
helpers.plot_residuals(
_model, past_covariates, ticker_series, forecast_horizon=forecast_horizon
) | PypiClean |
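# Illustrative usage sketch (not part of the original module): it calls the
# view function above on a tiny synthetic price series. The column names,
# dataset name and the very small epoch count are assumptions chosen so the
# example finishes quickly; a working darts/torch installation is required and
# real forecasts need far more data and training epochs.
if __name__ == "__main__":
    import numpy as np
    _dates = pd.date_range("2022-01-01", periods=120, freq="D")
    _demo_df = pd.DataFrame({"date": _dates, "close": np.linspace(100.0, 160.0, 120)})
    display_trans_forecast(
        data=_demo_df,
        target_column="close",
        dataset_name="DEMO",
        n_predict=5,
        n_epochs=5,
    )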
/ais_dom-2023.7.2-py3-none-any.whl/homeassistant/components/canary/coordinator.py | from __future__ import annotations
from collections.abc import ValuesView
from datetime import timedelta
import logging
from async_timeout import timeout
from canary.api import Api
from canary.model import Location, Reading
from requests.exceptions import ConnectTimeout, HTTPError
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DOMAIN
from .model import CanaryData
_LOGGER = logging.getLogger(__name__)
class CanaryDataUpdateCoordinator(DataUpdateCoordinator[CanaryData]):
"""Class to manage fetching Canary data."""
def __init__(self, hass: HomeAssistant, *, api: Api) -> None:
"""Initialize global Canary data updater."""
self.canary = api
update_interval = timedelta(seconds=30)
super().__init__(
hass,
_LOGGER,
name=DOMAIN,
update_interval=update_interval,
)
def _update_data(self) -> CanaryData:
"""Fetch data from Canary via sync functions."""
locations_by_id: dict[str, Location] = {}
readings_by_device_id: dict[str, ValuesView[Reading]] = {}
for location in self.canary.get_locations():
location_id = location.location_id
locations_by_id[location_id] = location
for device in location.devices:
if device.is_online:
readings_by_device_id[
device.device_id
] = self.canary.get_latest_readings(device.device_id)
return {
"locations": locations_by_id,
"readings": readings_by_device_id,
}
async def _async_update_data(self) -> CanaryData:
"""Fetch data from Canary."""
try:
async with timeout(15):
return await self.hass.async_add_executor_job(self._update_data)
except (ConnectTimeout, HTTPError) as error:
raise UpdateFailed(f"Invalid response from API: {error}") from error | PypiClean |
/nni-3.0rc1-py3-none-macosx_10_9_x86_64.whl/nni_node/node_modules/rx/dist/rx.coincidence.js |
;(function (factory) {
var objectTypes = {
'function': true,
'object': true
};
function checkGlobal(value) {
return (value && value.Object === Object) ? value : null;
}
var freeExports = (objectTypes[typeof exports] && exports && !exports.nodeType) ? exports : null;
var freeModule = (objectTypes[typeof module] && module && !module.nodeType) ? module : null;
var freeGlobal = checkGlobal(freeExports && freeModule && typeof global === 'object' && global);
var freeSelf = checkGlobal(objectTypes[typeof self] && self);
var freeWindow = checkGlobal(objectTypes[typeof window] && window);
var moduleExports = (freeModule && freeModule.exports === freeExports) ? freeExports : null;
var thisGlobal = checkGlobal(objectTypes[typeof this] && this);
var root = freeGlobal || ((freeWindow !== (thisGlobal && thisGlobal.window)) && freeWindow) || freeSelf || thisGlobal || Function('return this')();
// Because of build optimizers
if (typeof define === 'function' && define.amd) {
define(['./rx'], function (Rx, exports) {
return factory(root, exports, Rx);
});
} else if (typeof module === 'object' && module && module.exports === freeExports) {
module.exports = factory(root, module.exports, require('./rx'));
} else {
root.Rx = factory(root, {}, root.Rx);
}
}.call(this, function (root, exp, Rx, undefined) {
var Observable = Rx.Observable,
ObservableBase = Rx.ObservableBase,
AbstractObserver = Rx.internals.AbstractObserver,
CompositeDisposable = Rx.CompositeDisposable,
BinaryDisposable = Rx.BinaryDisposable,
RefCountDisposable = Rx.RefCountDisposable,
SingleAssignmentDisposable = Rx.SingleAssignmentDisposable,
SerialDisposable = Rx.SerialDisposable,
Subject = Rx.Subject,
observableProto = Observable.prototype,
observableEmpty = Observable.empty,
observableNever = Observable.never,
AnonymousObservable = Rx.AnonymousObservable,
addRef = Rx.internals.addRef,
inherits = Rx.internals.inherits,
bindCallback = Rx.internals.bindCallback,
noop = Rx.helpers.noop,
isPromise = Rx.helpers.isPromise,
isFunction = Rx.helpers.isFunction,
observableFromPromise = Observable.fromPromise;
var errorObj = {e: {}};
function tryCatcherGen(tryCatchTarget) {
return function tryCatcher() {
try {
return tryCatchTarget.apply(this, arguments);
} catch (e) {
errorObj.e = e;
return errorObj;
}
};
}
var tryCatch = Rx.internals.tryCatch = function tryCatch(fn) {
if (!isFunction(fn)) { throw new TypeError('fn must be a function'); }
return tryCatcherGen(fn);
};
function thrower(e) {
throw e;
}
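  // Fallback Map implementation (linear key/value arrays) for environments without a native ES6 Map.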
var Map = root.Map || (function () {
function Map() {
this.size = 0;
this._values = [];
this._keys = [];
}
Map.prototype['delete'] = function (key) {
var i = this._keys.indexOf(key);
if (i === -1) { return false; }
this._values.splice(i, 1);
this._keys.splice(i, 1);
this.size--;
return true;
};
Map.prototype.get = function (key) {
var i = this._keys.indexOf(key);
return i === -1 ? undefined : this._values[i];
};
Map.prototype.set = function (key, value) {
var i = this._keys.indexOf(key);
if (i === -1) {
this._keys.push(key);
this._values.push(value);
this.size++;
} else {
this._values[i] = value;
}
return this;
};
Map.prototype.forEach = function (cb, thisArg) {
for (var i = 0; i < this.size; i++) {
cb.call(thisArg, this._values[i], this._keys[i]);
}
};
return Map;
}());
/**
* Correlates the elements of two sequences based on overlapping durations.
*
* @param {Observable} right The right observable sequence to join elements for.
* @param {Function} leftDurationSelector A function to select the duration (expressed as an observable sequence) of each element of the left observable sequence, used to determine overlap.
* @param {Function} rightDurationSelector A function to select the duration (expressed as an observable sequence) of each element of the right observable sequence, used to determine overlap.
* @param {Function} resultSelector A function invoked to compute a result element for any two overlapping elements of the left and right observable sequences. The parameters passed to the function correspond with the elements from the left and right source sequences for which overlap occurs.
* @returns {Observable} An observable sequence that contains result elements computed from source elements that have an overlapping duration.
*/
observableProto.join = function (right, leftDurationSelector, rightDurationSelector, resultSelector) {
var left = this;
return new AnonymousObservable(function (o) {
var group = new CompositeDisposable();
var leftDone = false, rightDone = false;
var leftId = 0, rightId = 0;
var leftMap = new Map(), rightMap = new Map();
var handleError = function (e) { o.onError(e); };
group.add(left.subscribe(
function (value) {
var id = leftId++, md = new SingleAssignmentDisposable();
leftMap.set(id, value);
group.add(md);
var duration = tryCatch(leftDurationSelector)(value);
if (duration === errorObj) { return o.onError(duration.e); }
md.setDisposable(duration.take(1).subscribe(
noop,
handleError,
function () {
leftMap['delete'](id) && leftMap.size === 0 && leftDone && o.onCompleted();
group.remove(md);
}));
rightMap.forEach(function (v) {
var result = tryCatch(resultSelector)(value, v);
if (result === errorObj) { return o.onError(result.e); }
o.onNext(result);
});
},
handleError,
function () {
leftDone = true;
(rightDone || leftMap.size === 0) && o.onCompleted();
})
);
group.add(right.subscribe(
function (value) {
var id = rightId++, md = new SingleAssignmentDisposable();
rightMap.set(id, value);
group.add(md);
var duration = tryCatch(rightDurationSelector)(value);
if (duration === errorObj) { return o.onError(duration.e); }
md.setDisposable(duration.take(1).subscribe(
noop,
handleError,
function () {
rightMap['delete'](id) && rightMap.size === 0 && rightDone && o.onCompleted();
group.remove(md);
}));
leftMap.forEach(function (v) {
var result = tryCatch(resultSelector)(v, value);
if (result === errorObj) { return o.onError(result.e); }
o.onNext(result);
});
},
handleError,
function () {
rightDone = true;
(leftDone || rightMap.size === 0) && o.onCompleted();
})
);
return group;
}, left);
};
/**
* Correlates the elements of two sequences based on overlapping durations, and groups the results.
*
* @param {Observable} right The right observable sequence to join elements for.
* @param {Function} leftDurationSelector A function to select the duration (expressed as an observable sequence) of each element of the left observable sequence, used to determine overlap.
* @param {Function} rightDurationSelector A function to select the duration (expressed as an observable sequence) of each element of the right observable sequence, used to determine overlap.
* @param {Function} resultSelector A function invoked to compute a result element for any element of the left sequence with overlapping elements from the right observable sequence. The first parameter passed to the function is an element of the left sequence. The second parameter passed to the function is an observable sequence with elements from the right sequence that overlap with the left sequence's element.
* @returns {Observable} An observable sequence that contains result elements computed from source elements that have an overlapping duration.
*/
observableProto.groupJoin = function (right, leftDurationSelector, rightDurationSelector, resultSelector) {
var left = this;
return new AnonymousObservable(function (o) {
var group = new CompositeDisposable();
var r = new RefCountDisposable(group);
var leftMap = new Map(), rightMap = new Map();
var leftId = 0, rightId = 0;
var handleError = function (e) { return function (v) { v.onError(e); }; };
group.add(left.subscribe(
function (value) {
var s = new Subject();
var id = leftId++;
leftMap.set(id, s);
var result = tryCatch(resultSelector)(value, addRef(s, r));
if (result === errorObj) {
leftMap.forEach(handleError(result.e));
return o.onError(result.e);
}
o.onNext(result);
rightMap.forEach(function (v) { s.onNext(v); });
var md = new SingleAssignmentDisposable();
group.add(md);
var duration = tryCatch(leftDurationSelector)(value);
if (duration === errorObj) {
leftMap.forEach(handleError(duration.e));
return o.onError(duration.e);
}
md.setDisposable(duration.take(1).subscribe(
noop,
function (e) {
leftMap.forEach(handleError(e));
o.onError(e);
},
function () {
leftMap['delete'](id) && s.onCompleted();
group.remove(md);
}));
},
function (e) {
leftMap.forEach(handleError(e));
o.onError(e);
},
function () { o.onCompleted(); })
);
group.add(right.subscribe(
function (value) {
var id = rightId++;
rightMap.set(id, value);
var md = new SingleAssignmentDisposable();
group.add(md);
var duration = tryCatch(rightDurationSelector)(value);
if (duration === errorObj) {
leftMap.forEach(handleError(duration.e));
return o.onError(duration.e);
}
md.setDisposable(duration.take(1).subscribe(
noop,
function (e) {
leftMap.forEach(handleError(e));
o.onError(e);
},
function () {
rightMap['delete'](id);
group.remove(md);
}));
leftMap.forEach(function (v) { v.onNext(value); });
},
function (e) {
leftMap.forEach(handleError(e));
o.onError(e);
})
);
return r;
}, left);
};
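  // Usage sketch (illustrative only): unlike join, the result selector receives an
  // observable of overlapping right values for each left value. The durations and
  // selectors below are assumptions chosen just to demonstrate the shape of the call:
  //
  //   var left = Rx.Observable.interval(100).take(3);
  //   var right = Rx.Observable.interval(60).take(10);
  //   left.groupJoin(
  //       right,
  //       function () { return Rx.Observable.timer(100); },
  //       function () { return Rx.Observable.empty(); },
  //       function (l, rs) { return rs.map(function (r) { return l + '->' + r; }); })
  //     .mergeAll()
  //     .subscribe(function (s) { console.log(s); });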
function toArray(x) { return x.toArray(); }
/**
   * Projects each element of an observable sequence into zero or more buffers.
   * @param {Mixed} bufferOpeningsOrClosingSelector Observable sequence whose elements denote the creation of new buffers, or, a function invoked to define the boundaries of the produced buffers (a new buffer is started when the previous one is closed, resulting in non-overlapping buffers).
   * @param {Function} [bufferClosingSelector] A function invoked to define the closing of each produced buffer. If a closing selector function is specified for the first parameter, this parameter is ignored.
   * @returns {Observable} An observable sequence of buffers.
*/
observableProto.buffer = function () {
return this.window.apply(this, arguments)
.flatMap(toArray);
};
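  // Usage sketch (illustrative only; the 500 ms closing timer is an assumption): buffer
  // delegates to window and flattens each window into an array via toArray.
  //
  //   Rx.Observable.interval(100)
  //     .buffer(function () { return Rx.Observable.timer(500); })
  //     .subscribe(function (batch) { console.log(batch); }); // e.g. [0, 1, 2, 3]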
/**
* Projects each element of an observable sequence into zero or more windows.
*
* @param {Mixed} windowOpeningsOrClosingSelector Observable sequence whose elements denote the creation of new windows, or, a function invoked to define the boundaries of the produced windows (a new window is started when the previous one is closed, resulting in non-overlapping windows).
* @param {Function} [windowClosingSelector] A function invoked to define the closing of each produced window. If a closing selector function is specified for the first parameter, this parameter is ignored.
* @returns {Observable} An observable sequence of windows.
*/
observableProto.window = function (windowOpeningsOrClosingSelector, windowClosingSelector) {
if (arguments.length === 1 && typeof arguments[0] !== 'function') {
return observableWindowWithBoundaries.call(this, windowOpeningsOrClosingSelector);
}
return typeof windowOpeningsOrClosingSelector === 'function' ?
observableWindowWithClosingSelector.call(this, windowOpeningsOrClosingSelector) :
observableWindowWithOpenings.call(this, windowOpeningsOrClosingSelector, windowClosingSelector);
};
function observableWindowWithOpenings(windowOpenings, windowClosingSelector) {
return windowOpenings.groupJoin(this, windowClosingSelector, observableEmpty, function (_, win) {
return win;
});
}
function observableWindowWithBoundaries(windowBoundaries) {
var source = this;
return new AnonymousObservable(function (observer) {
var win = new Subject(),
d = new CompositeDisposable(),
r = new RefCountDisposable(d);
observer.onNext(addRef(win, r));
d.add(source.subscribe(function (x) {
win.onNext(x);
}, function (err) {
win.onError(err);
observer.onError(err);
}, function () {
win.onCompleted();
observer.onCompleted();
}));
isPromise(windowBoundaries) && (windowBoundaries = observableFromPromise(windowBoundaries));
d.add(windowBoundaries.subscribe(function (w) {
win.onCompleted();
win = new Subject();
observer.onNext(addRef(win, r));
}, function (err) {
win.onError(err);
observer.onError(err);
}, function () {
win.onCompleted();
observer.onCompleted();
}));
return r;
}, source);
}
function observableWindowWithClosingSelector(windowClosingSelector) {
var source = this;
return new AnonymousObservable(function (observer) {
var m = new SerialDisposable(),
d = new CompositeDisposable(m),
r = new RefCountDisposable(d),
win = new Subject();
observer.onNext(addRef(win, r));
d.add(source.subscribe(function (x) {
win.onNext(x);
}, function (err) {
win.onError(err);
observer.onError(err);
}, function () {
win.onCompleted();
observer.onCompleted();
}));
function createWindowClose () {
var windowClose;
try {
windowClose = windowClosingSelector();
} catch (e) {
observer.onError(e);
return;
}
isPromise(windowClose) && (windowClose = observableFromPromise(windowClose));
var m1 = new SingleAssignmentDisposable();
m.setDisposable(m1);
m1.setDisposable(windowClose.take(1).subscribe(noop, function (err) {
win.onError(err);
observer.onError(err);
}, function () {
win.onCompleted();
win = new Subject();
observer.onNext(addRef(win, r));
createWindowClose();
}));
}
createWindowClose();
return r;
}, source);
}
var PairwiseObservable = (function (__super__) {
inherits(PairwiseObservable, __super__);
function PairwiseObservable(source) {
this.source = source;
__super__.call(this);
}
PairwiseObservable.prototype.subscribeCore = function (o) {
return this.source.subscribe(new PairwiseObserver(o));
};
return PairwiseObservable;
}(ObservableBase));
var PairwiseObserver = (function(__super__) {
inherits(PairwiseObserver, __super__);
function PairwiseObserver(o) {
this._o = o;
this._p = null;
this._hp = false;
__super__.call(this);
}
PairwiseObserver.prototype.next = function (x) {
if (this._hp) {
this._o.onNext([this._p, x]);
} else {
this._hp = true;
}
this._p = x;
};
PairwiseObserver.prototype.error = function (err) { this._o.onError(err); };
PairwiseObserver.prototype.completed = function () { this._o.onCompleted(); };
return PairwiseObserver;
}(AbstractObserver));
/**
* Returns a new observable that triggers on the second and subsequent triggerings of the input observable.
* The Nth triggering of the input observable passes the arguments from the N-1th and Nth triggering as a pair.
* The argument passed to the N-1th triggering is held in hidden internal state until the Nth triggering occurs.
* @returns {Observable} An observable that triggers on successive pairs of observations from the input observable as an array.
*/
observableProto.pairwise = function () {
return new PairwiseObservable(this);
};
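  // Usage sketch (illustrative only): every emission after the first is paired with its
  // predecessor.
  //
  //   Rx.Observable.from([1, 2, 3, 4])
  //     .pairwise()
  //     .subscribe(function (pair) { console.log(pair); }); // [1,2] [2,3] [3,4]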
/**
* Returns two observables which partition the observations of the source by the given function.
* The first will trigger observations for those values for which the predicate returns true.
* The second will trigger observations for those values where the predicate returns false.
* The predicate is executed once for each subscribed observer.
* Both also propagate all error observations arising from the source and each completes
* when the source completes.
* @param {Function} predicate
* The function to determine which output Observable will trigger a particular observation.
* @returns {Array}
* An array of observables. The first triggers when the predicate returns true,
* and the second triggers when the predicate returns false.
*/
observableProto.partition = function(predicate, thisArg) {
var fn = bindCallback(predicate, thisArg, 3);
return [
this.filter(predicate, thisArg),
this.filter(function (x, i, o) { return !fn(x, i, o); })
];
};
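  // Usage sketch (illustrative only): one predicate splits the source into two streams;
  // index 0 receives matching values, index 1 the rest.
  //
  //   var parts = Rx.Observable.range(0, 6).partition(function (x) { return x % 2 === 0; });
  //   parts[0].subscribe(function (x) { console.log('even', x); });
  //   parts[1].subscribe(function (x) { console.log('odd', x); });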
/**
* Groups the elements of an observable sequence according to a specified key selector function and comparer and selects the resulting elements by using a specified function.
*
* @example
* var res = observable.groupBy(function (x) { return x.id; });
   * 2 - observable.groupBy(function (x) { return x.id; }, function (x) { return x.name; });
   * 3 - observable.groupBy(function (x) { return x.id; }, function (x) { return x.name; }, function (x) { return x.toString(); });
* @param {Function} keySelector A function to extract the key for each element.
* @param {Function} [elementSelector] A function to map each source element to an element in an observable group.
* @returns {Observable} A sequence of observable groups, each of which corresponds to a unique key value, containing all elements that share that same key value.
*/
observableProto.groupBy = function (keySelector, elementSelector) {
return this.groupByUntil(keySelector, elementSelector, observableNever);
};
/**
* Groups the elements of an observable sequence according to a specified key selector function.
* A duration selector function is used to control the lifetime of groups. When a group expires, it receives an OnCompleted notification. When a new element with the same
* key value as a reclaimed group occurs, the group will be reborn with a new lifetime request.
*
* @example
* var res = observable.groupByUntil(function (x) { return x.id; }, null, function () { return Rx.Observable.never(); });
   * 2 - observable.groupByUntil(function (x) { return x.id; }, function (x) { return x.name; }, function () { return Rx.Observable.never(); });
   * 3 - observable.groupByUntil(function (x) { return x.id; }, function (x) { return x.name; }, function () { return Rx.Observable.never(); }, function (x) { return x.toString(); });
* @param {Function} keySelector A function to extract the key for each element.
* @param {Function} durationSelector A function to signal the expiration of a group.
* @returns {Observable}
* A sequence of observable groups, each of which corresponds to a unique key value, containing all elements that share that same key value.
   *  If a group's lifetime expires, a new group with the same key value can be created once an element with such a key value is encountered.
*
*/
observableProto.groupByUntil = function (keySelector, elementSelector, durationSelector) {
var source = this;
return new AnonymousObservable(function (o) {
var map = new Map(),
groupDisposable = new CompositeDisposable(),
refCountDisposable = new RefCountDisposable(groupDisposable),
handleError = function (e) { return function (item) { item.onError(e); }; };
groupDisposable.add(
source.subscribe(function (x) {
var key = tryCatch(keySelector)(x);
if (key === errorObj) {
map.forEach(handleError(key.e));
return o.onError(key.e);
}
var fireNewMapEntry = false, writer = map.get(key);
if (writer === undefined) {
writer = new Subject();
map.set(key, writer);
fireNewMapEntry = true;
}
if (fireNewMapEntry) {
var group = new GroupedObservable(key, writer, refCountDisposable),
durationGroup = new GroupedObservable(key, writer);
var duration = tryCatch(durationSelector)(durationGroup);
if (duration === errorObj) {
map.forEach(handleError(duration.e));
return o.onError(duration.e);
}
o.onNext(group);
var md = new SingleAssignmentDisposable();
groupDisposable.add(md);
md.setDisposable(duration.take(1).subscribe(
noop,
function (e) {
map.forEach(handleError(e));
o.onError(e);
},
function () {
if (map['delete'](key)) { writer.onCompleted(); }
groupDisposable.remove(md);
}));
}
var element = x;
if (isFunction(elementSelector)) {
element = tryCatch(elementSelector)(x);
if (element === errorObj) {
map.forEach(handleError(element.e));
return o.onError(element.e);
}
}
writer.onNext(element);
}, function (e) {
map.forEach(handleError(e));
o.onError(e);
}, function () {
map.forEach(function (item) { item.onCompleted(); });
o.onCompleted();
}));
return refCountDisposable;
}, source);
};
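  // Usage sketch (illustrative only; the timer-based group duration is an assumption):
  // values are grouped by key and each group lives until its duration observable fires.
  //
  //   Rx.Observable.from(['a', 'b', 'a', 'c', 'b'])
  //     .groupByUntil(
  //       function (x) { return x; },
  //       null,
  //       function () { return Rx.Observable.timer(1000); })
  //     .flatMap(function (g) {
  //       return g.count().map(function (n) { return g.key + ':' + n; });
  //     })
  //     .subscribe(function (s) { console.log(s); });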
var UnderlyingObservable = (function (__super__) {
inherits(UnderlyingObservable, __super__);
function UnderlyingObservable(m, u) {
this._m = m;
this._u = u;
__super__.call(this);
}
UnderlyingObservable.prototype.subscribeCore = function (o) {
return new BinaryDisposable(this._m.getDisposable(), this._u.subscribe(o));
};
return UnderlyingObservable;
}(ObservableBase));
var GroupedObservable = (function (__super__) {
inherits(GroupedObservable, __super__);
function GroupedObservable(key, underlyingObservable, mergedDisposable) {
__super__.call(this);
this.key = key;
this.underlyingObservable = !mergedDisposable ?
underlyingObservable :
new UnderlyingObservable(mergedDisposable, underlyingObservable);
}
GroupedObservable.prototype._subscribe = function (o) {
return this.underlyingObservable.subscribe(o);
};
return GroupedObservable;
}(Observable));
return Rx;
})); | PypiClean |
/surrogate-0.1.tar.gz/surrogate-0.1/README.md | About
=====
`surrogate` is a micro-lib helping people to create stubs
for non-existing modules in `sys.modules` so that later
those modules can be imported. `surrogate` does not touch
modules that exist in `sys.modules` even if you ask it to.
At the moment `surrogate` offers only a decorator interface,
but a context-manager interface is planned as well.
This library has been forked from Kostia Balytskyi's [ikostia/surrogate](https://github.com/ikostia/surrogate) for packaging and publishing to PyPI. The commit history remains intact, but I've broken the GitHub fork link to make it clear to visitors from PyPI which repository corresponds to the packaged and published version.
Intention
=========
Once the author needed to write tests for functions that
work only in production (but not in the development
environment). Those functions imported modules that did
not exist in the development environment, so in order to
test them, the aforementioned modules had to be mocked.
Unfortunately, the author did not manage to mock those
modules with the `patch` decorator from the `mock`
library alone: it was necessary to create module stubs
first and then to mock them. This micro-lib does exactly
what the author needed (mistakes excepted, of course).
Usage
=====
Please use `surrogate` as a function decorator:
```python
from surrogate import surrogate
@surrogate('sys.my.cool.module.stub1')
@surrogate('sys.my.cool.module.stub2')
def test_something():
from sys.my.cool.module import stub1
from sys.my.cool.module import stub2
import sys.my.cool as cool
import sys # this is a normal sys module
do_something()
```
In keeping with that intention, you can combine `surrogate`
with `mock.patch` decorators:
```python
from surrogate import surrogate
from mock import patch
@surrogate('this.module.doesnt.exist')
@patch('this.module.doesnt.exist', whatever)
def test_something():
from this.module.doesnt import exist
do_something()
```
LICENSE
=======
This code can be used, distributed and modified
in any way one wants. If one gets any use of it,
the author is already rewarded.
On the other hand, do not expect any guaranteed
support from the author. Use it as is.
| PypiClean |
/vid2info-1.129.tar.gz/vid2info-1.129/yolox/evaluators/evaluation.py | import os
import numpy as np
import copy
import motmetrics as mm
mm.lap.default_solver = 'lap'
class Evaluator(object):
def __init__(self, data_root, seq_name, data_type):
self.data_root = data_root
self.seq_name = seq_name
self.data_type = data_type
self.load_annotations()
self.reset_accumulator()
def load_annotations(self):
assert self.data_type == 'mot'
gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt')
self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True)
self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True)
def reset_accumulator(self):
self.acc = mm.MOTAccumulator(auto_id=True)
def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):
# results
trk_tlwhs = np.copy(trk_tlwhs)
trk_ids = np.copy(trk_ids)
# gts
gt_objs = self.gt_frame_dict.get(frame_id, [])
gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]
# ignore boxes
ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])
ignore_tlwhs = unzip_objs(ignore_objs)[0]
# remove ignored results
keep = np.ones(len(trk_tlwhs), dtype=bool)
iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5)
if len(iou_distance) > 0:
match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)
match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])
match_ious = iou_distance[match_is, match_js]
match_js = np.asarray(match_js, dtype=int)
match_js = match_js[np.logical_not(np.isnan(match_ious))]
keep[match_js] = False
trk_tlwhs = trk_tlwhs[keep]
trk_ids = trk_ids[keep]
#match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)
#match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])
#match_ious = iou_distance[match_is, match_js]
#match_js = np.asarray(match_js, dtype=int)
#match_js = match_js[np.logical_not(np.isnan(match_ious))]
#keep[match_js] = False
#trk_tlwhs = trk_tlwhs[keep]
#trk_ids = trk_ids[keep]
# get distance matrix
iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)
# acc
self.acc.update(gt_ids, trk_ids, iou_distance)
if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'):
events = self.acc.last_mot_events # only supported by https://github.com/longcw/py-motmetrics
else:
events = None
return events
def eval_file(self, filename):
self.reset_accumulator()
result_frame_dict = read_results(filename, self.data_type, is_gt=False)
#frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys())))
frames = sorted(list(set(result_frame_dict.keys())))
for frame_id in frames:
trk_objs = result_frame_dict.get(frame_id, [])
trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]
self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)
return self.acc
@staticmethod
def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')):
names = copy.deepcopy(names)
if metrics is None:
metrics = mm.metrics.motchallenge_metrics
metrics = copy.deepcopy(metrics)
mh = mm.metrics.create()
summary = mh.compute_many(
accs,
metrics=metrics,
names=names,
generate_overall=True
)
return summary
@staticmethod
def save_summary(summary, filename):
import pandas as pd
writer = pd.ExcelWriter(filename)
summary.to_excel(writer)
writer.save()
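# Minimal usage sketch: how Evaluator, get_summary and save_summary are typically
# combined for a set of sequences. The "<seq>.txt" result-file naming and the summary
# output path below are assumptions made only for this example.
def evaluate_sequences_example(data_root, seq_names, result_dir):
    accs = []
    for seq in seq_names:
        evaluator = Evaluator(data_root, seq, 'mot')
        # tracking results are assumed to live in "<result_dir>/<seq>.txt"
        accs.append(evaluator.eval_file(os.path.join(result_dir, seq + '.txt')))
    summary = Evaluator.get_summary(accs, seq_names)
    Evaluator.save_summary(summary, os.path.join(result_dir, 'summary.xlsx'))
    return summary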
def read_results(filename, data_type: str, is_gt=False, is_ignore=False):
if data_type in ('mot', 'lab'):
read_fun = read_mot_results
else:
raise ValueError('Unknown data type: {}'.format(data_type))
return read_fun(filename, is_gt, is_ignore)
"""
labels={'ped', ... % 1
'person_on_vhcl', ... % 2
'car', ... % 3
'bicycle', ... % 4
'mbike', ... % 5
'non_mot_vhcl', ... % 6
'static_person', ... % 7
'distractor', ... % 8
'occluder', ... % 9
'occluder_on_grnd', ... %10
'occluder_full', ... % 11
'reflection', ... % 12
'crowd' ... % 13
};
"""
def read_mot_results(filename, is_gt, is_ignore):
valid_labels = {1}
ignore_labels = {2, 7, 8, 12}
results_dict = dict()
if os.path.isfile(filename):
with open(filename, 'r') as f:
for line in f.readlines():
linelist = line.split(',')
if len(linelist) < 7:
continue
fid = int(linelist[0])
if fid < 1:
continue
results_dict.setdefault(fid, list())
box_size = float(linelist[4]) * float(linelist[5])
if is_gt:
if 'MOT16-' in filename or 'MOT17-' in filename:
label = int(float(linelist[7]))
mark = int(float(linelist[6]))
if mark == 0 or label not in valid_labels:
continue
score = 1
elif is_ignore:
if 'MOT16-' in filename or 'MOT17-' in filename:
label = int(float(linelist[7]))
vis_ratio = float(linelist[8])
if label not in ignore_labels and vis_ratio >= 0:
continue
else:
continue
score = 1
else:
score = float(linelist[6])
#if box_size > 7000:
#if box_size <= 7000 or box_size >= 15000:
#if box_size < 15000:
#continue
tlwh = tuple(map(float, linelist[2:6]))
target_id = int(linelist[1])
results_dict[fid].append((tlwh, target_id, score))
return results_dict
def unzip_objs(objs):
if len(objs) > 0:
tlwhs, ids, scores = zip(*objs)
else:
tlwhs, ids, scores = [], [], []
tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)
return tlwhs, ids, scores | PypiClean |
/sportmonks_python_sdk-0.1.0-py3-none-any.whl/sportmonks/paths/version_sport_news_post_match/get.py | from dataclasses import dataclass
import typing_extensions
import urllib3
from sportmonks.request_before_hook import request_before_hook
import json
from urllib3._collections import HTTPHeaderDict
from sportmonks.api_response import AsyncGeneratorResponse
from sportmonks import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from sportmonks import schemas # noqa: F401
from . import path
# Path params
VersionSchema = schemas.StrSchema
SportSchema = schemas.StrSchema
RequestRequiredPathParams = typing_extensions.TypedDict(
'RequestRequiredPathParams',
{
}
)
RequestOptionalPathParams = typing_extensions.TypedDict(
'RequestOptionalPathParams',
{
'version': typing.Union[VersionSchema, str, ],
'sport': typing.Union[SportSchema, str, ],
},
total=False
)
class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
pass
request_path_version = api_client.PathParameter(
name="version",
style=api_client.ParameterStyle.SIMPLE,
schema=VersionSchema,
)
request_path_sport = api_client.PathParameter(
name="sport",
style=api_client.ParameterStyle.SIMPLE,
schema=SportSchema,
)
_auth = [
'apikeyAuth',
]
SchemaFor0ResponseBodyTextPlain = schemas.StrSchema
@dataclass
class ApiResponseForDefault(api_client.ApiResponse):
body: str
@dataclass
class ApiResponseForDefaultAsync(api_client.AsyncApiResponse):
body: str
_response_for_default = api_client.OpenApiResponse(
response_cls=ApiResponseForDefault,
content={
'text/plain': api_client.MediaType(
schema=SchemaFor0ResponseBodyTextPlain),
},
)
_status_code_to_response = {
'default': _response_for_default,
}
_all_accept_content_types = (
'text/plain',
)
class BaseApi(api_client.Api):
def _news_all_post_match_mapped_args(
self,
version: typing.Optional[str] = None,
sport: typing.Optional[str] = None,
) -> api_client.MappedArgs:
args: api_client.MappedArgs = api_client.MappedArgs()
_path_params = {}
if version is not None:
_path_params["version"] = version
if sport is not None:
_path_params["sport"] = sport
args.path = _path_params
return args
async def _anews_all_post_match_oapg(
self,
path_params: typing.Optional[dict] = {},
skip_deserialization: bool = True,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
) -> typing.Union[
ApiResponseForDefaultAsync,
api_client.ApiResponseWithoutDeserializationAsync,
AsyncGeneratorResponse,
]:
"""
All Post Match
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params)
used_path = path.value
_path_params = {}
for parameter in (
request_path_version,
request_path_sport,
):
parameter_data = path_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
serialized_data = parameter.serialize(parameter_data)
_path_params.update(serialized_data)
for k, v in _path_params.items():
used_path = used_path.replace('{%s}' % k, v)
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
method = 'get'.upper()
request_before_hook(
resource_path=used_path,
method=method,
configuration=self.api_client.configuration,
auth_settings=_auth,
headers=_headers,
)
response = await self.api_client.async_call_api(
resource_path=used_path,
method=method,
headers=_headers,
auth_settings=_auth,
timeout=timeout,
)
if stream:
if not 200 <= response.http_response.status <= 299:
body = (await response.http_response.content.read()).decode("utf-8")
raise exceptions.ApiStreamingException(
status=response.http_response.status,
reason=response.http_response.reason,
body=body,
)
async def stream_iterator():
"""
iterates over response.http_response.content and closes connection once iteration has finished
"""
async for line in response.http_response.content:
if line == b'\r\n':
continue
yield line
response.http_response.close()
await response.session.close()
return AsyncGeneratorResponse(
content=stream_iterator(),
headers=response.http_response.headers,
status=response.http_response.status,
response=response.http_response
)
response_for_status = _status_code_to_response.get(str(response.http_response.status))
if response_for_status:
api_response = await response_for_status.deserialize_async(
response,
self.api_client.configuration,
skip_deserialization=skip_deserialization
)
else:
default_response = _status_code_to_response.get('default')
if default_response:
api_response = default_response.deserialize(
response,
self.api_client.configuration,
skip_deserialization=skip_deserialization
)
else:
api_response = api_client.ApiResponseWithoutDeserializationAsync(
response=response.http_response,
round_trip_time=response.round_trip_time,
status=response.http_response.status,
headers=response.http_response.headers,
)
if not 200 <= api_response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
# cleanup session / response
response.http_response.close()
await response.session.close()
return api_response
def _news_all_post_match_oapg(
self,
path_params: typing.Optional[dict] = {},
skip_deserialization: bool = True,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
accept_content_types: typing.Tuple[str] = _all_accept_content_types,
stream: bool = False,
) -> typing.Union[
ApiResponseForDefault,
api_client.ApiResponseWithoutDeserialization,
]:
"""
All Post Match
:param skip_deserialization: If true then api_response.response will be set but
api_response.body and api_response.headers will not be deserialized into schema
class instances
"""
self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params)
used_path = path.value
_path_params = {}
for parameter in (
request_path_version,
request_path_sport,
):
parameter_data = path_params.get(parameter.name, schemas.unset)
if parameter_data is schemas.unset:
continue
serialized_data = parameter.serialize(parameter_data)
_path_params.update(serialized_data)
for k, v in _path_params.items():
used_path = used_path.replace('{%s}' % k, v)
_headers = HTTPHeaderDict()
# TODO add cookie handling
if accept_content_types:
for accept_content_type in accept_content_types:
_headers.add('Accept', accept_content_type)
method = 'get'.upper()
request_before_hook(
resource_path=used_path,
method=method,
configuration=self.api_client.configuration,
auth_settings=_auth,
headers=_headers,
)
response = self.api_client.call_api(
resource_path=used_path,
method=method,
headers=_headers,
auth_settings=_auth,
timeout=timeout,
)
response_for_status = _status_code_to_response.get(str(response.http_response.status))
if response_for_status:
api_response = response_for_status.deserialize(
response,
self.api_client.configuration,
skip_deserialization=skip_deserialization
)
else:
default_response = _status_code_to_response.get('default')
if default_response:
api_response = default_response.deserialize(
response,
self.api_client.configuration,
skip_deserialization=skip_deserialization
)
else:
api_response = api_client.ApiResponseWithoutDeserialization(
response=response.http_response,
round_trip_time=response.round_trip_time,
status=response.http_response.status,
headers=response.http_response.headers,
)
if not 200 <= api_response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
class NewsAllPostMatch(BaseApi):
# this class is used by api classes that refer to endpoints with operationId fn names
async def anews_all_post_match(
self,
version: typing.Optional[str] = None,
sport: typing.Optional[str] = None,
) -> typing.Union[
ApiResponseForDefaultAsync,
api_client.ApiResponseWithoutDeserializationAsync,
AsyncGeneratorResponse,
]:
args = self._news_all_post_match_mapped_args(
version=version,
sport=sport,
)
return await self._anews_all_post_match_oapg(
path_params=args.path,
)
def news_all_post_match(
self,
version: typing.Optional[str] = None,
sport: typing.Optional[str] = None,
) -> typing.Union[
ApiResponseForDefault,
api_client.ApiResponseWithoutDeserialization,
]:
args = self._news_all_post_match_mapped_args(
version=version,
sport=sport,
)
return self._news_all_post_match_oapg(
path_params=args.path,
)
class ApiForget(BaseApi):
# this class is used by api classes that refer to endpoints by path and http method names
async def aget(
self,
version: typing.Optional[str] = None,
sport: typing.Optional[str] = None,
) -> typing.Union[
ApiResponseForDefaultAsync,
api_client.ApiResponseWithoutDeserializationAsync,
AsyncGeneratorResponse,
]:
args = self._news_all_post_match_mapped_args(
version=version,
sport=sport,
)
return await self._anews_all_post_match_oapg(
path_params=args.path,
)
def get(
self,
version: typing.Optional[str] = None,
sport: typing.Optional[str] = None,
) -> typing.Union[
ApiResponseForDefault,
api_client.ApiResponseWithoutDeserialization,
]:
args = self._news_all_post_match_mapped_args(
version=version,
sport=sport,
)
return self._news_all_post_match_oapg(
path_params=args.path,
) | PypiClean |
/os_vif-3.2.0-py3-none-any.whl/os_vif/__init__.py |
from oslo_log import log as logging
from stevedore import extension
import os_vif.exception
import os_vif.i18n
import os_vif.objects
_EXT_MANAGER = None
LOG = logging.getLogger(__name__)
def initialize(reset=False):
"""
Loads all os_vif plugins and initializes them with a dictionary of
configuration options. These configuration options are passed as-is
to the individual VIF plugins that are loaded via stevedore.
:param reset: Recreate and load the VIF plugin extensions.
"""
global _EXT_MANAGER
if _EXT_MANAGER is None:
os_vif.objects.register_all()
if reset or (_EXT_MANAGER is None):
_EXT_MANAGER = extension.ExtensionManager(namespace='os_vif',
invoke_on_load=False)
loaded_plugins = []
for plugin_name in _EXT_MANAGER.names():
cls = _EXT_MANAGER[plugin_name].plugin
obj = cls.load(plugin_name)
LOG.debug(("Loaded VIF plugin class '%(cls)s' "
"with name '%(plugin_name)s'"),
{'cls': cls, 'plugin_name': plugin_name})
loaded_plugins.append(plugin_name)
_EXT_MANAGER[plugin_name].obj = obj
LOG.info("Loaded VIF plugins: %s", ", ".join(loaded_plugins))
def plug(vif, instance_info):
"""
Given a model of a VIF, perform operations to plug the VIF properly.
:param vif: Instance of a subclass of ``os_vif.objects.vif.VIFBase``.
:param instance_info: ``os_vif.objects.instance_info.InstanceInfo`` object.
:raises ``exception.LibraryNotInitialized`` if the user of the library
did not call ``os_vif.initialize(**config)`` before trying to
plug a VIF.
:raises ``exception.NoMatchingPlugin`` if there is no plugin for the
type of VIF supplied.
    :raises ``exception.PlugException`` if anything fails during plug
operations.
"""
if _EXT_MANAGER is None:
raise os_vif.exception.LibraryNotInitialized()
plugin_name = vif.plugin
try:
plugin = _EXT_MANAGER[plugin_name].obj
except KeyError:
raise os_vif.exception.NoMatchingPlugin(plugin_name=plugin_name)
try:
LOG.debug("Plugging vif %s", vif)
plugin.plug(vif, instance_info)
LOG.info("Successfully plugged vif %s", vif)
except Exception as err:
LOG.error("Failed to plug vif %(vif)s",
{"vif": vif}, exc_info=True)
raise os_vif.exception.PlugException(vif=vif, err=err)
def unplug(vif, instance_info):
"""
Given a model of a VIF, perform operations to unplug the VIF properly.
:param vif: Instance of a subclass of `os_vif.objects.vif.VIFBase`.
:param instance_info: `os_vif.objects.instance_info.InstanceInfo` object.
:raises `exception.LibraryNotInitialized` if the user of the library
did not call os_vif.initialize(**config) before trying to
plug a VIF.
:raises `exception.NoMatchingPlugin` if there is no plugin for the
type of VIF supplied.
:raises `exception.UnplugException` if anything fails during unplug
operations.
"""
if _EXT_MANAGER is None:
raise os_vif.exception.LibraryNotInitialized()
plugin_name = vif.plugin
try:
plugin = _EXT_MANAGER[plugin_name].obj
except KeyError:
raise os_vif.exception.NoMatchingPlugin(plugin_name=plugin_name)
try:
LOG.debug("Unplugging vif %s", vif)
plugin.unplug(vif, instance_info)
LOG.info("Successfully unplugged vif %s", vif)
except Exception as err:
LOG.error("Failed to unplug vif %(vif)s",
{"vif": vif}, exc_info=True)
raise os_vif.exception.UnplugException(vif=vif, err=err)
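# Usage sketch (illustrative only): the expected call order is initialize() once per
# process, then plug()/unplug() per VIF. The ``vif`` and ``instance_info`` objects below
# are placeholders; real callers build them from ``os_vif.objects``.
#
#   import os_vif
#
#   os_vif.initialize()
#   os_vif.plug(vif, instance_info)
#   # ... instance runs ...
#   os_vif.unplug(vif, instance_info)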
def host_info(permitted_vif_type_names=None):
"""
:param permitted_vif_type_names: list of VIF object names
Get information about the host platform configuration to be
provided to the network manager. This will include information
about what plugins are installed in the host
If permitted_vif_type_names is not None, the returned HostInfo
will be filtered such that it only includes plugins which
support one of the listed VIF types. This allows the caller
to filter out impls which are not compatible with the current
usage configuration. For example, to remove VIFVHostUser if
the guest does not support shared memory.
:returns: a os_vif.host_info.HostInfo class instance
"""
if _EXT_MANAGER is None:
raise os_vif.exception.LibraryNotInitialized()
plugins = [
_EXT_MANAGER[name].obj.describe()
for name in sorted(_EXT_MANAGER.names())
]
info = os_vif.objects.host_info.HostInfo(plugin_info=plugins)
if permitted_vif_type_names is not None:
info.filter_vif_types(permitted_vif_type_names)
return info | PypiClean |
/ada-py-0.0.40a4.tar.gz/ada-py-0.0.40a4/src/ada/visualize/formats/threejs/write_threejs_json.py | import json
import os
import pathlib
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ada import Assembly
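# Usage sketch (illustrative only): build a small Assembly and export it as a three.js
# JSON scene. The Part/Beam construction below is an assumption based on typical ada-py
# examples; any Assembly containing solid geometry should work.
#
#   import ada
#
#   a = ada.Assembly("MyAssembly") / (ada.Part("MyPart") / ada.Beam("bm1", (0, 0, 0), (1, 0, 0), "IPE300"))
#   to_three_json(a, "output/model.json")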
def to_three_json(assembly: "Assembly", output_file_path):
from OCC.Core.Tesselator import ShapeTesselator
quality = 1.0
render_edges = False
parallel = True
total_json = []
for p in assembly.parts.values():
for obj in p.get_all_physical_objects():
geom = obj.solid()
tess = ShapeTesselator(geom)
tess.Compute(compute_edges=render_edges, mesh_quality=quality, parallel=parallel)
res = tess.ExportShapeToThreejsJSONString(obj.name)
total_json.append(res)
output = {
"metadata": {"version": 4.3, "type": "Object", "generator": "ObjectExporter"},
"textures": [],
"images": [],
"geometries": [
{
"uuid": "0A8F2988-626F-411C-BD6A-AC656C4E6878",
"type": "BufferGeometry",
"data": {
"attributes": {
"position": {
"itemSize": 3,
"type": "Float32Array",
"array": [1, 1, 0, 1, -1, 0, -1, -1, 0, -1, 1, 0],
"normalized": False,
},
"normal": {
"itemSize": 3,
"type": "Float32Array",
"array": [0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1],
"normalized": False,
},
"uv": {
"itemSize": 2,
"type": "Float32Array",
"array": [1, 1, 1, 0, 0, 0, 0, 1],
"normalized": False,
},
},
# // type of index must be Uint8Array or Uint16Array.
# // # vertices thus cannot exceed 255 or 65535 respectively.
# // The current parser is able to read the index array
# // if it is nested in the attributes object, but such
# // syntax is no longer encouraged.
"index": {"type": "Uint16Array", "array": [0, 1, 2, 0, 2, 3]},
"boundingSphere": {"center": [0, 0, 0], "radius": 1},
},
}
],
"materials": [],
"object": {
"uuid": "378FAA8D-0888-4249-8701-92D1C1F37C51",
"type": "Scene",
"matrix": [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1],
"children": [
{
"uuid": "E7B44C44-DD75-4C29-B571-21AD6AEF0CA9",
"name": "SharedVertexTest",
"type": "Mesh",
"geometry": "0A8F2988-626F-411C-BD6A-AC656C4E6878",
"matrix": [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1],
}
],
},
}
output_file_path = pathlib.Path(output_file_path)
os.makedirs(output_file_path.parent, exist_ok=True)
with open(output_file_path, "w") as f:
json.dump(output, f, indent=4) | PypiClean |
/atriumsports_sdk-1.6.0.tar.gz/atriumsports_sdk-1.6.0/atriumsports/datacore/openapi/api/video_streams_available_api.py | # noqa: E501
import io
import logging
import re # noqa: F401
import warnings
from datetime import datetime
from typing import Optional
from pydantic import Field, StrictBool, StrictInt, StrictStr, ValidationError, conint, constr, validate_arguments
from typing_extensions import Annotated
from atriumsports.datacore.openapi.api_client import ApiClient
from atriumsports.datacore.openapi.api_response import ApiResponse
from atriumsports.datacore.openapi.exceptions import ApiTypeError, ApiValueError # noqa: F401
from atriumsports.datacore.openapi.models.video_stream_outputs_response import VideoStreamOutputsResponse
logger = logging.getLogger("openapi")
class VideoStreamsAvailableApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient.get_default()
self.api_client = api_client
@validate_arguments
def video_stream_outputs_fixture_list(
self,
fixture_id: Annotated[StrictStr, Field(..., description="The unique identifier of the fixture")],
organization_id: Annotated[
constr(strict=True, max_length=5, min_length=5),
Field(..., description="The unique identifier of the organization"),
],
sport: Annotated[StrictStr, Field(..., description="Sport name")],
added: Annotated[
Optional[datetime], Field(description="Record was added after this date/time. In UTC.")
] = None,
content: Annotated[
Optional[constr(strict=True, max_length=30)],
Field(
description="Content of the stream >- `CLEAN` Output signal is the same as the input signal >- `PROGRAM` Score overlays and other enhancements have been added to the stream "
),
] = None,
external: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of fields that will instead be interpreted as an externalId. See [External Ids](#section/Introduction/External-Ids) for more information."
),
] = None,
feed_type: Annotated[
Optional[constr(strict=True, max_length=30)],
Field(
description="Type of video input >- `ADDITIONAL_ANGLE` Additional angle >- `LOW_LATENCY` Low Latency >- `PRIMARY` Primary "
),
] = None,
fields: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of fields to display. The response will only display these fields. See [Partial Response](#section/Partial-Response) section for more information."
),
] = None,
hide_null: Annotated[
Optional[StrictBool], Field(description="Don't display data fields with null values or empty structures")
] = None,
include: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of resource types to include. See [Resource Inclusion](#section/Introduction/Resource-Inclusion) for more information."
),
] = None,
limit: Annotated[
Optional[conint(strict=True, le=1000, ge=1)],
Field(
description="The maximum number of records to return. See [Pagination](#section/Introduction/Pagination) for more information."
),
] = None,
locale: Annotated[
Optional[constr(strict=True, max_length=5, min_length=5)], Field(description="The locale of the video")
] = None,
offset: Annotated[
Optional[StrictInt],
Field(
description="The offset of the records. See [Pagination](#section/Introduction/Pagination) for more information."
),
] = None,
provider: Annotated[
Optional[constr(strict=True, max_length=100)], Field(description="The unique code for the video provider")
] = None,
source_number: Annotated[
Optional[StrictInt], Field(description="Unique identifier for the video source")
] = None,
updated: Annotated[
Optional[datetime], Field(description="Record was modified after this date/time. In UTC.")
] = None,
**kwargs
) -> VideoStreamOutputsResponse: # noqa: E501
"""List available video streams for a fixture # noqa: E501
Display the list of video streams for a fixture # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.video_stream_outputs_fixture_list(fixture_id, organization_id, sport, added, content, external, feed_type, fields, hide_null, include, limit, locale, offset, provider, source_number, updated, async_req=True)
>>> result = thread.get()
:param fixture_id: The unique identifier of the fixture (required)
:type fixture_id: str
:param organization_id: The unique identifier of the organization (required)
:type organization_id: str
:param sport: Sport name (required)
:type sport: str
:param added: Record was added after this date/time. In UTC.
:type added: datetime
:param content: Content of the stream >- `CLEAN` Output signal is the same as the input signal >- `PROGRAM` Score overlays and other enhancements have been added to the stream
:type content: str
:param external: A comma separated list of fields that will instead be interpreted as an externalId. See [External Ids](#section/Introduction/External-Ids) for more information.
:type external: str
:param feed_type: Type of video input >- `ADDITIONAL_ANGLE` Additional angle >- `LOW_LATENCY` Low Latency >- `PRIMARY` Primary
:type feed_type: str
:param fields: A comma separated list of fields to display. The response will only display these fields. See [Partial Response](#section/Partial-Response) section for more information.
:type fields: str
:param hide_null: Don't display data fields with null values or empty structures
:type hide_null: bool
:param include: A comma separated list of resource types to include. See [Resource Inclusion](#section/Introduction/Resource-Inclusion) for more information.
:type include: str
:param limit: The maximum number of records to return. See [Pagination](#section/Introduction/Pagination) for more information.
:type limit: int
:param locale: The locale of the video
:type locale: str
:param offset: The offset of the records. See [Pagination](#section/Introduction/Pagination) for more information.
:type offset: int
:param provider: The unique code for the video provider
:type provider: str
:param source_number: Unique identifier for the video source
:type source_number: int
:param updated: Record was modified after this date/time. In UTC.
:type updated: datetime
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: VideoStreamOutputsResponse
"""
kwargs["_return_http_data_only"] = True
if "_preload_content" in kwargs:
raise ValueError(
"Error! Please call the video_stream_outputs_fixture_list_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
)
try:
return self.video_stream_outputs_fixture_list_with_http_info(
fixture_id,
organization_id,
sport,
added,
content,
external,
feed_type,
fields,
hide_null,
include,
limit,
locale,
offset,
provider,
source_number,
updated,
**kwargs
) # noqa: E501
except Exception as e:
logger.error("Exception when calling VideoStreamsAvailableApi->video_stream_outputs_fixture_list: %s\n" % e)
raise
@validate_arguments
def video_stream_outputs_fixture_list_with_http_info(
self,
fixture_id: Annotated[StrictStr, Field(..., description="The unique identifier of the fixture")],
organization_id: Annotated[
constr(strict=True, max_length=5, min_length=5),
Field(..., description="The unique identifier of the organization"),
],
sport: Annotated[StrictStr, Field(..., description="Sport name")],
added: Annotated[
Optional[datetime], Field(description="Record was added after this date/time. In UTC.")
] = None,
content: Annotated[
Optional[constr(strict=True, max_length=30)],
Field(
description="Content of the stream >- `CLEAN` Output signal is the same as the input signal >- `PROGRAM` Score overlays and other enhancements have been added to the stream "
),
] = None,
external: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of fields that will instead be interpreted as an externalId. See [External Ids](#section/Introduction/External-Ids) for more information."
),
] = None,
feed_type: Annotated[
Optional[constr(strict=True, max_length=30)],
Field(
description="Type of video input >- `ADDITIONAL_ANGLE` Additional angle >- `LOW_LATENCY` Low Latency >- `PRIMARY` Primary "
),
] = None,
fields: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of fields to display. The response will only display these fields. See [Partial Response](#section/Partial-Response) section for more information."
),
] = None,
hide_null: Annotated[
Optional[StrictBool], Field(description="Don't display data fields with null values or empty structures")
] = None,
include: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of resource types to include. See [Resource Inclusion](#section/Introduction/Resource-Inclusion) for more information."
),
] = None,
limit: Annotated[
Optional[conint(strict=True, le=1000, ge=1)],
Field(
description="The maximum number of records to return. See [Pagination](#section/Introduction/Pagination) for more information."
),
] = None,
locale: Annotated[
Optional[constr(strict=True, max_length=5, min_length=5)], Field(description="The locale of the video")
] = None,
offset: Annotated[
Optional[StrictInt],
Field(
description="The offset of the records. See [Pagination](#section/Introduction/Pagination) for more information."
),
] = None,
provider: Annotated[
Optional[constr(strict=True, max_length=100)], Field(description="The unique code for the video provider")
] = None,
source_number: Annotated[
Optional[StrictInt], Field(description="Unique identifier for the video source")
] = None,
updated: Annotated[
Optional[datetime], Field(description="Record was modified after this date/time. In UTC.")
] = None,
**kwargs
) -> ApiResponse: # noqa: E501
"""List available video streams for a fixture # noqa: E501
Display the list of video streams for a fixture # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.video_stream_outputs_fixture_list_with_http_info(fixture_id, organization_id, sport, added, content, external, feed_type, fields, hide_null, include, limit, locale, offset, provider, source_number, updated, async_req=True)
>>> result = thread.get()
:param fixture_id: The unique identifier of the fixture (required)
:type fixture_id: str
:param organization_id: The unique identifier of the organization (required)
:type organization_id: str
:param sport: Sport name (required)
:type sport: str
:param added: Record was added after this date/time. In UTC.
:type added: datetime
:param content: Content of the stream >- `CLEAN` Output signal is the same as the input signal >- `PROGRAM` Score overlays and other enhancements have been added to the stream
:type content: str
:param external: A comma separated list of fields that will instead be interpreted as an externalId. See [External Ids](#section/Introduction/External-Ids) for more information.
:type external: str
:param feed_type: Type of video input >- `ADDITIONAL_ANGLE` Additional angle >- `LOW_LATENCY` Low Latency >- `PRIMARY` Primary
:type feed_type: str
:param fields: A comma separated list of fields to display. The response will only display these fields. See [Partial Response](#section/Partial-Response) section for more information.
:type fields: str
:param hide_null: Don't display data fields with null values or empty structures
:type hide_null: bool
:param include: A comma separated list of resource types to include. See [Resource Inclusion](#section/Introduction/Resource-Inclusion) for more information.
:type include: str
:param limit: The maximum number of records to return. See [Pagination](#section/Introduction/Pagination) for more information.
:type limit: int
:param locale: The locale of the video
:type locale: str
:param offset: The offset of the records. See [Pagination](#section/Introduction/Pagination) for more information.
:type offset: int
:param provider: The unique code for the video provider
:type provider: str
:param source_number: Unique identifier for the video source
:type source_number: int
:param updated: Record was modified after this date/time. In UTC.
:type updated: datetime
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the ApiResponse.data will
be set to none and raw_data will store the
HTTP response body without reading/decoding.
Default is True.
:type _preload_content: bool, optional
:param _return_http_data_only: response data instead of ApiResponse
object with status code, headers, etc
:type _return_http_data_only: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:type _content_type: string, optional: force content-type for the request
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(VideoStreamOutputsResponse, status_code(int), headers(HTTPHeaderDict))
"""
_params = locals()
_all_params = [
"fixture_id",
"organization_id",
"sport",
"added",
"content",
"external",
"feed_type",
"fields",
"hide_null",
"include",
"limit",
"locale",
"offset",
"provider",
"source_number",
"updated",
]
_all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
"_request_auth",
"_content_type",
"_headers",
]
)
# validate the arguments
for _key, _val in _params["kwargs"].items():
if _key not in _all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'" " to method video_stream_outputs_fixture_list" % _key
)
_params[_key] = _val
del _params["kwargs"]
_collection_formats = {}
# process the path parameters
_path_params = {}
if _params["fixture_id"]:
_path_params["fixtureId"] = _params["fixture_id"]
if _params["organization_id"]:
_path_params["organizationId"] = _params["organization_id"]
if _params["sport"]:
_path_params["sport"] = _params["sport"]
# process the query parameters
_query_params = []
if _params.get("added") is not None: # noqa: E501
if isinstance(_params["added"], datetime):
_query_params.append(
("added", _params["added"].strftime(self.api_client.configuration.datetime_format))
)
else:
_query_params.append(("added", _params["added"]))
if _params.get("content") is not None: # noqa: E501
_query_params.append(("content", _params["content"]))
if _params.get("external") is not None: # noqa: E501
_query_params.append(("external", _params["external"]))
if _params.get("feed_type") is not None: # noqa: E501
_query_params.append(("feedType", _params["feed_type"]))
if _params.get("fields") is not None: # noqa: E501
_query_params.append(("fields", _params["fields"]))
if _params.get("hide_null") is not None: # noqa: E501
_query_params.append(("hideNull", _params["hide_null"]))
if _params.get("include") is not None: # noqa: E501
_query_params.append(("include", _params["include"]))
if _params.get("limit") is not None: # noqa: E501
_query_params.append(("limit", _params["limit"]))
if _params.get("locale") is not None: # noqa: E501
_query_params.append(("locale", _params["locale"]))
if _params.get("offset") is not None: # noqa: E501
_query_params.append(("offset", _params["offset"]))
if _params.get("provider") is not None: # noqa: E501
_query_params.append(("provider", _params["provider"]))
if _params.get("source_number") is not None: # noqa: E501
_query_params.append(("sourceNumber", _params["source_number"]))
if _params.get("updated") is not None: # noqa: E501
if isinstance(_params["updated"], datetime):
_query_params.append(
("updated", _params["updated"].strftime(self.api_client.configuration.datetime_format))
)
else:
_query_params.append(("updated", _params["updated"]))
# process the header parameters
_header_params = dict(_params.get("_headers", {}))
# process the form parameters
_form_params = []
_files = {}
# process the body parameter
_body_params = None
# set the HTTP header `Accept`
_header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) # noqa: E501
# authentication setting
_auth_settings = ["OAuth2"] # noqa: E501
_response_types_map = {
"200": "VideoStreamOutputsResponse",
}
return self.api_client.call_api(
"/{sport}/o/{organizationId}/fixtures/{fixtureId}/video/streams/available",
"GET",
_path_params,
_query_params,
_header_params,
body=_body_params,
post_params=_form_params,
files=_files,
response_types_map=_response_types_map,
auth_settings=_auth_settings,
async_req=_params.get("async_req"),
_return_http_data_only=_params.get("_return_http_data_only"), # noqa: E501
_preload_content=_params.get("_preload_content", True),
_request_timeout=_params.get("_request_timeout"),
collection_formats=_collection_formats,
_request_auth=_params.get("_request_auth"),
)
@validate_arguments
def video_stream_outputs_org_list(
self,
organization_id: Annotated[
constr(strict=True, max_length=5, min_length=5),
Field(..., description="The unique identifier of the organization"),
],
sport: Annotated[StrictStr, Field(..., description="Sport name")],
added: Annotated[
Optional[datetime], Field(description="Record was added after this date/time. In UTC.")
] = None,
competition_id: Annotated[
Optional[StrictStr], Field(description="The unique identifier of the competition")
] = None,
content: Annotated[
Optional[constr(strict=True, max_length=30)],
Field(
description="Content of the stream >- `CLEAN` Output signal is the same as the input signal >- `PROGRAM` Score overlays and other enhancements have been added to the stream "
),
] = None,
external: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of fields that will instead be interpreted as an externalId. See [External Ids](#section/Introduction/External-Ids) for more information."
),
] = None,
feed_type: Annotated[
Optional[constr(strict=True, max_length=30)],
Field(
description="Type of video input >- `ADDITIONAL_ANGLE` Additional angle >- `LOW_LATENCY` Low Latency >- `PRIMARY` Primary "
),
] = None,
fields: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of fields to display. The response will only display these fields. See [Partial Response](#section/Partial-Response) section for more information."
),
] = None,
from_time_utc: Annotated[
Optional[datetime], Field(description="Where fixture start time (in UTC) >= this value")
] = None,
hide_null: Annotated[
Optional[StrictBool], Field(description="Don't display data fields with null values or empty structures")
] = None,
include: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of resource types to include. See [Resource Inclusion](#section/Introduction/Resource-Inclusion) for more information."
),
] = None,
limit: Annotated[
Optional[conint(strict=True, le=1000, ge=1)],
Field(
description="The maximum number of records to return. See [Pagination](#section/Introduction/Pagination) for more information."
),
] = None,
locale: Annotated[
Optional[constr(strict=True, max_length=5, min_length=5)], Field(description="The locale of the video")
] = None,
master_venue_id: Annotated[
Optional[StrictStr], Field(description="The unique identifier of the master venue")
] = None,
offset: Annotated[
Optional[StrictInt],
Field(
description="The offset of the records. See [Pagination](#section/Introduction/Pagination) for more information."
),
] = None,
provider: Annotated[
Optional[constr(strict=True, max_length=100)], Field(description="The unique code for the video provider")
] = None,
season_id: Annotated[Optional[StrictStr], Field(description="The unique identifier of the season")] = None,
source_number: Annotated[
Optional[StrictInt], Field(description="Unique identifier for the video source")
] = None,
to_time_utc: Annotated[
Optional[datetime], Field(description="Where fixture start time (in UTC) <= this value")
] = None,
updated: Annotated[
Optional[datetime], Field(description="Record was modified after this date/time. In UTC.")
] = None,
venue_id: Annotated[Optional[StrictStr], Field(description="The unique identifier of the venue")] = None,
**kwargs
) -> VideoStreamOutputsResponse: # noqa: E501
"""List available video streams for an organization # noqa: E501
Display the list of video streams for an organization. Streams for completed fixtures and those older than 12 hours, will not be displayed. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.video_stream_outputs_org_list(organization_id, sport, added, competition_id, content, external, feed_type, fields, from_time_utc, hide_null, include, limit, locale, master_venue_id, offset, provider, season_id, source_number, to_time_utc, updated, venue_id, async_req=True)
>>> result = thread.get()
:param organization_id: The unique identifier of the organization (required)
:type organization_id: str
:param sport: Sport name (required)
:type sport: str
:param added: Record was added after this date/time. In UTC.
:type added: datetime
:param competition_id: The unique identifier of the competition
:type competition_id: str
:param content: Content of the stream >- `CLEAN` Output signal is the same as the input signal >- `PROGRAM` Score overlays and other enhancements have been added to the stream
:type content: str
:param external: A comma separated list of fields that will instead be interpreted as an externalId. See [External Ids](#section/Introduction/External-Ids) for more information.
:type external: str
:param feed_type: Type of video input >- `ADDITIONAL_ANGLE` Additional angle >- `LOW_LATENCY` Low Latency >- `PRIMARY` Primary
:type feed_type: str
:param fields: A comma separated list of fields to display. The response will only display these fields. See [Partial Response](#section/Partial-Response) section for more information.
:type fields: str
:param from_time_utc: Where fixture start time (in UTC) >= this value
:type from_time_utc: datetime
:param hide_null: Don't display data fields with null values or empty structures
:type hide_null: bool
:param include: A comma separated list of resource types to include. See [Resource Inclusion](#section/Introduction/Resource-Inclusion) for more information.
:type include: str
:param limit: The maximum number of records to return. See [Pagination](#section/Introduction/Pagination) for more information.
:type limit: int
:param locale: The locale of the video
:type locale: str
:param master_venue_id: The unique identifier of the master venue
:type master_venue_id: str
:param offset: The offset of the records. See [Pagination](#section/Introduction/Pagination) for more information.
:type offset: int
:param provider: The unique code for the video provider
:type provider: str
:param season_id: The unique identifier of the season
:type season_id: str
:param source_number: Unique identifier for the video source
:type source_number: int
:param to_time_utc: Where fixture start time (in UTC) <= this value
:type to_time_utc: datetime
:param updated: Record was modified after this date/time. In UTC.
:type updated: datetime
:param venue_id: The unique identifier of the venue
:type venue_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: VideoStreamOutputsResponse
"""
kwargs["_return_http_data_only"] = True
if "_preload_content" in kwargs:
raise ValueError(
"Error! Please call the video_stream_outputs_org_list_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
)
try:
return self.video_stream_outputs_org_list_with_http_info(
organization_id,
sport,
added,
competition_id,
content,
external,
feed_type,
fields,
from_time_utc,
hide_null,
include,
limit,
locale,
master_venue_id,
offset,
provider,
season_id,
source_number,
to_time_utc,
updated,
venue_id,
**kwargs
) # noqa: E501
except Exception as e:
logger.error("Exception when calling VideoStreamsAvailableApi->video_stream_outputs_org_list: %s\n" % e)
raise
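# A minimal usage sketch for the synchronous method above (kept as comments so the
# generated module is unchanged). The import path and configuration attributes are
# assumptions based on typical openapi-generator Python output; the organization id,
# sport, and response attribute names below are illustrative only.
#
#     from my_generated_client import ApiClient, Configuration            # hypothetical package name
#     from my_generated_client.api import VideoStreamsAvailableApi        # hypothetical module path
#
#     config = Configuration()
#     config.access_token = "..."                                         # OAuth2, per _auth_settings below
#     api = VideoStreamsAvailableApi(ApiClient(config))
#     # organization_id must be exactly 5 characters (see the constr above)
#     response = api.video_stream_outputs_org_list("b1e35", "BASKETBALL", limit=10)
#     print(response)                                                     # a VideoStreamOutputsResponse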
@validate_arguments
def video_stream_outputs_org_list_with_http_info(
self,
organization_id: Annotated[
constr(strict=True, max_length=5, min_length=5),
Field(..., description="The unique identifier of the organization"),
],
sport: Annotated[StrictStr, Field(..., description="Sport name")],
added: Annotated[
Optional[datetime], Field(description="Record was added after this date/time. In UTC.")
] = None,
competition_id: Annotated[
Optional[StrictStr], Field(description="The unique identifier of the competition")
] = None,
content: Annotated[
Optional[constr(strict=True, max_length=30)],
Field(
description="Content of the stream >- `CLEAN` Output signal is the same as the input signal >- `PROGRAM` Score overlays and other enhancements have been added to the stream "
),
] = None,
external: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of fields that will instead be interpreted as an externalId. See [External Ids](#section/Introduction/External-Ids) for more information."
),
] = None,
feed_type: Annotated[
Optional[constr(strict=True, max_length=30)],
Field(
description="Type of video input >- `ADDITIONAL_ANGLE` Additional angle >- `LOW_LATENCY` Low Latency >- `PRIMARY` Primary "
),
] = None,
fields: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of fields to display. The response will only display these fields. See [Partial Response](#section/Partial-Response) section for more information."
),
] = None,
from_time_utc: Annotated[
Optional[datetime], Field(description="Where fixture start time (in UTC) >= this value")
] = None,
hide_null: Annotated[
Optional[StrictBool], Field(description="Don't display data fields with null values or empty structures")
] = None,
include: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of resource types to include. See [Resource Inclusion](#section/Introduction/Resource-Inclusion) for more information."
),
] = None,
limit: Annotated[
Optional[conint(strict=True, le=1000, ge=1)],
Field(
description="The maximum number of records to return. See [Pagination](#section/Introduction/Pagination) for more information."
),
] = None,
locale: Annotated[
Optional[constr(strict=True, max_length=5, min_length=5)], Field(description="The locale of the video")
] = None,
master_venue_id: Annotated[
Optional[StrictStr], Field(description="The unique identifier of the master venue")
] = None,
offset: Annotated[
Optional[StrictInt],
Field(
description="The offset of the records. See [Pagination](#section/Introduction/Pagination) for more information."
),
] = None,
provider: Annotated[
Optional[constr(strict=True, max_length=100)], Field(description="The unique code for the video provider")
] = None,
season_id: Annotated[Optional[StrictStr], Field(description="The unique identifier of the season")] = None,
source_number: Annotated[
Optional[StrictInt], Field(description="Unique identifier for the video source")
] = None,
to_time_utc: Annotated[
Optional[datetime], Field(description="Where fixture start time (in UTC) <= this value")
] = None,
updated: Annotated[
Optional[datetime], Field(description="Record was modified after this date/time. In UTC.")
] = None,
venue_id: Annotated[Optional[StrictStr], Field(description="The unique identifier of the venue")] = None,
**kwargs
) -> ApiResponse: # noqa: E501
"""List available video streams for an organization # noqa: E501
Display the list of video streams for an organization. Streams for completed fixtures and those older than 12 hours, will not be displayed. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.video_stream_outputs_org_list_with_http_info(organization_id, sport, added, competition_id, content, external, feed_type, fields, from_time_utc, hide_null, include, limit, locale, master_venue_id, offset, provider, season_id, source_number, to_time_utc, updated, venue_id, async_req=True)
>>> result = thread.get()
:param organization_id: The unique identifier of the organization (required)
:type organization_id: str
:param sport: Sport name (required)
:type sport: str
:param added: Record was added after this date/time. In UTC.
:type added: datetime
:param competition_id: The unique identifier of the competition
:type competition_id: str
:param content: Content of the stream >- `CLEAN` Output signal is the same as the input signal >- `PROGRAM` Score overlays and other enhancements have been added to the stream
:type content: str
:param external: A comma separated list of fields that will instead be interpreted as an externalId. See [External Ids](#section/Introduction/External-Ids) for more information.
:type external: str
:param feed_type: Type of video input >- `ADDITIONAL_ANGLE` Additional angle >- `LOW_LATENCY` Low Latency >- `PRIMARY` Primary
:type feed_type: str
:param fields: A comma separated list of fields to display. The response will only display these fields. See [Partial Response](#section/Partial-Response) section for more information.
:type fields: str
:param from_time_utc: Where fixture start time (in UTC) >= this value
:type from_time_utc: datetime
:param hide_null: Don't display data fields with null values or empty structures
:type hide_null: bool
:param include: A comma separated list of resource types to include. See [Resource Inclusion](#section/Introduction/Resource-Inclusion) for more information.
:type include: str
:param limit: The maximum number of records to return. See [Pagination](#section/Introduction/Pagination) for more information.
:type limit: int
:param locale: The locale of the video
:type locale: str
:param master_venue_id: The unique identifier of the master venue
:type master_venue_id: str
:param offset: The offset of the records. See [Pagination](#section/Introduction/Pagination) for more information.
:type offset: int
:param provider: The unique code for the video provider
:type provider: str
:param season_id: The unique identifier of the season
:type season_id: str
:param source_number: Unique identifier for the video source
:type source_number: int
:param to_time_utc: Where fixture start time (in UTC) <= this value
:type to_time_utc: datetime
:param updated: Record was modified after this date/time. In UTC.
:type updated: datetime
:param venue_id: The unique identifier of the venue
:type venue_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the ApiResponse.data will
be set to none and raw_data will store the
HTTP response body without reading/decoding.
Default is True.
:type _preload_content: bool, optional
:param _return_http_data_only: response data instead of ApiResponse
object with status code, headers, etc
:type _return_http_data_only: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:type _content_type: string, optional: force content-type for the request
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(VideoStreamOutputsResponse, status_code(int), headers(HTTPHeaderDict))
"""
_params = locals()
_all_params = [
"organization_id",
"sport",
"added",
"competition_id",
"content",
"external",
"feed_type",
"fields",
"from_time_utc",
"hide_null",
"include",
"limit",
"locale",
"master_venue_id",
"offset",
"provider",
"season_id",
"source_number",
"to_time_utc",
"updated",
"venue_id",
]
_all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
"_request_auth",
"_content_type",
"_headers",
]
)
# validate the arguments
for _key, _val in _params["kwargs"].items():
if _key not in _all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'" " to method video_stream_outputs_org_list" % _key
)
_params[_key] = _val
del _params["kwargs"]
_collection_formats = {}
# process the path parameters
_path_params = {}
if _params["organization_id"]:
_path_params["organizationId"] = _params["organization_id"]
if _params["sport"]:
_path_params["sport"] = _params["sport"]
# process the query parameters
_query_params = []
if _params.get("added") is not None: # noqa: E501
if isinstance(_params["added"], datetime):
_query_params.append(
("added", _params["added"].strftime(self.api_client.configuration.datetime_format))
)
else:
_query_params.append(("added", _params["added"]))
if _params.get("competition_id") is not None: # noqa: E501
_query_params.append(("competitionId", _params["competition_id"]))
if _params.get("content") is not None: # noqa: E501
_query_params.append(("content", _params["content"]))
if _params.get("external") is not None: # noqa: E501
_query_params.append(("external", _params["external"]))
if _params.get("feed_type") is not None: # noqa: E501
_query_params.append(("feedType", _params["feed_type"]))
if _params.get("fields") is not None: # noqa: E501
_query_params.append(("fields", _params["fields"]))
if _params.get("from_time_utc") is not None: # noqa: E501
if isinstance(_params["from_time_utc"], datetime):
_query_params.append(
("fromTimeUTC", _params["from_time_utc"].strftime(self.api_client.configuration.datetime_format))
)
else:
_query_params.append(("fromTimeUTC", _params["from_time_utc"]))
if _params.get("hide_null") is not None: # noqa: E501
_query_params.append(("hideNull", _params["hide_null"]))
if _params.get("include") is not None: # noqa: E501
_query_params.append(("include", _params["include"]))
if _params.get("limit") is not None: # noqa: E501
_query_params.append(("limit", _params["limit"]))
if _params.get("locale") is not None: # noqa: E501
_query_params.append(("locale", _params["locale"]))
if _params.get("master_venue_id") is not None: # noqa: E501
_query_params.append(("masterVenueId", _params["master_venue_id"]))
if _params.get("offset") is not None: # noqa: E501
_query_params.append(("offset", _params["offset"]))
if _params.get("provider") is not None: # noqa: E501
_query_params.append(("provider", _params["provider"]))
if _params.get("season_id") is not None: # noqa: E501
_query_params.append(("seasonId", _params["season_id"]))
if _params.get("source_number") is not None: # noqa: E501
_query_params.append(("sourceNumber", _params["source_number"]))
if _params.get("to_time_utc") is not None: # noqa: E501
if isinstance(_params["to_time_utc"], datetime):
_query_params.append(
("toTimeUTC", _params["to_time_utc"].strftime(self.api_client.configuration.datetime_format))
)
else:
_query_params.append(("toTimeUTC", _params["to_time_utc"]))
if _params.get("updated") is not None: # noqa: E501
if isinstance(_params["updated"], datetime):
_query_params.append(
("updated", _params["updated"].strftime(self.api_client.configuration.datetime_format))
)
else:
_query_params.append(("updated", _params["updated"]))
if _params.get("venue_id") is not None: # noqa: E501
_query_params.append(("venueId", _params["venue_id"]))
# process the header parameters
_header_params = dict(_params.get("_headers", {}))
# process the form parameters
_form_params = []
_files = {}
# process the body parameter
_body_params = None
# set the HTTP header `Accept`
_header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) # noqa: E501
# authentication setting
_auth_settings = ["OAuth2"] # noqa: E501
_response_types_map = {
"200": "VideoStreamOutputsResponse",
}
return self.api_client.call_api(
"/{sport}/o/{organizationId}/video/streams/available",
"GET",
_path_params,
_query_params,
_header_params,
body=_body_params,
post_params=_form_params,
files=_files,
response_types_map=_response_types_map,
auth_settings=_auth_settings,
async_req=_params.get("async_req"),
_return_http_data_only=_params.get("_return_http_data_only"), # noqa: E501
_preload_content=_params.get("_preload_content", True),
_request_timeout=_params.get("_request_timeout"),
collection_formats=_collection_formats,
_request_auth=_params.get("_request_auth"),
)
@validate_arguments
def video_stream_outputs_orggroup_list(
self,
organization_group_code: Annotated[
constr(strict=True, max_length=150, min_length=3),
Field(..., description="The unique identifier of the organization group"),
],
sport: Annotated[StrictStr, Field(..., description="Sport name")],
added: Annotated[
Optional[datetime], Field(description="Record was added after this date/time. In UTC.")
] = None,
competition_id: Annotated[
Optional[StrictStr], Field(description="The unique identifier of the competition")
] = None,
content: Annotated[
Optional[constr(strict=True, max_length=30)],
Field(
description="Content of the stream >- `CLEAN` Output signal is the same as the input signal >- `PROGRAM` Score overlays and other enhancements have been added to the stream "
),
] = None,
external: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of fields that will instead be interpreted as an externalId. See [External Ids](#section/Introduction/External-Ids) for more information."
),
] = None,
feed_type: Annotated[
Optional[constr(strict=True, max_length=30)],
Field(
description="Type of video input >- `ADDITIONAL_ANGLE` Additional angle >- `LOW_LATENCY` Low Latency >- `PRIMARY` Primary "
),
] = None,
fields: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of fields to display. The response will only display these fields. See [Partial Response](#section/Partial-Response) section for more information."
),
] = None,
from_time_utc: Annotated[
Optional[datetime], Field(description="Where fixture start time (in UTC) >= this value")
] = None,
hide_null: Annotated[
Optional[StrictBool], Field(description="Don't display data fields with null values or empty structures")
] = None,
include: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of resource types to include. See [Resource Inclusion](#section/Introduction/Resource-Inclusion) for more information."
),
] = None,
limit: Annotated[
Optional[conint(strict=True, le=1000, ge=1)],
Field(
description="The maximum number of records to return. See [Pagination](#section/Introduction/Pagination) for more information."
),
] = None,
locale: Annotated[
Optional[constr(strict=True, max_length=5, min_length=5)], Field(description="The locale of the video")
] = None,
master_venue_id: Annotated[
Optional[StrictStr], Field(description="The unique identifier of the master venue")
] = None,
offset: Annotated[
Optional[StrictInt],
Field(
description="The offset of the records. See [Pagination](#section/Introduction/Pagination) for more information."
),
] = None,
provider: Annotated[
Optional[constr(strict=True, max_length=100)], Field(description="The unique code for the video provider")
] = None,
season_id: Annotated[Optional[StrictStr], Field(description="The unique identifier of the season")] = None,
source_number: Annotated[
Optional[StrictInt], Field(description="Unique identifier for the video source")
] = None,
to_time_utc: Annotated[
Optional[datetime], Field(description="Where fixture start time (in UTC) <= this value")
] = None,
updated: Annotated[
Optional[datetime], Field(description="Record was modified after this date/time. In UTC.")
] = None,
venue_id: Annotated[Optional[StrictStr], Field(description="The unique identifier of the venue")] = None,
**kwargs
) -> VideoStreamOutputsResponse: # noqa: E501
"""List available video streams for the ~organization group~ # noqa: E501
Display the list of video streams for the organization group. Streams for completed fixtures and those older than 12 hours, will not be displayed. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.video_stream_outputs_orggroup_list(organization_group_code, sport, added, competition_id, content, external, feed_type, fields, from_time_utc, hide_null, include, limit, locale, master_venue_id, offset, provider, season_id, source_number, to_time_utc, updated, venue_id, async_req=True)
>>> result = thread.get()
:param organization_group_code: The unique identifier of the organization group (required)
:type organization_group_code: str
:param sport: Sport name (required)
:type sport: str
:param added: Record was added after this date/time. In UTC.
:type added: datetime
:param competition_id: The unique identifier of the competition
:type competition_id: str
:param content: Content of the stream >- `CLEAN` Output signal is the same as the input signal >- `PROGRAM` Score overlays and other enhancements have been added to the stream
:type content: str
:param external: A comma separated list of fields that will instead be interpreted as an externalId. See [External Ids](#section/Introduction/External-Ids) for more information.
:type external: str
:param feed_type: Type of video input >- `ADDITIONAL_ANGLE` Additional angle >- `LOW_LATENCY` Low Latency >- `PRIMARY` Primary
:type feed_type: str
:param fields: A comma separated list of fields to display. The response will only display these fields. See [Partial Response](#section/Partial-Response) section for more information.
:type fields: str
:param from_time_utc: Where fixture start time (in UTC) >= this value
:type from_time_utc: datetime
:param hide_null: Don't display data fields with null values or empty structures
:type hide_null: bool
:param include: A comma separated list of resource types to include. See [Resource Inclusion](#section/Introduction/Resource-Inclusion) for more information.
:type include: str
:param limit: The maximum number of records to return. See [Pagination](#section/Introduction/Pagination) for more information.
:type limit: int
:param locale: The locale of the video
:type locale: str
:param master_venue_id: The unique identifier of the master venue
:type master_venue_id: str
:param offset: The offset of the records. See [Pagination](#section/Introduction/Pagination) for more information.
:type offset: int
:param provider: The unique code for the video provider
:type provider: str
:param season_id: The unique identifier of the season
:type season_id: str
:param source_number: Unique identifier for the video source
:type source_number: int
:param to_time_utc: Where fixture start time (in UTC) <= this value
:type to_time_utc: datetime
:param updated: Record was modified after this date/time. In UTC.
:type updated: datetime
:param venue_id: The unique identifier of the venue
:type venue_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: VideoStreamOutputsResponse
"""
kwargs["_return_http_data_only"] = True
if "_preload_content" in kwargs:
raise ValueError(
"Error! Please call the video_stream_outputs_orggroup_list_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
)
try:
return self.video_stream_outputs_orggroup_list_with_http_info(
organization_group_code,
sport,
added,
competition_id,
content,
external,
feed_type,
fields,
from_time_utc,
hide_null,
include,
limit,
locale,
master_venue_id,
offset,
provider,
season_id,
source_number,
to_time_utc,
updated,
venue_id,
**kwargs
) # noqa: E501
except Exception as e:
logger.error(
"Exception when calling VideoStreamsAvailableApi->video_stream_outputs_orggroup_list: %s\n" % e
)
raise
@validate_arguments
def video_stream_outputs_orggroup_list_with_http_info(
self,
organization_group_code: Annotated[
constr(strict=True, max_length=150, min_length=3),
Field(..., description="The unique identifier of the organization group"),
],
sport: Annotated[StrictStr, Field(..., description="Sport name")],
added: Annotated[
Optional[datetime], Field(description="Record was added after this date/time. In UTC.")
] = None,
competition_id: Annotated[
Optional[StrictStr], Field(description="The unique identifier of the competition")
] = None,
content: Annotated[
Optional[constr(strict=True, max_length=30)],
Field(
description="Content of the stream >- `CLEAN` Output signal is the same as the input signal >- `PROGRAM` Score overlays and other enhancements have been added to the stream "
),
] = None,
external: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of fields that will instead be interpreted as an externalId. See [External Ids](#section/Introduction/External-Ids) for more information."
),
] = None,
feed_type: Annotated[
Optional[constr(strict=True, max_length=30)],
Field(
description="Type of video input >- `ADDITIONAL_ANGLE` Additional angle >- `LOW_LATENCY` Low Latency >- `PRIMARY` Primary "
),
] = None,
fields: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of fields to display. The response will only display these fields. See [Partial Response](#section/Partial-Response) section for more information."
),
] = None,
from_time_utc: Annotated[
Optional[datetime], Field(description="Where fixture start time (in UTC) >= this value")
] = None,
hide_null: Annotated[
Optional[StrictBool], Field(description="Don't display data fields with null values or empty structures")
] = None,
include: Annotated[
Optional[StrictStr],
Field(
description="A comma separated list of resource types to include. See [Resource Inclusion](#section/Introduction/Resource-Inclusion) for more information."
),
] = None,
limit: Annotated[
Optional[conint(strict=True, le=1000, ge=1)],
Field(
description="The maximum number of records to return. See [Pagination](#section/Introduction/Pagination) for more information."
),
] = None,
locale: Annotated[
Optional[constr(strict=True, max_length=5, min_length=5)], Field(description="The locale of the video")
] = None,
master_venue_id: Annotated[
Optional[StrictStr], Field(description="The unique identifier of the master venue")
] = None,
offset: Annotated[
Optional[StrictInt],
Field(
description="The offset of the records. See [Pagination](#section/Introduction/Pagination) for more information."
),
] = None,
provider: Annotated[
Optional[constr(strict=True, max_length=100)], Field(description="The unique code for the video provider")
] = None,
season_id: Annotated[Optional[StrictStr], Field(description="The unique identifier of the season")] = None,
source_number: Annotated[
Optional[StrictInt], Field(description="Unique identifier for the video source")
] = None,
to_time_utc: Annotated[
Optional[datetime], Field(description="Where fixture start time (in UTC) <= this value")
] = None,
updated: Annotated[
Optional[datetime], Field(description="Record was modified after this date/time. In UTC.")
] = None,
venue_id: Annotated[Optional[StrictStr], Field(description="The unique identifier of the venue")] = None,
**kwargs
) -> ApiResponse: # noqa: E501
"""List available video streams for the ~organization group~ # noqa: E501
Display the list of video streams for the organization group. Streams for completed fixtures and those older than 12 hours, will not be displayed. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.video_stream_outputs_orggroup_list_with_http_info(organization_group_code, sport, added, competition_id, content, external, feed_type, fields, from_time_utc, hide_null, include, limit, locale, master_venue_id, offset, provider, season_id, source_number, to_time_utc, updated, venue_id, async_req=True)
>>> result = thread.get()
:param organization_group_code: The unique identifier of the organization group (required)
:type organization_group_code: str
:param sport: Sport name (required)
:type sport: str
:param added: Record was added after this date/time. In UTC.
:type added: datetime
:param competition_id: The unique identifier of the competition
:type competition_id: str
:param content: Content of the stream >- `CLEAN` Output signal is the same as the input signal >- `PROGRAM` Score overlays and other enhancements have been added to the stream
:type content: str
:param external: A comma separated list of fields that will instead be interpreted as an externalId. See [External Ids](#section/Introduction/External-Ids) for more information.
:type external: str
:param feed_type: Type of video input >- `ADDITIONAL_ANGLE` Additional angle >- `LOW_LATENCY` Low Latency >- `PRIMARY` Primary
:type feed_type: str
:param fields: A comma separated list of fields to display. The response will only display these fields. See [Partial Response](#section/Partial-Response) section for more information.
:type fields: str
:param from_time_utc: Where fixture start time (in UTC) >= this value
:type from_time_utc: datetime
:param hide_null: Don't display data fields with null values or empty structures
:type hide_null: bool
:param include: A comma separated list of resource types to include. See [Resource Inclusion](#section/Introduction/Resource-Inclusion) for more information.
:type include: str
:param limit: The maximum number of records to return. See [Pagination](#section/Introduction/Pagination) for more information.
:type limit: int
:param locale: The locale of the video
:type locale: str
:param master_venue_id: The unique identifier of the master venue
:type master_venue_id: str
:param offset: The offset of the records. See [Pagination](#section/Introduction/Pagination) for more information.
:type offset: int
:param provider: The unique code for the video provider
:type provider: str
:param season_id: The unique identifier of the season
:type season_id: str
:param source_number: Unique identifier for the video source
:type source_number: int
:param to_time_utc: Where fixture start time (in UTC) <= this value
:type to_time_utc: datetime
:param updated: Record was modified after this date/time. In UTC.
:type updated: datetime
:param venue_id: The unique identifier of the venue
:type venue_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the ApiResponse.data will
be set to none and raw_data will store the
HTTP response body without reading/decoding.
Default is True.
:type _preload_content: bool, optional
:param _return_http_data_only: response data instead of ApiResponse
object with status code, headers, etc
:type _return_http_data_only: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:type _content_type: string, optional: force content-type for the request
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(VideoStreamOutputsResponse, status_code(int), headers(HTTPHeaderDict))
"""
_params = locals()
_all_params = [
"organization_group_code",
"sport",
"added",
"competition_id",
"content",
"external",
"feed_type",
"fields",
"from_time_utc",
"hide_null",
"include",
"limit",
"locale",
"master_venue_id",
"offset",
"provider",
"season_id",
"source_number",
"to_time_utc",
"updated",
"venue_id",
]
_all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
"_request_auth",
"_content_type",
"_headers",
]
)
# validate the arguments
for _key, _val in _params["kwargs"].items():
if _key not in _all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'" " to method video_stream_outputs_orggroup_list" % _key
)
_params[_key] = _val
del _params["kwargs"]
_collection_formats = {}
# process the path parameters
_path_params = {}
if _params["organization_group_code"]:
_path_params["organizationGroupCode"] = _params["organization_group_code"]
if _params["sport"]:
_path_params["sport"] = _params["sport"]
# process the query parameters
_query_params = []
if _params.get("added") is not None: # noqa: E501
if isinstance(_params["added"], datetime):
_query_params.append(
("added", _params["added"].strftime(self.api_client.configuration.datetime_format))
)
else:
_query_params.append(("added", _params["added"]))
if _params.get("competition_id") is not None: # noqa: E501
_query_params.append(("competitionId", _params["competition_id"]))
if _params.get("content") is not None: # noqa: E501
_query_params.append(("content", _params["content"]))
if _params.get("external") is not None: # noqa: E501
_query_params.append(("external", _params["external"]))
if _params.get("feed_type") is not None: # noqa: E501
_query_params.append(("feedType", _params["feed_type"]))
if _params.get("fields") is not None: # noqa: E501
_query_params.append(("fields", _params["fields"]))
if _params.get("from_time_utc") is not None: # noqa: E501
if isinstance(_params["from_time_utc"], datetime):
_query_params.append(
("fromTimeUTC", _params["from_time_utc"].strftime(self.api_client.configuration.datetime_format))
)
else:
_query_params.append(("fromTimeUTC", _params["from_time_utc"]))
if _params.get("hide_null") is not None: # noqa: E501
_query_params.append(("hideNull", _params["hide_null"]))
if _params.get("include") is not None: # noqa: E501
_query_params.append(("include", _params["include"]))
if _params.get("limit") is not None: # noqa: E501
_query_params.append(("limit", _params["limit"]))
if _params.get("locale") is not None: # noqa: E501
_query_params.append(("locale", _params["locale"]))
if _params.get("master_venue_id") is not None: # noqa: E501
_query_params.append(("masterVenueId", _params["master_venue_id"]))
if _params.get("offset") is not None: # noqa: E501
_query_params.append(("offset", _params["offset"]))
if _params.get("provider") is not None: # noqa: E501
_query_params.append(("provider", _params["provider"]))
if _params.get("season_id") is not None: # noqa: E501
_query_params.append(("seasonId", _params["season_id"]))
if _params.get("source_number") is not None: # noqa: E501
_query_params.append(("sourceNumber", _params["source_number"]))
if _params.get("to_time_utc") is not None: # noqa: E501
if isinstance(_params["to_time_utc"], datetime):
_query_params.append(
("toTimeUTC", _params["to_time_utc"].strftime(self.api_client.configuration.datetime_format))
)
else:
_query_params.append(("toTimeUTC", _params["to_time_utc"]))
if _params.get("updated") is not None: # noqa: E501
if isinstance(_params["updated"], datetime):
_query_params.append(
("updated", _params["updated"].strftime(self.api_client.configuration.datetime_format))
)
else:
_query_params.append(("updated", _params["updated"]))
if _params.get("venue_id") is not None: # noqa: E501
_query_params.append(("venueId", _params["venue_id"]))
# process the header parameters
_header_params = dict(_params.get("_headers", {}))
# process the form parameters
_form_params = []
_files = {}
# process the body parameter
_body_params = None
# set the HTTP header `Accept`
_header_params["Accept"] = self.api_client.select_header_accept(["application/json"]) # noqa: E501
# authentication setting
_auth_settings = ["OAuth2"] # noqa: E501
_response_types_map = {
"200": "VideoStreamOutputsResponse",
}
return self.api_client.call_api(
"/{sport}/orgGroup/{organizationGroupCode}/video/streams/available",
"GET",
_path_params,
_query_params,
_header_params,
body=_body_params,
post_params=_form_params,
files=_files,
response_types_map=_response_types_map,
auth_settings=_auth_settings,
async_req=_params.get("async_req"),
_return_http_data_only=_params.get("_return_http_data_only"), # noqa: E501
_preload_content=_params.get("_preload_content", True),
_request_timeout=_params.get("_request_timeout"),
collection_formats=_collection_formats,
_request_auth=_params.get("_request_auth"),
) | PypiClean |
/opentaxforms-0.5.7.tar.gz/opentaxforms-0.5.7/README.md | OpenTaxForms opens and automates US tax forms--it reads PDF tax forms
(currently from IRS.gov only, not state forms),
converts them to more feature-full HTML5,
and offers a database and API for developers to create their own tax applications.
The converted forms will be available to test (and ultimately use)
at [OpenTaxForms.org](http://OpenTaxForms.org/).
- **PyPI**
[](https://badge.fury.io/py/opentaxforms)
- **License**
[GNU AGPLv3](http://choosealicense.com/licenses/agpl-3.0/)
- **Install**
pip install opentaxforms
- **External dependencies**
[pdf2svg](https://github.com/dawbarton/pdf2svg)
- **Github**
- [code](https://github.com/jsaponara/opentaxforms/)
- [issue tracker link forthcoming]
- [milestones link forthcoming]
- **Build status**
[](https://travis-ci.org/jsaponara/opentaxforms)
- **Form status**
The script reports a status for each form. Current status categories are:
- layout means textboxes and checkboxes--they should not overlap.
- refs are references to other forms--they should all be recognized (ie, in the list of all forms).
- math is the computed fields and their dependencies--each computed field should have at least one dependency, or else what is it computed from?
Each status error has a corresponding warning in the log file, so they're easy to find. Each bugfix will likely reduce errors across many forms.
[1040 form status listing](https://opentaxforms.org/pages/status-form-1040-family-and-immediate-references.html)
- **API**
The ReSTful API is read-only and provides a complete accounting of form fields:
data type, size and position on page, and role in field groupings
like dollars-and-cents fields, fields on the same line, fields in the same table,
fields on the same page, and fields involved in the same formula. The API will
also provide status information and tester feedback for each form.
[API docs forthcoming, for now see examples in test/run_apiclient.sh]
- **How it works**
Most of the IRS tax forms embed all the fillable field information in the
[XML Forms Architecture](https://en.wikipedia.org/wiki/XFA) (XFA) format.
The OpenTaxForms [python](https://www.python.org/) script extracts the XFA
from each PDF form, and parses out:
- relationships among fields (such as dollar and cent fields; fields on the same line; columns and rows of a table).
- math formulas, including which fields are computed vs user-entered (such as "Subtract line 37 from line 35. If line 37 is greater than line 35, enter -0-").
- references to other forms
All this information is stored in a database (optionally [PostgreSQL](https://www.postgresql.org/)
or the default [sqlite](https://sqlite.org/)) and served according to
a [ReSTful](https://en.wikipedia.org/wiki/Representational_state_transfer)
API. For each tax form page, an html form (with javascript to express the
formulas) is generated and overlaid on an svg rendering of the original PDF.
The javascript saves all user inputs to local/web storage in the browser
via [basil.js](https://wisembly.github.io/basil.js/). When the page is
loaded, those values are retrieved. Values are keyed by tax year,
form number (eg 1040), and XFA field id (and soon taxpayer name now that I do
my kids' taxes too). Testers will annotate the page image with boxes and comments
via [annotorious.js](http://annotorious.github.io/). A few of the 900+ IRS forms
don't have embedded XFA (such as 2016 Form 1040 Schedule A).
Eventually those forms may be updated to contain XFA, but until then, the
best automated approach is probably
[OCR](https://en.wikipedia.org/wiki/Optical_character_recognition)
(optical character recognition). OCR may be a less fool-proof approach in general,
especially for state (NJ, NY, etc) forms, which generally are not XFA-based.
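
To make the first step concrete, here is a minimal sketch of pulling the raw XFA
XML out of a form PDF. It assumes the pypdf library; the actual OpenTaxForms script
may use a different PDF toolkit, and the names below (extract_xfa, form1040.pdf) are
illustrative only.

```python
from pypdf import PdfReader

def extract_xfa(pdf_path):
    """Return the raw XFA XML embedded in an IRS form PDF, or None."""
    reader = PdfReader(pdf_path)
    acroform = reader.trailer["/Root"].get("/AcroForm")
    if acroform is None:
        return None  # e.g. 2016 Form 1040 Schedule A has no XFA
    acroform = acroform.get_object()
    if "/XFA" not in acroform:
        return None
    xfa = acroform["/XFA"]
    # /XFA is either a single stream or an array alternating names and streams
    if hasattr(xfa, "get_data"):
        return xfa.get_data()
    return b"".join(part.get_object().get_data() for part in xfa[1::2])

# usage (file name is hypothetical):
# print(extract_xfa("form1040.pdf")[:200])
```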
- **To do**
- Move lower-level ToDo items to github/issues.
- Refactor toward a less script-ish architecture that will scale to more developers. [architecturePlease]
- Switch to a pdf-to-svg converter that preserves text (rather than converting text to paths), perhaps pdfjs,
so that testers can easily copy and paste text from forms. [copyableText]
- Should extractFillableFields.py be a separate project called xfadump?
This might provide a cleaner target output interface for an OCR effort. [xfadump]
- Replace allpdfnames.txt with a more detailed form dictionary via a preprocess step. [formDictionary]
- Offer entire-form html interface (currently presenting each page separately). [formAsSingleHtmlPage]
- Incorporate instructions and publications, especially extracting the worksheets from instructions. [worksheets]
- Add the ability to process US state forms. [stateForms]
- Fix countless bugs, especially in forms that contain tables (see [issues])
- Don't look in a separate file for a schedule that occurs within the form itself. [refsToEmbeddedSchedules]
- Separate dirName command line option into pdfInputDir,htmlOutputDir. [splitIoDirs]
| PypiClean |
/HBT_IP_Test-1.0.1-py3-none-any.whl/HBT_IP_Test/libs/isom/python/AudioSources_pb2.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import IsomStdDef_pb2 as IsomStdDef__pb2
import IsomDevices_pb2 as IsomDevices__pb2
import CodecDetails_pb2 as CodecDetails__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='AudioSources.proto',
package='Honeywell.Security.ISOM.AudioSources',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x12\x41udioSources.proto\x12$Honeywell.Security.ISOM.AudioSources\x1a\x10IsomStdDef.proto\x1a\x11IsomDevices.proto\x1a\x12\x43odecDetails.proto\"e\n\x15\x41udioSourceOperations\x12\x42\n\tresources\x18\x0b \x03(\x0e\x32/.Honeywell.Security.ISOM.AudioSources.Resources*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"d\n\x14\x41udioSourceRelations\x12\x42\n\trelations\x18\x0b \x03(\x0e\x32/.Honeywell.Security.ISOM.AudioSources.Relations*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"[\n\x11\x41udioSourceEvents\x12<\n\x06\x65vents\x18\x0b \x03(\x0e\x32,.Honeywell.Security.ISOM.AudioSources.Events*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"\xfa\x01\n\x10\x41udioSourceState\x12\n\n\x02id\x18\x0b \x01(\t\x12I\n\x0ctroubleState\x18\r \x01(\x0b\x32\x33.Honeywell.Security.ISOM.Devices.DeviceTroubleState\x12\x43\n\tomitState\x18\x0f \x01(\x0b\x32\x30.Honeywell.Security.ISOM.Devices.DeviceOmitState\x12@\n\x0erecordingState\x18\xe9\x07 \x01(\x0b\x32\'.Honeywell.Security.ISOM.RecordingState*\x08\x08\xa0\xf7\x36\x10\x87\x94=\"g\n\x14\x41udioSourceStateList\x12\x45\n\x05state\x18\x0b \x03(\x0b\x32\x36.Honeywell.Security.ISOM.AudioSources.AudioSourceState*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"Q\n\x16\x41udioSourceIdentifiers\x12\n\n\x02id\x18\x0b \x01(\t\x12\x0c\n\x04name\x18\x0c \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\r \x01(\t*\x08\x08\xc0\x84=\x10\x87\x94=\"|\n\x13\x41udioSourceRelation\x12\n\n\x02id\x18\x0b \x01(\t\x12=\n\x04name\x18\x0c \x01(\x0e\x32/.Honeywell.Security.ISOM.AudioSources.Relations\x12\x10\n\x08\x65ntityID\x18\r \x01(\t*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"p\n\x17\x41udioSourceRelationList\x12K\n\x08relation\x18\x0b \x03(\x0b\x32\x39.Honeywell.Security.ISOM.AudioSources.AudioSourceRelation*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"\xab\x02\n\x11\x41udioSourceConfig\x12Q\n\x0bidentifiers\x18\x0b \x01(\x0b\x32<.Honeywell.Security.ISOM.AudioSources.AudioSourceIdentifiers\x12K\n\x08relation\x18\x0c \x03(\x0b\x32\x39.Honeywell.Security.ISOM.AudioSources.AudioSourceRelation\x12W\n\x11\x63ommunicationType\x18\r \x01(\x0e\x32<.Honeywell.Security.ISOM.AudioSources.AudioCommunicationType\x12\x13\n\x0b\x65nableState\x18\x0e \x01(\x08*\x08\x08\xa0\xf7\x36\x10\xe0\x91\x43\"u\n\x15\x41udioSourceConfigList\x12R\n\x11\x41udioSourceConfig\x18\x0b \x03(\x0b\x32\x37.Honeywell.Security.ISOM.AudioSources.AudioSourceConfig*\x08\x08\xc0\x84=\x10\xe0\x91\x43\"\xad\x01\n\x11\x41udioSourceEntity\x12G\n\x06\x63onfig\x18\x15 \x01(\x0b\x32\x37.Honeywell.Security.ISOM.AudioSources.AudioSourceConfig\x12\x45\n\x05state\x18\x1f \x01(\x0b\x32\x36.Honeywell.Security.ISOM.AudioSources.AudioSourceState*\x08\x08\xa0\xf7\x36\x10\xe0\x91\x43\"j\n\x15\x41udioSourceEntityList\x12G\n\x06\x65ntity\x18\x0b \x03(\x0b\x32\x37.Honeywell.Security.ISOM.AudioSources.AudioSourceEntity*\x08\x08\xc0\x84=\x10\xe0\x91\x43*\xd2\x01\n\tResources\x12\x18\n\x13supportedOperations\x10\xf2\x07\x12\x17\n\x12supportedRelations\x10\xf3\x07\x12\x14\n\x0fsupportedEvents\x10\xf4\x07\x12\x1a\n\x15supportedCapabilities\x10\xf5\x07\x12\x0f\n\nfullEntity\x10\xc2N\x12\t\n\x04info\x10\xc3N\x12\x0b\n\x06\x63onfig\x10\xd7N\x12\x10\n\x0bidentifiers\x10\xebN\x12\x0e\n\trelations\x10\xffN\x12\x15\n\rMax_Resources\x10\x80\x80\x80\x80\x04*\xf2\x01\n\tRelations\x12\x1a\n\x16\x41udioSourceOwnedBySite\x10\x0b\x12$\n 
AudioSourceAssociatedWithAccount\x10\x0c\x12\x1f\n\x1b\x41udioSourceDerivedFromInput\x10\x0e\x12#\n\x1f\x41udioSourceAssignedToPeripheral\x10\x0f\x12!\n\x1d\x41udioSourceRecordedByRecorder\x10\x11\x12#\n\x1f\x41udioSourceupportsStreamProfile\x10\x18\x12\x15\n\rMax_Relations\x10\x80\x80\x80\x80\x04*L\n\x06\x45vents\x12\x13\n\x0e\x63onfig_p_added\x10\x9aN\x12\x16\n\x11\x63onfig_p_modified\x10\x9bN\x12\x15\n\x10\x63onfig_p_deleted\x10\x9cN*b\n\x16\x41udioCommunicationType\x12\n\n\x06OneWay\x10\x0b\x12\n\n\x06TwoWay\x10\x0c\x12\x0c\n\x08Intercom\x10\r\x12\"\n\x1aMax_AudioCommunicationType\x10\x80\x80\x80\x80\x04')
,
dependencies=[IsomStdDef__pb2.DESCRIPTOR,IsomDevices__pb2.DESCRIPTOR,CodecDetails__pb2.DESCRIPTOR,])
_RESOURCES = _descriptor.EnumDescriptor(
name='Resources',
full_name='Honeywell.Security.ISOM.AudioSources.Resources',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='supportedOperations', index=0, number=1010,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='supportedRelations', index=1, number=1011,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='supportedEvents', index=2, number=1012,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='supportedCapabilities', index=3, number=1013,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='fullEntity', index=4, number=10050,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='info', index=5, number=10051,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='config', index=6, number=10071,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='identifiers', index=7, number=10091,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='relations', index=8, number=10111,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Max_Resources', index=9, number=1073741824,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=1802,
serialized_end=2012,
)
_sym_db.RegisterEnumDescriptor(_RESOURCES)
Resources = enum_type_wrapper.EnumTypeWrapper(_RESOURCES)
_RELATIONS = _descriptor.EnumDescriptor(
name='Relations',
full_name='Honeywell.Security.ISOM.AudioSources.Relations',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='AudioSourceOwnedBySite', index=0, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AudioSourceAssociatedWithAccount', index=1, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AudioSourceDerivedFromInput', index=2, number=14,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AudioSourceAssignedToPeripheral', index=3, number=15,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AudioSourceRecordedByRecorder', index=4, number=17,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AudioSourceupportsStreamProfile', index=5, number=24,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Max_Relations', index=6, number=1073741824,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2015,
serialized_end=2257,
)
_sym_db.RegisterEnumDescriptor(_RELATIONS)
Relations = enum_type_wrapper.EnumTypeWrapper(_RELATIONS)
_EVENTS = _descriptor.EnumDescriptor(
name='Events',
full_name='Honeywell.Security.ISOM.AudioSources.Events',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='config_p_added', index=0, number=10010,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='config_p_modified', index=1, number=10011,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='config_p_deleted', index=2, number=10012,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2259,
serialized_end=2335,
)
_sym_db.RegisterEnumDescriptor(_EVENTS)
Events = enum_type_wrapper.EnumTypeWrapper(_EVENTS)
_AUDIOCOMMUNICATIONTYPE = _descriptor.EnumDescriptor(
name='AudioCommunicationType',
full_name='Honeywell.Security.ISOM.AudioSources.AudioCommunicationType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OneWay', index=0, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TwoWay', index=1, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Intercom', index=2, number=13,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Max_AudioCommunicationType', index=3, number=1073741824,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2337,
serialized_end=2435,
)
_sym_db.RegisterEnumDescriptor(_AUDIOCOMMUNICATIONTYPE)
AudioCommunicationType = enum_type_wrapper.EnumTypeWrapper(_AUDIOCOMMUNICATIONTYPE)
supportedOperations = 1010
supportedRelations = 1011
supportedEvents = 1012
supportedCapabilities = 1013
fullEntity = 10050
info = 10051
config = 10071
identifiers = 10091
relations = 10111
Max_Resources = 1073741824
AudioSourceOwnedBySite = 11
AudioSourceAssociatedWithAccount = 12
AudioSourceDerivedFromInput = 14
AudioSourceAssignedToPeripheral = 15
AudioSourceRecordedByRecorder = 17
AudioSourceupportsStreamProfile = 24
Max_Relations = 1073741824
config_p_added = 10010
config_p_modified = 10011
config_p_deleted = 10012
OneWay = 11
TwoWay = 12
Intercom = 13
Max_AudioCommunicationType = 1073741824
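# A minimal usage sketch (kept as comments, since the message classes are registered
# further down in this generated module). Field and enum names come from the
# descriptors below; the identifier values are illustrative only.
#
#     import AudioSources_pb2 as audio
#
#     cfg = audio.AudioSourceConfig()
#     cfg.identifiers.id = "audioSource-1"            # hypothetical id
#     cfg.identifiers.name = "Lobby microphone"
#     cfg.communicationType = audio.OneWay
#     rel = cfg.relation.add()                        # repeated AudioSourceRelation
#     rel.name = audio.AudioSourceOwnedBySite
#     rel.entityID = "site-1"
#     payload = cfg.SerializeToString()
#     roundtrip = audio.AudioSourceConfig.FromString(payload)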
_AUDIOSOURCEOPERATIONS = _descriptor.Descriptor(
name='AudioSourceOperations',
full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceOperations',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resources', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceOperations.resources', index=0,
number=11, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000000, 1100000), ],
oneofs=[
],
serialized_start=117,
serialized_end=218,
)
_AUDIOSOURCERELATIONS = _descriptor.Descriptor(
name='AudioSourceRelations',
full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceRelations',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='relations', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceRelations.relations', index=0,
number=11, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000000, 1100000), ],
oneofs=[
],
serialized_start=220,
serialized_end=320,
)
_AUDIOSOURCEEVENTS = _descriptor.Descriptor(
name='AudioSourceEvents',
full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceEvents',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='events', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceEvents.events', index=0,
number=11, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000000, 1100000), ],
oneofs=[
],
serialized_start=322,
serialized_end=413,
)
_AUDIOSOURCESTATE = _descriptor.Descriptor(
name='AudioSourceState',
full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceState.id', index=0,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='troubleState', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceState.troubleState', index=1,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='omitState', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceState.omitState', index=2,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='recordingState', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceState.recordingState', index=3,
number=1001, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(900000, 1001991), ],
oneofs=[
],
serialized_start=416,
serialized_end=666,
)
_AUDIOSOURCESTATELIST = _descriptor.Descriptor(
name='AudioSourceStateList',
full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceStateList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='state', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceStateList.state', index=0,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000000, 1100000), ],
oneofs=[
],
serialized_start=668,
serialized_end=771,
)
_AUDIOSOURCEIDENTIFIERS = _descriptor.Descriptor(
name='AudioSourceIdentifiers',
full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceIdentifiers',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceIdentifiers.id', index=0,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceIdentifiers.name', index=1,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceIdentifiers.description', index=2,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000000, 1001991), ],
oneofs=[
],
serialized_start=773,
serialized_end=854,
)
_AUDIOSOURCERELATION = _descriptor.Descriptor(
name='AudioSourceRelation',
full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceRelation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceRelation.id', index=0,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceRelation.name', index=1,
number=12, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=11,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entityID', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceRelation.entityID', index=2,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000000, 1100000), ],
oneofs=[
],
serialized_start=856,
serialized_end=980,
)
_AUDIOSOURCERELATIONLIST = _descriptor.Descriptor(
name='AudioSourceRelationList',
full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceRelationList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='relation', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceRelationList.relation', index=0,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000000, 1100000), ],
oneofs=[
],
serialized_start=982,
serialized_end=1094,
)
_AUDIOSOURCECONFIG = _descriptor.Descriptor(
name='AudioSourceConfig',
full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='identifiers', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceConfig.identifiers', index=0,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relation', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceConfig.relation', index=1,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='communicationType', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceConfig.communicationType', index=2,
number=13, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=11,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enableState', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceConfig.enableState', index=3,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(900000, 1100000), ],
oneofs=[
],
serialized_start=1097,
serialized_end=1396,
)
_AUDIOSOURCECONFIGLIST = _descriptor.Descriptor(
name='AudioSourceConfigList',
full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceConfigList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='AudioSourceConfig', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceConfigList.AudioSourceConfig', index=0,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000000, 1100000), ],
oneofs=[
],
serialized_start=1398,
serialized_end=1515,
)
_AUDIOSOURCEENTITY = _descriptor.Descriptor(
name='AudioSourceEntity',
full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceEntity',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='config', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceEntity.config', index=0,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceEntity.state', index=1,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(900000, 1100000), ],
oneofs=[
],
serialized_start=1518,
serialized_end=1691,
)
_AUDIOSOURCEENTITYLIST = _descriptor.Descriptor(
name='AudioSourceEntityList',
full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceEntityList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='entity', full_name='Honeywell.Security.ISOM.AudioSources.AudioSourceEntityList.entity', index=0,
number=11, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=True,
syntax='proto2',
extension_ranges=[(1000000, 1100000), ],
oneofs=[
],
serialized_start=1693,
serialized_end=1799,
)
_AUDIOSOURCEOPERATIONS.fields_by_name['resources'].enum_type = _RESOURCES
_AUDIOSOURCERELATIONS.fields_by_name['relations'].enum_type = _RELATIONS
_AUDIOSOURCEEVENTS.fields_by_name['events'].enum_type = _EVENTS
_AUDIOSOURCESTATE.fields_by_name['troubleState'].message_type = IsomDevices__pb2._DEVICETROUBLESTATE
_AUDIOSOURCESTATE.fields_by_name['omitState'].message_type = IsomDevices__pb2._DEVICEOMITSTATE
_AUDIOSOURCESTATE.fields_by_name['recordingState'].message_type = CodecDetails__pb2._RECORDINGSTATE
_AUDIOSOURCESTATELIST.fields_by_name['state'].message_type = _AUDIOSOURCESTATE
_AUDIOSOURCERELATION.fields_by_name['name'].enum_type = _RELATIONS
_AUDIOSOURCERELATIONLIST.fields_by_name['relation'].message_type = _AUDIOSOURCERELATION
_AUDIOSOURCECONFIG.fields_by_name['identifiers'].message_type = _AUDIOSOURCEIDENTIFIERS
_AUDIOSOURCECONFIG.fields_by_name['relation'].message_type = _AUDIOSOURCERELATION
_AUDIOSOURCECONFIG.fields_by_name['communicationType'].enum_type = _AUDIOCOMMUNICATIONTYPE
_AUDIOSOURCECONFIGLIST.fields_by_name['AudioSourceConfig'].message_type = _AUDIOSOURCECONFIG
_AUDIOSOURCEENTITY.fields_by_name['config'].message_type = _AUDIOSOURCECONFIG
_AUDIOSOURCEENTITY.fields_by_name['state'].message_type = _AUDIOSOURCESTATE
_AUDIOSOURCEENTITYLIST.fields_by_name['entity'].message_type = _AUDIOSOURCEENTITY
DESCRIPTOR.message_types_by_name['AudioSourceOperations'] = _AUDIOSOURCEOPERATIONS
DESCRIPTOR.message_types_by_name['AudioSourceRelations'] = _AUDIOSOURCERELATIONS
DESCRIPTOR.message_types_by_name['AudioSourceEvents'] = _AUDIOSOURCEEVENTS
DESCRIPTOR.message_types_by_name['AudioSourceState'] = _AUDIOSOURCESTATE
DESCRIPTOR.message_types_by_name['AudioSourceStateList'] = _AUDIOSOURCESTATELIST
DESCRIPTOR.message_types_by_name['AudioSourceIdentifiers'] = _AUDIOSOURCEIDENTIFIERS
DESCRIPTOR.message_types_by_name['AudioSourceRelation'] = _AUDIOSOURCERELATION
DESCRIPTOR.message_types_by_name['AudioSourceRelationList'] = _AUDIOSOURCERELATIONLIST
DESCRIPTOR.message_types_by_name['AudioSourceConfig'] = _AUDIOSOURCECONFIG
DESCRIPTOR.message_types_by_name['AudioSourceConfigList'] = _AUDIOSOURCECONFIGLIST
DESCRIPTOR.message_types_by_name['AudioSourceEntity'] = _AUDIOSOURCEENTITY
DESCRIPTOR.message_types_by_name['AudioSourceEntityList'] = _AUDIOSOURCEENTITYLIST
DESCRIPTOR.enum_types_by_name['Resources'] = _RESOURCES
DESCRIPTOR.enum_types_by_name['Relations'] = _RELATIONS
DESCRIPTOR.enum_types_by_name['Events'] = _EVENTS
DESCRIPTOR.enum_types_by_name['AudioCommunicationType'] = _AUDIOCOMMUNICATIONTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AudioSourceOperations = _reflection.GeneratedProtocolMessageType('AudioSourceOperations', (_message.Message,), {
'DESCRIPTOR' : _AUDIOSOURCEOPERATIONS,
'__module__' : 'AudioSources_pb2'
# @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.AudioSources.AudioSourceOperations)
})
_sym_db.RegisterMessage(AudioSourceOperations)
AudioSourceRelations = _reflection.GeneratedProtocolMessageType('AudioSourceRelations', (_message.Message,), {
'DESCRIPTOR' : _AUDIOSOURCERELATIONS,
'__module__' : 'AudioSources_pb2'
# @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.AudioSources.AudioSourceRelations)
})
_sym_db.RegisterMessage(AudioSourceRelations)
AudioSourceEvents = _reflection.GeneratedProtocolMessageType('AudioSourceEvents', (_message.Message,), {
'DESCRIPTOR' : _AUDIOSOURCEEVENTS,
'__module__' : 'AudioSources_pb2'
# @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.AudioSources.AudioSourceEvents)
})
_sym_db.RegisterMessage(AudioSourceEvents)
AudioSourceState = _reflection.GeneratedProtocolMessageType('AudioSourceState', (_message.Message,), {
'DESCRIPTOR' : _AUDIOSOURCESTATE,
'__module__' : 'AudioSources_pb2'
# @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.AudioSources.AudioSourceState)
})
_sym_db.RegisterMessage(AudioSourceState)
AudioSourceStateList = _reflection.GeneratedProtocolMessageType('AudioSourceStateList', (_message.Message,), {
'DESCRIPTOR' : _AUDIOSOURCESTATELIST,
'__module__' : 'AudioSources_pb2'
# @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.AudioSources.AudioSourceStateList)
})
_sym_db.RegisterMessage(AudioSourceStateList)
AudioSourceIdentifiers = _reflection.GeneratedProtocolMessageType('AudioSourceIdentifiers', (_message.Message,), {
'DESCRIPTOR' : _AUDIOSOURCEIDENTIFIERS,
'__module__' : 'AudioSources_pb2'
# @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.AudioSources.AudioSourceIdentifiers)
})
_sym_db.RegisterMessage(AudioSourceIdentifiers)
AudioSourceRelation = _reflection.GeneratedProtocolMessageType('AudioSourceRelation', (_message.Message,), {
'DESCRIPTOR' : _AUDIOSOURCERELATION,
'__module__' : 'AudioSources_pb2'
# @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.AudioSources.AudioSourceRelation)
})
_sym_db.RegisterMessage(AudioSourceRelation)
AudioSourceRelationList = _reflection.GeneratedProtocolMessageType('AudioSourceRelationList', (_message.Message,), {
'DESCRIPTOR' : _AUDIOSOURCERELATIONLIST,
'__module__' : 'AudioSources_pb2'
# @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.AudioSources.AudioSourceRelationList)
})
_sym_db.RegisterMessage(AudioSourceRelationList)
AudioSourceConfig = _reflection.GeneratedProtocolMessageType('AudioSourceConfig', (_message.Message,), {
'DESCRIPTOR' : _AUDIOSOURCECONFIG,
'__module__' : 'AudioSources_pb2'
# @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.AudioSources.AudioSourceConfig)
})
_sym_db.RegisterMessage(AudioSourceConfig)
AudioSourceConfigList = _reflection.GeneratedProtocolMessageType('AudioSourceConfigList', (_message.Message,), {
'DESCRIPTOR' : _AUDIOSOURCECONFIGLIST,
'__module__' : 'AudioSources_pb2'
# @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.AudioSources.AudioSourceConfigList)
})
_sym_db.RegisterMessage(AudioSourceConfigList)
AudioSourceEntity = _reflection.GeneratedProtocolMessageType('AudioSourceEntity', (_message.Message,), {
'DESCRIPTOR' : _AUDIOSOURCEENTITY,
'__module__' : 'AudioSources_pb2'
# @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.AudioSources.AudioSourceEntity)
})
_sym_db.RegisterMessage(AudioSourceEntity)
AudioSourceEntityList = _reflection.GeneratedProtocolMessageType('AudioSourceEntityList', (_message.Message,), {
'DESCRIPTOR' : _AUDIOSOURCEENTITYLIST,
'__module__' : 'AudioSources_pb2'
# @@protoc_insertion_point(class_scope:Honeywell.Security.ISOM.AudioSources.AudioSourceEntityList)
})
_sym_db.RegisterMessage(AudioSourceEntityList)
# @@protoc_insertion_point(module_scope)
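# Example usage (not part of the generated descriptors above): a minimal,
# hedged sketch of how the generated message classes might be exercised,
# assuming standard protobuf Python message semantics. Field names come from
# the descriptors above; all values are placeholders.
if __name__ == "__main__":
    config = AudioSourceConfig(
        identifiers=AudioSourceIdentifiers(
            id="audioSource-1",
            name="Lobby microphone",
            description="Front desk audio source"),
        enableState=True,
    )
    entity = AudioSourceEntity(config=config)
    # Round-trip through the wire format to confirm the message is well formed.
    data = entity.SerializeToString()
    parsed = AudioSourceEntity()
    parsed.ParseFromString(data)
    print(parsed.config.identifiers.name)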
/hpy-0.9.0rc2.tar.gz/hpy-0.9.0rc2/docs/api-reference/hpy-type.rst
HPy Types and Modules
=====================
Types, modules and their attributes (i.e. methods, members, slots, get-set
descriptors) are defined in a similar way. Section `HPy Type`_ documents the
type-specific part and `HPy Module`_ documents the module-specific part.
Section `HPy Definition`_ documents how to define attributes for both types
and modules.
HPy Type
--------
Definition
~~~~~~~~~~
.. autocmodule:: hpy/hpytype.h
:members: HPyType_Spec,HPyType_BuiltinShape,HPyType_SpecParam,HPyType_SpecParam_Kind,HPyType_HELPERS,HPyType_LEGACY_HELPERS,HPy_TPFLAGS_DEFAULT,HPy_TPFLAGS_BASETYPE,HPy_TPFLAGS_HAVE_GC
Construction and More
~~~~~~~~~~~~~~~~~~~~~
.. autocmodule:: autogen/public_api.h
:members: HPyType_FromSpec, HPyType_GetName, HPyType_IsSubtype
HPy Module
----------
.. c:macro:: HPY_EMBEDDED_MODULES
If ``HPY_EMBEDDED_MODULES`` is defined, this means that there will be
several embedded HPy modules (and so, several ``HPy_MODINIT`` usages) in the
same binary. In this case, some restrictions apply:
1. all of the module's methods/members/slots/... must be defined in the same
   file
2. the embedder **MUST** declare the module to be *embeddable* by using macro
:c:macro:`HPY_MOD_EMBEDDABLE`.
.. autocmodule:: hpy/hpymodule.h
:members: HPY_MOD_EMBEDDABLE,HPyModuleDef,HPy_MODINIT
HPy Definition
--------------
Defining slots, methods, members, and get-set descriptors for types and modules
is done with HPy definition (represented by C struct :c:struct:`HPyDef`).
.. autocmodule:: hpy/hpydef.h
:members: HPyDef,HPyDef_Kind,HPySlot,HPyMeth,HPyMember_FieldType,HPyMember,HPyGetSet,HPyDef_SLOT,HPyDef_METH,HPyDef_MEMBER,HPyDef_GET,HPyDef_SET,HPyDef_GETSET,HPyDef_CALL_FUNCTION
| PypiClean |
/calibreweb-nepali-0.6.20.tar.gz/calibreweb-nepali-0.6.20/src/cps/oauth_bb.py
# This file is part of the Calibre-Web (https://github.com/janeczku/calibre-web)
# Copyright (C) 2018-2019 OzzieIsaacs, cervinko, jkrehm, bodybybuddha, ok11,
# andy29485, idalin, Kyosfonica, wuqi, Kennyl, lemmsh,
# falgh1, grunjol, csitko, ytils, xybydy, trasba, vrabe,
# ruben-herold, marblepebble, JackED42, SiphonSquirrel,
# apetresc, nanu-c, mutschler
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import json
from functools import wraps
from flask import session, request, make_response, abort
from flask import Blueprint, flash, redirect, url_for
from flask_babel import gettext as _
from flask_dance.consumer import oauth_authorized, oauth_error
from flask_dance.contrib.github import make_github_blueprint, github
from flask_dance.contrib.google import make_google_blueprint, google
from oauthlib.oauth2 import TokenExpiredError, InvalidGrantError
from flask_login import login_user, current_user, login_required
from sqlalchemy.orm.exc import NoResultFound
from . import constants, logger, config, app, ub
try:
from .oauth import OAuthBackend, backend_resultcode
except NameError:
pass
oauth_check = {}
oauthblueprints = []
oauth = Blueprint('oauth', __name__)
log = logger.create()
def oauth_required(f):
@wraps(f)
def inner(*args, **kwargs):
if config.config_login_type == constants.LOGIN_OAUTH:
return f(*args, **kwargs)
if request.headers.get('X-Requested-With') == 'XMLHttpRequest':
data = {'status': 'error', 'message': 'Not Found'}
response = make_response(json.dumps(data, ensure_ascii=False))
response.headers["Content-Type"] = "application/json; charset=utf-8"
return response, 404
abort(404)
return inner
def register_oauth_blueprint(cid, show_name):
oauth_check[cid] = show_name
def register_user_with_oauth(user=None):
all_oauth = {}
for oauth_key in oauth_check.keys():
if str(oauth_key) + '_oauth_user_id' in session and session[str(oauth_key) + '_oauth_user_id'] != '':
all_oauth[oauth_key] = oauth_check[oauth_key]
if len(all_oauth.keys()) == 0:
return
if user is None:
flash(_("%(provider)s सँग दर्ता गर्नुहोस्", provider=", ".join(list(all_oauth.values()))), category="success")
else:
for oauth_key in all_oauth.keys():
# Find this OAuth token in the database, or create it
query = ub.session.query(ub.OAuth).filter_by(
provider=oauth_key,
provider_user_id=session[str(oauth_key) + "_oauth_user_id"],
)
try:
oauth_key = query.one()
oauth_key.user_id = user.id
except NoResultFound:
                # not found, return error
return
ub.session_commit("User {} with OAuth for provider {} registered".format(user.name, oauth_key))
def logout_oauth_user():
for oauth_key in oauth_check.keys():
if str(oauth_key) + '_oauth_user_id' in session:
session.pop(str(oauth_key) + '_oauth_user_id')
def oauth_update_token(provider_id, token, provider_user_id):
session[provider_id + "_oauth_user_id"] = provider_user_id
session[provider_id + "_oauth_token"] = token
# Find this OAuth token in the database, or create it
query = ub.session.query(ub.OAuth).filter_by(
provider=provider_id,
provider_user_id=provider_user_id,
)
try:
oauth_entry = query.one()
# update token
oauth_entry.token = token
except NoResultFound:
oauth_entry = ub.OAuth(
provider=provider_id,
provider_user_id=provider_user_id,
token=token,
)
ub.session.add(oauth_entry)
ub.session_commit()
# Disable Flask-Dance's default behavior for saving the OAuth token
    # Value differs depending on the flask-dance version
return backend_resultcode
def bind_oauth_or_register(provider_id, provider_user_id, redirect_url, provider_name):
query = ub.session.query(ub.OAuth).filter_by(
provider=provider_id,
provider_user_id=provider_user_id,
)
try:
oauth_entry = query.first()
# already bind with user, just login
if oauth_entry.user:
login_user(oauth_entry.user)
            log.debug("You are now logged in as: '%s'", oauth_entry.user.name)
flash(_("सफलता! तपाईं अब %(nickname)s को रूपमा लग इन हुनुहुन्छ ", nickname= oauth_entry.user.name),
category="success")
return redirect(url_for('web.index'))
else:
# bind to current user
if current_user and current_user.is_authenticated:
oauth_entry.user = current_user
try:
ub.session.add(oauth_entry)
ub.session.commit()
flash(_("Link to %(oauth)s Succeeded", oauth=provider_name), category="success")
log.info("Link to {} Succeeded".format(provider_name))
return redirect(url_for('web.profile'))
except Exception as ex:
log.error_or_exception(ex)
ub.session.rollback()
else:
flash(_("लगइन असफल भयो, OAuth खातासँग कुनै प्रयोगकर्ता लिङ्क गरिएको छैन"), category="error")
log.info('Login failed, No User Linked With OAuth Account')
return redirect(url_for('web.login'))
# return redirect(url_for('web.login'))
# if config.config_public_reg:
# return redirect(url_for('web.register'))
# else:
# flash(_("Public registration is not enabled"), category="error")
# return redirect(url_for(redirect_url))
except (NoResultFound, AttributeError):
return redirect(url_for(redirect_url))
def get_oauth_status():
status = []
query = ub.session.query(ub.OAuth).filter_by(
user_id=current_user.id,
)
try:
oauths = query.all()
for oauth_entry in oauths:
status.append(int(oauth_entry.provider))
return status
except NoResultFound:
return None
def unlink_oauth(provider):
if request.host_url + 'me' != request.referrer:
pass
query = ub.session.query(ub.OAuth).filter_by(
provider=provider,
user_id=current_user.id,
)
try:
oauth_entry = query.one()
if current_user and current_user.is_authenticated:
oauth_entry.user = current_user
try:
ub.session.delete(oauth_entry)
ub.session.commit()
logout_oauth_user()
flash(_("%(oauth)s मा अनलिंक सफल भयो", oauth=oauth_check[provider]), category="success")
log.info("Unlink to {} Succeeded".format(oauth_check[provider]))
except Exception as ex:
log.error_or_exception(ex)
ub.session.rollback()
flash(_("%(oauth)s लाई अनलिंक गर्न सकिएन", oauth=oauth_check[provider]), category="error")
except NoResultFound:
log.warning("oauth %s for user %d not found", provider, current_user.id)
flash(_("%(oauth)s सँग लिङ्क गरिएको छैन", oauth=provider), category="error")
return redirect(url_for('web.profile'))
def generate_oauth_blueprints():
if not ub.session.query(ub.OAuthProvider).count():
for provider in ("github", "google"):
oauthProvider = ub.OAuthProvider()
oauthProvider.provider_name = provider
oauthProvider.active = False
ub.session.add(oauthProvider)
ub.session_commit("{} Blueprint Created".format(provider))
oauth_ids = ub.session.query(ub.OAuthProvider).all()
ele1 = dict(provider_name='github',
id=oauth_ids[0].id,
active=oauth_ids[0].active,
oauth_client_id=oauth_ids[0].oauth_client_id,
scope=None,
oauth_client_secret=oauth_ids[0].oauth_client_secret,
obtain_link='https://github.com/settings/developers')
ele2 = dict(provider_name='google',
id=oauth_ids[1].id,
active=oauth_ids[1].active,
scope=["https://www.googleapis.com/auth/userinfo.email"],
oauth_client_id=oauth_ids[1].oauth_client_id,
oauth_client_secret=oauth_ids[1].oauth_client_secret,
obtain_link='https://console.developers.google.com/apis/credentials')
oauthblueprints.append(ele1)
oauthblueprints.append(ele2)
for element in oauthblueprints:
if element['provider_name'] == 'github':
blueprint_func = make_github_blueprint
else:
blueprint_func = make_google_blueprint
blueprint = blueprint_func(
client_id=element['oauth_client_id'],
client_secret=element['oauth_client_secret'],
redirect_to="oauth."+element['provider_name']+"_login",
scope=element['scope']
)
element['blueprint'] = blueprint
element['blueprint'].backend = OAuthBackend(ub.OAuth, ub.session, str(element['id']),
user=current_user, user_required=True)
app.register_blueprint(blueprint, url_prefix="/login")
if element['active']:
register_oauth_blueprint(element['id'], element['provider_name'])
return oauthblueprints
if ub.oauth_support:
oauthblueprints = generate_oauth_blueprints()
@oauth_authorized.connect_via(oauthblueprints[0]['blueprint'])
def github_logged_in(blueprint, token):
if not token:
flash(_("GitHub सँग लग इन गर्न असफल भयो।"), category="error")
log.error("Failed to log in with GitHub")
return False
resp = blueprint.session.get("/user")
if not resp.ok:
flash(_("GitHub बाट प्रयोगकर्ता जानकारी ल्याउन असफल भयो।"), category="error")
log.error("Failed to fetch user info from GitHub")
return False
github_info = resp.json()
github_user_id = str(github_info["id"])
return oauth_update_token(str(oauthblueprints[0]['id']), token, github_user_id)
@oauth_authorized.connect_via(oauthblueprints[1]['blueprint'])
def google_logged_in(blueprint, token):
if not token:
flash(_("Google सँग लग इन गर्न असफल भयो।"), category="error")
log.error("Failed to log in with Google")
return False
resp = blueprint.session.get("/oauth2/v2/userinfo")
if not resp.ok:
flash(_("Google बाट प्रयोगकर्ता जानकारी ल्याउन असफल भयो।"), category="error")
log.error("Failed to fetch user info from Google")
return False
google_info = resp.json()
google_user_id = str(google_info["id"])
return oauth_update_token(str(oauthblueprints[1]['id']), token, google_user_id)
# notify on OAuth provider error
@oauth_error.connect_via(oauthblueprints[0]['blueprint'])
def github_error(blueprint, error, error_description=None, error_uri=None):
msg = (
"OAuth error from {name}! "
"error={error} description={description} uri={uri}"
).format(
name=blueprint.name,
error=error,
description=error_description,
uri=error_uri,
) # ToDo: Translate
flash(msg, category="error")
@oauth_error.connect_via(oauthblueprints[1]['blueprint'])
def google_error(blueprint, error, error_description=None, error_uri=None):
msg = (
"OAuth error from {name}! "
"error={error} description={description} uri={uri}"
).format(
name=blueprint.name,
error=error,
description=error_description,
uri=error_uri,
) # ToDo: Translate
flash(msg, category="error")
@oauth.route('/link/github')
@oauth_required
def github_login():
if not github.authorized:
return redirect(url_for('github.login'))
try:
account_info = github.get('/user')
if account_info.ok:
account_info_json = account_info.json()
return bind_oauth_or_register(oauthblueprints[0]['id'], account_info_json['id'], 'github.login', 'github')
flash(_("GitHub Oauth त्रुटि, कृपया पछि पुन: प्रयास गर्नुहोस्।"), category="error")
log.error("GitHub Oauth error, please retry later")
except (InvalidGrantError, TokenExpiredError) as e:
flash(_("GitHub Oauth त्रुटि: {}").format(e), category="error")
log.error(e)
return redirect(url_for('web.login'))
@oauth.route('/unlink/github', methods=["GET"])
@login_required
def github_login_unlink():
return unlink_oauth(oauthblueprints[0]['id'])
@oauth.route('/link/google')
@oauth_required
def google_login():
if not google.authorized:
return redirect(url_for("google.login"))
try:
resp = google.get("/oauth2/v2/userinfo")
if resp.ok:
account_info_json = resp.json()
return bind_oauth_or_register(oauthblueprints[1]['id'], account_info_json['id'], 'google.login', 'google')
flash(_("Google Oauth त्रुटि, कृपया पछि पुन: प्रयास गर्नुहोस्।"), category="error")
log.error("Google Oauth error, please retry later")
except (InvalidGrantError, TokenExpiredError) as e:
flash(_("Google Oauth त्रुटि: {}").format(e), category="error")
log.error(e)
return redirect(url_for('web.login'))
@oauth.route('/unlink/google', methods=["GET"])
@login_required
def google_login_unlink():
    return unlink_oauth(oauthblueprints[1]['id'])
/idem-random-2.0.0.tar.gz/idem-random-2.0.0/idem_random/states/random/password.py
from typing import Any
from typing import Dict
import dict_tools.differ as differ
def present(
hub,
ctx,
name: str,
length: int,
resource_id: str = None,
keepers: Dict[str, Any] = None,
upper: bool = True,
min_upper: int = 0,
lower: bool = True,
min_lower: int = 0,
numeric: bool = True,
min_numeric: int = 0,
special: bool = True,
min_special: int = 0,
override_special: str = None,
) -> Dict[str, Any]:
r"""
    This is a logical state and doesn't interact with any cloud providers.
    This state can be used in conjunction with any other state to generate a
    random password with the provided configuration. The state's configuration
    data is stored in esm. If the configuration for a given state changes,
    a new random password is generated. If there are no configuration changes,
    the old password is retained.
Args:
name(str):
An Idem name of the resource.
length(int) :
The length of the required random password.
resource_id(str, Optional):
Unique random password
        keepers(dict, Optional):
            An arbitrary map of values that, when changed, will trigger recreation of the resource.
lower(bool, Optional):
Include lowercase alphabet characters in the result. Default value is true.
min_lower(int, Optional):
            Minimum number of lowercase alphabet characters in the result. Default value is 0.
min_numeric(int, Optional):
Minimum number of numeric characters in the result. Default value is 0.
min_special(int, Optional):
            Minimum number of special characters in the result. Default value is 0.
min_upper(int, Optional):
Minimum number of uppercase alphabet characters in the result. Default value is 0.
numeric(bool, Optional):
Include numeric characters in the result. Default value is true.
override_special(str, Optional):
Supply your own list of special characters to use for string generation. This overrides the default
character list in the special argument. The special argument must still be set to true for any overwritten
characters to be used in generation.
special(bool, Optional):
Include special characters in the result. These are !@#$%&*()-_=+[]{}<>:?. Default value is true.
upper(bool, Optional):
Include uppercase alphabet characters in the result. Default value is true.
Request Syntax:
.. code-block:: sls
[random_password_state]:
random.password.present:
- name: 'string'
- length: 'int'
- keepers:
'string': 'string'
- lower: 'boolean'
- min_lower: 'int'
- upper: 'boolean'
- min_upper: 'int'
- numeric: 'boolean'
- min_numeric: 'int'
- special: 'boolean'
- override_special: 'string'
Returns:
Dict[str, Any]
Examples:
.. code-block:: sls
random_passwd_state:
random.password.present:
- name: random_passwd
- length: 13
- keepers:
key: value
"""
result = dict(comment=[], old_state=None, new_state=None, name=name, result=True)
params = dict(
upper=upper,
min_upper=min_upper,
lower=lower,
min_lower=min_lower,
numeric=numeric,
min_numeric=min_numeric,
special=special,
min_special=min_special,
override_special=override_special,
)
if resource_id:
result["old_state"] = result["new_state"] = {
"keepers": keepers,
"params": params,
"name": name,
"output": resource_id,
"resource_id": resource_id,
}
return result
before = ctx.get("old_state")
if before:
result["old_state"] = before
keepers_old = before.get("keepers", {})
keepers_new = keepers if keepers else {}
result["changes"] = differ.deep_diff(keepers_old, keepers_new)
old_params = before.get("params", {})
new_params = params
if result["changes"] or old_params != new_params:
result["comment"].append(f"Confiig change detected for '{name}'.")
result["new_state"] = {
"keepers": keepers,
"params": params,
"name": name,
"output": None,
}
if ctx.get("test"):
result["comment"].append(f"Would generate new random.random.password.")
return result
ret = hub.exec.random.password.generate_random_string(
length=length, **params
)
if ret and ret["result"]:
result["new_state"]["output"] = ret["ret"]
result["comment"].append(
f"Generated new random.random.password '{name}'."
)
else:
result["comment"].append(
f"No config change detected for '{name}'. Old password will be retained."
)
result["new_state"] = result["old_state"]
else:
result["new_state"] = {
"keepers": keepers,
"params": params,
"name": name,
"output": None,
}
if ctx.get("test"):
result["comment"].append(f"Would generate random.random.password '{name}'.")
return result
ret = hub.exec.random.password.generate_random_string(length=length, **params)
if not ret or not ret["result"]:
result["result"] = ret["result"]
result["comment"].append(
f"Unable to generate random.random.password '{name}'."
)
return result
result["new_state"]["output"] = ret["ret"]
result["new_state"]["resource_id"] = result["new_state"]["output"]
result["comment"].append(f"random.random.password '{name}' generated.")
return result
def absent(
hub,
ctx,
name: str,
) -> Dict[str, Any]:
r"""
This logical state is used to invalidate/delete the password.
Args:
name(str): An Idem name of the resource.
Request Syntax:
.. code-block:: sls
[random_password_state]:
random.password.absent:
- name: 'string'
Returns:
Dict[str, Any]
Examples:
.. code-block:: sls
random_state01011:
random.password.absent:
- name: rs01011
"""
result = dict(comment=[], old_state=None, new_state=None, name=name, result=True)
before = ctx.get("old_state")
result["old_state"] = before
if before:
if ctx.get("test", False):
result["comment"].append(
f"Would remove the random.random.password '{name}'."
)
return result
result["comment"].append(f"Removed the random.random.password '{name}'.")
else:
result["comment"].append(f"random.random.password '{name}' already absent.")
    return result
/Flask-PagedList-0.2.1.zip/Flask-PagedList-0.2.1/README.rst
Flask-PagedList
===============
Flask-PagedList bundles features from pypagedlist into a blueprint named 'PagedList'.
Installation
------------
Flask-PagedList can be installed using ``pip`` from `PyPI`_. `virtualenv`_ is highly
recommended:
.. code-block:: bash
pip install -U flask-pagedlist
.. _PyPI: https://pypi.python.org/pypi/Flask-PagedList
.. _virtualenv: https://virtualenv.pypa.io/en/latest/
For development, instead, clone the `github repository <https://github.com/timonwong/flask-pagedlist>`_, and use:
.. code-block:: bash
python setup.py develop # Or, pip install -e .
Example Project
----------------
Screenshots
~~~~~~~~~~~
Traditional
+++++++++++
.. image:: https://raw.github.com/timonwong/flask-pagedlist/gh-pages/screenshots/demo1.png
AJAX
++++
.. image:: https://raw.github.com/timonwong/flask-pagedlist/gh-pages/screenshots/demo2.png
Run
~~~
Here is a simple description about how to run the demo project:
.. code-block:: bash
# 1. Clone this git repo in order to get the example
git clone https://github.com/timonwong/flask-pagedlist.git
cd flask-pagedlist
# 2. Install flask-pagedlist
pip install -U flask-pagedlist
# 3. Install dependencies for the example
pip install -U -r example-requirements.txt
# 4. Start the example project
python run_example.py
Usage
-----
Basic usage
~~~~~~~~~~~
Here is an example:
.. code-block:: python
from flask_pagedlist import PagedList
PagedList(app)
Static resources
~~~~~~~~~~~~~~~~
``pagedlist_static_for`` is recommended for requiring static resources for Flask-PagedList in templates:
.. code-block:: python
def pagedlist_static_for(filename, use_minified=None):
"""Resource finding function, also available in templates.
:param filename: File to find a URL for.
:param use_minified': If set to ``True``/``False``, use/don't use
minified. If ``None``, use the default setting
from ``PAGEDLIST_USE_MINIFIED``.
:return: A URL.
"""
Configuration
~~~~~~~~~~~~~
``PAGEDLIST_USE_MINIFIED``
++++++++++++++++++++++++++
``PAGEDLIST_PREFIX``
++++++++++++++++++++
| PypiClean |
/rul_pm-1.1.0.tar.gz/rul_pm-1.1.0/rul_pm/models/keras/models/VisionTransformer.py
import numpy as np
import tensorflow as tf
from rul_pm.models.keras.keras import KerasTrainableModel
from rul_pm.models.keras.layers import ExpandDimension, RemoveDimension
from tensorflow.keras import Input, Model, optimizers
from tensorflow.keras.layers import (Layer, LayerNormalization, MultiHeadAttention,
Add, Conv1D, Conv2D, Dense, Embedding,
Dropout, Flatten, GlobalAveragePooling1D)
class Patches(Layer):
def __init__(self, patch_size, features):
super(Patches, self).__init__()
self.patch_size = patch_size
self.features = features
def call(self, images):
batch_size = tf.shape(images)[0]
patches = tf.image.extract_patches(
images=images,
sizes=[1, self.patch_size, self.features, 1],
strides=[1, self.patch_size, self.features, 1],
rates=[1, 1, 1, 1],
padding="VALID",
)
patch_dims = patches.shape[-1]
patches = tf.reshape(patches, [batch_size, -1, patch_dims])
patch_dims = patches.shape[-1]
return patches
class PatchEncoder(Layer):
def __init__(self, num_patches, projection_dim):
super(PatchEncoder, self).__init__()
self.num_patches = num_patches
self.projection = Dense(units=projection_dim)
self.position_embedding = Embedding(
input_dim=num_patches, output_dim=projection_dim
)
def call(self, patch):
positions = tf.range(start=0, limit=self.num_patches, delta=1)
encoded = self.projection(patch) + self.position_embedding(positions)
return encoded
def mlp(x, hidden_units, dropout_rate):
for units in hidden_units:
x = Dense(units, activation=tf.nn.gelu)(x)
x = Dropout(dropout_rate)(x)
return x
class VisionTransformer(KerasTrainableModel):
"""
"""
def __init__(self,
patch_size:int=5,
projection_dim:int = 64,
num_heads:int= 4,
transformer_layers:int= 8,
mlp_head_units = [2048, 1024],
**kwargs):
super().__init__(**kwargs)
self.patch_size = patch_size
self.num_patches = (self.window // patch_size)
self.projection_dim= projection_dim
self.num_heads = num_heads
self.transformer_units = [
projection_dim * 2,
projection_dim,
]
self.transformer_layers = transformer_layers
self.mlp_head_units= mlp_head_units
def compile(self):
self.compiled = True
self.model.compile(
loss=self.loss,
optimizer=optimizers.Adam(lr=self.learning_rate,
beta_1=0.85,
beta_2=0.9,
epsilon=0.001,
amsgrad=True),
metrics=self.metrics)
def build_model(self):
n_features = self.transformer.n_features
input = Input(shape=(self.window, n_features))
x = ExpandDimension()(input)
patches = Patches(self.patch_size, n_features)(x)
encoded_patches = PatchEncoder(self.num_patches, self.projection_dim)(patches)
for _ in range(self.transformer_layers):
x1 = LayerNormalization(epsilon=1e-6)(encoded_patches)
attention_output = MultiHeadAttention(
num_heads=self.num_heads, key_dim=self.projection_dim, dropout=0.1
)(x1, x1)
x2 = Add()([attention_output, encoded_patches])
x3 = LayerNormalization(epsilon=1e-6)(x2)
x3 = mlp(x3, hidden_units=self.transformer_units, dropout_rate=0.1)
encoded_patches = Add()([x3, x2])
# Create a [batch_size, projection_dim] tensor.
representation = LayerNormalization(epsilon=1e-6)(encoded_patches)
representation = Flatten()(representation)
representation = Dropout(0.5)(representation)
features = mlp(representation, hidden_units=self.mlp_head_units, dropout_rate=0.5)
logits = Dense(1, activation='relu')(features)
# Create the Keras model.
model = Model(inputs=input, outputs=logits)
return model
def get_params(self, deep=False):
d = super().get_params()
return d
@property
def name(self):
return "VisionTransformer" | PypiClean |
/idealoom-0.1.0-py3-none-any.whl/assembl/models/idea_graph_view.py
from collections import defaultdict
from datetime import datetime
from abc import abstractmethod
from itertools import chain
from future.utils import as_native_str
from sqlalchemy.orm import (
relationship, backref)
from sqlalchemy import (
Column,
Integer,
String,
UnicodeText,
DateTime,
Boolean,
ForeignKey,
UniqueConstraint,
)
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import join
import lxml.html as htmlt
from . import DiscussionBoundBase, OriginMixin
from .discussion import Discussion
from ..semantic.virtuoso_mapping import QuadMapPatternS
from ..auth import (
CrudPermissions, P_ADMIN_DISC, P_EDIT_SYNTHESIS)
from .idea import Idea, IdeaLink, RootIdea, IdeaVisitor
from ..semantic.namespaces import (
SIOC, CATALYST, IDEA, ASSEMBL, DCTERMS, QUADNAMES)
from assembl.views.traversal import AbstractCollectionDefinition
from ..views.traversal import collection_creation_side_effects, InstanceContext
class defaultdictlist(defaultdict):
def __init__(self):
super(defaultdictlist, self).__init__(list)
class IdeaGraphView(DiscussionBoundBase, OriginMixin):
"""
A view on the graph of idea.
"""
__tablename__ = "idea_graph_view"
__external_typename = "Map"
rdf_class = CATALYST.Map
type = Column(String(60), nullable=False)
id = Column(Integer, primary_key=True,
info={'rdf': QuadMapPatternS(None, ASSEMBL.db_id)})
creation_date = Column(DateTime, nullable=False, default=datetime.utcnow,
info={'rdf': QuadMapPatternS(None, DCTERMS.created)})
discussion_id = Column(
Integer,
ForeignKey('discussion.id', ondelete="CASCADE", onupdate="CASCADE"),
nullable=False, index=True,
info={'rdf': QuadMapPatternS(None, SIOC.has_container)}
)
discussion = relationship(
Discussion, backref=backref("views", cascade="all, delete-orphan"),
info={'rdf': QuadMapPatternS(None, ASSEMBL.in_conversation)})
__mapper_args__ = {
'polymorphic_identity': 'idea_graph_view',
'polymorphic_on': 'type',
'with_polymorphic': '*'
}
def copy(self, db=None):
retval = self.__class__()
retval.discussion = self.discussion
return retval
def get_discussion_id(self):
return self.discussion_id
@classmethod
def get_discussion_conditions(cls, discussion_id, alias_maker=None):
return (cls.discussion_id == discussion_id, )
crud_permissions = CrudPermissions(P_ADMIN_DISC)
@abstractmethod
def get_idea_links(self):
pass
@abstractmethod
def get_ideas(self):
pass
class SubGraphIdeaAssociation(DiscussionBoundBase):
"""Association table saying that an Idea is part of a ExplicitSubGraphView"""
__tablename__ = 'sub_graph_idea_association'
__table_args__ = (
UniqueConstraint("idea_id", "sub_graph_id"),
)
id = Column(Integer, primary_key=True)
sub_graph_id = Column(Integer, ForeignKey(
'explicit_sub_graph_view.id', ondelete="CASCADE", onupdate="CASCADE"),
index=True, nullable=False)
sub_graph = relationship(
"ExplicitSubGraphView", backref=backref(
"idea_assocs", cascade="all, delete-orphan"))
idea_id = Column(Integer, ForeignKey(
'idea.id', ondelete="CASCADE", onupdate="CASCADE"),
nullable=False, index=True)
# reference to the "Idea" object for proxying
idea = relationship("Idea", backref="in_subgraph_assoc")
include_body = Column(Boolean, server_default='false')
@classmethod
def special_quad_patterns(cls, alias_maker, discussion_id):
idea_assoc = alias_maker.alias_from_class(cls)
idea_alias = alias_maker.alias_from_relns(cls.idea)
# Assume tombstone status of target is similar to source, for now.
conditions = [(idea_assoc.idea_id == idea_alias.id),
(idea_alias.tombstone_date == None)]
if discussion_id:
conditions.append((idea_alias.discussion_id == discussion_id))
return [
QuadMapPatternS(
Idea.iri_class().apply(idea_assoc.idea_id),
IDEA.inMap,
IdeaGraphView.iri_class().apply(idea_assoc.sub_graph_id),
conditions=conditions,
name=QUADNAMES.sub_graph_idea_assoc_reln)
]
def get_discussion_id(self):
sub_graph = self.sub_graph or IdeaGraphView.get(self.sub_graph_id)
return sub_graph.get_discussion_id()
@classmethod
def get_discussion_conditions(cls, discussion_id, alias_maker=None):
return ((cls.sub_graph_id == ExplicitSubGraphView.id),
(ExplicitSubGraphView.discussion_id == discussion_id))
discussion = relationship(
Discussion, viewonly=True, uselist=False, secondary=Idea.__table__,
info={'rdf': QuadMapPatternS(None, ASSEMBL.in_conversation)})
def unique_query(self):
# documented in lib/sqla
idea_id = self.idea_id or self.idea.id
subgraph_id = self.sub_graph_id or self.sub_graph.id
return self.db.query(self.__class__).filter_by(
idea_id=idea_id, sub_graph_id=subgraph_id), True
# @classmethod
# def special_quad_patterns(cls, alias_maker, discussion_id):
# return [QuadMapPatternS(
# Idea.iri_class().apply(cls.source_id),
# IDEA.includes,
# Idea.iri_class().apply(cls.target_id),
# name=QUADNAMES.idea_inclusion_reln)]
crud_permissions = CrudPermissions(P_ADMIN_DISC)
class SubGraphIdeaLinkAssociation(DiscussionBoundBase):
"""Association table saying that an IdeaLink is part of a ExplicitSubGraphView"""
__tablename__ = 'sub_graph_idea_link_association'
id = Column(Integer, primary_key=True)
__table_args__ = (
UniqueConstraint("idea_link_id", "sub_graph_id"),
)
sub_graph_id = Column(Integer, ForeignKey(
'explicit_sub_graph_view.id', ondelete="CASCADE", onupdate="CASCADE"),
index=True, nullable=False)
sub_graph = relationship(
"ExplicitSubGraphView", backref=backref(
"idealink_assocs", cascade="all, delete-orphan"))
idea_link_id = Column(Integer, ForeignKey(
'idea_idea_link.id', ondelete="CASCADE", onupdate="CASCADE"),
index=True, nullable=False)
# reference to the "IdeaLink" object for proxying
idea_link = relationship("IdeaLink", backref="in_subgraph_assoc")
@classmethod
def special_quad_patterns(cls, alias_maker, discussion_id):
idea_link_assoc = alias_maker.alias_from_class(cls)
idea_link_alias = alias_maker.alias_from_relns(cls.idea_link)
# Assume tombstone status of target is similar to source, for now.
conditions = [(idea_link_assoc.idea_link_id == idea_link_alias.id),
(idea_link_alias.tombstone_date == None)]
if discussion_id:
conditions.extend(cls.get_discussion_conditions(
discussion_id, alias_maker))
return [
QuadMapPatternS(
IdeaLink.iri_class().apply(idea_link_assoc.idea_link_id),
IDEA.inMap,
IdeaGraphView.iri_class().apply(idea_link_assoc.sub_graph_id),
conditions=conditions,
name=QUADNAMES.sub_graph_idea_link_assoc_reln)
]
def get_discussion_id(self):
sub_graph = self.sub_graph or IdeaGraphView.get(self.sub_graph_id)
return sub_graph.get_discussion_id()
def unique_query(self):
# documented in lib/sqla
idea_link_id = self.idea_link_id or self.idea_link.id
subgraph_id = self.sub_graph_id or self.sub_graph.id
return self.db.query(self.__class__).filter_by(
idea_link_id=idea_link_id, sub_graph_id=subgraph_id), True
@classmethod
def get_discussion_conditions(cls, discussion_id, alias_maker=None):
if alias_maker:
subgraph_alias = alias_maker.alias_from_relns(cls.sub_graph)
return ((subgraph_alias.discussion_id == discussion_id))
else:
return ((cls.sub_graph_id == ExplicitSubGraphView.id),
(ExplicitSubGraphView.discussion_id == discussion_id))
crud_permissions = CrudPermissions(P_ADMIN_DISC)
class ExplicitSubGraphView(IdeaGraphView):
"""
A view where the Ideas and/or ideaLinks have been explicitly selected.
Note that ideaLinks may point to ideas that are not in the graph. They
should be followed transitively (if their nature is compatible) to reach
every idea in graph as if they were directly linked.
"""
__tablename__ = "explicit_sub_graph_view"
id = Column(Integer, ForeignKey(
'idea_graph_view.id',
ondelete='CASCADE',
onupdate='CASCADE'
), primary_key=True)
# proxy the 'idea' attribute from the 'idea_assocs' relationship
# for direct access
ideas = association_proxy('idea_assocs', 'idea',
creator=lambda idea: SubGraphIdeaAssociation(idea=idea))
# proxy the 'idea_link' attribute from the 'idealink_assocs'
# relationship for direct access
idea_links = association_proxy('idealink_assocs', 'idea_link',
creator=lambda idea_link: SubGraphIdeaLinkAssociation(idea_link=idea_link))
__mapper_args__ = {
'polymorphic_identity': 'explicit_sub_graph_view',
}
def copy(self, db=None):
retval = IdeaGraphView.copy(self, db=db)
# retval.ideas = self.ideas
return retval
def get_idea_links(self):
# more efficient than the association_proxy
return self.db.query(IdeaLink).join(
SubGraphIdeaLinkAssociation
).filter_by(sub_graph_id=self.id).all()
def get_idealink_assocs(self):
return self.idealink_assocs
def get_ideas(self):
# more efficient than the association_proxy
return self.db.query(Idea).join(
SubGraphIdeaAssociation
).filter_by(sub_graph_id=self.id).all()
def visit_ideas_depth_first(self, idea_visitor):
# prefetch
idea_assocs_by_idea_id = {
link.idea_id: link for link in self.idea_assocs}
children_links = defaultdict(list)
with self.db.no_autoflush:
idealink_assocs = self.get_idealink_assocs()
for link_assoc in idealink_assocs:
children_links[link_assoc.idea_link.source_id].append(link_assoc)
for assocs in children_links.values():
assocs.sort(key=lambda l: l.idea_link.order)
root = self.discussion.root_idea
root_assoc = idea_assocs_by_idea_id.get(root.base_id, None)
root_id = root_assoc.idea_id if root_assoc else root.id
result = self._visit_ideas_depth_first(
root_id, idea_assocs_by_idea_id, children_links, idea_visitor,
set(), 0, None, None)
# special case for autocreated links
for link_assoc in idealink_assocs:
self.db.expunge(link_assoc)
return result
def _visit_ideas_depth_first(
self, idea_id, idea_assocs_by_idea_id, children_links, idea_visitor,
visited, level, prev_result, parent_link_assoc):
result = None
if idea_id in visited:
# not necessary in a tree, but let's start to think graph.
return False
assoc = idea_assocs_by_idea_id.get(idea_id, None)
if assoc:
result = idea_visitor.visit_idea(
assoc, level, prev_result, parent_link_assoc)
visited.add(idea_id)
child_results = []
if result is not IdeaVisitor.CUT_VISIT:
for link_assoc in children_links[idea_id]:
child_id = link_assoc.idea_link.target_id
r = self._visit_ideas_depth_first(
child_id, idea_assocs_by_idea_id, children_links,
idea_visitor, visited, level+1, result, link_assoc)
if r:
child_results.append((child_id, r))
return idea_visitor.end_visit(assoc, level, result, child_results, parent_link_assoc)
@classmethod
def extra_collections(cls):
class GViewIdeaCollectionDefinition(AbstractCollectionDefinition):
def __init__(self, cls):
super(GViewIdeaCollectionDefinition, self).__init__(
cls, 'ideas', Idea)
def decorate_query(self, query, owner_alias, last_alias,
parent_instance, ctx):
return query.join(SubGraphIdeaAssociation, owner_alias)
def contains(self, parent_instance, instance):
return instance.db.query(
SubGraphIdeaAssociation).filter_by(
idea=instance,
sub_graph=parent_instance
).count() > 0
@collection_creation_side_effects.register(
inst_ctx=Idea, ctx='ExplicitSubGraphView.ideas')
def add_graph_idea_assoc(inst_ctx, ctx):
yield InstanceContext(
inst_ctx['in_subgraph_assoc'],
SubGraphIdeaAssociation(
idea=inst_ctx._instance, sub_graph=ctx.parent_instance))
@collection_creation_side_effects.register(
inst_ctx=IdeaLink, ctx='ExplicitSubGraphView.ideas')
def add_graph_idea_link_assoc(inst_ctx, ctx):
yield InstanceContext(
inst_ctx['in_subgraph_assoc'],
SubGraphIdeaLinkAssociation(
idea_link=inst_ctx._instance,
sub_graph=ctx.parent_instance))
class GViewIdeaLinkCollectionDefinition(AbstractCollectionDefinition):
def __init__(self, cls):
super(GViewIdeaLinkCollectionDefinition, self
).__init__(cls, 'idea_links', IdeaLink)
def decorate_query(self, query, owner_alias, last_alias,
parent_instance, ctx):
return query.join(SubGraphIdeaLinkAssociation, owner_alias)
def contains(self, parent_instance, instance):
return instance.db.query(
SubGraphIdeaLinkAssociation).filter_by(
idea_link=instance,
sub_graph=parent_instance
).count() > 0
@collection_creation_side_effects.register(
inst_ctx=IdeaLink, ctx='ExplicitSubGraphView.idea_links')
def add_graph_idea_link_assoc2(inst_ctx, ctx):
yield InstanceContext(
inst_ctx['in_subgraph_assoc'],
SubGraphIdeaLinkAssociation(
idea_link=inst_ctx._instance, sub_graph=ctx.parent_instance))
return (GViewIdeaCollectionDefinition(cls),
GViewIdeaLinkCollectionDefinition(cls))
crud_permissions = CrudPermissions(P_ADMIN_DISC)
SubGraphIdeaLinkAssociation.discussion = relationship(
Discussion, viewonly=True, uselist=False,
secondary=join(
ExplicitSubGraphView.__table__,
IdeaGraphView.__table__,
ExplicitSubGraphView.id == IdeaGraphView.id),
info={'rdf': QuadMapPatternS(None, ASSEMBL.in_conversation)})
class TableOfContents(IdeaGraphView):
"""
Represents a Table of Ideas.
A ToI in IdeaLoom is used to organize the core ideas of a discussion in a
threaded hierarchy.
"""
__tablename__ = "table_of_contents"
id = Column(Integer, ForeignKey(
'idea_graph_view.id',
ondelete='CASCADE',
onupdate='CASCADE'
), primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'table_of_contents',
}
discussion = relationship(
Discussion, backref=backref("table_of_contents", uselist=False))
def get_discussion_id(self):
return self.discussion.id
@classmethod
def get_discussion_conditions(cls, discussion_id, alias_maker=None):
return (cls.discussion_id == discussion_id,)
def get_idea_links(self):
return self.discussion.get_idea_links()
def get_ideas(self):
return self.discussion.ideas
@as_native_str()
def __repr__(self):
r = super(TableOfContents, self).__repr__()
return r[:-1] + self.discussion.slug + ">"
class Synthesis(ExplicitSubGraphView):
"""
A synthesis of the discussion. A selection of ideas, associated with
comments, sent periodically to the discussion.
    A synthesis only has links to ideas before publication (as it is edited).
    Once published, it freezes the links by copying tombstoned versions of
    each link in the discussion.
"""
__tablename__ = "synthesis"
id = Column(Integer, ForeignKey(
'explicit_sub_graph_view.id',
ondelete='CASCADE',
onupdate='CASCADE'
), primary_key=True)
subject = Column(UnicodeText)
introduction = Column(UnicodeText)
conclusion = Column(UnicodeText)
__mapper_args__ = {
'polymorphic_identity': 'synthesis',
}
def copy(self):
retval = ExplicitSubGraphView.copy(self)
retval.subject = self.subject
retval.introduction = self.introduction
retval.conclusion = self.conclusion
return retval
def publish(self):
""" Publication is the end of a synthesis's lifecycle.
It creates and returns a frozen copy of its state
using tombstones for ideas and links."""
now = datetime.utcnow()
frozen_synthesis = self.copy()
self.db.add(frozen_synthesis)
self.db.flush()
# Copy tombstoned versions of all idea links and relevant ideas in the current synthesis
links = Idea.get_all_idea_links(self.discussion_id)
synthesis_idea_ids = {idea.id for idea in self.ideas}
# Do not copy the root
root = self.discussion.root_idea
idea_copies = {root.id: root}
# Also copies ideas between two synthesis ideas
relevant_idea_ids = synthesis_idea_ids.copy()
def add_ancestors_between(idea, path=None):
if isinstance(idea, RootIdea):
return
path = path[:] if path else []
if idea.id in synthesis_idea_ids:
relevant_idea_ids.update({i.id for i in path})
else:
path.append(idea)
for parent in idea.parents:
add_ancestors_between(parent, path)
for idea in self.ideas:
for parent in idea.parents:
add_ancestors_between(parent)
for link in links:
new_link = link.copy(tombstone=now)
frozen_synthesis.idea_links.append(new_link)
if link.source_id in relevant_idea_ids:
if link.source_id not in idea_copies:
new_idea = link.source_ts.copy(tombstone=now)
idea_copies[link.source_id] = new_idea
if link.source_id in synthesis_idea_ids:
frozen_synthesis.ideas.append(new_idea)
new_link.source_ts = idea_copies[link.source_id]
if link.target_id in relevant_idea_ids:
if link.target_id not in idea_copies:
new_idea = link.target_ts.copy(tombstone=now)
idea_copies[link.target_id] = new_idea
if link.target_id in synthesis_idea_ids:
frozen_synthesis.ideas.append(new_idea)
new_link.target_ts = idea_copies[link.target_id]
return frozen_synthesis
def as_html(self, jinja_env):
v = SynthesisHtmlizationVisitor(self, jinja_env)
self.visit_ideas_depth_first(v)
return v.as_html()
def get_idea_links(self):
if self.is_next_synthesis:
return Idea.get_all_idea_links(self.discussion_id)
else:
return super(Synthesis, self).get_idea_links()
def get_idealink_assocs(self):
if self.is_next_synthesis:
return [
SubGraphIdeaLinkAssociation(
idea_link=link)
for link in Idea.get_all_idea_links(self.discussion_id)]
else:
return super(Synthesis, self).get_idealink_assocs()
@property
def is_next_synthesis(self):
return self.discussion.get_next_synthesis() == self
def get_discussion_id(self):
return self.discussion_id
@classmethod
def get_discussion_conditions(cls, discussion_id, alias_maker=None):
return (cls.discussion_id == discussion_id,)
@as_native_str()
def __repr__(self):
r = super(Synthesis, self).__repr__()
subject = self.subject or ""
return r[:-1] + subject + ">"
crud_permissions = CrudPermissions(P_EDIT_SYNTHESIS)
class SynthesisHtmlizationVisitor(IdeaVisitor):
def __init__(self, graph_view, jinja_env):
self.jinja_env = jinja_env
self.idea_template = jinja_env.get_template('idea_in_synthesis.jinja2')
self.synthesis_template = jinja_env.get_template('synthesis.jinja2')
self.graph_view = graph_view
def visit_idea(self, idea_assoc, level, prev_result, parent_link_assoc):
return True
def end_visit(self, idea_assoc, level, prev_result, child_results, parent_link_assoc):
if prev_result is not True:
idea_assoc = None
if idea_assoc or child_results:
results = [r for (c, r) in child_results]
idea = idea_assoc.idea if idea_assoc else None
self.result = self.idea_template.render(
idea=idea, children=results, level=level,
idea_assoc=idea_assoc, parent_link_assoc=parent_link_assoc)
return self.result
def as_html(self):
return self.synthesis_template.render(
            synthesis=self.graph_view, content=self.result)
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flashblade/FB_2_7/models/directory_service_role.py
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_7 import models
class DirectoryServiceRole(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'group': 'str',
'group_base': 'str',
'id': 'str',
'role': 'Reference'
}
attribute_map = {
'group': 'group',
'group_base': 'group_base',
'id': 'id',
'role': 'role'
}
required_args = {
}
def __init__(
self,
group=None, # type: str
group_base=None, # type: str
id=None, # type: str
role=None, # type: models.Reference
):
"""
Keyword args:
group (str): Common Name (CN) of the directory service group containing users with authority level of the specified role name.
group_base (str): Specifies where the configured group is located in the directory tree.
id (str): A non-modifiable, globally unique ID chosen by the system.
role (Reference): A reference to the role; can be any role that exists on the system.
"""
if group is not None:
self.group = group
if group_base is not None:
self.group_base = group_base
if id is not None:
self.id = id
if role is not None:
self.role = role
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `DirectoryServiceRole`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
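        # Unset attributes are backed by `Property` placeholder objects; report
        # them as None so callers see a plain missing value.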
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DirectoryServiceRole, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DirectoryServiceRole):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | PypiClean |
/comt-2.6.4.tar.gz/comt-2.6.4/src/cm/media/js/lib/yui/yui3-3.15.0/src/datatable/js/message.js | var Message;
/**
_API docs for this extension are included in the DataTable class._
Adds support for a message container to appear in the table. This can be used
to indicate loading progress, lack of records, or any other communication
needed.
Features added to `Y.DataTable`, and made available for custom classes at
`Y.DataTable.Message`.
@class DataTable.Message
@for DataTable
@since 3.5.0
**/
Y.namespace('DataTable').Message = Message = function () {};
Message.ATTRS = {
/**
Enables the display of messages in the table. Setting this to false will
prevent the message Node from being created and `showMessage` from doing
anything.
@attribute showMessages
@type {Boolean}
@default true
@since 3.5.0
**/
showMessages: {
value: true,
validator: Y.Lang.isBoolean
}
};
Y.mix(Message.prototype, {
/**
Template used to generate the node that will be used to report messages.
@property MESSAGE_TEMPLATE
@type {String}
    @default <tbody class="{className}"><tr><td class="{contentClass}" colspan="{colspan}"></td></tr></tbody>
@since 3.5.0
**/
MESSAGE_TEMPLATE: '<tbody class="{className}"><tr><td class="{contentClass}" colspan="{colspan}"></td></tr></tbody>',
/**
Hides the message node.
@method hideMessage
@return {DataTable}
@chainable
@since 3.5.0
**/
hideMessage: function () {
this.get('boundingBox').removeClass(
this.getClassName('message', 'visible'));
return this;
},
/**
Display the message node and set its content to `message`. If there is a
localized `strings` entry for the value of `message`, that string will be
used.
@method showMessage
@param {String} message The message name or message itself to display
@return {DataTable}
@chainable
@since 3.5.0
**/
showMessage: function (message) {
var content = this.getString(message) || message;
if (!this._messageNode) {
this._initMessageNode();
}
if (this.get('showMessages')) {
if (content) {
this._messageNode.one(
'.' + this.getClassName('message', 'content'))
.setHTML(content);
this.get('boundingBox').addClass(
this.getClassName('message','visible'));
} else {
// TODO: is this right?
// If no message provided, remove the message node.
this.hideMessage();
}
}
return this;
},
//--------------------------------------------------------------------------
// Protected methods
//--------------------------------------------------------------------------
/**
Updates the colspan of the `<td>` used to display the messages.
@method _afterMessageColumnsChange
@param {EventFacade} e The columnsChange event
@protected
@since 3.5.0
**/
_afterMessageColumnsChange: function () {
var contentNode;
if (this._messageNode) {
contentNode = this._messageNode.one(
'.' + this.getClassName('message', 'content'));
if (contentNode) {
// FIXME: This needs to become a class extension plus a view or
// plugin for the table view.
contentNode.set('colSpan', this._displayColumns.length);
}
}
},
/**
Relays to `_uiSetMessage` to hide or show the message node.
@method _afterMessageDataChange
@param {EventFacade} e The dataChange event
@protected
@since 3.5.0
**/
_afterMessageDataChange: function () {
this._uiSetMessage();
},
/**
Removes the message node if `showMessages` is `false`, or relays to
`_uiSetMessage` if `true`.
@method _afterShowMessagesChange
@param {EventFacade} e The showMessagesChange event
@protected
@since 3.5.0
**/
_afterShowMessagesChange: function (e) {
if (e.newVal) {
this._uiSetMessage(e);
} else if (this._messageNode) {
this.get('boundingBox').removeClass(
this.getClassName('message', 'visible'));
this._messageNode.remove().destroy(true);
this._messageNode = null;
}
},
/**
Binds the events necessary to keep the message node in sync with the current
table and configuration state.
@method _bindMessageUI
@protected
@since 3.5.0
**/
_bindMessageUI: function () {
this.after(['dataChange', '*:add', '*:remove', '*:reset'],
Y.bind('_afterMessageDataChange', this));
this.after('columnsChange', Y.bind('_afterMessageColumnsChange', this));
this.after('showMessagesChange',
Y.bind('_afterShowMessagesChange', this));
},
/**
Merges in the message related strings and hooks into the rendering cycle to
also render and bind the message node.
@method initializer
@protected
@since 3.5.0
**/
initializer: function () {
this._initMessageStrings();
if (this.get('showMessages')) {
this.after('table:renderBody', Y.bind('_initMessageNode', this));
}
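        // Hook the message UI binding/sync into the host's bindUI/syncUI
        // lifecycle.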
this.after(Y.bind('_bindMessageUI', this), this, 'bindUI');
this.after(Y.bind('_syncMessageUI', this), this, 'syncUI');
},
/**
Creates the `_messageNode` property from the configured `MESSAGE_TEMPLATE`
and inserts it before the `<table>`'s `<tbody>` node.
@method _initMessageNode
@protected
@since 3.5.0
**/
_initMessageNode: function () {
if (!this._messageNode) {
this._messageNode = Y.Node.create(
Y.Lang.sub(this.MESSAGE_TEMPLATE, {
className: this.getClassName('message'),
contentClass: this.getClassName('message', 'content'),
colspan: this._displayColumns.length || 1
}));
this._tableNode.insertBefore(this._messageNode, this._tbodyNode);
}
},
/**
Add the messaging related strings to the `strings` map.
@method _initMessageStrings
@protected
@since 3.5.0
**/
_initMessageStrings: function () {
// Not a valueFn because other class extensions will want to add to it
this.set('strings', Y.mix((this.get('strings') || {}),
Y.Intl.get('datatable-message')));
},
/**
Node used to display messages from `showMessage`.
@property _messageNode
@type {Node}
@value `undefined` (not initially set)
@since 3.5.0
**/
//_messageNode: null,
/**
Synchronizes the message UI with the table state.
@method _syncMessageUI
@protected
@since 3.5.0
**/
_syncMessageUI: function () {
this._uiSetMessage();
},
/**
Calls `hideMessage` or `showMessage` as appropriate based on the presence of
records in the `data` ModelList.
This is called when `data` is reset or records are added or removed. Also,
if the `showMessages` attribute is updated. In either case, if the
triggering event has a `message` property on the EventFacade, it will be
passed to `showMessage` (if appropriate). If no such property is on the
facade, the `emptyMessage` will be used (see the strings).
@method _uiSetMessage
    @param {EventFacade} e The event that triggered the update, if any
@protected
@since 3.5.0
**/
_uiSetMessage: function (e) {
if (!this.data.size()) {
this.showMessage((e && e.message) || 'emptyMessage');
} else {
this.hideMessage();
}
}
});
if (Y.Lang.isFunction(Y.DataTable)) {
Y.Base.mix(Y.DataTable, [ Message ]);
} | PypiClean |
/jupyter-server-proxy-noe-1.1.tar.gz/jupyter-server-proxy-noe-1.1/README.rst | ====================
Jupyter Server Proxy test
====================
|ReadTheDocs badge| |Travis badge| |PyPI badge| |Conda badge| |NPM badge|
.. |ReadTheDocs badge| image:: https://img.shields.io/readthedocs/jupyter-server-proxy?logo=read-the-docs
:target: https://jupyter-server-proxy.readthedocs.io/
.. |Travis badge| image:: https://img.shields.io/travis/jupyterhub/jupyter-server-proxy/master.svg?logo=travis
:target: https://travis-ci.org/jupyterhub/jupyter-server-proxy
.. |PyPI badge| image:: https://img.shields.io/pypi/v/jupyter-server-proxy.svg?logo=pypi
:target: https://pypi.python.org/pypi/jupyter-server-proxy
.. |Conda badge| image:: https://img.shields.io/conda/vn/conda-forge/jupyter-server-proxy?logo=conda-forge
:target: https://anaconda.org/conda-forge/jupyter-server-proxy
.. |NPM badge| image:: https://img.shields.io/npm/v/@jupyterlab/server-proxy.svg?logo=npm
:target: https://www.npmjs.com/package/@jupyterlab/server-proxy
Jupyter Server Proxy lets you run arbitrary external processes (such
as RStudio, Shiny Server, Syncthing, PostgreSQL, Code Server, etc)
alongside your notebook server and provide authenticated web access to
them using a path like ``/rstudio`` next to others like ``/lab``.
Alongside the python package that provides the main functionality, the
JupyterLab extension (``@jupyterlab/server-proxy``) provides buttons
in the JupyterLab launcher window to get to RStudio for example.
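For example, a web server is typically registered through the notebook
configuration; the snippet below is a minimal, illustrative sketch (the
``openrefine`` name and command are placeholders, not taken from this README):

.. code-block:: python

   # e.g. in jupyter_notebook_config.py (hypothetical example)
   c.ServerProxy.servers = {
       "openrefine": {
           "command": ["refine", "-p", "{port}"],
       }
   }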
**Note:** This project used to be called **nbserverproxy**. As
nbserverproxy is an older version of jupyter-server-proxy, uninstall
nbserverproxy before installing jupyter-server-proxy to avoid
conflicts.
The primary use cases are:
#. Use with JupyterHub / Binder to allow launching users into web
interfaces that have nothing to do with Jupyter - such as RStudio,
Shiny, or OpenRefine.
#. Allow access from frontend javascript (in classic notebook or
JupyterLab extensions) to access web APIs of other processes
running locally in a safe manner. This is used by the `JupyterLab
extension <https://github.com/dask/dask-labextension>`_ for
`dask <https://dask.org/>`_.
`The documentation <https://jupyter-server-proxy.readthedocs.io/>`_
contains information on installation & usage.
Install
=======
Python package
--------------
pip
^^^
.. code-block::
pip install jupyter-server-proxy
conda
^^^^^
.. code-block::
conda install jupyter-server-proxy -c conda-forge
JupyterLab extension
--------------------
Note that the JupyterLab extension is only a graphical interface for
launching applications registered by the Python package, so the extension
requires the Python package to be installed.
.. code-block::
jupyter labextension install @jupyterlab/server-proxy
Contributing
============
Python package
--------------
.. code-block::
   pip install -e .
   # explicit install needed with editable mode (-e)
   jupyter serverextension enable --sys-prefix jupyter_server_proxy
JupyterLab extension
--------------------
The ``jlpm`` command is JupyterLab's pinned version of ``yarn`` that
is installed with JupyterLab. You may use ``yarn`` or ``npm`` instead
of ``jlpm`` below.
.. code-block::
cd jupyterlab-server-proxy
# Install dependencies
jlpm
# Build Typescript source
jlpm build
# Link your development version of the extension with JupyterLab
jupyter labextension link .
# Rebuild Typescript source after making changes
jlpm build
# Rebuild JupyterLab after making any changes
jupyter lab build
You can watch the source directory and run JupyterLab in watch mode to
watch for changes in the extension's source and automatically rebuild
the extension and application.
.. code-block::
# Watch the source directory in another terminal tab
jlpm watch
# Run jupyterlab in watch mode in one terminal tab
jupyter lab --watch
| PypiClean |
/svg.py-1.4.2.tar.gz/svg.py-1.4.2/reflect/_mdn_attr.py | from __future__ import annotations
import re
from dataclasses import dataclass
from pathlib import Path
import yaml
try:
from functools import cached_property
except ImportError:
cached_property = property # type: ignore
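# Matches MDN macro calls such as <li>{{SVGElement("circle")}}</li> so the SVG
# element name can be extracted from "applies to" lists.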
REX_EL = re.compile(r'li>\{\{\s*SVGElement\(["\']([a-zA-Z-]+)["\']\)\s*\}\}\.?</')
DEPRECATED = [
# attributes of <font-face> which is deprecated
'underline-thickness',
'underline-position',
'overline-thickness',
'overline-position',
'strikethrough-thickness',
'strikethrough-position',
# not supported by any browser
'crossorigin',
'systemLanguage',
]
@dataclass
class MDNAttr:
title: str
slug: str
tags: list[str]
content: str
spec_urls: list[str]
@classmethod
def parse(cls, path: Path) -> MDNAttr | None:
path /= 'index.md'
if not path.is_file():
return None
raw = path.read_text()
first, _, second = raw.partition('\n---\n')
fields: dict = next(yaml.load_all(first, Loader=yaml.SafeLoader))
fields.pop('browser-compat', None)
spec_urls = fields.pop('spec-urls', [])
return cls(**fields, content=second, spec_urls=spec_urls)
@classmethod
def parse_all(cls, path: Path) -> list['MDNAttr']:
result = []
for subpath in (path / 'attribute').iterdir():
attr = cls.parse(subpath)
if attr is not None:
result.append(attr)
return result
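    # Example (hypothetical sketch; the path depends on a local checkout of the
    # MDN content repository):
    #
    #     attrs = MDNAttr.parse_all(Path("files/en-us/web/svg"))
    #     deprecated = [a.title for a in attrs if a.is_deprecated]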
@cached_property
def is_deprecated(self) -> bool:
if self.title in DEPRECATED:
return True
if 'Deprecated' in self.tags:
return True
if '<div>{{SVGRef}}{{Deprecated_Header}}</div>' in self.content:
return True
if '<div>{{SVGRef}}{{deprecated_header}}</div>' in self.content:
return True
if '<div>{{deprecated_header}}</div>' in self.content:
return True
return False
@cached_property
def elements(self) -> set[str]:
sep = 'You can use this attribute with the following SVG elements:'
_, _, txt = self.content.partition(sep)
        txt, _, _ = txt.partition('</ul>')
        txt, _, _ = txt.partition('See also')
result = set()
for match in REX_EL.finditer(txt):
result.add(match.group(1))
return result | PypiClean |
/mewpy-0.1.31.tar.gz/mewpy-0.1.31/docs/mewpy.optimization.jmetal.rst | mewpy.optimization.jmetal package
=================================
Submodules
----------
mewpy.optimization.jmetal.ea module
-----------------------------------
.. automodule:: mewpy.optimization.jmetal.ea
:members:
:undoc-members:
:show-inheritance:
mewpy.optimization.jmetal.observers module
------------------------------------------
.. automodule:: mewpy.optimization.jmetal.observers
:members:
:undoc-members:
:show-inheritance:
mewpy.optimization.jmetal.operators module
------------------------------------------
.. automodule:: mewpy.optimization.jmetal.operators
:members:
:undoc-members:
:show-inheritance:
mewpy.optimization.jmetal.problem module
----------------------------------------
.. automodule:: mewpy.optimization.jmetal.problem
:members:
:undoc-members:
:show-inheritance:
Module contents
---------------
.. automodule:: mewpy.optimization.jmetal
:members:
:undoc-members:
:show-inheritance:
| PypiClean |
/pyved-engine-23.8a5.tar.gz/pyved-engine-23.8a5/src/pyved_engine/add_ons/tmx/pytiled_parser/layer.py |
from pathlib import Path
from typing import List, Optional, Union
# import attr
from .common_types import Color, OrderedPair, Size
from .properties import Properties
from .tiled_object import TiledObject
class Layer:
"""Base class that all layer types inherit from. Includes common attributes between
the various types of layers. This class will never be returned directly by the parser.
It will always return one of the full layer types.
`TMX Reference <https://doc.mapeditor.org/en/stable/reference/tmx-map-format/#layer>`_
`JSON Reference <https://doc.mapeditor.org/en/stable/reference/json-map-format/#layer>`_
Attributes:
name: The name of the layer object.
opacity: Decimal value between 0 and 1 to determine opacity. 1 is completely
opaque, 0 is completely transparent. Defaults to 1.
visible: If the layer is visible in the Tiled Editor. Defaults to True
coordinates: Where layer content starts in tiles. Only used by infinite maps.
Defaults to (0, 0).
parallax_factor: Used to determine parallaxing speed of a layer. Defaults to (1, 1).
offset: Rendering offset of the layer object in pixels. Defaults to (0, 0).
id: Unique ID of the layer. Each layer that is added to a map gets a unique id.
Even if a layer is deleted, no layer ever gets the same ID.
size: Ordered pair of size of map in tiles.
properties: Properties for the layer.
tint_color: Tint color that is multiplied with any graphics in this layer.
class_: The Tiled class of this Layer.
        repeat_x: Repeat drawing on the X axis (currently only applies to image layers)
        repeat_y: Repeat drawing on the Y axis (currently only applies to image layers)
"""
def __init__(self, name='', opacity=1, visible=True, repeat_x=False, repeat_y=False, coordinates=[0,0], id=0, parallax_factor=[1,1], size=[0,0]):
self.name = name
self.opacity = opacity
self.visible = visible
# These technically only apply to image layers as of now, however Tiled has indicated
# that is only at this time, and there's no reason they couldn't apply to other
# types of layers in the future. For this reason they are stored in the common class.
        self.repeat_x = repeat_x
        self.repeat_y = repeat_y
        self.coordinates = coordinates
        # TODO finish
        self.parallax_factor = parallax_factor
# offset: OrderedPair = OrderedPair(0, 0)
self.id = id
# class_: Optional[str] = None
self.size = size
# properties: Optional[Properties] = None
# tint_color: Optional[Color] = None
TileLayerGrid = List[List[int]]
class Chunk:
"""Chunk object for infinite maps. Stores `data` like you would have in a normal
TileLayer but only for the area specified by `coordinates` and `size`.
`Infinite Maps Docs <https://doc.mapeditor.org/en/stable/manual/using-infinite-maps/>`_
`TMX Reference <https://doc.mapeditor.org/en/stable/reference/tmx-map-format/#chunk>`_
`JSON Reference <https://doc.mapeditor.org/en/stable/reference/json-map-format/#chunk>`_
Attributes:
coordinates: Location of chunk in tiles.
size: The size of the chunk in tiles.
data: The global tile IDs in the chunk. A row-first two dimensional array.
"""
coordinates: OrderedPair
size: Size
data: List[List[int]]
# The tile data for one layer.
#
# Either a 2 dimensional array of integers representing the global tile IDs
# for a TileLayerGrid, or a list of chunks for an infinite map layer.
LayerData = Union[TileLayerGrid, List[Chunk]]
class TileLayer(Layer):
"""The base type of layer which stores tile data for an area of a map.
`Tiled Docs <https://doc.mapeditor.org/en/stable/manual/layers/#tile-layers>`_
`TMX Reference <https://doc.mapeditor.org/en/stable/reference/tmx-map-format/#layer>`_
`JSON Reference <https://doc.mapeditor.org/en/stable/reference/json-map-format/#tile-layer-example>`_
Attributes:
chunks: List of chunks (only populated for infinite maps)
data: A two dimensional array of integers representing the global
            tile IDs for the layer (only populated for non-infinite maps)
"""
chunks: Optional[List[Chunk]] = None
data: Optional[List[List[int]]] = None
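    # Hypothetical usage sketch (not part of the module): a finite map exposes
    # its global tile IDs directly via `layer.data[row][col]`, while an
    # infinite map provides the same information per `Chunk` in `layer.chunks`.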
class ObjectLayer(Layer):
"""A Layer type which stores a list of Tiled Objects
`Tiled Docs <https://doc.mapeditor.org/en/stable/manual/layers/#object-layers>`_
`TMX Reference <https://doc.mapeditor.org/en/stable/reference/tmx-map-format/#objectgroup>`_
`JSON Reference <https://doc.mapeditor.org/en/stable/reference/json-map-format/#object-layer-example>`_
Attributes:
tiled_objects: List of tiled_objects in the layer.
draworder: Whether the objects are drawn according to the order of the object
elements in the object group element ('manual'), or sorted by their
y-coordinate ('topdown'). Defaults to 'topdown'. See:
https://doc.mapeditor.org/en/stable/manual/objects/#changing-stacking-order
for more info.
"""
    def __init__(self, tiled_objects=None, draw_order='topdown', **kwargs):
        self.tiled_objects = list()
        if tiled_objects:
            self.tiled_objects = tiled_objects
        self.draw_order = draw_order
super().__init__(**kwargs)
class ImageLayer(Layer):
"""A layer type which stores a single image
`Tiled Docs <https://doc.mapeditor.org/en/stable/manual/layers/#image-layers>`_
`TMX Reference <https://doc.mapeditor.org/en/stable/reference/tmx-map-format/#imagelayer>`_
`JSON Reference <https://doc.mapeditor.org/en/stable/reference/json-map-format/#layer>`_
Attributes:
image: The image used by this layer.
transparent_color: Color that is to be made transparent on this layer.
"""
image: Path
transparent_color: Optional[Color] = None
class LayerGroup(Layer):
"""A layer that contains layers (potentially including other LayerGroups, nested infinitely).
In Tiled, offset and opacity recursively affect child layers, however that is not enforced during
parsing by pytiled_parser, and is up to the implementation how to handle recursive effects of
LayerGroups
`Tiled Docs <https://doc.mapeditor.org/en/stable/manual/layers/#group-layers>`_
`TMX Reference <https://doc.mapeditor.org/en/stable/reference/tmx-map-format/#group>`_
`JSON Reference <https://doc.mapeditor.org/en/stable/reference/json-map-format/#layer>`_
Attributes:
layers: list of layers contained in the group.
"""
layers: Optional[List[Layer]] | PypiClean |
/mis_modulos-0.1.tar.gz/mis_modulos-0.1/tensorflow/python/keras/callbacks_v1.py | """Callbacks: utilities called at certain points during model training."""
import os
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.profiler import profiler_v2 as profiler
from tensorflow.python.summary import summary as tf_summary
from tensorflow.python.training import saver
from tensorflow.python.util.tf_export import keras_export
@keras_export(v1=['keras.callbacks.TensorBoard'])
class TensorBoard(callbacks.TensorBoard):
# pylint: disable=line-too-long
"""Enable visualizations for TensorBoard.
TensorBoard is a visualization tool provided with TensorFlow.
This callback logs events for TensorBoard, including:
* Metrics summary plots
* Training graph visualization
* Activation histograms
* Sampled profiling
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```sh
tensorboard --logdir=path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Args:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard.
histogram_freq: frequency (in epochs) at which to compute activation and
weight histograms for the layers of the model. If set to 0, histograms
won't be computed. Validation data (or split) must be specified for
histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard. The log file
can become quite large when write_graph is set to True.
write_grads: whether to visualize gradient histograms in TensorBoard.
`histogram_freq` must be greater than 0.
batch_size: size of batch of inputs to feed to the network for histograms
computation.
write_images: whether to write model weights to visualize as image in
TensorBoard.
embeddings_freq: frequency (in epochs) at which selected embedding layers
will be saved. If set to 0, embeddings won't be computed. Data to be
visualized in TensorBoard's Embedding tab must be passed as
`embeddings_data`.
embeddings_layer_names: a list of names of layers to keep eye on. If None
or empty list all the embedding layer will be watched.
embeddings_metadata: a dictionary which maps layer name to a file name in
which metadata for this embedding layer is saved.
[Here are details](
https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
about metadata files format. In case if the same metadata file is
used for all embedding layers, string can be passed.
embeddings_data: data to be embedded at layers specified in
`embeddings_layer_names`. Numpy array (if the model has a single input)
or list of Numpy arrays (if the model has multiple inputs). Learn more
about embeddings [in this guide](
https://www.tensorflow.org/programmers_guide/embedding).
update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
writes the losses and metrics to TensorBoard after each batch. The same
applies for `'epoch'`. If using an integer, let's say `1000`, the
callback will write the metrics and losses to TensorBoard every 1000
samples. Note that writing too frequently to TensorBoard can slow down
your training.
profile_batch: Profile the batch to sample compute characteristics. By
default, it will profile the second batch. Set profile_batch=0 to
disable profiling.
Raises:
ValueError: If histogram_freq is set and no validation data is provided.
@compatibility(eager)
Using the `TensorBoard` callback will work when eager execution is enabled,
with the restriction that outputting histogram summaries of weights and
gradients is not supported. Consequently, `histogram_freq` will be ignored.
@end_compatibility
"""
# pylint: enable=line-too-long
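  # Example (hypothetical sketch, not part of the original module): the
  # callback is normally handed to `Model.fit`, e.g.
  #
  #   tb = TensorBoard(log_dir='./logs', histogram_freq=1, write_graph=True)
  #   model.fit(x_train, y_train, epochs=5, callbacks=[tb])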
def __init__(self,
log_dir='./logs',
histogram_freq=0,
batch_size=32,
write_graph=True,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None,
embeddings_data=None,
update_freq='epoch',
profile_batch=2):
# Don't call super's init since it is an eager-only version.
callbacks.Callback.__init__(self)
self.log_dir = log_dir
self.histogram_freq = histogram_freq
if self.histogram_freq and context.executing_eagerly():
logging.warning(
          UserWarning('Weight and gradient histograms not supported for eager '
'execution, setting `histogram_freq` to `0`.'))
self.histogram_freq = 0
self.merged = None
self.write_graph = write_graph
self.write_grads = write_grads
self.write_images = write_images
self.batch_size = batch_size
self._current_batch = 0
self._total_batches_seen = 0
self._total_val_batches_seen = 0
self.embeddings_freq = embeddings_freq
self.embeddings_layer_names = embeddings_layer_names
self.embeddings_metadata = embeddings_metadata
self.embeddings_data = embeddings_data
if update_freq == 'batch':
self.update_freq = 1
else:
self.update_freq = update_freq
self._samples_seen = 0
self._samples_seen_at_last_write = 0
# TODO(fishx): Add a link to the full profiler tutorial.
self._profile_batch = profile_batch
# True when the profiler was successfully started by this callback.
# We track the status here to make sure callbacks do not interfere with
# each other. The callback will only stop the profiler it started.
self._profiler_started = False
# TensorBoard should only write summaries on the chief when in a
# Multi-Worker setting.
self._chief_worker_only = True
def _init_writer(self, model):
"""Sets file writer."""
if context.executing_eagerly():
self.writer = summary_ops_v2.create_file_writer_v2(self.log_dir)
if not model.run_eagerly and self.write_graph:
with self.writer.as_default():
summary_ops_v2.graph(K.get_graph())
elif self.write_graph:
self.writer = tf_summary.FileWriter(self.log_dir, K.get_graph())
else:
self.writer = tf_summary.FileWriter(self.log_dir)
def _make_histogram_ops(self, model):
"""Defines histogram ops when histogram_freq > 0."""
# only make histogram summary op if it hasn't already been made
if self.histogram_freq and self.merged is None:
for layer in self.model.layers:
for weight in layer.weights:
mapped_weight_name = weight.name.replace(':', '_')
tf_summary.histogram(mapped_weight_name, weight)
if self.write_images:
w_img = array_ops.squeeze(weight)
shape = K.int_shape(w_img)
if len(shape) == 2: # dense layer kernel case
if shape[0] > shape[1]:
w_img = array_ops.transpose(w_img)
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
elif len(shape) == 3: # convnet case
if K.image_data_format() == 'channels_last':
# switch to channels_first to display
# every kernel as a separate image
w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
shape = K.int_shape(w_img)
w_img = array_ops.reshape(w_img,
[shape[0], shape[1], shape[2], 1])
elif len(shape) == 1: # bias case
w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
else:
# not possible to handle 3D convnets etc.
continue
shape = K.int_shape(w_img)
assert len(shape) == 4 and shape[-1] in [1, 3, 4]
tf_summary.image(mapped_weight_name, w_img)
if self.write_grads:
for weight in layer.trainable_weights:
mapped_weight_name = weight.name.replace(':', '_')
grads = model.optimizer.get_gradients(model.total_loss, weight)
def is_indexed_slices(grad):
return type(grad).__name__ == 'IndexedSlices'
grads = [
grad.values if is_indexed_slices(grad) else grad
for grad in grads
]
tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)
if hasattr(layer, 'output'):
if isinstance(layer.output, list):
for i, output in enumerate(layer.output):
tf_summary.histogram('{}_out_{}'.format(layer.name, i), output)
else:
tf_summary.histogram('{}_out'.format(layer.name), layer.output)
def set_model(self, model):
"""Sets Keras model and creates summary ops."""
self.model = model
self._init_writer(model)
# histogram summaries only enabled in graph mode
if not context.executing_eagerly():
self._make_histogram_ops(model)
self.merged = tf_summary.merge_all()
# If both embedding_freq and embeddings_data are available, we will
# visualize embeddings.
if self.embeddings_freq and self.embeddings_data is not None:
# Avoid circular dependency.
from tensorflow.python.keras.engine import training_utils_v1 # pylint: disable=g-import-not-at-top
self.embeddings_data = training_utils_v1.standardize_input_data(
self.embeddings_data, model.input_names)
# If embedding_layer_names are not provided, get all of the embedding
# layers from the model.
embeddings_layer_names = self.embeddings_layer_names
if not embeddings_layer_names:
embeddings_layer_names = [
layer.name
for layer in self.model.layers
if type(layer).__name__ == 'Embedding'
]
self.assign_embeddings = []
embeddings_vars = {}
self.batch_id = batch_id = array_ops.placeholder(dtypes.int32)
self.step = step = array_ops.placeholder(dtypes.int32)
for layer in self.model.layers:
if layer.name in embeddings_layer_names:
embedding_input = self.model.get_layer(layer.name).output
embedding_size = np.prod(embedding_input.shape[1:])
embedding_input = array_ops.reshape(embedding_input,
(step, int(embedding_size)))
shape = (self.embeddings_data[0].shape[0], int(embedding_size))
embedding = variables.Variable(
array_ops.zeros(shape), name=layer.name + '_embedding')
embeddings_vars[layer.name] = embedding
batch = state_ops.assign(embedding[batch_id:batch_id + step],
embedding_input)
self.assign_embeddings.append(batch)
self.saver = saver.Saver(list(embeddings_vars.values()))
# Create embeddings_metadata dictionary
if isinstance(self.embeddings_metadata, str):
embeddings_metadata = {
layer_name: self.embeddings_metadata
for layer_name in embeddings_vars.keys()
}
else:
# If embedding_metadata is already a dictionary
embeddings_metadata = self.embeddings_metadata
try:
from tensorboard.plugins import projector
except ImportError:
raise ImportError('Failed to import TensorBoard. Please make sure that '
                        'TensorBoard integration is complete.')
# TODO(psv): Add integration tests to test embedding visualization
# with TensorBoard callback. We are unable to write a unit test for this
# because TensorBoard dependency assumes TensorFlow package is installed.
config = projector.ProjectorConfig()
for layer_name, tensor in embeddings_vars.items():
embedding = config.embeddings.add()
embedding.tensor_name = tensor.name
if (embeddings_metadata is not None and
layer_name in embeddings_metadata):
embedding.metadata_path = embeddings_metadata[layer_name]
projector.visualize_embeddings(self.writer, config)
def _fetch_callback(self, summary):
self.writer.add_summary(summary, self._total_val_batches_seen)
self._total_val_batches_seen += 1
def _write_custom_summaries(self, step, logs=None):
"""Writes metrics out as custom scalar summaries.
Args:
step: the global step to use for TensorBoard.
logs: dict. Keys are scalar summary names, values are
NumPy scalars.
"""
logs = logs or {}
if context.executing_eagerly():
# use v2 summary ops
with self.writer.as_default(), summary_ops_v2.record_if(True):
for name, value in logs.items():
if isinstance(value, np.ndarray):
value = value.item()
summary_ops_v2.scalar(name, value, step=step)
else:
# use FileWriter from v1 summary
for name, value in logs.items():
if isinstance(value, np.ndarray):
value = value.item()
summary = tf_summary.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
self.writer.add_summary(summary, step)
self.writer.flush()
def on_train_batch_begin(self, batch, logs=None):
if self._total_batches_seen == self._profile_batch - 1:
self._start_profiler()
def on_train_batch_end(self, batch, logs=None):
return self.on_batch_end(batch, logs)
def on_test_begin(self, logs=None):
pass
def on_test_end(self, logs=None):
pass
def on_batch_end(self, batch, logs=None):
"""Writes scalar summaries for metrics on every training batch.
Performs profiling if current batch is in profiler_batches.
"""
# Don't output batch_size and batch number as TensorBoard summaries
logs = logs or {}
self._samples_seen += logs.get('size', 1)
samples_seen_since = self._samples_seen - self._samples_seen_at_last_write
if self.update_freq != 'epoch' and samples_seen_since >= self.update_freq:
batch_logs = {('batch_' + k): v
for k, v in logs.items()
if k not in ['batch', 'size', 'num_steps']}
self._write_custom_summaries(self._total_batches_seen, batch_logs)
self._samples_seen_at_last_write = self._samples_seen
self._total_batches_seen += 1
self._stop_profiler()
def on_train_begin(self, logs=None):
pass
def on_epoch_begin(self, epoch, logs=None):
"""Add histogram op to Model eval_function callbacks, reset batch count."""
# check if histogram summary should be run for this epoch
if self.histogram_freq and epoch % self.histogram_freq == 0:
# pylint: disable=protected-access
# add the histogram summary op if it should run this epoch
self.model._make_test_function()
if self.merged not in self.model.test_function.fetches:
self.model.test_function.fetches.append(self.merged)
self.model.test_function.fetch_callbacks[
self.merged] = self._fetch_callback
# pylint: enable=protected-access
def on_epoch_end(self, epoch, logs=None):
"""Checks if summary ops should run next epoch, logs scalar summaries."""
# don't output batch_size and
# batch number as TensorBoard summaries
logs = {('epoch_' + k): v
for k, v in logs.items()
if k not in ['batch', 'size', 'num_steps']}
if self.update_freq == 'epoch':
step = epoch
else:
step = self._samples_seen
self._write_custom_summaries(step, logs)
# pop the histogram summary op after each epoch
if self.histogram_freq:
# pylint: disable=protected-access
if self.merged in self.model.test_function.fetches:
self.model.test_function.fetches.remove(self.merged)
if self.merged in self.model.test_function.fetch_callbacks:
self.model.test_function.fetch_callbacks.pop(self.merged)
# pylint: enable=protected-access
if self.embeddings_data is None and self.embeddings_freq:
raise ValueError('To visualize embeddings, embeddings_data must '
'be provided.')
if self.embeddings_freq and self.embeddings_data is not None:
if epoch % self.embeddings_freq == 0:
# We need a second forward-pass here because we're passing
# the `embeddings_data` explicitly. This design allows to pass
# arbitrary data as `embeddings_data` and results from the fact
# that we need to know the size of the `tf.Variable`s which
# hold the embeddings in `set_model`. At this point, however,
# the `validation_data` is not yet set.
embeddings_data = self.embeddings_data
n_samples = embeddings_data[0].shape[0]
i = 0
sess = K.get_session()
while i < n_samples:
step = min(self.batch_size, n_samples - i)
batch = slice(i, i + step)
if isinstance(self.model.input, list):
feed_dict = {
model_input: embeddings_data[idx][batch]
for idx, model_input in enumerate(self.model.input)
}
else:
feed_dict = {self.model.input: embeddings_data[0][batch]}
feed_dict.update({self.batch_id: i, self.step: step})
if not isinstance(K.learning_phase(), int):
feed_dict[K.learning_phase()] = False
sess.run(self.assign_embeddings, feed_dict=feed_dict)
self.saver.save(sess,
os.path.join(self.log_dir, 'keras_embedding.ckpt'),
epoch)
i += self.batch_size
def on_train_end(self, logs=None):
self._stop_profiler()
self.writer.close()
def _start_profiler(self):
"""Starts the profiler if currently inactive."""
if self._profiler_started:
return
try:
profiler.start(logdir=self.log_dir)
self._profiler_started = True
except errors.AlreadyExistsError as e:
# Profiler errors should not be fatal.
logging.error('Failed to start profiler: %s', e.message)
def _stop_profiler(self):
"""Stops the profiler if currently active."""
if not self._profiler_started:
return
try:
profiler.stop()
except errors.UnavailableError as e:
# Profiler errors should not be fatal.
logging.error('Failed to stop profiler: %s', e.message)
finally:
self._profiler_started = False | PypiClean |
/test_stockpyl-0.0.9-py3-none-any.whl/stockpyl/instances.py | import csv
import datetime
import json
# import copy
import os
import warnings
from copy import deepcopy
import jsonpickle
from stockpyl.supply_chain_network import *
from stockpyl.supply_chain_node import *
#: Default path to JSON file containing built-in instances. Relative to 'src' directory.
DEFAULT_JSON_FILEPATH = "../datasets/stockpyl_instances.json"
def load_instance(
instance_name,
filepath=DEFAULT_JSON_FILEPATH,
initialize_missing_attributes=True,
ignore_state_vars=True,
):
"""Load an instance from a JSON file.
If the instance was originally specified as a |class_network| object, returns the
object; otherwise, returns the instance in a dictionary.
Parameters
----------
instance_name : str
The name of the instance.
filepath : str, optional
Path to the JSON file. If ``None``, ``DEFAULT_JSON_FILEPATH`` is used.
initialize_missing_attributes : bool, optional
If ``True``, function will ensure that all attributes are present in the instance loaded,
initializing any missing attributes to their default values. (Typically this is only set
to ``False`` for debugging purposes.)
ignore_state_vars : bool, optional
If ``True``, function will ignore any saved state variables in the nodes.
Returns
-------
dict or |class_network|
The loaded instance. If the instance was originally specified as a |class_network|
object, returns the object; otherwise, returns the instance in a dictionary in which
the keys equal the parameter names (e.g., "holding_cost") and the values equal the parameter
values (e.g., 0.5).
Raises
------
ValueError
If the JSON file does not exist or the instance cannot be found in the JSON file.
"""
# Does JSON file exist?
if os.path.exists(filepath):
# Use this path.
new_path = filepath
else:
# Try changing working directory to project root (stockpyl/). This is mainly a workaround for
# when this function is called from doctests.
one_level_up = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
new_path = os.path.join(one_level_up, filepath)
if not os.path.exists(new_path):
raise FileNotFoundError(
f"The JSON file {os.path.abspath(filepath)} was not found"
)
# Load data from JSON.
with open(new_path) as f:
json_contents = json.load(f)
# Look for instance. (https://stackoverflow.com/a/8653568/3453768)
instance_index = next(
(
i
for i, item in enumerate(json_contents["instances"])
if item["name"] == instance_name
),
None,
)
# Was instance found?
if instance_index is None:
raise KeyError("The speficied instance name was not found")
# Get instance (in case it was jsonpickled).
instance = json_contents["instances"][instance_index]["data"]
# Try to decode instance using jsonpickle. This will fail if the
# instance is a regular dict, in which case we'll just return the dict.
try:
instance = jsonpickle.decode(instance)
# Replace the instance with a deep copy of it. This is important because if there are
# missing attributes in the saved instance (which can happen if the instance was
# saved under an earlier version of the code and a new field was introduced subsequently),
# the deep copy will contain default values for those attributes.
if initialize_missing_attributes:
instance.initialize(overwrite=False)
# Delete the state variables and replace with initialized version,
# if ignore_state_variables = True.
if ignore_state_vars:
for n in instance.nodes:
n.state_vars = []
return instance
except TypeError as e:
# If the instance contains any dicts with integer keys, they will have
# been saved as strings when the JSON was saved. Convert them back to integers here.
# Currently, only demand_pmf has this issue.
if "demand_pmf" in instance.keys():
instance["demand_pmf"] = {
int(k): v for k, v in instance["demand_pmf"].items()
}
return instance
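# Example (hypothetical sketch; the instance name depends on what the JSON file
# actually contains):
#
#     network = load_instance("example_6_1")
#     node_indices = [n.index for n in network.nodes]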
def save_instance(
instance_name,
instance_data,
instance_description="",
filepath=DEFAULT_JSON_FILEPATH,
replace=True,
create_if_none=True,
omit_state_vars=True,
):
"""Save an instance to a JSON file.
Parameters
----------
instance_name : str
        The name of the instance. This will be used later for retrieving the instance.
instance_data : dict or SupplyChainNetwork
The instance data as a dictionary (with keys equal to parameter names (e.g., "holding_cost")
and values equal to parameter values (e.g., 0.5)) or as a |class_network| object
(in which case the instance is serialized using :mod:`jsonpickle`).
instance_description : str, optional
        A longer description of the instance.
filepath : str, optional
Path to the JSON file. If ``None``, ``DEFAULT_JSON_FILEPATH`` is used.
replace : bool, optional
If an instance with the same ``instance_name`` is already in the file, the function
will replace it if ``True`` and will ignore it (and write nothing) if ``False``.
create_if_none : bool, optional
If the file does not already exist, the function will create a new file if ``True``;
otherwise, it will not do anything and issue a warning.
omit_state_vars : bool, optional
If ``True``, the function will not save state variables as part of the nodes,
even if they are present in the instance. # TODO
"""
# Does JSON file exist?
if os.path.exists(filepath):
# Load data from JSON.
with open(filepath) as f:
json_contents = json.load(f)
else:
# Should we create it?
if create_if_none:
json_contents = {"_id": "", "instances": [], "last_updated": ""}
else:
warnings.warn(
"filepath does not exist and create_if_none is False; no action was taken"
)
return
# Look for instance. (https://stackoverflow.com/a/8653568/3453768)
instance_index = next(
(
i
for i, item in enumerate(json_contents["instances"])
if item["name"] == instance_name
),
None,
)
# Was instance found?
if instance_index is not None:
if not replace:
return
# Make local copy of network and omit state variables, if requested.
local_copy = deepcopy(instance_data)
if omit_state_vars:
for n in local_copy.nodes:
n.state_vars = None
# Was data provided as dict or SupplyChainNetwork?
if isinstance(local_copy, dict):
data = local_copy
else:
# Assume SupplyChainNetwork.
data = jsonpickle.encode(local_copy)
# Create dictionary with instance metadata and data.
instance_dict = {
"name": instance_name,
"description": instance_description,
"data": data,
}
# Add (or replace) instance.
    if instance_index is not None:
# We already know replace is True, otherwise we would have exited already.
json_contents["instances"][instance_index] = instance_dict
else:
json_contents["instances"].append(instance_dict)
json_contents["last_updated"] = f"{datetime.datetime.now()}"
# If the instance contains any dicts with integer keys, they will be
# saved as strings when the JSON is saved. load_instance() converts them back to integers.
# Currently, only demand_pmf has this issue.
# Make sure path exists; if not, create it.
os.makedirs(os.path.dirname(filepath), exist_ok=True)
# Write all instances to JSON.
with open(filepath, "w") as f:
json.dump(json_contents, f)
# Close file.
f.close()
def _save_summary_to_csv(save_filepath, json_filepath=DEFAULT_JSON_FILEPATH):
"""Save a CSV file with a summary of the instances in a JSON file.
Main purpose of this method is to build the CSV file that populates the table
at the top of this page.
Parameters
----------
save_filepath : str
Path to the CSV file to create.
json_filepath : str, optional
Path to the JSON file. If ``None``, ``DEFAULT_JSON_FILEPATH`` is used.
"""
# Load JSON file.
with open(json_filepath) as f:
json_contents = json.load(f)
# Write to CSV.
with open(save_filepath, "w", newline="") as csvfile:
instance_writer = csv.writer(
csvfile, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
)
instance_writer.writerow(["Name", "Description"])
for instance in json_contents["instances"]:
instance_writer.writerow([instance["name"], instance["description"]])
f.close()
csvfile.close() | PypiClean |
/sdksio_juniper_mist_sdk-1.0.0-py3-none-any.whl/mistapi/models/deviceprofile.py | from mistapi.api_helper import APIHelper
from mistapi.models.ap_aeroscout import ApAeroscout
from mistapi.models.ap_ble import ApBle
from mistapi.models.ap_iot import ApIot
from mistapi.models.ap_ip import ApIp
from mistapi.models.ap_led import ApLed
from mistapi.models.ap_mesh import ApMesh
from mistapi.models.ap_radio import ApRadio
from mistapi.models.ap_switch import ApSwitch
from mistapi.models.ap_usb import ApUsb
from mistapi.models.dhcpd_config import DhcpdConfig
from mistapi.models.extra_routes_2 import ExtraRoutes2
from mistapi.models.gatewaytemplate_tunnel_configs import GatewaytemplateTunnelConfigs
from mistapi.models.idp_profiles import IdpProfiles
from mistapi.models.ip_configs import IpConfigs
from mistapi.models.junos_bgp_config import JunosBgpConfig
from mistapi.models.junos_oob_ip_config import JunosOobIpConfig
from mistapi.models.junos_routing_policy import JunosRoutingPolicy
from mistapi.models.network import Network
from mistapi.models.path_preferences import PathPreferences
from mistapi.models.port_config import PortConfig
from mistapi.models.pwr_config_1 import PwrConfig1
from mistapi.models.service_policy_2 import ServicePolicy2
from mistapi.models.template_gateway_matching import TemplateGatewayMatching
class Deviceprofile(object):
"""Implementation of the 'deviceprofile' model.
TODO: type model description here.
Attributes:
aeroscout (ApAeroscout): Aeroscout AP settings
ble_config (ApBle): BLE AP settings
created_time (float): TODO: type description here.
disable_eth_1 (bool): whether to disable eth1 port
disable_eth_2 (bool): whether to disable eth2 port
disable_eth_3 (bool): whether to disable eth3 port
disable_module (bool): whether to disable module port
for_site (bool): TODO: type description here.
height (float): Device Only. Height, in meters, optional
id (uuid|string): TODO: type description here.
iot_config (ApIot): IoT AP settings
ip_config (ApIp): IP AP settings
led (ApLed): LED AP settings
        map_id (uuid|string): Device Only. Map where the device belongs to
mesh (ApMesh): Mesh AP settings
modified_time (float): TODO: type description here.
name (string): TODO: type description here.
notes (string): Device Only. Any notes about this AP
ntp_servers (list of string): TODO: type description here.
org_id (uuid|string): TODO: type description here.
orientation (float): Device Only. Orientation, 0-359, in degrees, up
is 0, right is 90.
orientation_overwrite (bool): whether the user overwrite the
orientation
poe_passthrough (bool): whether to enable power out through module
port (for APH) or eth1 (for APL/BT11)
port_config (PortConfig): The property key is the interface(s) name
(e.g. "eth1,eth2")
pwr_config (PwrConfig1): power related configs
radio_config (ApRadio): Radio AP settings
site_id (uuid|string): TODO: type description here.
switch_config (ApSwitch): for people who want to fully control the
vlans (advanced)
mtype (Type16Enum): TODO: type description here.
        usb_config (ApUsb): USB AP settings. Note: if native imagotag is
            enabled, BLE will be disabled automatically. Note: legacy, new
            config moved to ESL Config.
vars (object): a dictionary of name->value, the vars can then be used
in Wlans. This can overwrite those from Site Vars
x (float): Device Only. x in pixel
y (float): Device Only. y in pixel
additional_config_cmds (list of string): TODO: type description here.
bgp_config (dict): TODO: type description here.
dhcpd_config (dict): The property key is the network name
extra_routes (dict): TODO: type description here.
gateway_matching (TemplateGatewayMatching): Gateway matching
idp_profiles (IdpProfiles): Property key is the profile name
ip_configs (dict): The property key is the network name
networks (dict): The property key is the network name
oob_ip_config (JunosOobIpConfig): Junos out-of-band (vme/em0/fxp0) IP
config
path_preferences (dict): experimental
routing_policies (dict): The property key is the routing policy name
service_policies (list of ServicePolicy2): TODO: type description
here.
tunnel_configs (dict): Property key is the tunnel name
"""
# Create a mapping from Model property names to API property names
_names = {
"aeroscout": 'aeroscout',
"ble_config": 'ble_config',
"created_time": 'created_time',
"disable_eth_1": 'disable_eth1',
"disable_eth_2": 'disable_eth2',
"disable_eth_3": 'disable_eth3',
"disable_module": 'disable_module',
"for_site": 'for_site',
"height": 'height',
"id": 'id',
"iot_config": 'iot_config',
"ip_config": 'ip_config',
"led": 'led',
"map_id": 'map_id',
"mesh": 'mesh',
"modified_time": 'modified_time',
"name": 'name',
"notes": 'notes',
"ntp_servers": 'ntp_servers',
"org_id": 'org_id',
"orientation": 'orientation',
"orientation_overwrite": 'orientation_overwrite',
"poe_passthrough": 'poe_passthrough',
"port_config": 'port_config',
"pwr_config": 'pwr_config',
"radio_config": 'radio_config',
"site_id": 'site_id',
"switch_config": 'switch_config',
"mtype": 'type',
"usb_config": 'usb_config',
"vars": 'vars',
"x": 'x',
"y": 'y',
"additional_config_cmds": 'additional_config_cmds',
"bgp_config": 'bgp_config',
"dhcpd_config": 'dhcpd_config',
"extra_routes": 'extra_routes',
"gateway_matching": 'gateway_matching',
"idp_profiles": 'idp_profiles',
"ip_configs": 'ip_configs',
"networks": 'networks',
"oob_ip_config": 'oob_ip_config',
"path_preferences": 'path_preferences',
"routing_policies": 'routing_policies',
"service_policies": 'service_policies',
"tunnel_configs": 'tunnel_configs'
}
_optionals = [
'aeroscout',
'ble_config',
'created_time',
'disable_eth_1',
'disable_eth_2',
'disable_eth_3',
'disable_module',
'for_site',
'height',
'id',
'iot_config',
'ip_config',
'led',
'map_id',
'mesh',
'modified_time',
'name',
'notes',
'ntp_servers',
'org_id',
'orientation',
'orientation_overwrite',
'poe_passthrough',
'port_config',
'pwr_config',
'radio_config',
'site_id',
'switch_config',
'mtype',
'usb_config',
'vars',
'x',
'y',
'additional_config_cmds',
'bgp_config',
'dhcpd_config',
'extra_routes',
'gateway_matching',
'idp_profiles',
'ip_configs',
'networks',
'oob_ip_config',
'path_preferences',
'routing_policies',
'service_policies',
'tunnel_configs',
]
_nullables = [
'height',
'map_id',
'name',
'notes',
'x',
'y',
]
def __init__(self,
aeroscout=APIHelper.SKIP,
ble_config=APIHelper.SKIP,
created_time=APIHelper.SKIP,
disable_eth_1=False,
disable_eth_2=False,
disable_eth_3=False,
disable_module=False,
for_site=APIHelper.SKIP,
height=APIHelper.SKIP,
id=APIHelper.SKIP,
iot_config=APIHelper.SKIP,
ip_config=APIHelper.SKIP,
led=APIHelper.SKIP,
map_id=APIHelper.SKIP,
mesh=APIHelper.SKIP,
modified_time=APIHelper.SKIP,
name=APIHelper.SKIP,
notes=APIHelper.SKIP,
ntp_servers=APIHelper.SKIP,
org_id=APIHelper.SKIP,
orientation=APIHelper.SKIP,
orientation_overwrite=APIHelper.SKIP,
poe_passthrough=False,
port_config=APIHelper.SKIP,
pwr_config=APIHelper.SKIP,
radio_config=APIHelper.SKIP,
site_id=APIHelper.SKIP,
switch_config=APIHelper.SKIP,
mtype='ap',
usb_config=APIHelper.SKIP,
vars=APIHelper.SKIP,
x=APIHelper.SKIP,
y=APIHelper.SKIP,
additional_config_cmds=APIHelper.SKIP,
bgp_config=APIHelper.SKIP,
dhcpd_config=APIHelper.SKIP,
extra_routes=APIHelper.SKIP,
gateway_matching=APIHelper.SKIP,
idp_profiles=APIHelper.SKIP,
ip_configs=APIHelper.SKIP,
networks=APIHelper.SKIP,
oob_ip_config=APIHelper.SKIP,
path_preferences=APIHelper.SKIP,
routing_policies=APIHelper.SKIP,
service_policies=APIHelper.SKIP,
tunnel_configs=APIHelper.SKIP):
"""Constructor for the Deviceprofile class"""
# Initialize members of the class
if aeroscout is not APIHelper.SKIP:
self.aeroscout = aeroscout
if ble_config is not APIHelper.SKIP:
self.ble_config = ble_config
if created_time is not APIHelper.SKIP:
self.created_time = created_time
self.disable_eth_1 = disable_eth_1
self.disable_eth_2 = disable_eth_2
self.disable_eth_3 = disable_eth_3
self.disable_module = disable_module
if for_site is not APIHelper.SKIP:
self.for_site = for_site
if height is not APIHelper.SKIP:
self.height = height
if id is not APIHelper.SKIP:
self.id = id
if iot_config is not APIHelper.SKIP:
self.iot_config = iot_config
if ip_config is not APIHelper.SKIP:
self.ip_config = ip_config
if led is not APIHelper.SKIP:
self.led = led
if map_id is not APIHelper.SKIP:
self.map_id = map_id
if mesh is not APIHelper.SKIP:
self.mesh = mesh
if modified_time is not APIHelper.SKIP:
self.modified_time = modified_time
if name is not APIHelper.SKIP:
self.name = name
if notes is not APIHelper.SKIP:
self.notes = notes
if ntp_servers is not APIHelper.SKIP:
self.ntp_servers = ntp_servers
if org_id is not APIHelper.SKIP:
self.org_id = org_id
if orientation is not APIHelper.SKIP:
self.orientation = orientation
if orientation_overwrite is not APIHelper.SKIP:
self.orientation_overwrite = orientation_overwrite
self.poe_passthrough = poe_passthrough
if port_config is not APIHelper.SKIP:
self.port_config = port_config
if pwr_config is not APIHelper.SKIP:
self.pwr_config = pwr_config
if radio_config is not APIHelper.SKIP:
self.radio_config = radio_config
if site_id is not APIHelper.SKIP:
self.site_id = site_id
if switch_config is not APIHelper.SKIP:
self.switch_config = switch_config
self.mtype = mtype
if usb_config is not APIHelper.SKIP:
self.usb_config = usb_config
if vars is not APIHelper.SKIP:
self.vars = vars
if x is not APIHelper.SKIP:
self.x = x
if y is not APIHelper.SKIP:
self.y = y
if additional_config_cmds is not APIHelper.SKIP:
self.additional_config_cmds = additional_config_cmds
if bgp_config is not APIHelper.SKIP:
self.bgp_config = bgp_config
if dhcpd_config is not APIHelper.SKIP:
self.dhcpd_config = dhcpd_config
if extra_routes is not APIHelper.SKIP:
self.extra_routes = extra_routes
if gateway_matching is not APIHelper.SKIP:
self.gateway_matching = gateway_matching
if idp_profiles is not APIHelper.SKIP:
self.idp_profiles = idp_profiles
if ip_configs is not APIHelper.SKIP:
self.ip_configs = ip_configs
if networks is not APIHelper.SKIP:
self.networks = networks
if oob_ip_config is not APIHelper.SKIP:
self.oob_ip_config = oob_ip_config
if path_preferences is not APIHelper.SKIP:
self.path_preferences = path_preferences
if routing_policies is not APIHelper.SKIP:
self.routing_policies = routing_policies
if service_policies is not APIHelper.SKIP:
self.service_policies = service_policies
if tunnel_configs is not APIHelper.SKIP:
self.tunnel_configs = tunnel_configs
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object
as obtained from the deserialization of the server's response. The
keys MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
aeroscout = ApAeroscout.from_dictionary(dictionary.get('aeroscout')) if 'aeroscout' in dictionary.keys() else APIHelper.SKIP
ble_config = ApBle.from_dictionary(dictionary.get('ble_config')) if 'ble_config' in dictionary.keys() else APIHelper.SKIP
created_time = dictionary.get("created_time") if dictionary.get("created_time") else APIHelper.SKIP
disable_eth_1 = dictionary.get("disable_eth1") if dictionary.get("disable_eth1") else False
disable_eth_2 = dictionary.get("disable_eth2") if dictionary.get("disable_eth2") else False
disable_eth_3 = dictionary.get("disable_eth3") if dictionary.get("disable_eth3") else False
disable_module = dictionary.get("disable_module") if dictionary.get("disable_module") else False
for_site = dictionary.get("for_site") if "for_site" in dictionary.keys() else APIHelper.SKIP
height = dictionary.get("height") if "height" in dictionary.keys() else APIHelper.SKIP
id = dictionary.get("id") if dictionary.get("id") else APIHelper.SKIP
iot_config = ApIot.from_dictionary(dictionary.get('iot_config')) if 'iot_config' in dictionary.keys() else APIHelper.SKIP
ip_config = ApIp.from_dictionary(dictionary.get('ip_config')) if 'ip_config' in dictionary.keys() else APIHelper.SKIP
led = ApLed.from_dictionary(dictionary.get('led')) if 'led' in dictionary.keys() else APIHelper.SKIP
map_id = dictionary.get("map_id") if "map_id" in dictionary.keys() else APIHelper.SKIP
mesh = ApMesh.from_dictionary(dictionary.get('mesh')) if 'mesh' in dictionary.keys() else APIHelper.SKIP
modified_time = dictionary.get("modified_time") if dictionary.get("modified_time") else APIHelper.SKIP
name = dictionary.get("name") if "name" in dictionary.keys() else APIHelper.SKIP
notes = dictionary.get("notes") if "notes" in dictionary.keys() else APIHelper.SKIP
ntp_servers = dictionary.get("ntp_servers") if dictionary.get("ntp_servers") else APIHelper.SKIP
org_id = dictionary.get("org_id") if dictionary.get("org_id") else APIHelper.SKIP
orientation = dictionary.get("orientation") if dictionary.get("orientation") else APIHelper.SKIP
orientation_overwrite = dictionary.get("orientation_overwrite") if "orientation_overwrite" in dictionary.keys() else APIHelper.SKIP
poe_passthrough = dictionary.get("poe_passthrough") if dictionary.get("poe_passthrough") else False
port_config = PortConfig.from_dictionary(dictionary.get('port_config')) if 'port_config' in dictionary.keys() else APIHelper.SKIP
pwr_config = PwrConfig1.from_dictionary(dictionary.get('pwr_config')) if 'pwr_config' in dictionary.keys() else APIHelper.SKIP
radio_config = ApRadio.from_dictionary(dictionary.get('radio_config')) if 'radio_config' in dictionary.keys() else APIHelper.SKIP
site_id = dictionary.get("site_id") if dictionary.get("site_id") else APIHelper.SKIP
switch_config = ApSwitch.from_dictionary(dictionary.get('switch_config')) if 'switch_config' in dictionary.keys() else APIHelper.SKIP
mtype = dictionary.get("type") if dictionary.get("type") else 'ap'
usb_config = ApUsb.from_dictionary(dictionary.get('usb_config')) if 'usb_config' in dictionary.keys() else APIHelper.SKIP
vars = dictionary.get("vars") if dictionary.get("vars") else APIHelper.SKIP
x = dictionary.get("x") if "x" in dictionary.keys() else APIHelper.SKIP
y = dictionary.get("y") if "y" in dictionary.keys() else APIHelper.SKIP
additional_config_cmds = dictionary.get("additional_config_cmds") if dictionary.get("additional_config_cmds") else APIHelper.SKIP
bgp_config = JunosBgpConfig.from_dictionary(dictionary.get('bgp_config')) if 'bgp_config' in dictionary.keys() else APIHelper.SKIP
dhcpd_config = DhcpdConfig.from_dictionary(dictionary.get('dhcpd_config')) if 'dhcpd_config' in dictionary.keys() else APIHelper.SKIP
extra_routes = ExtraRoutes2.from_dictionary(dictionary.get('extra_routes')) if 'extra_routes' in dictionary.keys() else APIHelper.SKIP
gateway_matching = TemplateGatewayMatching.from_dictionary(dictionary.get('gateway_matching')) if 'gateway_matching' in dictionary.keys() else APIHelper.SKIP
idp_profiles = IdpProfiles.from_dictionary(dictionary.get('idp_profiles')) if 'idp_profiles' in dictionary.keys() else APIHelper.SKIP
ip_configs = IpConfigs.from_dictionary(dictionary.get('ip_configs')) if 'ip_configs' in dictionary.keys() else APIHelper.SKIP
networks = Network.from_dictionary(dictionary.get('networks')) if 'networks' in dictionary.keys() else APIHelper.SKIP
oob_ip_config = JunosOobIpConfig.from_dictionary(dictionary.get('oob_ip_config')) if 'oob_ip_config' in dictionary.keys() else APIHelper.SKIP
path_preferences = PathPreferences.from_dictionary(dictionary.get('path_preferences')) if 'path_preferences' in dictionary.keys() else APIHelper.SKIP
routing_policies = JunosRoutingPolicy.from_dictionary(dictionary.get('routing_policies')) if 'routing_policies' in dictionary.keys() else APIHelper.SKIP
service_policies = None
if dictionary.get('service_policies') is not None:
service_policies = [ServicePolicy2.from_dictionary(x) for x in dictionary.get('service_policies')]
else:
service_policies = APIHelper.SKIP
tunnel_configs = GatewaytemplateTunnelConfigs.from_dictionary(dictionary.get('tunnel_configs')) if 'tunnel_configs' in dictionary.keys() else APIHelper.SKIP
# Return an object of this model
return cls(aeroscout,
ble_config,
created_time,
disable_eth_1,
disable_eth_2,
disable_eth_3,
disable_module,
for_site,
height,
id,
iot_config,
ip_config,
led,
map_id,
mesh,
modified_time,
name,
notes,
ntp_servers,
org_id,
orientation,
orientation_overwrite,
poe_passthrough,
port_config,
pwr_config,
radio_config,
site_id,
switch_config,
mtype,
usb_config,
vars,
x,
y,
additional_config_cmds,
bgp_config,
dhcpd_config,
extra_routes,
gateway_matching,
idp_profiles,
ip_configs,
networks,
oob_ip_config,
path_preferences,
routing_policies,
service_policies,
tunnel_configs) | PypiClean |
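# A minimal usage sketch (assumption: the module-level imports used by this model,
# e.g. APIHelper and the nested Ap* models, are available as in the rest of the
# package; the sample payload below is hypothetical). It shows how from_dictionary
# maps raw API keys such as "disable_eth1" onto the model's snake_case attributes.
if __name__ == '__main__':
    sample = {
        'name': 'office-ap-profile',
        'height': 2.5,
        'disable_eth1': True,
        'type': 'ap',
    }
    profile = Deviceprofile.from_dictionary(sample)
    print(profile.name, profile.height, profile.disable_eth_1, profile.mtype)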
/youtube_dl_fork-2022.6.30.tar.gz/youtube_dl_fork-2022.6.30/youtube_dl/extractor/microsoftvirtualacademy.py | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_xpath,
)
from ..utils import (
int_or_none,
parse_duration,
smuggle_url,
unsmuggle_url,
xpath_text,
)
class MicrosoftVirtualAcademyBaseIE(InfoExtractor):
def _extract_base_url(self, course_id, display_id):
return self._download_json(
'https://api-mlxprod.microsoft.com/services/products/anonymous/%s' % course_id,
display_id, 'Downloading course base URL')
def _extract_chapter_and_title(self, title):
if not title:
return None, None
m = re.search(r'(?P<chapter>\d+)\s*\|\s*(?P<title>.+)', title)
return (int(m.group('chapter')), m.group('title')) if m else (None, title)
class MicrosoftVirtualAcademyIE(MicrosoftVirtualAcademyBaseIE):
IE_NAME = 'mva'
IE_DESC = 'Microsoft Virtual Academy videos'
_VALID_URL = r'(?:%s:|https?://(?:mva\.microsoft|(?:www\.)?microsoftvirtualacademy)\.com/[^/]+/training-courses/[^/?#&]+-)(?P<course_id>\d+)(?::|\?l=)(?P<id>[\da-zA-Z]+_\d+)' % IE_NAME
_TESTS = [{
'url': 'https://mva.microsoft.com/en-US/training-courses/microsoft-azure-fundamentals-virtual-machines-11788?l=gfVXISmEB_6804984382',
'md5': '7826c44fc31678b12ad8db11f6b5abb9',
'info_dict': {
'id': 'gfVXISmEB_6804984382',
'ext': 'mp4',
'title': 'Course Introduction',
'formats': 'mincount:3',
'subtitles': {
'en': [{
'ext': 'ttml',
}],
},
}
}, {
'url': 'mva:11788:gfVXISmEB_6804984382',
'only_matching': True,
}]
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
mobj = re.match(self._VALID_URL, url)
course_id = mobj.group('course_id')
video_id = mobj.group('id')
base_url = smuggled_data.get('base_url') or self._extract_base_url(course_id, video_id)
settings = self._download_xml(
'%s/content/content_%s/videosettings.xml?v=1' % (base_url, video_id),
video_id, 'Downloading video settings XML')
_, title = self._extract_chapter_and_title(xpath_text(
settings, './/Title', 'title', fatal=True))
formats = []
for sources in settings.findall(compat_xpath('.//MediaSources')):
sources_type = sources.get('videoType')
for source in sources.findall(compat_xpath('./MediaSource')):
video_url = source.text
if not video_url or not video_url.startswith('http'):
continue
if sources_type == 'smoothstreaming':
formats.extend(self._extract_ism_formats(
video_url, video_id, 'mss', fatal=False))
continue
video_mode = source.get('videoMode')
height = int_or_none(self._search_regex(
r'^(\d+)[pP]$', video_mode or '', 'height', default=None))
codec = source.get('codec')
acodec, vcodec = [None] * 2
if codec:
codecs = codec.split(',')
if len(codecs) == 2:
acodec, vcodec = codecs
elif len(codecs) == 1:
vcodec = codecs[0]
formats.append({
'url': video_url,
'format_id': video_mode,
'height': height,
'acodec': acodec,
'vcodec': vcodec,
})
self._sort_formats(formats)
subtitles = {}
for source in settings.findall(compat_xpath('.//MarkerResourceSource')):
subtitle_url = source.text
if not subtitle_url:
continue
subtitles.setdefault('en', []).append({
'url': '%s/%s' % (base_url, subtitle_url),
'ext': source.get('type'),
})
return {
'id': video_id,
'title': title,
'subtitles': subtitles,
'formats': formats
}
class MicrosoftVirtualAcademyCourseIE(MicrosoftVirtualAcademyBaseIE):
IE_NAME = 'mva:course'
IE_DESC = 'Microsoft Virtual Academy courses'
_VALID_URL = r'(?:%s:|https?://(?:mva\.microsoft|(?:www\.)?microsoftvirtualacademy)\.com/[^/]+/training-courses/(?P<display_id>[^/?#&]+)-)(?P<id>\d+)' % IE_NAME
_TESTS = [{
'url': 'https://mva.microsoft.com/en-US/training-courses/microsoft-azure-fundamentals-virtual-machines-11788',
'info_dict': {
'id': '11788',
'title': 'Microsoft Azure Fundamentals: Virtual Machines',
},
'playlist_count': 36,
}, {
# with emphasized chapters
'url': 'https://mva.microsoft.com/en-US/training-courses/developing-windows-10-games-with-construct-2-16335',
'info_dict': {
'id': '16335',
'title': 'Developing Windows 10 Games with Construct 2',
},
'playlist_count': 10,
}, {
'url': 'https://www.microsoftvirtualacademy.com/en-US/training-courses/microsoft-azure-fundamentals-virtual-machines-11788',
'only_matching': True,
}, {
'url': 'mva:course:11788',
'only_matching': True,
}]
@classmethod
def suitable(cls, url):
return False if MicrosoftVirtualAcademyIE.suitable(url) else super(
MicrosoftVirtualAcademyCourseIE, cls).suitable(url)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
course_id = mobj.group('id')
display_id = mobj.group('display_id')
base_url = self._extract_base_url(course_id, display_id)
manifest = self._download_json(
'%s/imsmanifestlite.json' % base_url,
display_id, 'Downloading course manifest JSON')['manifest']
organization = manifest['organizations']['organization'][0]
entries = []
for chapter in organization['item']:
chapter_number, chapter_title = self._extract_chapter_and_title(chapter.get('title'))
chapter_id = chapter.get('@identifier')
for item in chapter.get('item', []):
item_id = item.get('@identifier')
if not item_id:
continue
metadata = item.get('resource', {}).get('metadata') or {}
if metadata.get('learningresourcetype') != 'Video':
continue
_, title = self._extract_chapter_and_title(item.get('title'))
duration = parse_duration(metadata.get('duration'))
description = metadata.get('description')
entries.append({
'_type': 'url_transparent',
'url': smuggle_url(
'mva:%s:%s' % (course_id, item_id), {'base_url': base_url}),
'title': title,
'description': description,
'duration': duration,
'chapter': chapter_title,
'chapter_number': chapter_number,
'chapter_id': chapter_id,
})
title = organization.get('title') or manifest.get('metadata', {}).get('title')
return self.playlist_result(entries, course_id, title) | PypiClean |
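# Illustrative check (not part of youtube-dl's own test harness): the _VALID_URL
# patterns above decide which extractor claims a URL, and suitable() on the course
# extractor defers to the single-video extractor when a lesson id is present.
# The URLs below are taken from the _TESTS entries in this file.
if __name__ == '__main__':
    video_url = 'https://mva.microsoft.com/en-US/training-courses/microsoft-azure-fundamentals-virtual-machines-11788?l=gfVXISmEB_6804984382'
    course_url = 'https://mva.microsoft.com/en-US/training-courses/microsoft-azure-fundamentals-virtual-machines-11788'
    print(bool(re.match(MicrosoftVirtualAcademyIE._VALID_URL, video_url)))   # True
    print(bool(re.match(MicrosoftVirtualAcademyIE._VALID_URL, course_url)))  # False
    print(MicrosoftVirtualAcademyCourseIE.suitable(course_url))              # True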
/diva-boiler-0.2.2.tar.gz/diva-boiler-0.2.2/boiler/__init__.py | import datetime
import json
import os
import platform
import sys
import traceback
from typing import Any, Optional
from urllib.parse import urlparse
import click
from packaging.version import parse as parse_version
from pkg_resources import DistributionNotFound, get_distribution
import requests
from requests.exceptions import RequestException
from requests_toolbelt.sessions import BaseUrlSession
import sentry_sdk
from sentry_sdk import capture_exception, configure_scope, push_scope # type: ignore
from xdg import BaseDirectory
__version__ = None
try:
__version__ = get_distribution('diva-boiler').version
except DistributionNotFound:
pass
BOILER_CONFIG_PATH = __name__
BOILER_CONFIG_FILE = 'config'
SENTRY_DSN = 'https://639591f20e1148c3a65e5e9237c6afc1@o157137.ingest.sentry.io/5224648'
class BoilerException(Exception):
pass
class BoilerWarning(BoilerException):
pass
class BoilerError(BoilerException):
pass
class BoilerSession(BaseUrlSession):
page_size = 50
def __init__(self, base_url: str, token: Optional[str]):
base_url = f'{base_url.rstrip("/")}/' # tolerate input with or without trailing slash
super(BoilerSession, self).__init__(base_url=base_url)
if token:
token = token.strip()
self.token = token
self.headers.update(
{
'User-agent': f'boiler/{__version__}',
'Accept': 'application/json',
'X-Stumpf-Token': self.token,
}
)
def request(self, *args, **kwargs):
response = super().request(*args, **kwargs)
if response.status_code in [401, 403]:
click.echo(
click.style(
"You are attempting to perform an authorized operation but you aren't logged in.\n" # noqa
'Run the following command: boiler login stumpf',
fg='yellow',
),
err=True,
)
sys.exit(1)
return response
def newer_version_available():
if __version__ is None:
return False
this_version = parse_version(__version__)
if this_version.is_devrelease:
return False
r = requests.get('https://pypi.org/pypi/diva-boiler/json', timeout=(5, 5))
r.raise_for_status()
releases = [parse_version(v) for v in r.json()['releases'].keys()]
for release in releases:
if not (release.is_prerelease or release.is_devrelease) and release > this_version:
return True
return False
def main():
try:
# make boiler configuration directory
BaseDirectory.save_config_path(BOILER_CONFIG_PATH)
cli()
except Exception as e:
click.echo(
click.style(
'The following unexpected error occurred while attempting your operation:\n',
fg='red',
),
err=True,
)
click.echo(traceback.format_exc(), err=True)
click.echo(f'boiler: v{__version__}', err=True)
click.echo(f'python: v{platform.python_version()}', err=True)
click.echo(f'time: {datetime.datetime.now(datetime.timezone.utc).isoformat()}', err=True)
click.echo(f'os: {platform.platform()}', err=True)
click.echo(f'command: {" ".join(sys.argv[1:])}\n', err=True)
click.echo(
click.style(
'This is a bug in boiler and has already been reported. '
'If you would like to add any detail you can open an issue below.',
fg='yellow',
),
err=True,
)
click.echo(
'https://gitlab.com/diva-mturk/stumpf-diva/issues/new', err=True,
)
with push_scope() as scope:
scope.set_extra('boiler_version', f'v{__version__}')
scope.set_extra('python_version', f'v{platform.python_version()}')
scope.set_extra('os', f'{platform.platform()}')
scope.set_extra('command', f'{" ".join(sys.argv[1:])}')
capture_exception(e)
def update_config_value(filename: str, key: str, value: Any) -> None:
config_dir = BaseDirectory.load_first_config(BOILER_CONFIG_PATH)
config_file = os.path.join(config_dir, filename)
if os.path.exists(config_file):
with open(config_file, 'r') as infile:
config = json.load(infile)
config[key] = value
else:
config = {key: value}
with open(config_file, 'w') as outfile:
json.dump(config, outfile, indent=4)
def get_config_value(filename: str, key: str) -> Optional[Any]:
config_dir: Optional[str] = BaseDirectory.load_first_config(BOILER_CONFIG_PATH)
if config_dir:
        config_file = os.path.join(config_dir, filename)
if os.path.exists(config_file):
with open(config_file, 'r') as infile:
config = json.load(infile)
return config.get(key)
return None
@click.group()
@click.option(
'--api-url', default='https://stumpf.avidannotations.com/api/diva/', envvar='STUMPF_API_URL',
)
@click.option(
'--x-stumpf-token',
envvar='X_STUMPF_TOKEN',
default=get_config_value(BOILER_CONFIG_FILE, 'stumpf_token'),
)
@click.option('--offline', is_flag=True)
@click.option(
'--gitlab-url', envvar='GITLAB_URL', default='https://kwgitlab.kitware.com', hidden=True
)
@click.option(
'--gitlab-project-id', type=click.INT, envvar='GITLAB_PROJECT_ID', default=497, hidden=True
)
@click.version_option()
@click.pass_context
def cli(ctx, api_url, x_stumpf_token, offline, gitlab_url, gitlab_project_id):
api = urlparse(api_url)
if api.netloc == 'stumpf.avidannotations.com':
sentry_sdk.init(SENTRY_DSN) # type: ignore
if not offline:
try:
if newer_version_available():
click.echo(
click.style(
"""There is a newer version of boiler available.
You must upgrade to the latest version before continuing.
If you are using pipx, then you can upgrade by running the following command:
""",
fg='yellow',
),
err=True,
)
click.echo(click.style('pipx upgrade diva-boiler', fg='green'), err=True)
sys.exit(1)
except RequestException:
click.echo(
click.style('Failed to check for newer version of boiler:', fg='red'), err=True
)
raise
# remove old text config file at 'credentials'
# this isn't strictly necessary, it just cleans up an unused file on their machine.
# TODO: remove this after a short period
config_dir = BaseDirectory.load_first_config(BOILER_CONFIG_PATH)
if os.path.exists(os.path.join(config_dir, 'credentials')):
os.remove(os.path.join(config_dir, 'credentials'))
session = BoilerSession(api_url, x_stumpf_token)
ctx.obj = {
'session': session,
'stumpf_url': api_url.replace('/api/diva', '').rstrip('/'),
'gitlab_url': gitlab_url.rstrip('/'),
'gitlab_project_id': gitlab_project_id,
}
# set current user under sentry
# ignore any error from requests since the user may be trying to perform a local operation
with configure_scope() as scope:
try:
user = requests.get(
f'{ctx.obj["stumpf_url"]}/api/v1/user/me',
headers={'X-Stumpf-Token': x_stumpf_token},
timeout=(5, 5),
)
if user.ok:
scope.user = user.json()
except RequestException:
pass
# TODO: re-enable kpf once deserialization is fixed
from boiler.commands import ( # noqa: F401 E402
activity,
export,
gunrunner,
janitor,
kw18,
login,
mturk,
vendor,
video,
) | PypiClean |
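# Illustrative sketch of the config helpers defined above (the key and value here
# are placeholders, not settings boiler itself uses): values are merged into a JSON
# file under the XDG config directory registered for boiler.
if __name__ == '__main__':
    BaseDirectory.save_config_path(BOILER_CONFIG_PATH)
    update_config_value(BOILER_CONFIG_FILE, 'example_key', 'example-value')
    print(get_config_value(BOILER_CONFIG_FILE, 'example_key'))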
/autoenrich-2.1.1.tar.gz/autoenrich-2.1.1/autoENRICH/preferences/preferences.py |
#autoENRICH is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#autoENRICH is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#You should have received a copy of the GNU Affero General Public License
#along with autoENRICH. If not, see <https://www.gnu.org/licenses/>.
import json
import sys
def read_prefs(file):
try:
with open(file, 'r') as json_file:
prefs = json.load(json_file)
except Exception as E:
print('Error reading preferences file ', file)
print(E)
print('Exiting. . .')
sys.exit(0)
return prefs
def write_default_prefs(file):
prefs = {}
prefs['conf_search'] = {}
prefs['conf_search']['iterations'] = 200
prefs['conf_search']['maxconfs'] = 100
prefs['conf_search']['RMSthresh'] = 0.1
prefs['conf_search']['Ethresh'] = 100000
prefs['mol'] = {}
prefs['mol']['charge'] = 0
prefs['mol']['multiplicity'] = 1
prefs['comp'] = {}
prefs['comp']['parallel'] = True
prefs['comp']['system'] = 'BC3'
prefs['comp']['python_env'] = 'env_IMP'
prefs['comp']['aE_directory'] = "../../aE/"
prefs['optimisation'] = {}
prefs['optimisation']['memory'] = 12
prefs['optimisation']['processors'] = 4
prefs['optimisation']['opt'] = 'tight'
prefs['optimisation']['freq'] = False
prefs['optimisation']['functional'] = 'mPW1PW'
prefs['optimisation']['basisset'] = '6-311g(d,p)'
prefs['optimisation']['solvent'] = 'none'
prefs['optimisation']['grid'] = 'ultrafine'
prefs['optimisation']['custom_cmd_line'] = False
prefs['optimisation']['nodes'] = 1
prefs['optimisation']['walltime'] = '100:00:00'
prefs['NMR'] = {}
prefs['NMR']['memory'] = 12
prefs['NMR']['processors'] = 4
prefs['NMR']['functional'] = 'wB97X-D3'
prefs['NMR']['basisset'] = '6-311g(d,p)'
prefs['NMR']['aux_basis_set'] = 'def2/JK'
prefs['NMR']['solvent'] = 'none'
prefs['NMR']['custom_cmd_line'] = False
prefs['NMR']['nodes'] = 1
prefs['NMR']['walltime'] = '100:00:00'
prefs['NMR']['shift_nuclei'] = ['H', 'C', 'N', 'O', 'F']
prefs['NMR']['spin_nuclei'] = ['H', 'C']
prefs['NMR']['spin_thresh'] = 20.0
json.dump(prefs, open(file, 'w'), indent=4) | PypiClean |
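# A short usage sketch with a hypothetical file name: write the default preference
# set with the helper above, then read it back and pick out a couple of values.
if __name__ == '__main__':
    write_default_prefs('ENRICH.json')
    prefs = read_prefs('ENRICH.json')
    print(prefs['optimisation']['functional'], prefs['NMR']['spin_thresh'])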
/django-nimda-0.0.1.tar.gz/django-nimda-0.0.1/nimda/static/admin/js/inlines.js | (function($) {
$.fn.formset = function(opts) {
var options = $.extend({}, $.fn.formset.defaults, opts);
var $this = $(this);
var $parent = $this.parent();
var updateElementIndex = function(el, prefix, ndx) {
var id_regex = new RegExp("(" + prefix + "-(\\d+|__prefix__))");
var replacement = prefix + "-" + ndx;
if ($(el).prop("for")) {
$(el).prop("for", $(el).prop("for").replace(id_regex, replacement));
}
if (el.id) {
el.id = el.id.replace(id_regex, replacement);
}
if (el.name) {
el.name = el.name.replace(id_regex, replacement);
}
};
var totalForms = $("#id_" + options.prefix + "-TOTAL_FORMS").prop("autocomplete", "off");
var nextIndex = parseInt(totalForms.val(), 10);
var maxForms = $("#id_" + options.prefix + "-MAX_NUM_FORMS").prop("autocomplete", "off");
// only show the add button if we are allowed to add more items,
// note that max_num = None translates to a blank string.
var showAddButton = maxForms.val() === '' || (maxForms.val()-totalForms.val()) > 0;
$this.each(function(i) {
$(this).not("." + options.emptyCssClass).addClass(options.formCssClass);
});
if ($this.length && showAddButton) {
var addButton;
if ($this.prop("tagName") == "TR") {
// If forms are laid out as table rows, insert the
// "add" button in a new table row:
var numCols = this.eq(-1).children().length;
$parent.append('<tr class="' + options.addCssClass + '"><td colspan="' + numCols + '"><a href="javascript:void(0)">' + options.addText + "</a></tr>");
addButton = $parent.find("tr:last a");
} else {
// Otherwise, insert it immediately after the last form:
$this.filter(":last").after('<div class="' + options.addCssClass + '"><a href="javascript:void(0)">' + options.addText + "</a></div>");
addButton = $this.filter(":last").next().find("a");
}
addButton.click(function(e) {
e.preventDefault();
var totalForms = $("#id_" + options.prefix + "-TOTAL_FORMS");
var template = $("#" + options.prefix + "-empty");
var row = template.clone(true);
row.removeClass(options.emptyCssClass)
.addClass(options.formCssClass)
.attr("id", options.prefix + "-" + nextIndex);
if (row.is("tr")) {
// If the forms are laid out in table rows, insert
// the remove button into the last table cell:
row.children(":last").append('<div><a class="' + options.deleteCssClass +'" href="javascript:void(0)">' + options.deleteText + "</a></div>");
} else if (row.is("ul") || row.is("ol")) {
// If they're laid out as an ordered/unordered list,
// insert an <li> after the last list item:
row.append('<li><a class="' + options.deleteCssClass +'" href="javascript:void(0)">' + options.deleteText + "</a></li>");
} else {
// Otherwise, just insert the remove button as the
// last child element of the form's container:
row.children(":first").append('<span><a class="' + options.deleteCssClass + '" href="javascript:void(0)">' + options.deleteText + "</a></span>");
}
row.find("*").each(function() {
updateElementIndex(this, options.prefix, totalForms.val());
});
// Insert the new form when it has been fully edited
row.insertBefore($(template));
// Update number of total forms
$(totalForms).val(parseInt(totalForms.val(), 10) + 1);
nextIndex += 1;
// Hide add button in case we've hit the max, except we want to add infinitely
if ((maxForms.val() !== '') && (maxForms.val()-totalForms.val()) <= 0) {
addButton.parent().hide();
}
// The delete button of each row triggers a bunch of other things
row.find("a." + options.deleteCssClass).click(function(e) {
e.preventDefault();
// Remove the parent form containing this button:
var row = $(this).parents("." + options.formCssClass);
row.remove();
nextIndex -= 1;
// If a post-delete callback was provided, call it with the deleted form:
if (options.removed) {
options.removed(row);
}
// Update the TOTAL_FORMS form count.
var forms = $("." + options.formCssClass);
$("#id_" + options.prefix + "-TOTAL_FORMS").val(forms.length);
// Show add button again once we drop below max
if ((maxForms.val() === '') || (maxForms.val()-forms.length) > 0) {
addButton.parent().show();
}
// Also, update names and ids for all remaining form controls
// so they remain in sequence:
for (var i=0, formCount=forms.length; i<formCount; i++)
{
updateElementIndex($(forms).get(i), options.prefix, i);
$(forms.get(i)).find("*").each(function() {
updateElementIndex(this, options.prefix, i);
});
}
});
// If a post-add callback was supplied, call it with the added form:
if (options.added) {
options.added(row);
}
});
}
return this;
};
/* Setup plugin defaults */
$.fn.formset.defaults = {
prefix: "form", // The form prefix for your django formset
addText: "add another", // Text for the add link
deleteText: "remove", // Text for the delete link
addCssClass: "add-row", // CSS class applied to the add link
deleteCssClass: "delete-row", // CSS class applied to the delete link
emptyCssClass: "empty-row", // CSS class applied to the empty row
formCssClass: "dynamic-form", // CSS class applied to each form in a formset
added: null, // Function called each time a new form is added
removed: null // Function called each time a form is deleted
};
// Tabular inlines ---------------------------------------------------------
$.fn.tabularFormset = function(options) {
var $rows = $(this);
var alternatingRows = function(row) {
$($rows.selector).not(".add-row").removeClass("row1 row2")
.filter(":even").addClass("row1").end()
.filter(":odd").addClass("row2");
};
var reinitDateTimeShortCuts = function() {
// Reinitialize the calendar and clock widgets by force
if (typeof DateTimeShortcuts != "undefined") {
$(".datetimeshortcuts").remove();
DateTimeShortcuts.init();
}
};
var updateSelectFilter = function() {
// If any SelectFilter widgets are a part of the new form,
// instantiate a new SelectFilter instance for it.
if (typeof SelectFilter != 'undefined'){
$('.selectfilter').each(function(index, value){
var namearr = value.name.split('-');
SelectFilter.init(value.id, namearr[namearr.length-1], false, options.adminStaticPrefix );
});
$('.selectfilterstacked').each(function(index, value){
var namearr = value.name.split('-');
SelectFilter.init(value.id, namearr[namearr.length-1], true, options.adminStaticPrefix );
});
}
};
var initPrepopulatedFields = function(row) {
row.find('.prepopulated_field').each(function() {
var field = $(this),
input = field.find('input, select, textarea'),
dependency_list = input.data('dependency_list') || [],
dependencies = [];
$.each(dependency_list, function(i, field_name) {
dependencies.push('#' + row.find('.field-' + field_name).find('input, select, textarea').attr('id'));
});
if (dependencies.length) {
input.prepopulate(dependencies, input.attr('maxlength'));
}
});
};
$rows.formset({
prefix: options.prefix,
addText: options.addText,
formCssClass: "dynamic-" + options.prefix,
deleteCssClass: "inline-deletelink",
deleteText: options.deleteText,
emptyCssClass: "empty-form",
removed: alternatingRows,
added: function(row) {
initPrepopulatedFields(row);
reinitDateTimeShortCuts();
updateSelectFilter();
alternatingRows(row);
}
});
return $rows;
};
// Stacked inlines ---------------------------------------------------------
$.fn.stackedFormset = function(options) {
var $rows = $(this);
var updateInlineLabel = function(row) {
$($rows.selector).find(".inline_label").each(function(i) {
var count = i + 1;
$(this).html($(this).html().replace(/(#\d+)/g, "#" + count));
});
};
var reinitDateTimeShortCuts = function() {
// Reinitialize the calendar and clock widgets by force, yuck.
if (typeof DateTimeShortcuts != "undefined") {
$(".datetimeshortcuts").remove();
DateTimeShortcuts.init();
}
};
var updateSelectFilter = function() {
// If any SelectFilter widgets were added, instantiate a new instance.
if (typeof SelectFilter != "undefined"){
$(".selectfilter").each(function(index, value){
var namearr = value.name.split('-');
SelectFilter.init(value.id, namearr[namearr.length-1], false, options.adminStaticPrefix);
});
$(".selectfilterstacked").each(function(index, value){
var namearr = value.name.split('-');
SelectFilter.init(value.id, namearr[namearr.length-1], true, options.adminStaticPrefix);
});
}
};
var initPrepopulatedFields = function(row) {
row.find('.prepopulated_field').each(function() {
var field = $(this),
input = field.find('input, select, textarea'),
dependency_list = input.data('dependency_list') || [],
dependencies = [];
$.each(dependency_list, function(i, field_name) {
dependencies.push('#' + row.find('.form-row .field-' + field_name).find('input, select, textarea').attr('id'));
});
if (dependencies.length) {
input.prepopulate(dependencies, input.attr('maxlength'));
}
});
};
$rows.formset({
prefix: options.prefix,
addText: options.addText,
formCssClass: "dynamic-" + options.prefix,
deleteCssClass: "inline-deletelink",
deleteText: options.deleteText,
emptyCssClass: "empty-form",
removed: updateInlineLabel,
added: (function(row) {
initPrepopulatedFields(row);
reinitDateTimeShortCuts();
updateSelectFilter();
updateInlineLabel(row);
})
});
return $rows;
};
})(django.jQuery); | PypiClean |
/android-tv-remote-0.1.5.tar.gz/android-tv-remote-0.1.5/LICENSE.md | # The MIT License (MIT)
Copyright (c) 2019 Tim Santor
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
| PypiClean |
/aidenva-0.0.1-py3-none-any.whl/vaengine/firesmoke_va_6.py | import logging
import cv2
import numpy as np
from service.va_service import VAService
from models.classify import Classify
from misc import labelmap_util
from misc.firesmoke_detector_v4 import FiresmokeDetector
from misc.estimator.condition_estimate import ConditionEstimator
from misc.estimator.action_estimate import ActionFrameEstimate
tracelogger = logging.getLogger('trace')
# motion으로 탐지된 최대 box 개수
MOTION_MAX_NUM = 30
# num of classification batch size
CLASSIFICATION_IMG_SIZE = 20
CROP_EXPEND_RATIO = 0.5
class FiresmokeVA(VAService):
def __init__(self, config, va_type, min_threshold=0.5):
super().__init__(config, va_type)
self.classify_shape = 299
self.action_classify_shape = 224
self.classify = Classify(self.enabled, config, 'firesmoke', in_shape=(self.classify_shape, self.classify_shape))
self.action_classify = Classify(self.enabled, config, 'firesmoke_action', in_shape=(16, self.action_classify_shape, self.action_classify_shape))
self.min_score_threshold = config.getvalue(self.conf_prefix + 'min_score_threshold', min_threshold)
self.reload_conf = config.getbool(self.conf_prefix + 'reload_conf')
self.motion = {}
self.estimator = {}
self.action = {}
def _set_lables(self, path_to_labels):
labels = labelmap_util.create_categories_from_labelmap(path_to_labels)
return dict([(item['id'], item['name']) for item in labels])
def _execute(self, sc):
        # images as numpy arrays, one per channel, kept in channel order
# list -> [ch_idx, image]
image_by_ch = sc.get_in_by_vatype(self.is_support_vatype_fc)
        # return early if there is nothing to process
if len(image_by_ch) == 0: return sc
motion_inference = []
for _, image, ch_id, cfg_json in image_by_ch:
if ch_id not in self.motion:
self.motion[ch_id] = FiresmokeDetector(ch_id, self.reload_conf)
if ch_id not in self.estimator:
self.estimator[ch_id] = ConditionEstimator(image.shape[:2],
self.config.getvalue(self.conf_prefix + 'queue_size'),
self.config.getvalue(self.conf_prefix + 'activation_per_queue'),
self.config.getvalue(self.conf_prefix + 'iou_threshold'))
if ch_id not in self.action:
self.action[ch_id] = ActionFrameEstimate()
motion_inference.append(self.motion[ch_id].run_single_image(image, cfg_json))
# for debug
debug_dict = {}
for (_, _, ch_id, cfg_json), inference in zip (image_by_ch, motion_inference):
if self.motion[ch_id].debug:
if inference is not None:
debug_dict[ch_id] = [inference, [1 for i in range(len(inference))], ['fire' for i in range(len(inference))]]
motion_firesmoke = {}
for (_, image_np, ch_id, cfg_json), inference in zip (image_by_ch, motion_inference):
            # on a preset move or a large scene change, fall back to detection-style handling
if inference is None:
motion_firesmoke[ch_id] = [[], [], []]
else:
                # skip frames with too many motion boxes
if len(inference) > MOTION_MAX_NUM :
motion_firesmoke[ch_id] = [[], [], []]
else :
motion_firesmoke[ch_id] = [inference, [1 for i in range(len(inference))], ['fire' for i in range(len(inference))]]
crop_img_np = []
crop_box_np = []
inf_results = []
crop_per_channels = {}
for (_, firesmoke, ch_id, cfg_json) in image_by_ch:
            # classify crops taken from the motion detection boxes
motion_detect = motion_firesmoke[ch_id]
b, _, _ = motion_detect
self.action[ch_id].add_frame(firesmoke, b)
boxes, firesmoke_corp_np_list = self.expend_box_n_resize(firesmoke, motion_detect, self.classify_shape)
if len(firesmoke_corp_np_list) > 0 :
crop_img_np.extend(firesmoke_corp_np_list)
crop_box_np.extend(boxes)
crop_per_channels[ch_id] = len(firesmoke_corp_np_list)
if len(crop_img_np) > 0 :
if len(crop_img_np) < CLASSIFICATION_IMG_SIZE:
inf_results.extend(self.classify._inference(crop_img_np))
else:
for i in range(0, len(crop_img_np), CLASSIFICATION_IMG_SIZE):
c = crop_img_np[i: i + CLASSIFICATION_IMG_SIZE]
inf_results.extend(self.classify._inference(c))
idx = 0
        # skip channels with neither motion nor detection results
for (seq, firesmoke, ch_id, cfg_json) in image_by_ch:
num = crop_per_channels[ch_id]
if num == 0:
sc.set_out_by_ch(self.va_type, seq, [[], [], []])
continue
            # keep only the firesmoke class results and build the response info
# response format : boxes, score, class
if ch_id in debug_dict:
r = self.aggregate_classify_result(crop_box_np[idx:idx+num], inf_results[idx:idx+num], self.estimator[ch_id])
r[0] = r[0] + debug_dict[ch_id][0]
r[1] = r[1] + debug_dict[ch_id][1]
r[2] = r[2] + debug_dict[ch_id][2]
sc.set_out_by_ch(self.va_type, seq, r)
else:
# tracelogger.debug('slice ch %d : %d, %d', ch, idx, idx+num)
sc.set_out_by_ch(self.va_type, seq, self.fire_action(self.aggregate_classify_result(crop_box_np[idx:idx+num], inf_results[idx:idx+num], self.estimator[ch_id]), self.action[ch_id]))
idx = idx + num
return sc
def fire_action(self, target, action):
action_resulit = [[],[],[]]
for box, score, classes in zip(*target):
action_frame = []
frames = action.frames()
if len(frames) == 16:
crop_frame = []
box_t = tuple(box)
for idx, f in enumerate(frames):
crop, _ = self.__crop_expend_ares(f, box_t, 1.8)
crop = cv2.resize(crop, (self.action_classify_shape, self.action_classify_shape))
crop_frame.append(crop)
action_frame.append(np.stack(crop_frame, 0))
logits = self.action_classify._inference(action_frame)
for i, l in enumerate(logits):
prob = l[0:]
sorted_inds = [i[0] for i in sorted(enumerate(-prob), key=lambda x: x[1])]
# print(' ====> idx %d, score %.2f, FireSmoke score %.2f 2: %d-%.2f, 3: %d-%.2f' % (sorted_inds[0], prob[sorted_inds[0]] * 100, prob[101]* 100, sorted_inds[1], prob[sorted_inds[1]] * 100, sorted_inds[2], prob[sorted_inds[2]] * 100))
if sorted_inds[0] == 101 and ( prob[sorted_inds[0]] * 100 > 50):
action_resulit[0].append(box)
action_resulit[1].append(score)
action_resulit[2].append(classes)
return action_resulit
def aggregate_classify_result(self, boxes, inf_result, e):
detects = []
canditate = []
res_boxes = []
res_scores = []
res_classes = []
if inf_result is not None:
for box, logits in zip(boxes, inf_result):
prob = logits[0:]
sorted_inds = [i[0] for i in sorted(enumerate(-prob), key=lambda x: x[1])]
# if sorted_inds[0] < 2 and prob[sorted_inds[0]] > self.min_score_threshold: # 0: fire, 1: smoke, 2: unknown
if sorted_inds[0] > 0 and prob[sorted_inds[0]] > self.min_score_threshold: # 0: unknown, 1: fire, 2: smoke
# response format : boxes, score, class
res_boxes.append(box)
res_scores.append(prob[sorted_inds[0]])
res_classes.append(self.label_map[1])
canditate.append(box)
else:
detects.append(box)
find_conditions = e.estimate(detects, canditate)
f_boxes = []
f_scores = []
f_classes = []
        # merge the motion-detection candidate boxes into the final result
for fd_box in find_conditions:
find_flag = False
for f_box, f_score, f_classe in zip(res_boxes, res_scores, res_classes):
if np.array_equal(fd_box,f_box):
f_boxes.append(f_box)
f_scores.append(f_score)
f_classes.append(f_classe)
find_flag = True
break
if not find_flag:
f_boxes.append(fd_box)
f_scores.append(0.7)
f_classes.append(self.label_map[1])
return [f_boxes, f_scores, f_classes]
def expend_box_n_resize(self, image_np, detects, shape, ratio=CROP_EXPEND_RATIO):
crop_list = []
boxes, scores, classes = detects
for idx in range(len(boxes)):
box_t = tuple(boxes[idx])
crop, box = self.__crop_expend_ares(image_np, box_t, ratio)
crop = cv2.resize(crop, (shape, shape))
crop_list.append(crop)
return boxes, crop_list
def __crop_expend_ares(self, image_np, box, ratio=0, coordiante=True):
im_height, im_width = image_np.shape[:2]
ymin, xmin, ymax, xmax = box
if (coordiante):
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
if ratio > 0:
vh_ratio = (bottom - top) / (right - left)
hv_ratio = (right - left) / (bottom - top)
            if vh_ratio > 0:  # box is taller than it is wide
width_ratio = int(((right - left) * (ratio * vh_ratio)) / 2)
height_ratio = int(((bottom - top) * ratio) / 2)
else:
width_ratio = int(((right - left) * ratio) / 2)
height_ratio = int(((bottom - top) * (ratio * hv_ratio)) / 2)
top = (top - height_ratio) if 0 < (top - height_ratio) else 0
bottom = (bottom + height_ratio) if im_height > (bottom + height_ratio) else im_height
left = (left - width_ratio) if 0 < (left - width_ratio) else 0
right = (right + width_ratio) if im_width > (right + width_ratio) else im_width
return image_np[int(top):int(bottom), int(left):int(right), :], (left, right, top, bottom) | PypiClean |
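# Illustrative check of the private crop helper's geometry on a dummy frame
# (a sketch only; the box and ratio below are arbitrary). The helper never touches
# `self`, so it can be called unbound through Python name mangling with None.
if __name__ == '__main__':
    dummy = np.zeros((400, 600, 3), dtype=np.uint8)
    crop, area = FiresmokeVA._FiresmokeVA__crop_expend_ares(None, dummy, (0.25, 0.25, 0.5, 0.5), ratio=0.5)
    print(crop.shape, area)  # expanded crop and its (left, right, top, bottom) pixel bounds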
/pynmsnn-0.0.3-py3-none-any.whl/pyNM/cf_matrix.py | __author__ = 'Ajay Arunachalam'
__version__ = '0.0.1'
__date__ = '17.07.2021'
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def make_confusion_matrix(cf,
group_names=None,
categories='auto',
count=True,
percent=True,
cbar=True,
xyticks=True,
xyplotlabels=True,
sum_stats=True,
figsize=None,
cmap='Blues',
title=None):
'''
    This function makes a pretty plot of an sklearn confusion matrix cf using a Seaborn heatmap visualization.
Arguments
---------
cf: confusion matrix to be passed in
group_names: List of strings that represent the labels row by row to be shown in each square.
categories: List of strings containing the categories to be displayed on the x,y axis. Default is 'auto'
count: If True, show the raw number in the confusion matrix. Default is True.
    percent: If True, show the proportions for each category. Default is True.
cbar: If True, show the color bar. The cbar values are based off the values in the confusion matrix.
Default is True.
xyticks: If True, show x and y ticks. Default is True.
xyplotlabels: If True, show 'True Label' and 'Predicted Label' on the figure. Default is True.
sum_stats: If True, display summary statistics below the figure. Default is True.
figsize: Tuple representing the figure size. Default will be the matplotlib rcParams value.
cmap: Colormap of the values displayed from matplotlib.pyplot.cm. Default is 'Blues'
See http://matplotlib.org/examples/color/colormaps_reference.html
title: Title for the heatmap. Default is None.
'''
# CODE TO GENERATE TEXT INSIDE EACH SQUARE
blanks = ['' for i in range(cf.size)]
if group_names and len(group_names)==cf.size:
group_labels = ["{}\n".format(value) for value in group_names]
else:
group_labels = blanks
if count:
group_counts = ["{0:0.0f}\n".format(value) for value in cf.flatten()]
else:
group_counts = blanks
if percent:
group_percentages = ["{0:.2%}".format(value) for value in cf.flatten()/np.sum(cf)]
else:
group_percentages = blanks
box_labels = [f"{v1}{v2}{v3}".strip() for v1, v2, v3 in zip(group_labels,group_counts,group_percentages)]
box_labels = np.asarray(box_labels).reshape(cf.shape[0],cf.shape[1])
# CODE TO GENERATE SUMMARY STATISTICS & TEXT FOR SUMMARY STATS
if sum_stats:
#Accuracy is sum of diagonal divided by total observations
accuracy = np.trace(cf) / float(np.sum(cf))
#if it is a binary confusion matrix, show some more stats
if len(cf)==2:
#Metrics for Binary Confusion Matrices
precision = cf[1,1] / sum(cf[:,1])
recall = cf[1,1] / sum(cf[1,:])
f1_score = 2*precision*recall / (precision + recall)
stats_text = "\n\nAccuracy={:0.3f}\nPrecision={:0.3f}\nRecall={:0.3f}\nF1 Score={:0.3f}".format(
accuracy,precision,recall,f1_score)
else:
stats_text = "\n\nAccuracy={:0.3f}".format(accuracy)
else:
stats_text = ""
# SET FIGURE PARAMETERS ACCORDING TO OTHER ARGUMENTS
if figsize==None:
#Get default figure size if not set
figsize = plt.rcParams.get('figure.figsize')
if xyticks==False:
#Do not show categories if xyticks is False
categories=False
# MAKE THE HEATMAP VISUALIZATION
plt.figure(figsize=figsize)
sns.heatmap(cf,annot=box_labels,fmt="",cmap=cmap,cbar=cbar,xticklabels=categories,yticklabels=categories)
if xyplotlabels:
plt.ylabel('True label')
plt.xlabel('Predicted label' + stats_text)
else:
plt.xlabel(stats_text)
if title:
plt.title(title) | PypiClean |
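# Example usage with a small hand-built binary confusion matrix laid out as
# [[TN, FP], [FN, TP]]; any square numpy array works the same way. The category
# and group names below are placeholders.
if __name__ == '__main__':
    cf = np.array([[50, 10],
                   [5, 35]])
    group_names = ['True Neg', 'False Pos', 'False Neg', 'True Pos']
    make_confusion_matrix(cf,
                          group_names=group_names,
                          categories=['No Event', 'Event'],
                          cmap='Blues',
                          title='Example confusion matrix')
    plt.show()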
/aglite_test.core-0.7.0b20230314-py3-none-any.whl/autogluon/core/searcher/local_searcher.py | import logging
import pickle
from collections import OrderedDict
from ..space import Categorical, Space
__all__ = ['LocalSearcher']
logger = logging.getLogger(__name__)
class LocalSearcher(object):
"""Local Searcher (virtual class to inherit from if you are creating a custom Searcher).
Parameters
----------
search_space: dict
The configuration space to sample from. It contains the full
specification of the Hyperparameters with their priors
"""
def __init__(self, search_space: dict, reward_attribute: str = 'reward', **kwargs):
"""
:param search_space: Configuration space to sample from or search in
:param reward_attribute: Reward attribute passed to update.
Default: 'reward'
"""
self.search_space = search_space
self._results = OrderedDict()
self._reward_attribute = reward_attribute
self._params_static = self._get_params_static()
self._params_default = self._get_params_default(self._params_static)
self._params_order = list(self._params_default.keys())
self._params_cat_dict = self._get_params_cat_dict()
# FIXME: Consider removing
def configure_scheduler(self, scheduler):
"""
Some searchers need to obtain information from the scheduler they are
used with, in order to configure themselves.
This method has to be called before the searcher can be used.
The implementation here sets _reward_attribute for schedulers which
specify it.
Args:
scheduler: TaskScheduler
Scheduler the searcher is used with.
"""
from ..scheduler.seq_scheduler import LocalSequentialScheduler
if isinstance(scheduler, LocalSequentialScheduler):
self._reward_attribute = scheduler._reward_attr
@staticmethod
def _reward_while_pending():
"""Defines the reward value which is assigned to config, while it is pending."""
return float("-inf")
def get_config(self, **kwargs):
"""Function to sample a new configuration
This function is called inside TaskScheduler to query a new configuration
Args:
kwargs:
Extra information may be passed from scheduler to searcher
returns: (config, info_dict)
must return a valid configuration and a (possibly empty) info dict
"""
raise NotImplementedError(f'This function needs to be overwritten in {self.__class__.__name__}.')
def update(self, config: dict, **kwargs):
"""
Update the searcher with the newest metric report.
Will error if config contains unknown parameters, values outside the valid search space, or is missing parameters.
"""
reward = kwargs.get(self._reward_attribute, None)
assert reward is not None, "Missing reward attribute '{}'".format(self._reward_attribute)
self._add_result(config=config, result=reward)
def register_pending(self, config, milestone=None):
"""
Signals to searcher that evaluation for config has started, but not
yet finished, which allows model-based searchers to register this
evaluation as pending.
For multi-fidelity schedulers, milestone is the next milestone the
evaluation will attend, so that model registers (config, milestone)
as pending.
In general, the searcher may assume that update is called with that
config at a later time.
"""
pass
def evaluation_failed(self, config, **kwargs):
"""
Called by scheduler if an evaluation job for config failed. The
searcher should react appropriately (e.g., remove pending evaluations
for this config, and blacklist config).
"""
pass
def get_best_reward(self):
"""Calculates the reward (i.e. validation performance) produced by training under the best configuration identified so far.
Assumes higher reward values indicate better performance.
"""
if self._results:
return max(self._results.values())
return self._reward_while_pending()
def get_reward(self, config):
"""Calculates the reward (i.e. validation performance) produced by training with the given configuration.
"""
config_pkl = self._pickle_config(config=config)
assert config_pkl in self._results
return self._results[config_pkl]
def get_best_config(self):
"""Returns the best configuration found so far.
"""
if self._results:
config_pkl = max(self._results, key=self._results.get)
return self._unpickle_config(config_pkl=config_pkl)
else:
return dict()
def get_results(self, sort=True) -> list:
"""
Gets a list of results in the form (config, reward).
Parameters
----------
sort : bool, default = True
If True, sorts the configs in order from best to worst reward.
If False, config order is undefined.
"""
results = []
for config_pkl, reward in self._results.items():
config = self._unpickle_config(config_pkl=config_pkl)
results.append((config, reward))
if sort:
results = sorted(results, key=lambda x: x[1], reverse=True)
return results
def _get_params_static(self) -> dict:
"""
Gets a dictionary of static key values, where no search space is used and therefore the values are always the same in all configs.
"""
params_static = dict()
for key, val in self.search_space.items():
if not isinstance(val, Space):
params_static[key] = val
return params_static
def _get_params_default(self, params_static: dict) -> dict:
"""
Gets the default config by calling `val.default` on every search space parameter, plus the static key values.
"""
params_default = dict()
for key, val in self.search_space.items():
if isinstance(val, Space):
params_default[key] = val.default
params_default.update(params_static)
return params_default
def _get_params_cat_dict(self) -> dict:
"""
Gets the dictionary of pickled category value -> index mapping for Category search spaces.
This is used in `self._pickle_config` to map values to idx when pickling the config. This compresses the size of the pkl file.
When being later unpickled via `self._unpickle_config`, the idx can be used to get the key value via `self.search_space[key][idx]`.
"""
params_cat_dict = dict()
for key, val in self.search_space.items():
if isinstance(val, Categorical):
cat_map = dict()
for i, cat in enumerate(val.data):
cat_pkl = pickle.dumps(cat)
cat_map[cat_pkl] = i
params_cat_dict[key] = cat_map
return params_cat_dict
def _add_result(self, config: dict, result: float):
assert isinstance(result, (float, int)), f"result must be a float or int! Was instead {type(result)} | Value: {result}"
config_pkl = self._pickle_config(config=config)
self._results[config_pkl] = result
def _pickle_config(self, config: dict) -> bytes:
assert isinstance(config, dict), f"config must be a dict! Was instead {type(config)} | Value: {config}"
assert len(config) == len(self._params_order), f'Config length does not match expected params count!\n' \
f'Expected: {self._params_order}\n' \
f'Actual: {list(config.keys())}'
# Note: This code is commented out because it can be computationally and memory expensive if user sends large objects in search space, such as datasets.
"""
for key in self._params_static:
assert pickle.dumps(config[key]) == pickle.dumps(self._params_static[key]), \
f'Invalid config value for search space parameter "{key}" | Invalid Value: {config[key]} | Expected Value: {self._params_static[key]}'
"""
config_to_pkl = []
for key in self._params_order:
if key in self._params_static:
pass
elif key in self._params_cat_dict:
try:
cat_idx = self._params_cat_dict[key][pickle.dumps(config[key])]
except KeyError:
raise AssertionError(f'Invalid config value for search space parameter "{key}" | '
f'Invalid Value: {config[key]} | Valid Values: {self.search_space[key].data}')
config_to_pkl.append(cat_idx)
else:
config_to_pkl.append(config[key])
return pickle.dumps(config_to_pkl)
def _unpickle_config(self, config_pkl: bytes) -> dict:
assert isinstance(config_pkl, bytes), f"config_pkl must be a bytes object! Was instead {type(config_pkl)} | Value: {config_pkl}"
config_compressed = pickle.loads(config_pkl)
config = dict()
i = -1
for key in self._params_order:
if key in self._params_static:
config[key] = self._params_static[key]
else:
i += 1
val = config_compressed[i]
if key in self._params_cat_dict:
config[key] = self.search_space[key][val]
else:
config[key] = val
return config | PypiClean |
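# A minimal sketch of the bookkeeping this base class provides (get_config() itself
# is left to subclasses). The search space below mixes a static value with a
# Categorical from ..space; the parameter names and rewards are made up.
if __name__ == '__main__':
    space = {'model': 'gbm', 'learning_rate': Categorical(0.1, 0.01)}
    searcher = LocalSearcher(search_space=space)
    searcher.update({'model': 'gbm', 'learning_rate': 0.1}, reward=0.8)
    searcher.update({'model': 'gbm', 'learning_rate': 0.01}, reward=0.9)
    print(searcher.get_best_config())  # config with the highest reward seen so far
    print(searcher.get_best_reward())  # 0.9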
/DLex_DeaL-0.0.3-py3-none-any.whl/DLex/genfa.py | from .lexer import *
import argparse
from .GrParser import LexParser
from graphviz import Digraph
from . import LexIO
class FAVisualizer:
def __init__(self, nfa={}, dfa={}) -> None:
self.nfa = nfa
self.dfa = dfa
self.dot = Digraph(name="NFA", node_attr={
"shape": "circle",
"fontsize": "12",
"fontname": "Courier", "height": ".1"
},
edge_attr={"arrowsize": ".5"},
format="png",
graph_attr={"ranksep": ".3", "layout": "dot"})
def bfsN(self):
q = [Start]
vis = set(Start)
while(len(q) > 0):
u = q.pop(0)
for e in self.nfa.mp[u]:
for v in self.nfa.mp[u][e]:
if e == '\\':
e = '\\\\'
self.dot.edge(str(u), str(v), label=e)
if (v not in vis):
q.append(v)
vis.add(v)
for it in self.nfa.nodes:
if self.nfa.nodes[it].isEnd:
self.dot.node(str(it), shape='doublecircle')
def bfsD(self):
q = [self.dfa.root]
vis = {self.dfa.root}
while len(q) > 0:
u = q.pop(0)
for e in self.dfa.dfa[u]:
v = self.dfa.dfa[u][e]
if v not in vis:
vis.add(v)
q.append(v)
if e == '\\':
e = '\\\\'
self.dot.edge(str(u), str(v), label=e)
for it in self.dfa.ends:
self.dot.node(str(it), shape='doublecircle')
def main():
parser = argparse.ArgumentParser(
description='Draw the FA graph'
)
parser.add_argument('inputfile', help='rule file')
parser.add_argument(
'mode', help='NFA or DFA or sDFA (serialized DFA pickle file)')
args = parser.parse_args()
if(args.mode == 'NFA'):
text = open(args.inputfile, encoding='utf-8')
nfa = NFA()
LexParser().genNFA(nfa=nfa, text=text)
nfaVis = FAVisualizer(nfa=nfa)
nfaVis.bfsN()
nfaVis.dot.view('NFA')
elif(args.mode == 'DFA'):
text = open(args.inputfile, encoding='utf-8')
nfa = NFA()
LexParser().genNFA(nfa, text)
dfa = DFA(nfa)
dfaVis = FAVisualizer(dfa=dfa)
dfaVis.bfsD()
dfaVis.dot.view('DFA')
elif(args.mode == 'sDFA'):
dfa = LexIO.readPickle(args.inputfile)
dfaVis = FAVisualizer(dfa=dfa)
dfaVis.bfsD()
dfaVis.dot.view('DFA')
if __name__ == '__main__':
main() | PypiClean |
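# Example invocations (rule/pickle file names are hypothetical; assumes the DLex
# package is installed so the relative imports above resolve):
#   python -m DLex.genfa rules.lex NFA    # render the NFA built from the rule file
#   python -m DLex.genfa rules.lex DFA    # render the subset-constructed DFA
#   python -m DLex.genfa dfa.pkl sDFA     # render a DFA loaded from a pickle file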
/pulumi_tls-5.1.0a1687758130.tar.gz/pulumi_tls-5.1.0a1687758130/pulumi_tls/locally_signed_cert.py |
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['LocallySignedCertArgs', 'LocallySignedCert']
@pulumi.input_type
class LocallySignedCertArgs:
def __init__(__self__, *,
allowed_uses: pulumi.Input[Sequence[pulumi.Input[str]]],
ca_cert_pem: pulumi.Input[str],
ca_private_key_pem: pulumi.Input[str],
cert_request_pem: pulumi.Input[str],
validity_period_hours: pulumi.Input[int],
early_renewal_hours: Optional[pulumi.Input[int]] = None,
is_ca_certificate: Optional[pulumi.Input[bool]] = None,
set_subject_key_id: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a LocallySignedCert resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_uses: List of key usages allowed for the issued certificate. Values are defined in [RFC 5280](https://datatracker.ietf.org/doc/html/rfc5280) and combine flags defined by both [Key Usages](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.3) and [Extended Key Usages](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.12). Accepted values: `any_extended`, `cert_signing`, `client_auth`, `code_signing`, `content_commitment`, `crl_signing`, `data_encipherment`, `decipher_only`, `digital_signature`, `email_protection`, `encipher_only`, `ipsec_end_system`, `ipsec_tunnel`, `ipsec_user`, `key_agreement`, `key_encipherment`, `microsoft_commercial_code_signing`, `microsoft_kernel_code_signing`, `microsoft_server_gated_crypto`, `netscape_server_gated_crypto`, `ocsp_signing`, `server_auth`, `timestamping`.
:param pulumi.Input[str] ca_cert_pem: Certificate data of the Certificate Authority (CA) in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
:param pulumi.Input[str] ca_private_key_pem: Private key of the Certificate Authority (CA) used to sign the certificate, in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
:param pulumi.Input[str] cert_request_pem: Certificate request data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
:param pulumi.Input[int] validity_period_hours: Number of hours, after initial issuing, that the certificate will remain valid for.
:param pulumi.Input[int] early_renewal_hours: The resource will consider the certificate to have expired the given number of hours before its actual expiry time. This
can be useful to deploy an updated certificate in advance of the expiration of the current certificate. However, the old
certificate remains valid until its true expiration time, since this resource does not (and cannot) support certificate
revocation. Also, this advance update can only be performed should the Terraform configuration be applied during the
early renewal period. (default: `0`)
:param pulumi.Input[bool] is_ca_certificate: Is the generated certificate representing a Certificate Authority (CA) (default: `false`).
:param pulumi.Input[bool] set_subject_key_id: Should the generated certificate include a [subject key identifier](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.2) (default: `false`).
"""
pulumi.set(__self__, "allowed_uses", allowed_uses)
pulumi.set(__self__, "ca_cert_pem", ca_cert_pem)
pulumi.set(__self__, "ca_private_key_pem", ca_private_key_pem)
pulumi.set(__self__, "cert_request_pem", cert_request_pem)
pulumi.set(__self__, "validity_period_hours", validity_period_hours)
if early_renewal_hours is not None:
pulumi.set(__self__, "early_renewal_hours", early_renewal_hours)
if is_ca_certificate is not None:
pulumi.set(__self__, "is_ca_certificate", is_ca_certificate)
if set_subject_key_id is not None:
pulumi.set(__self__, "set_subject_key_id", set_subject_key_id)
@property
@pulumi.getter(name="allowedUses")
def allowed_uses(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
List of key usages allowed for the issued certificate. Values are defined in [RFC 5280](https://datatracker.ietf.org/doc/html/rfc5280) and combine flags defined by both [Key Usages](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.3) and [Extended Key Usages](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.12). Accepted values: `any_extended`, `cert_signing`, `client_auth`, `code_signing`, `content_commitment`, `crl_signing`, `data_encipherment`, `decipher_only`, `digital_signature`, `email_protection`, `encipher_only`, `ipsec_end_system`, `ipsec_tunnel`, `ipsec_user`, `key_agreement`, `key_encipherment`, `microsoft_commercial_code_signing`, `microsoft_kernel_code_signing`, `microsoft_server_gated_crypto`, `netscape_server_gated_crypto`, `ocsp_signing`, `server_auth`, `timestamping`.
"""
return pulumi.get(self, "allowed_uses")
@allowed_uses.setter
def allowed_uses(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "allowed_uses", value)
@property
@pulumi.getter(name="caCertPem")
def ca_cert_pem(self) -> pulumi.Input[str]:
"""
Certificate data of the Certificate Authority (CA) in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
"""
return pulumi.get(self, "ca_cert_pem")
@ca_cert_pem.setter
def ca_cert_pem(self, value: pulumi.Input[str]):
pulumi.set(self, "ca_cert_pem", value)
@property
@pulumi.getter(name="caPrivateKeyPem")
def ca_private_key_pem(self) -> pulumi.Input[str]:
"""
Private key of the Certificate Authority (CA) used to sign the certificate, in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
"""
return pulumi.get(self, "ca_private_key_pem")
@ca_private_key_pem.setter
def ca_private_key_pem(self, value: pulumi.Input[str]):
pulumi.set(self, "ca_private_key_pem", value)
@property
@pulumi.getter(name="certRequestPem")
def cert_request_pem(self) -> pulumi.Input[str]:
"""
Certificate request data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
"""
return pulumi.get(self, "cert_request_pem")
@cert_request_pem.setter
def cert_request_pem(self, value: pulumi.Input[str]):
pulumi.set(self, "cert_request_pem", value)
@property
@pulumi.getter(name="validityPeriodHours")
def validity_period_hours(self) -> pulumi.Input[int]:
"""
Number of hours, after initial issuing, that the certificate will remain valid for.
"""
return pulumi.get(self, "validity_period_hours")
@validity_period_hours.setter
def validity_period_hours(self, value: pulumi.Input[int]):
pulumi.set(self, "validity_period_hours", value)
@property
@pulumi.getter(name="earlyRenewalHours")
def early_renewal_hours(self) -> Optional[pulumi.Input[int]]:
"""
The resource will consider the certificate to have expired the given number of hours before its actual expiry time. This
can be useful to deploy an updated certificate in advance of the expiration of the current certificate. However, the old
certificate remains valid until its true expiration time, since this resource does not (and cannot) support certificate
revocation. Also, this advance update can only be performed should the Terraform configuration be applied during the
early renewal period. (default: `0`)
"""
return pulumi.get(self, "early_renewal_hours")
@early_renewal_hours.setter
def early_renewal_hours(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "early_renewal_hours", value)
@property
@pulumi.getter(name="isCaCertificate")
def is_ca_certificate(self) -> Optional[pulumi.Input[bool]]:
"""
Is the generated certificate representing a Certificate Authority (CA) (default: `false`).
"""
return pulumi.get(self, "is_ca_certificate")
@is_ca_certificate.setter
def is_ca_certificate(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_ca_certificate", value)
@property
@pulumi.getter(name="setSubjectKeyId")
def set_subject_key_id(self) -> Optional[pulumi.Input[bool]]:
"""
Should the generated certificate include a [subject key identifier](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.2) (default: `false`).
"""
return pulumi.get(self, "set_subject_key_id")
@set_subject_key_id.setter
def set_subject_key_id(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "set_subject_key_id", value)
@pulumi.input_type
class _LocallySignedCertState:
def __init__(__self__, *,
allowed_uses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ca_cert_pem: Optional[pulumi.Input[str]] = None,
ca_key_algorithm: Optional[pulumi.Input[str]] = None,
ca_private_key_pem: Optional[pulumi.Input[str]] = None,
cert_pem: Optional[pulumi.Input[str]] = None,
cert_request_pem: Optional[pulumi.Input[str]] = None,
early_renewal_hours: Optional[pulumi.Input[int]] = None,
is_ca_certificate: Optional[pulumi.Input[bool]] = None,
ready_for_renewal: Optional[pulumi.Input[bool]] = None,
set_subject_key_id: Optional[pulumi.Input[bool]] = None,
validity_end_time: Optional[pulumi.Input[str]] = None,
validity_period_hours: Optional[pulumi.Input[int]] = None,
validity_start_time: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering LocallySignedCert resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_uses: List of key usages allowed for the issued certificate. Values are defined in [RFC 5280](https://datatracker.ietf.org/doc/html/rfc5280) and combine flags defined by both [Key Usages](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.3) and [Extended Key Usages](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.12). Accepted values: `any_extended`, `cert_signing`, `client_auth`, `code_signing`, `content_commitment`, `crl_signing`, `data_encipherment`, `decipher_only`, `digital_signature`, `email_protection`, `encipher_only`, `ipsec_end_system`, `ipsec_tunnel`, `ipsec_user`, `key_agreement`, `key_encipherment`, `microsoft_commercial_code_signing`, `microsoft_kernel_code_signing`, `microsoft_server_gated_crypto`, `netscape_server_gated_crypto`, `ocsp_signing`, `server_auth`, `timestamping`.
:param pulumi.Input[str] ca_cert_pem: Certificate data of the Certificate Authority (CA) in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
:param pulumi.Input[str] ca_key_algorithm: Name of the algorithm used when generating the private key provided in `ca_private_key_pem`.
:param pulumi.Input[str] ca_private_key_pem: Private key of the Certificate Authority (CA) used to sign the certificate, in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
:param pulumi.Input[str] cert_pem: Certificate data in PEM (RFC 1421).
:param pulumi.Input[str] cert_request_pem: Certificate request data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
:param pulumi.Input[int] early_renewal_hours: The resource will consider the certificate to have expired the given number of hours before its actual expiry time. This
can be useful to deploy an updated certificate in advance of the expiration of the current certificate. However, the old
certificate remains valid until its true expiration time, since this resource does not (and cannot) support certificate
revocation. Also, this advance update can only be performed should the Terraform configuration be applied during the
early renewal period. (default: `0`)
:param pulumi.Input[bool] is_ca_certificate: Is the generated certificate representing a Certificate Authority (CA) (default: `false`).
:param pulumi.Input[bool] ready_for_renewal: Is the certificate either expired (i.e. beyond the `validity_period_hours`) or ready for an early renewal (i.e. within the `early_renewal_hours`)?
:param pulumi.Input[bool] set_subject_key_id: Should the generated certificate include a [subject key identifier](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.2) (default: `false`).
:param pulumi.Input[str] validity_end_time: The time until which the certificate is invalid, expressed as an [RFC3339](https://tools.ietf.org/html/rfc3339) timestamp.
:param pulumi.Input[int] validity_period_hours: Number of hours, after initial issuing, that the certificate will remain valid for.
:param pulumi.Input[str] validity_start_time: The time after which the certificate is valid, expressed as an [RFC3339](https://tools.ietf.org/html/rfc3339) timestamp.
"""
if allowed_uses is not None:
pulumi.set(__self__, "allowed_uses", allowed_uses)
if ca_cert_pem is not None:
pulumi.set(__self__, "ca_cert_pem", ca_cert_pem)
if ca_key_algorithm is not None:
pulumi.set(__self__, "ca_key_algorithm", ca_key_algorithm)
if ca_private_key_pem is not None:
pulumi.set(__self__, "ca_private_key_pem", ca_private_key_pem)
if cert_pem is not None:
pulumi.set(__self__, "cert_pem", cert_pem)
if cert_request_pem is not None:
pulumi.set(__self__, "cert_request_pem", cert_request_pem)
if early_renewal_hours is not None:
pulumi.set(__self__, "early_renewal_hours", early_renewal_hours)
if is_ca_certificate is not None:
pulumi.set(__self__, "is_ca_certificate", is_ca_certificate)
if ready_for_renewal is not None:
pulumi.set(__self__, "ready_for_renewal", ready_for_renewal)
if set_subject_key_id is not None:
pulumi.set(__self__, "set_subject_key_id", set_subject_key_id)
if validity_end_time is not None:
pulumi.set(__self__, "validity_end_time", validity_end_time)
if validity_period_hours is not None:
pulumi.set(__self__, "validity_period_hours", validity_period_hours)
if validity_start_time is not None:
pulumi.set(__self__, "validity_start_time", validity_start_time)
@property
@pulumi.getter(name="allowedUses")
def allowed_uses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of key usages allowed for the issued certificate. Values are defined in [RFC 5280](https://datatracker.ietf.org/doc/html/rfc5280) and combine flags defined by both [Key Usages](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.3) and [Extended Key Usages](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.12). Accepted values: `any_extended`, `cert_signing`, `client_auth`, `code_signing`, `content_commitment`, `crl_signing`, `data_encipherment`, `decipher_only`, `digital_signature`, `email_protection`, `encipher_only`, `ipsec_end_system`, `ipsec_tunnel`, `ipsec_user`, `key_agreement`, `key_encipherment`, `microsoft_commercial_code_signing`, `microsoft_kernel_code_signing`, `microsoft_server_gated_crypto`, `netscape_server_gated_crypto`, `ocsp_signing`, `server_auth`, `timestamping`.
"""
return pulumi.get(self, "allowed_uses")
@allowed_uses.setter
def allowed_uses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "allowed_uses", value)
@property
@pulumi.getter(name="caCertPem")
def ca_cert_pem(self) -> Optional[pulumi.Input[str]]:
"""
Certificate data of the Certificate Authority (CA) in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
"""
return pulumi.get(self, "ca_cert_pem")
@ca_cert_pem.setter
def ca_cert_pem(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ca_cert_pem", value)
@property
@pulumi.getter(name="caKeyAlgorithm")
def ca_key_algorithm(self) -> Optional[pulumi.Input[str]]:
"""
Name of the algorithm used when generating the private key provided in `ca_private_key_pem`.
"""
return pulumi.get(self, "ca_key_algorithm")
@ca_key_algorithm.setter
def ca_key_algorithm(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ca_key_algorithm", value)
@property
@pulumi.getter(name="caPrivateKeyPem")
def ca_private_key_pem(self) -> Optional[pulumi.Input[str]]:
"""
Private key of the Certificate Authority (CA) used to sign the certificate, in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
"""
return pulumi.get(self, "ca_private_key_pem")
@ca_private_key_pem.setter
def ca_private_key_pem(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ca_private_key_pem", value)
@property
@pulumi.getter(name="certPem")
def cert_pem(self) -> Optional[pulumi.Input[str]]:
"""
Certificate data in PEM (RFC 1421).
"""
return pulumi.get(self, "cert_pem")
@cert_pem.setter
def cert_pem(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cert_pem", value)
@property
@pulumi.getter(name="certRequestPem")
def cert_request_pem(self) -> Optional[pulumi.Input[str]]:
"""
Certificate request data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
"""
return pulumi.get(self, "cert_request_pem")
@cert_request_pem.setter
def cert_request_pem(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cert_request_pem", value)
@property
@pulumi.getter(name="earlyRenewalHours")
def early_renewal_hours(self) -> Optional[pulumi.Input[int]]:
"""
The resource will consider the certificate to have expired the given number of hours before its actual expiry time. This
can be useful to deploy an updated certificate in advance of the expiration of the current certificate. However, the old
certificate remains valid until its true expiration time, since this resource does not (and cannot) support certificate
revocation. Also, this advance update can only be performed should the Terraform configuration be applied during the
early renewal period. (default: `0`)
"""
return pulumi.get(self, "early_renewal_hours")
@early_renewal_hours.setter
def early_renewal_hours(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "early_renewal_hours", value)
@property
@pulumi.getter(name="isCaCertificate")
def is_ca_certificate(self) -> Optional[pulumi.Input[bool]]:
"""
Is the generated certificate representing a Certificate Authority (CA) (default: `false`).
"""
return pulumi.get(self, "is_ca_certificate")
@is_ca_certificate.setter
def is_ca_certificate(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_ca_certificate", value)
@property
@pulumi.getter(name="readyForRenewal")
def ready_for_renewal(self) -> Optional[pulumi.Input[bool]]:
"""
Is the certificate either expired (i.e. beyond the `validity_period_hours`) or ready for an early renewal (i.e. within the `early_renewal_hours`)?
"""
return pulumi.get(self, "ready_for_renewal")
@ready_for_renewal.setter
def ready_for_renewal(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "ready_for_renewal", value)
@property
@pulumi.getter(name="setSubjectKeyId")
def set_subject_key_id(self) -> Optional[pulumi.Input[bool]]:
"""
Should the generated certificate include a [subject key identifier](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.2) (default: `false`).
"""
return pulumi.get(self, "set_subject_key_id")
@set_subject_key_id.setter
def set_subject_key_id(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "set_subject_key_id", value)
@property
@pulumi.getter(name="validityEndTime")
def validity_end_time(self) -> Optional[pulumi.Input[str]]:
"""
The time until which the certificate is invalid, expressed as an [RFC3339](https://tools.ietf.org/html/rfc3339) timestamp.
"""
return pulumi.get(self, "validity_end_time")
@validity_end_time.setter
def validity_end_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "validity_end_time", value)
@property
@pulumi.getter(name="validityPeriodHours")
def validity_period_hours(self) -> Optional[pulumi.Input[int]]:
"""
Number of hours, after initial issuing, that the certificate will remain valid for.
"""
return pulumi.get(self, "validity_period_hours")
@validity_period_hours.setter
def validity_period_hours(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "validity_period_hours", value)
@property
@pulumi.getter(name="validityStartTime")
def validity_start_time(self) -> Optional[pulumi.Input[str]]:
"""
The time after which the certificate is valid, expressed as an [RFC3339](https://tools.ietf.org/html/rfc3339) timestamp.
"""
return pulumi.get(self, "validity_start_time")
@validity_start_time.setter
def validity_start_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "validity_start_time", value)
class LocallySignedCert(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allowed_uses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ca_cert_pem: Optional[pulumi.Input[str]] = None,
ca_private_key_pem: Optional[pulumi.Input[str]] = None,
cert_request_pem: Optional[pulumi.Input[str]] = None,
early_renewal_hours: Optional[pulumi.Input[int]] = None,
is_ca_certificate: Optional[pulumi.Input[bool]] = None,
set_subject_key_id: Optional[pulumi.Input[bool]] = None,
validity_period_hours: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
Create a LocallySignedCert resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_uses: List of key usages allowed for the issued certificate. Values are defined in [RFC 5280](https://datatracker.ietf.org/doc/html/rfc5280) and combine flags defined by both [Key Usages](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.3) and [Extended Key Usages](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.12). Accepted values: `any_extended`, `cert_signing`, `client_auth`, `code_signing`, `content_commitment`, `crl_signing`, `data_encipherment`, `decipher_only`, `digital_signature`, `email_protection`, `encipher_only`, `ipsec_end_system`, `ipsec_tunnel`, `ipsec_user`, `key_agreement`, `key_encipherment`, `microsoft_commercial_code_signing`, `microsoft_kernel_code_signing`, `microsoft_server_gated_crypto`, `netscape_server_gated_crypto`, `ocsp_signing`, `server_auth`, `timestamping`.
:param pulumi.Input[str] ca_cert_pem: Certificate data of the Certificate Authority (CA) in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
:param pulumi.Input[str] ca_private_key_pem: Private key of the Certificate Authority (CA) used to sign the certificate, in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
:param pulumi.Input[str] cert_request_pem: Certificate request data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
:param pulumi.Input[int] early_renewal_hours: The resource will consider the certificate to have expired the given number of hours before its actual expiry time. This
can be useful to deploy an updated certificate in advance of the expiration of the current certificate. However, the old
certificate remains valid until its true expiration time, since this resource does not (and cannot) support certificate
revocation. Also, this advance update can only be performed should the Terraform configuration be applied during the
early renewal period. (default: `0`)
:param pulumi.Input[bool] is_ca_certificate: Is the generated certificate representing a Certificate Authority (CA) (default: `false`).
:param pulumi.Input[bool] set_subject_key_id: Should the generated certificate include a [subject key identifier](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.2) (default: `false`).
:param pulumi.Input[int] validity_period_hours: Number of hours, after initial issuing, that the certificate will remain valid for.
"""
...
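    # Hedged usage sketch for the overload above. It assumes this module is
    # consumed as the `pulumi_tls` SDK and that the referenced PEM strings come
    # from other resources in the same program; all names are illustrative.
    #
    #   import pulumi_tls as tls
    #
    #   cert = tls.LocallySignedCert(
    #       "server-cert",
    #       cert_request_pem=csr_pem,
    #       ca_cert_pem=ca_cert_pem,
    #       ca_private_key_pem=ca_private_key_pem,
    #       validity_period_hours=12,
    #       allowed_uses=["key_encipherment", "digital_signature", "server_auth"],
    #   )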
@overload
def __init__(__self__,
resource_name: str,
args: LocallySignedCertArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a LocallySignedCert resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param LocallySignedCertArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(LocallySignedCertArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allowed_uses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ca_cert_pem: Optional[pulumi.Input[str]] = None,
ca_private_key_pem: Optional[pulumi.Input[str]] = None,
cert_request_pem: Optional[pulumi.Input[str]] = None,
early_renewal_hours: Optional[pulumi.Input[int]] = None,
is_ca_certificate: Optional[pulumi.Input[bool]] = None,
set_subject_key_id: Optional[pulumi.Input[bool]] = None,
validity_period_hours: Optional[pulumi.Input[int]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = LocallySignedCertArgs.__new__(LocallySignedCertArgs)
if allowed_uses is None and not opts.urn:
raise TypeError("Missing required property 'allowed_uses'")
__props__.__dict__["allowed_uses"] = allowed_uses
if ca_cert_pem is None and not opts.urn:
raise TypeError("Missing required property 'ca_cert_pem'")
__props__.__dict__["ca_cert_pem"] = ca_cert_pem
if ca_private_key_pem is None and not opts.urn:
raise TypeError("Missing required property 'ca_private_key_pem'")
__props__.__dict__["ca_private_key_pem"] = None if ca_private_key_pem is None else pulumi.Output.secret(ca_private_key_pem)
if cert_request_pem is None and not opts.urn:
raise TypeError("Missing required property 'cert_request_pem'")
__props__.__dict__["cert_request_pem"] = cert_request_pem
__props__.__dict__["early_renewal_hours"] = early_renewal_hours
__props__.__dict__["is_ca_certificate"] = is_ca_certificate
__props__.__dict__["set_subject_key_id"] = set_subject_key_id
if validity_period_hours is None and not opts.urn:
raise TypeError("Missing required property 'validity_period_hours'")
__props__.__dict__["validity_period_hours"] = validity_period_hours
__props__.__dict__["ca_key_algorithm"] = None
__props__.__dict__["cert_pem"] = None
__props__.__dict__["ready_for_renewal"] = None
__props__.__dict__["validity_end_time"] = None
__props__.__dict__["validity_start_time"] = None
secret_opts = pulumi.ResourceOptions(additional_secret_outputs=["caPrivateKeyPem"])
opts = pulumi.ResourceOptions.merge(opts, secret_opts)
super(LocallySignedCert, __self__).__init__(
'tls:index/locallySignedCert:LocallySignedCert',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
allowed_uses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ca_cert_pem: Optional[pulumi.Input[str]] = None,
ca_key_algorithm: Optional[pulumi.Input[str]] = None,
ca_private_key_pem: Optional[pulumi.Input[str]] = None,
cert_pem: Optional[pulumi.Input[str]] = None,
cert_request_pem: Optional[pulumi.Input[str]] = None,
early_renewal_hours: Optional[pulumi.Input[int]] = None,
is_ca_certificate: Optional[pulumi.Input[bool]] = None,
ready_for_renewal: Optional[pulumi.Input[bool]] = None,
set_subject_key_id: Optional[pulumi.Input[bool]] = None,
validity_end_time: Optional[pulumi.Input[str]] = None,
validity_period_hours: Optional[pulumi.Input[int]] = None,
validity_start_time: Optional[pulumi.Input[str]] = None) -> 'LocallySignedCert':
"""
Get an existing LocallySignedCert resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] allowed_uses: List of key usages allowed for the issued certificate. Values are defined in [RFC 5280](https://datatracker.ietf.org/doc/html/rfc5280) and combine flags defined by both [Key Usages](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.3) and [Extended Key Usages](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.12). Accepted values: `any_extended`, `cert_signing`, `client_auth`, `code_signing`, `content_commitment`, `crl_signing`, `data_encipherment`, `decipher_only`, `digital_signature`, `email_protection`, `encipher_only`, `ipsec_end_system`, `ipsec_tunnel`, `ipsec_user`, `key_agreement`, `key_encipherment`, `microsoft_commercial_code_signing`, `microsoft_kernel_code_signing`, `microsoft_server_gated_crypto`, `netscape_server_gated_crypto`, `ocsp_signing`, `server_auth`, `timestamping`.
:param pulumi.Input[str] ca_cert_pem: Certificate data of the Certificate Authority (CA) in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
:param pulumi.Input[str] ca_key_algorithm: Name of the algorithm used when generating the private key provided in `ca_private_key_pem`.
:param pulumi.Input[str] ca_private_key_pem: Private key of the Certificate Authority (CA) used to sign the certificate, in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
:param pulumi.Input[str] cert_pem: Certificate data in PEM (RFC 1421).
:param pulumi.Input[str] cert_request_pem: Certificate request data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
:param pulumi.Input[int] early_renewal_hours: The resource will consider the certificate to have expired the given number of hours before its actual expiry time. This
can be useful to deploy an updated certificate in advance of the expiration of the current certificate. However, the old
certificate remains valid until its true expiration time, since this resource does not (and cannot) support certificate
revocation. Also, this advance update can only be performed should the Terraform configuration be applied during the
early renewal period. (default: `0`)
:param pulumi.Input[bool] is_ca_certificate: Is the generated certificate representing a Certificate Authority (CA) (default: `false`).
:param pulumi.Input[bool] ready_for_renewal: Is the certificate either expired (i.e. beyond the `validity_period_hours`) or ready for an early renewal (i.e. within the `early_renewal_hours`)?
:param pulumi.Input[bool] set_subject_key_id: Should the generated certificate include a [subject key identifier](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.2) (default: `false`).
:param pulumi.Input[str] validity_end_time: The time until which the certificate is invalid, expressed as an [RFC3339](https://tools.ietf.org/html/rfc3339) timestamp.
:param pulumi.Input[int] validity_period_hours: Number of hours, after initial issuing, that the certificate will remain valid for.
:param pulumi.Input[str] validity_start_time: The time after which the certificate is valid, expressed as an [RFC3339](https://tools.ietf.org/html/rfc3339) timestamp.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _LocallySignedCertState.__new__(_LocallySignedCertState)
__props__.__dict__["allowed_uses"] = allowed_uses
__props__.__dict__["ca_cert_pem"] = ca_cert_pem
__props__.__dict__["ca_key_algorithm"] = ca_key_algorithm
__props__.__dict__["ca_private_key_pem"] = ca_private_key_pem
__props__.__dict__["cert_pem"] = cert_pem
__props__.__dict__["cert_request_pem"] = cert_request_pem
__props__.__dict__["early_renewal_hours"] = early_renewal_hours
__props__.__dict__["is_ca_certificate"] = is_ca_certificate
__props__.__dict__["ready_for_renewal"] = ready_for_renewal
__props__.__dict__["set_subject_key_id"] = set_subject_key_id
__props__.__dict__["validity_end_time"] = validity_end_time
__props__.__dict__["validity_period_hours"] = validity_period_hours
__props__.__dict__["validity_start_time"] = validity_start_time
return LocallySignedCert(resource_name, opts=opts, __props__=__props__)
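    # Hedged lookup sketch for the `get` flow above: "existing-id" stands in
    # for a real provider-assigned resource ID.
    #
    #   imported = LocallySignedCert.get("imported-cert", id="existing-id")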
@property
@pulumi.getter(name="allowedUses")
def allowed_uses(self) -> pulumi.Output[Sequence[str]]:
"""
List of key usages allowed for the issued certificate. Values are defined in [RFC 5280](https://datatracker.ietf.org/doc/html/rfc5280) and combine flags defined by both [Key Usages](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.3) and [Extended Key Usages](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.12). Accepted values: `any_extended`, `cert_signing`, `client_auth`, `code_signing`, `content_commitment`, `crl_signing`, `data_encipherment`, `decipher_only`, `digital_signature`, `email_protection`, `encipher_only`, `ipsec_end_system`, `ipsec_tunnel`, `ipsec_user`, `key_agreement`, `key_encipherment`, `microsoft_commercial_code_signing`, `microsoft_kernel_code_signing`, `microsoft_server_gated_crypto`, `netscape_server_gated_crypto`, `ocsp_signing`, `server_auth`, `timestamping`.
"""
return pulumi.get(self, "allowed_uses")
@property
@pulumi.getter(name="caCertPem")
def ca_cert_pem(self) -> pulumi.Output[str]:
"""
Certificate data of the Certificate Authority (CA) in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
"""
return pulumi.get(self, "ca_cert_pem")
@property
@pulumi.getter(name="caKeyAlgorithm")
def ca_key_algorithm(self) -> pulumi.Output[str]:
"""
Name of the algorithm used when generating the private key provided in `ca_private_key_pem`.
"""
return pulumi.get(self, "ca_key_algorithm")
@property
@pulumi.getter(name="caPrivateKeyPem")
def ca_private_key_pem(self) -> pulumi.Output[str]:
"""
Private key of the Certificate Authority (CA) used to sign the certificate, in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
"""
return pulumi.get(self, "ca_private_key_pem")
@property
@pulumi.getter(name="certPem")
def cert_pem(self) -> pulumi.Output[str]:
"""
Certificate data in PEM (RFC 1421).
"""
return pulumi.get(self, "cert_pem")
@property
@pulumi.getter(name="certRequestPem")
def cert_request_pem(self) -> pulumi.Output[str]:
"""
Certificate request data in [PEM (RFC 1421)](https://datatracker.ietf.org/doc/html/rfc1421) format.
"""
return pulumi.get(self, "cert_request_pem")
@property
@pulumi.getter(name="earlyRenewalHours")
def early_renewal_hours(self) -> pulumi.Output[int]:
"""
The resource will consider the certificate to have expired the given number of hours before its actual expiry time. This
can be useful to deploy an updated certificate in advance of the expiration of the current certificate. However, the old
certificate remains valid until its true expiration time, since this resource does not (and cannot) support certificate
revocation. Also, this advance update can only be performed should the Terraform configuration be applied during the
early renewal period. (default: `0`)
"""
return pulumi.get(self, "early_renewal_hours")
@property
@pulumi.getter(name="isCaCertificate")
def is_ca_certificate(self) -> pulumi.Output[bool]:
"""
Is the generated certificate representing a Certificate Authority (CA) (default: `false`).
"""
return pulumi.get(self, "is_ca_certificate")
@property
@pulumi.getter(name="readyForRenewal")
def ready_for_renewal(self) -> pulumi.Output[bool]:
"""
Is the certificate either expired (i.e. beyond the `validity_period_hours`) or ready for an early renewal (i.e. within the `early_renewal_hours`)?
"""
return pulumi.get(self, "ready_for_renewal")
@property
@pulumi.getter(name="setSubjectKeyId")
def set_subject_key_id(self) -> pulumi.Output[bool]:
"""
Should the generated certificate include a [subject key identifier](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.2) (default: `false`).
"""
return pulumi.get(self, "set_subject_key_id")
@property
@pulumi.getter(name="validityEndTime")
def validity_end_time(self) -> pulumi.Output[str]:
"""
The time until which the certificate is invalid, expressed as an [RFC3339](https://tools.ietf.org/html/rfc3339) timestamp.
"""
return pulumi.get(self, "validity_end_time")
@property
@pulumi.getter(name="validityPeriodHours")
def validity_period_hours(self) -> pulumi.Output[int]:
"""
Number of hours, after initial issuing, that the certificate will remain valid for.
"""
return pulumi.get(self, "validity_period_hours")
@property
@pulumi.getter(name="validityStartTime")
def validity_start_time(self) -> pulumi.Output[str]:
"""
The time after which the certificate is valid, expressed as an [RFC3339](https://tools.ietf.org/html/rfc3339) timestamp.
"""
return pulumi.get(self, "validity_start_time") | PypiClean |
/hikari_tanjun-2.17.0.tar.gz/hikari_tanjun-2.17.0/tanjun/context/menu.py | """Menu context implementation."""
from __future__ import annotations
__all__: list[str] = ["MenuContext"]
import typing
import hikari
from .. import _internal
from .. import abc as tanjun
from . import slash
if typing.TYPE_CHECKING:
import asyncio
from collections import abc as collections
from typing_extensions import Self
_T = typing.TypeVar("_T")
_ResponseTypeT = typing.Union[
hikari.api.InteractionMessageBuilder, hikari.api.InteractionDeferredBuilder, hikari.api.InteractionModalBuilder
]
_VALID_TYPES: frozenset[typing.Literal[hikari.CommandType.USER, hikari.CommandType.MESSAGE]] = frozenset(
[hikari.CommandType.USER, hikari.CommandType.MESSAGE]
)
class MenuContext(slash.AppCommandContext, tanjun.MenuContext):
"""Standard menu command execution context."""
__slots__ = ("_command", "_marked_not_found", "_on_not_found")
def __init__(
self,
client: tanjun.Client,
interaction: hikari.CommandInteraction,
register_task: collections.Callable[[asyncio.Task[typing.Any]], None],
*,
default_to_ephemeral: bool = False,
future: typing.Optional[asyncio.Future[_ResponseTypeT]] = None,
on_not_found: typing.Optional[collections.Callable[[tanjun.MenuContext], collections.Awaitable[None]]] = None,
) -> None:
"""Initialise a menu command context.
Parameters
----------
client
The Tanjun client this context is bound to.
interaction
The command interaction this context is for.
register_task
Callback used to register long-running tasks spawned by this context.
future
A future used to set the initial response if this is being called
through the REST webhook flow.
default_to_ephemeral
Whether to default to ephemeral responses.
on_not_found
Callback used to indicate no matching command was found.
"""
super().__init__(client, interaction, register_task, default_to_ephemeral=default_to_ephemeral, future=future)
self._command: typing.Optional[tanjun.MenuCommand[typing.Any, typing.Any]] = None
self._marked_not_found = False
self._on_not_found = on_not_found
self._set_type_special_case(tanjun.MenuContext, self)._set_type_special_case(MenuContext, self)
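    # Hedged construction sketch: contexts are normally built by the Tanjun
    # client itself; `client`, `interaction` and `pending_tasks` below are
    # placeholders, and the not-found callback is purely illustrative.
    #
    #   ctx = MenuContext(
    #       client,
    #       interaction,
    #       pending_tasks.append,            # register_task callback
    #       default_to_ephemeral=True,
    #       on_not_found=handle_not_found,   # hypothetical coroutine callback
    #   )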
@property
def command(self) -> typing.Optional[tanjun.MenuCommand[typing.Any, typing.Any]]:
# <<inherited docstring from tanjun.abc.MenuContext>>.
return self._command
@property
def target_id(self) -> hikari.Snowflake:
# <<inherited docstring from tanjun.abc.MenuContext>>.
assert self._interaction.resolved
mapping = self._interaction.resolved.users or self._interaction.resolved.messages
if not mapping:
raise RuntimeError("Unknown menu type")
return next(iter(mapping.keys()))
@property
def target(self) -> typing.Union[hikari.InteractionMember, hikari.User, hikari.Message]:
# <<inherited docstring from tanjun.abc.MenuContext>>.
assert self._interaction.resolved
mapping = (
self._interaction.resolved.members
or self._interaction.resolved.users
or self._interaction.resolved.messages
)
if not mapping:
raise RuntimeError("Unknown menu type")
return next(iter(mapping.values()))
@property
def triggering_name(self) -> str:
# <<inherited docstring from tanjun.abc.Context>>.
return self._interaction.command_name
@property
def type(self) -> typing.Literal[hikari.CommandType.USER, hikari.CommandType.MESSAGE]:
# <<inherited docstring from tanjun.abc.MenuContext>>.
command_type = hikari.CommandType(self._interaction.command_type)
assert command_type in _VALID_TYPES
return command_type
async def mark_not_found(self) -> None:
# <<inherited docstring from tanjun.abc.AppCommandContext>>.
# TODO: assert not finalised?
if self._on_not_found and not self._marked_not_found:
self._marked_not_found = True
await self._on_not_found(self)
def set_command(self, command: typing.Optional[tanjun.MenuCommand[typing.Any, typing.Any]], /) -> Self:
# <<inherited docstring from tanjun.abc.MenuContext>>.
if command:
self._set_type_special_case(tanjun.MenuCommand, command)
elif self._command:
            # Drop the special case registered for the previous command.
            self._remove_type_special_case(tanjun.MenuCommand)
self._command = command
return self
@typing.overload
def resolve_to_member(self) -> hikari.InteractionMember:
...
@typing.overload
def resolve_to_member(self, *, default: _T) -> typing.Union[hikari.InteractionMember, _T]:
...
def resolve_to_member(
self, *, default: typing.Union[_T, _internal.Default] = _internal.DEFAULT
) -> typing.Union[hikari.InteractionMember, _T]:
# <<inherited docstring from tanjun.abc.MenuContext>>.
assert self._interaction.resolved
if self._interaction.resolved.members:
return next(iter(self._interaction.resolved.members.values()))
if self._interaction.resolved.users:
if default is not _internal.DEFAULT:
return default
raise LookupError("User isn't in the current guild")
raise TypeError("Cannot resolve message menu context to a user")
def resolve_to_message(self) -> hikari.Message:
# <<inherited docstring from tanjun.abc.MenuContext>>.
assert self._interaction.resolved
if self._interaction.resolved.messages:
return next(iter(self._interaction.resolved.messages.values()))
raise TypeError("Cannot resolve user menu context to a message")
def resolve_to_user(self) -> typing.Union[hikari.User, hikari.Member]:
# <<inherited docstring from tanjun.abc.MenuContext>>.
assert self._interaction.resolved
return self.resolve_to_member(default=None) or next(iter(self._interaction.resolved.users.values())) | PypiClean |
/aicmder-0.5.8-py3-none-any.whl/dl/baili.py | import math
def getAngle(x1, y1, x2, y2):
inRads = math.atan2(y2 - y1, x2 - x1)
    # Map to a coordinate system where 0 degrees is at 3 o'clock and 270 degrees is at 12 o'clock.
# print(inRads)
if inRads < 0:
inRads = abs(inRads)
else:
inRads = 2 * math.pi - inRads
return inRads, math.degrees(inRads)
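# Worked example (screen coordinates, y grows downward): getAngle(0, 0, 1, -1)
# computes atan2(-1, 1) = -pi/4; the negative result is flipped to pi/4, i.e.
# 45 degrees for a target up and to the right of the start point.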
def cal_offset(x1, y1, x2, y2, scale=20):
rad, degree = getAngle(x1, y1, x2, y2)
new_degree = min(degree, 360 - degree)
rad = math.radians(new_degree)
y_offset = math.sin(rad) * scale
x_offset = math.cos(rad) * scale
x_offset, y_offset = abs(round(x_offset)), abs(round(y_offset))
# if x2 > x1 and y2 > y1:
# pass
if x2 > x1 and y2 < y1:
y_offset = -y_offset
if x2 < x1 and y2 < y1:
y_offset = -y_offset
x_offset = -x_offset
if x2 < x1 and y2 > y1:
x_offset = -x_offset
# if x2 > x1 and y2 > y1 + 20:
# y_offset += 40
# print(round(x_offset), round(y_offset), new_degree, x1, y1, x2, y2)
return round(x_offset), round(y_offset)
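# Worked example: cal_offset(0, 0, 3, 4, scale=20) folds the angle to about
# 53.13 degrees, giving x_offset = cos * 20 ~= 12 and y_offset = sin * 20 = 16;
# the target lies to the right and below, so neither sign is flipped -> (12, 16).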
def shoot(resp_d, base_center_x, base_center_y):
    # For each detected enemy box, derive an aim point ("shoot_x"/"shoot_y"):
    # start from the horizontal centre of the box at its end_y edge, then nudge
    # it with the heuristic offsets below depending on where the box sits
    # relative to the reference point (base_center_x, base_center_y).
    for i, enermy in enumerate(resp_d["data"]):
        start_x, end_x, end_y, start_y = enermy["start_x"], enermy["end_x"], enermy["end_y"], enermy["start_y"]
        width = end_x - start_x
# height = end_y - start_y
origin_shoot_x = start_x + width / 2
origin_shoot_y = end_y
shoot_x, shoot_y = origin_shoot_x, origin_shoot_y
print(base_center_x, origin_shoot_x, base_center_y, origin_shoot_y)
# right down
if base_center_x > origin_shoot_x and base_center_y > origin_shoot_y + 40:
shoot_y -= 60
if abs(base_center_x - origin_shoot_x) < 90:
print("add left")
shoot_x += 20
if abs(base_center_y - origin_shoot_y) < 100:
print("add Y")
shoot_y += 45
print("right down")
if base_center_x > origin_shoot_x and base_center_y < origin_shoot_y + 40:
shoot_y += 60
shoot_x += 30
if abs(base_center_y - origin_shoot_y) < 30:
shoot_y -= 30
print("Minus y")
print("right down2")
if base_center_x < origin_shoot_x and base_center_y > origin_shoot_y + 40:
shoot_x += 20
# if abs(base_center_x - origin_shoot_x) < 90:
# print("add left")
# shoot_x += 20
if abs(base_center_y - origin_shoot_y) < 100:
print("add Y")
shoot_y += 20
print("left down")
if base_center_x < origin_shoot_x and base_center_y < origin_shoot_y + 40:
shoot_y += 40
if abs(base_center_x - origin_shoot_x) < 40:
shoot_x += 10
print("Add x 10")
if abs(base_center_x - origin_shoot_x) > 140:
shoot_x += 30
print("Add x 30")
if abs(base_center_y - origin_shoot_y) < 90 and abs(base_center_y - origin_shoot_y) > 40:
shoot_y += 25
print("Add y 25")
if abs(base_center_x - origin_shoot_x) > 150 and abs(base_center_y - origin_shoot_y) < 40:
shoot_y += 35
print("Add y 35")
print("left down2")
# if base_center_y > end_y + 20:
# print("1")
# shoot_y = end_y - 50
# else:
# print("2")
# shoot_y = end_y + 30
# if base_center_x < shoot_x:
# if abs(base_center_x - shoot_x) < 155:
# print("3")
# shoot_x -= 10
# else:
# print("4")
# shoot_x -= 20
# else:
# print("5")
# shoot_x += 10
# x_offset, y_offset = cal_offset(base_center_x, base_center_y, shoot_x, shoot_y, scale=80)
resp_d["data"][i]["shoot_x"] = shoot_x
resp_d["data"][i]["shoot_y"] = shoot_y
# x_offset, y_offset = cal_offset(325, 199, shoot_x, shoot_y, scale=80)
# resp_d["data"][i]["shoot_x"] = 580 + x_offset
# resp_d["data"][i]["shoot_y"] = 285 + y_offset | PypiClean |
/RxPy3-1.0.2-py3-none-any.whl/rx3/core/operators/delaywithmapper.py | from typing import Callable
from rx3.core import Observable, typing
from rx3.disposable import CompositeDisposable, SingleAssignmentDisposable, SerialDisposable
def _delay_with_mapper(subscription_delay=None, delay_duration_mapper=None) -> Callable[[Observable], Observable]:
def delay_with_mapper(source: Observable) -> Observable:
"""Time shifts the observable sequence based on a subscription
delay and a delay mapper function for each element.
Examples:
            >>> obs = delay_with_mapper(source)
Args:
subscription_delay: [Optional] Sequence indicating the
delay for the subscription to the source.
delay_duration_mapper: [Optional] Selector function to
retrieve a sequence indicating the delay for each given
element.
Returns:
Time-shifted observable sequence.
"""
sub_delay, mapper = None, None
if isinstance(subscription_delay, typing.Observable):
mapper = delay_duration_mapper
sub_delay = subscription_delay
else:
mapper = subscription_delay
def subscribe(observer, scheduler=None):
delays = CompositeDisposable()
at_end = [False]
def done():
if (at_end[0] and delays.length == 0):
observer.on_completed()
subscription = SerialDisposable()
def start():
def on_next(x):
try:
delay = mapper(x)
except Exception as error:
observer.on_error(error)
return
d = SingleAssignmentDisposable()
delays.add(d)
def on_next(_):
observer.on_next(x)
delays.remove(d)
done()
def on_completed():
observer.on_next(x)
delays.remove(d)
done()
d.disposable = delay.subscribe_(on_next, observer.on_error, on_completed, scheduler)
def on_completed():
at_end[0] = True
subscription.dispose()
done()
subscription.disposable = source.subscribe_(on_next, observer.on_error, on_completed, scheduler)
if not sub_delay:
start()
else:
                subscription.disposable = sub_delay.subscribe_(
                    lambda _: start(),
                    observer.on_error,
                    start)
return CompositeDisposable(subscription, delays)
return Observable(subscribe)
return delay_with_mapper | PypiClean |
/tensorflow_cpu-2.14.0rc1-cp311-cp311-macosx_10_15_x86_64.whl/tensorflow/_api/v2/compat/v2/linalg/__init__.py | import sys as _sys
from . import experimental
from tensorflow.python.ops.array_ops import matrix_diag as diag
from tensorflow.python.ops.array_ops import matrix_diag_part as diag_part
from tensorflow.python.ops.array_ops import matrix_set_diag as set_diag
from tensorflow.python.ops.array_ops import matrix_transpose
from tensorflow.python.ops.array_ops import tensor_diag_part
from tensorflow.python.ops.clip_ops import global_norm
from tensorflow.python.ops.gen_array_ops import diag as tensor_diag
from tensorflow.python.ops.gen_array_ops import matrix_band_part as band_part
from tensorflow.python.ops.gen_linalg_ops import cholesky
from tensorflow.python.ops.gen_linalg_ops import log_matrix_determinant as slogdet
from tensorflow.python.ops.gen_linalg_ops import lu
from tensorflow.python.ops.gen_linalg_ops import matrix_determinant as det
from tensorflow.python.ops.gen_linalg_ops import matrix_inverse as inv
from tensorflow.python.ops.gen_linalg_ops import matrix_logarithm as logm
from tensorflow.python.ops.gen_linalg_ops import matrix_solve as solve
from tensorflow.python.ops.gen_linalg_ops import matrix_square_root as sqrtm
from tensorflow.python.ops.gen_linalg_ops import qr
from tensorflow.python.ops.gen_math_ops import cross
from tensorflow.python.ops.linalg.linalg_impl import adjoint
from tensorflow.python.ops.linalg.linalg_impl import banded_triangular_solve
from tensorflow.python.ops.linalg.linalg_impl import eigh_tridiagonal
from tensorflow.python.ops.linalg.linalg_impl import logdet
from tensorflow.python.ops.linalg.linalg_impl import lu_matrix_inverse
from tensorflow.python.ops.linalg.linalg_impl import lu_reconstruct
from tensorflow.python.ops.linalg.linalg_impl import lu_solve
from tensorflow.python.ops.linalg.linalg_impl import matrix_exponential as expm
from tensorflow.python.ops.linalg.linalg_impl import matrix_rank
from tensorflow.python.ops.linalg.linalg_impl import pinv
from tensorflow.python.ops.linalg.linalg_impl import tridiagonal_matmul
from tensorflow.python.ops.linalg.linalg_impl import tridiagonal_solve
from tensorflow.python.ops.linalg.linear_operator import LinearOperator
from tensorflow.python.ops.linalg.linear_operator_adjoint import LinearOperatorAdjoint
from tensorflow.python.ops.linalg.linear_operator_block_diag import LinearOperatorBlockDiag
from tensorflow.python.ops.linalg.linear_operator_block_lower_triangular import LinearOperatorBlockLowerTriangular
from tensorflow.python.ops.linalg.linear_operator_circulant import LinearOperatorCirculant
from tensorflow.python.ops.linalg.linear_operator_circulant import LinearOperatorCirculant2D
from tensorflow.python.ops.linalg.linear_operator_circulant import LinearOperatorCirculant3D
from tensorflow.python.ops.linalg.linear_operator_composition import LinearOperatorComposition
from tensorflow.python.ops.linalg.linear_operator_diag import LinearOperatorDiag
from tensorflow.python.ops.linalg.linear_operator_full_matrix import LinearOperatorFullMatrix
from tensorflow.python.ops.linalg.linear_operator_householder import LinearOperatorHouseholder
from tensorflow.python.ops.linalg.linear_operator_identity import LinearOperatorIdentity
from tensorflow.python.ops.linalg.linear_operator_identity import LinearOperatorScaledIdentity
from tensorflow.python.ops.linalg.linear_operator_inversion import LinearOperatorInversion
from tensorflow.python.ops.linalg.linear_operator_kronecker import LinearOperatorKronecker
from tensorflow.python.ops.linalg.linear_operator_low_rank_update import LinearOperatorLowRankUpdate
from tensorflow.python.ops.linalg.linear_operator_lower_triangular import LinearOperatorLowerTriangular
from tensorflow.python.ops.linalg.linear_operator_permutation import LinearOperatorPermutation
from tensorflow.python.ops.linalg.linear_operator_toeplitz import LinearOperatorToeplitz
from tensorflow.python.ops.linalg.linear_operator_tridiag import LinearOperatorTridiag
from tensorflow.python.ops.linalg.linear_operator_zeros import LinearOperatorZeros
from tensorflow.python.ops.linalg_ops import cholesky_solve
from tensorflow.python.ops.linalg_ops import eig
from tensorflow.python.ops.linalg_ops import eigvals
from tensorflow.python.ops.linalg_ops import eye
from tensorflow.python.ops.linalg_ops import matrix_solve_ls as lstsq
from tensorflow.python.ops.linalg_ops import matrix_triangular_solve as triangular_solve
from tensorflow.python.ops.linalg_ops import norm_v2 as norm
from tensorflow.python.ops.linalg_ops import self_adjoint_eig as eigh
from tensorflow.python.ops.linalg_ops import self_adjoint_eigvals as eigvalsh
from tensorflow.python.ops.linalg_ops import svd
from tensorflow.python.ops.math_ops import matmul
from tensorflow.python.ops.math_ops import matvec
from tensorflow.python.ops.math_ops import tensordot
from tensorflow.python.ops.math_ops import trace
from tensorflow.python.ops.nn_impl import l2_normalize
from tensorflow.python.ops.nn_impl import normalize
from tensorflow.python.ops.special_math_ops import einsum | PypiClean |
/flake8-plus-0.5.1.tar.gz/flake8-plus-0.5.1/README.md | # Flake8-plus
[](https://dl.circleci.com/status-badge/redirect/gh/sorenlind/flake8-plus/tree/main)
[](https://codecov.io/gh/sorenlind/flake8-plus)
[](https://github.com/sorenlind/flake8-plus/blob/main/LICENSE)
[](https://pypi.org/project/flake8-plus/)
[](https://github.com/psf/black)
Flake8-plus is a plugin for [Flake8](https://github.com/PyCQA/flake8) that detects
incorrect amounts of vertical whitespace before the first toplevel `import` statement,
before `return` statements and before `except`. The plugin can be configured to expect
any number of blank lines. By default, the plugin expects no blank lines before both the
`import` and `return` statements, and the `except` keyword.
## Installation
Flake8-plus can be installed from PyPI using `pip`:
```shell
$ pip install flake8-plus
```
You can verify that it has been installed as follows (the version numbers you see may
vary):
```shell
$ flake8 --version
5.0.4 (flake8-plus: 0.1.0, mccabe: 0.7.0, pycodestyle: 2.9.1, pyflakes: 2.5.0)
```
## Configuration
You can set the required number of blank lines before the first `import` as well as the
number of blank lines required before a `return` and before `except`. This can be done
from the command line:
```shell
$ flake8 --blanks-before-imports 1 --blanks-before-return 1 --blanks-before-except 1
```
Or from one of the `setup.cfg`, `tox.ini`, or `.flake8` files:
```ini
[flake8]
blanks-before-imports=1
blanks-before-return=1
blanks-before-except=1
```
## Why no blank lines?
### Before `import`
Neither [Black](https://github.com/psf/black), [Flake8](https://github.com/PyCQA/flake8)
nor [Pylint](https://github.com/PyCQA/pylint) enforces a specific number of blank lines
preceding the first `import` and consequently there seems to be no consensus or
standard. The table below shows the frequency of the number of blank lines before the
first toplevel `import` statement in the code bases for Black, Flake8 and Pylint (as of
October 2022).
| Package | Total files | 0 blanks | 1 blank | 2 blanks | Folder |
| ------- | ----------: | -------: | ------: | -------: | ------------- |
| Black | 33 | 21 | 12 | 0 | `src` |
| Flake8 | 32 | 32 | 0 | 0 | `src/flake8/` |
| Pylint | 177 | 3 | 170 | 4 | `pylint` |
Clearly, there is no real consensus. Black seems undecided, Flake8 consistently uses 0
blanks, and Pylint seems to prefer 1 blank line. However, it's worth noting that the
Pylint code does not consistently include module docstrings (thereby breaking
`pylint(missing-module-docstring)`). For that reason, and also because this is a Flake8
plugin, the plugin follows the style of Flake8 as the default.
### Before `return`
Neither Black, Flake8 nor Pylint enforces a specific number of blank lines preceding
`return`. However, they all use zero blank lines more frequently than they use any
other number of blanks. The table below shows the frequency of the number of blank
lines before a `return` statement in the code bases for Black, Flake8 and Pylint (as of
October 2022).
| Package | Total `return`s | 0 blanks | 1 blank | 2 blanks | Folder |
| ------- | --------------: | -------: | ------: | -------: | ------------- |
| Black | 618 | 544 | 74 | 0 | `src` |
| Flake8 | 174 | 155 | 19 | 0 | `src/flake8/` |
| Pylint | 1941 | 1852 | 89 | 0 | `pylint` |
Since zero blank lines is the style used most frequently, Flake8-plus uses that as
the default.
### Before `except`
Neither Black, Flake8 nor Pylint enforces a specific number of blank lines preceding
`except`. However, they all use zero blank lines more frequently than they use any other
number of blanks. The table below shows the frequency of the number of blank lines
before an `except` statement in the code bases for Black, Flake8 and Pylint (as of
October 2022).
| Package | Total `except`s | 0 blanks | 1 blank | 2 blanks | Folder |
| ------- | --------------: | -------: | ------: | -------: | ------------- |
| Black | 71 | 64 | 7 | 0 | `src` |
| Flake8 | 26 | 26 | 0 | 0 | `src/flake8/` |
| Pylint | 285 | 283 | 2 | 0 | `pylint` |
Since zero blank lines is the style used most frequently, Flake8-plus uses that as
the default.
## Reported problems
| Code | Description |
| ------ | ----------------------------------------------------------- |
| PLU001 | "expected {} blank lines before first import, found {}" |
| PLU002 | "expected {} blank lines before return statement, found {}" |
| PLU003 | "expected {} blank lines before except, found {}" |
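For illustration, under the default configuration a snippet like the following should
trigger all three checks: the blank line after the docstring raises PLU001, the blank
line before `except` raises PLU003, and the blank line before the final `return`
raises PLU002.
```python
"""Module docstring."""

import os


def read_size(path: str) -> int:
    try:
        stat = os.stat(path)

    except OSError:
        return -1

    return stat.st_size
```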
| PypiClean |
/google-cloud-bare-metal-solution-1.4.2.tar.gz/google-cloud-bare-metal-solution-1.4.2/google/cloud/bare_metal_solution_v2/services/bare_metal_solution/transports/grpc_asyncio.py | from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
import warnings
from google.api_core import gapic_v1, grpc_helpers_async, operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.cloud.location import locations_pb2 # type: ignore
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.bare_metal_solution_v2.types import nfs_share as gcb_nfs_share
from google.cloud.bare_metal_solution_v2.types import instance
from google.cloud.bare_metal_solution_v2.types import instance as gcb_instance
from google.cloud.bare_metal_solution_v2.types import lun
from google.cloud.bare_metal_solution_v2.types import network
from google.cloud.bare_metal_solution_v2.types import network as gcb_network
from google.cloud.bare_metal_solution_v2.types import nfs_share
from google.cloud.bare_metal_solution_v2.types import volume
from google.cloud.bare_metal_solution_v2.types import volume as gcb_volume
from .base import DEFAULT_CLIENT_INFO, BareMetalSolutionTransport
from .grpc import BareMetalSolutionGrpcTransport
class BareMetalSolutionGrpcAsyncIOTransport(BareMetalSolutionTransport):
"""gRPC AsyncIO backend transport for BareMetalSolution.
Performs management operations on Bare Metal Solution servers.
The ``baremetalsolution.googleapis.com`` service provides management
capabilities for Bare Metal Solution servers. To access the API
methods, you must assign Bare Metal Solution IAM roles containing
the desired permissions to your staff in your Google Cloud project.
You must also enable the Bare Metal Solution API. Once enabled, the
methods act upon specific servers in your Bare Metal Solution
environment.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "baremetalsolution.googleapis.com",
credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
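    # Hedged usage sketch: the transport builds its own channel by default, but
    # a caller could construct one explicitly and hand it in; the quota project
    # below is an illustrative value only.
    #
    #   channel = BareMetalSolutionGrpcAsyncIOTransport.create_channel(
    #       "baremetalsolution.googleapis.com",
    #       quota_project_id="my-billing-project",
    #   )
    #   transport = BareMetalSolutionGrpcAsyncIOTransport(channel=channel)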
def __init__(
self,
*,
host: str = "baremetalsolution.googleapis.com",
credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: Optional[aio.Channel] = None,
api_mtls_endpoint: Optional[str] = None,
client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
api_audience: Optional[str] = None,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
api_audience=api_audience,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
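    # Hedged note: the RPC properties below that resolve to
    # ``operations_pb2.Operation`` (update_instance, reset_instance,
    # start_instance, ...) return long-running operations, and the cached
    # ``operations_client`` above is the natural way to poll them. The exact
    # polling call is an assumption, not defined in this module, e.g.:
    #
    #     operation = await transport.update_instance(request)
    #     # ... later: transport.operations_client.get_operation(operation.name)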
@property
def list_instances(
self,
) -> Callable[
[instance.ListInstancesRequest], Awaitable[instance.ListInstancesResponse]
]:
r"""Return a callable for the list instances method over gRPC.
List servers in a given project and location.
Returns:
Callable[[~.ListInstancesRequest],
Awaitable[~.ListInstancesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_instances" not in self._stubs:
self._stubs["list_instances"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/ListInstances",
request_serializer=instance.ListInstancesRequest.serialize,
response_deserializer=instance.ListInstancesResponse.deserialize,
)
return self._stubs["list_instances"]
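    # The same lazy-caching pattern repeats for every RPC property below: the
    # first attribute access creates the unary-unary multicallable and stores
    # it in ``self._stubs``; subsequent accesses return the cached callable,
    # which is awaited directly, e.g. ``await transport.list_instances(request)``.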
@property
def get_instance(
self,
) -> Callable[[instance.GetInstanceRequest], Awaitable[instance.Instance]]:
r"""Return a callable for the get instance method over gRPC.
Get details about a single server.
Returns:
Callable[[~.GetInstanceRequest],
Awaitable[~.Instance]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_instance" not in self._stubs:
self._stubs["get_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/GetInstance",
request_serializer=instance.GetInstanceRequest.serialize,
response_deserializer=instance.Instance.deserialize,
)
return self._stubs["get_instance"]
@property
def update_instance(
self,
) -> Callable[
[gcb_instance.UpdateInstanceRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the update instance method over gRPC.
Update details of a single server.
Returns:
Callable[[~.UpdateInstanceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_instance" not in self._stubs:
self._stubs["update_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/UpdateInstance",
request_serializer=gcb_instance.UpdateInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_instance"]
@property
def reset_instance(
self,
) -> Callable[[instance.ResetInstanceRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the reset instance method over gRPC.
Perform an ungraceful, hard reset on a server.
Equivalent to shutting the power off and then turning it
back on.
Returns:
Callable[[~.ResetInstanceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "reset_instance" not in self._stubs:
self._stubs["reset_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/ResetInstance",
request_serializer=instance.ResetInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["reset_instance"]
@property
def start_instance(
self,
) -> Callable[[instance.StartInstanceRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the start instance method over gRPC.
        Starts a server that was shut down.
Returns:
Callable[[~.StartInstanceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "start_instance" not in self._stubs:
self._stubs["start_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/StartInstance",
request_serializer=instance.StartInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["start_instance"]
@property
def stop_instance(
self,
) -> Callable[[instance.StopInstanceRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the stop instance method over gRPC.
Stop a running server.
Returns:
Callable[[~.StopInstanceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "stop_instance" not in self._stubs:
self._stubs["stop_instance"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/StopInstance",
request_serializer=instance.StopInstanceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["stop_instance"]
@property
def detach_lun(
self,
) -> Callable[[gcb_instance.DetachLunRequest], Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the detach lun method over gRPC.
Detach LUN from Instance.
Returns:
Callable[[~.DetachLunRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "detach_lun" not in self._stubs:
self._stubs["detach_lun"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/DetachLun",
request_serializer=gcb_instance.DetachLunRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["detach_lun"]
@property
def list_volumes(
self,
) -> Callable[[volume.ListVolumesRequest], Awaitable[volume.ListVolumesResponse]]:
r"""Return a callable for the list volumes method over gRPC.
List storage volumes in a given project and location.
Returns:
Callable[[~.ListVolumesRequest],
Awaitable[~.ListVolumesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_volumes" not in self._stubs:
self._stubs["list_volumes"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/ListVolumes",
request_serializer=volume.ListVolumesRequest.serialize,
response_deserializer=volume.ListVolumesResponse.deserialize,
)
return self._stubs["list_volumes"]
@property
def get_volume(
self,
) -> Callable[[volume.GetVolumeRequest], Awaitable[volume.Volume]]:
r"""Return a callable for the get volume method over gRPC.
Get details of a single storage volume.
Returns:
Callable[[~.GetVolumeRequest],
Awaitable[~.Volume]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_volume" not in self._stubs:
self._stubs["get_volume"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/GetVolume",
request_serializer=volume.GetVolumeRequest.serialize,
response_deserializer=volume.Volume.deserialize,
)
return self._stubs["get_volume"]
@property
def update_volume(
self,
) -> Callable[
[gcb_volume.UpdateVolumeRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the update volume method over gRPC.
Update details of a single storage volume.
Returns:
Callable[[~.UpdateVolumeRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_volume" not in self._stubs:
self._stubs["update_volume"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/UpdateVolume",
request_serializer=gcb_volume.UpdateVolumeRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_volume"]
@property
def resize_volume(
self,
) -> Callable[
[gcb_volume.ResizeVolumeRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the resize volume method over gRPC.
Emergency Volume resize.
Returns:
Callable[[~.ResizeVolumeRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "resize_volume" not in self._stubs:
self._stubs["resize_volume"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/ResizeVolume",
request_serializer=gcb_volume.ResizeVolumeRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["resize_volume"]
@property
def list_networks(
self,
) -> Callable[
[network.ListNetworksRequest], Awaitable[network.ListNetworksResponse]
]:
r"""Return a callable for the list networks method over gRPC.
        List networks in a given project and location.
Returns:
Callable[[~.ListNetworksRequest],
Awaitable[~.ListNetworksResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_networks" not in self._stubs:
self._stubs["list_networks"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/ListNetworks",
request_serializer=network.ListNetworksRequest.serialize,
response_deserializer=network.ListNetworksResponse.deserialize,
)
return self._stubs["list_networks"]
@property
def list_network_usage(
self,
) -> Callable[
[network.ListNetworkUsageRequest], Awaitable[network.ListNetworkUsageResponse]
]:
r"""Return a callable for the list network usage method over gRPC.
List all Networks (and used IPs for each Network) in
the vendor account associated with the specified
project.
Returns:
Callable[[~.ListNetworkUsageRequest],
Awaitable[~.ListNetworkUsageResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_network_usage" not in self._stubs:
self._stubs["list_network_usage"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/ListNetworkUsage",
request_serializer=network.ListNetworkUsageRequest.serialize,
response_deserializer=network.ListNetworkUsageResponse.deserialize,
)
return self._stubs["list_network_usage"]
@property
def get_network(
self,
) -> Callable[[network.GetNetworkRequest], Awaitable[network.Network]]:
r"""Return a callable for the get network method over gRPC.
Get details of a single network.
Returns:
Callable[[~.GetNetworkRequest],
Awaitable[~.Network]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_network" not in self._stubs:
self._stubs["get_network"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/GetNetwork",
request_serializer=network.GetNetworkRequest.serialize,
response_deserializer=network.Network.deserialize,
)
return self._stubs["get_network"]
@property
def update_network(
self,
) -> Callable[
[gcb_network.UpdateNetworkRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the update network method over gRPC.
Update details of a single network.
Returns:
Callable[[~.UpdateNetworkRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_network" not in self._stubs:
self._stubs["update_network"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/UpdateNetwork",
request_serializer=gcb_network.UpdateNetworkRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_network"]
@property
def get_lun(self) -> Callable[[lun.GetLunRequest], Awaitable[lun.Lun]]:
r"""Return a callable for the get lun method over gRPC.
Get details of a single storage logical unit
        number (LUN).
Returns:
Callable[[~.GetLunRequest],
Awaitable[~.Lun]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_lun" not in self._stubs:
self._stubs["get_lun"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/GetLun",
request_serializer=lun.GetLunRequest.serialize,
response_deserializer=lun.Lun.deserialize,
)
return self._stubs["get_lun"]
@property
def list_luns(
self,
) -> Callable[[lun.ListLunsRequest], Awaitable[lun.ListLunsResponse]]:
r"""Return a callable for the list luns method over gRPC.
        List storage volume LUNs for a given storage volume.
Returns:
Callable[[~.ListLunsRequest],
Awaitable[~.ListLunsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_luns" not in self._stubs:
self._stubs["list_luns"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/ListLuns",
request_serializer=lun.ListLunsRequest.serialize,
response_deserializer=lun.ListLunsResponse.deserialize,
)
return self._stubs["list_luns"]
@property
def get_nfs_share(
self,
) -> Callable[[nfs_share.GetNfsShareRequest], Awaitable[nfs_share.NfsShare]]:
r"""Return a callable for the get nfs share method over gRPC.
Get details of a single NFS share.
Returns:
Callable[[~.GetNfsShareRequest],
Awaitable[~.NfsShare]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_nfs_share" not in self._stubs:
self._stubs["get_nfs_share"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/GetNfsShare",
request_serializer=nfs_share.GetNfsShareRequest.serialize,
response_deserializer=nfs_share.NfsShare.deserialize,
)
return self._stubs["get_nfs_share"]
@property
def list_nfs_shares(
self,
) -> Callable[
[nfs_share.ListNfsSharesRequest], Awaitable[nfs_share.ListNfsSharesResponse]
]:
r"""Return a callable for the list nfs shares method over gRPC.
List NFS shares.
Returns:
Callable[[~.ListNfsSharesRequest],
Awaitable[~.ListNfsSharesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_nfs_shares" not in self._stubs:
self._stubs["list_nfs_shares"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/ListNfsShares",
request_serializer=nfs_share.ListNfsSharesRequest.serialize,
response_deserializer=nfs_share.ListNfsSharesResponse.deserialize,
)
return self._stubs["list_nfs_shares"]
@property
def update_nfs_share(
self,
) -> Callable[
[gcb_nfs_share.UpdateNfsShareRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the update nfs share method over gRPC.
Update details of a single NFS share.
Returns:
Callable[[~.UpdateNfsShareRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_nfs_share" not in self._stubs:
self._stubs["update_nfs_share"] = self.grpc_channel.unary_unary(
"/google.cloud.baremetalsolution.v2.BareMetalSolution/UpdateNfsShare",
request_serializer=gcb_nfs_share.UpdateNfsShareRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_nfs_share"]
def close(self):
return self.grpc_channel.close()
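    # Note: on an ``aio.Channel`` the ``close()`` call above returns a
    # coroutine, so callers are expected to ``await transport.close()`` to shut
    # the channel down cleanly.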
@property
def list_locations(
self,
) -> Callable[
[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse
]:
r"""Return a callable for the list locations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_locations" not in self._stubs:
self._stubs["list_locations"] = self.grpc_channel.unary_unary(
"/google.cloud.location.Locations/ListLocations",
request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
response_deserializer=locations_pb2.ListLocationsResponse.FromString,
)
return self._stubs["list_locations"]
@property
def get_location(
self,
) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
r"""Return a callable for the list locations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_location" not in self._stubs:
self._stubs["get_location"] = self.grpc_channel.unary_unary(
"/google.cloud.location.Locations/GetLocation",
request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
response_deserializer=locations_pb2.Location.FromString,
)
return self._stubs["get_location"]
__all__ = ("BareMetalSolutionGrpcAsyncIOTransport",) | PypiClean |
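# Minimal end-to-end sketch (illustrative only): real applications normally go
# through the generated Bare Metal Solution client rather than this transport,
# the parent path below is a made-up placeholder, and the ``instances`` field
# on the response is an assumption about the API surface.
#
#     import asyncio
#
#     async def main():
#         transport = BareMetalSolutionGrpcAsyncIOTransport()
#         try:
#             response = await transport.list_instances(
#                 instance.ListInstancesRequest(
#                     parent="projects/my-project/locations/us-central1"
#                 )
#             )
#             for item in response.instances:
#                 print(item.name)
#         finally:
#             await transport.close()
#
#     asyncio.run(main())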
/MDsrv-0.3.5.tar.gz/MDsrv-0.3.5/CHANGELOG.md | # Change Log
All notable changes to this project will be documented in this file, following the suggestions of [Keep a CHANGELOG](http://keepachangelog.com/). This project adheres to [Semantic Versioning](http://semver.org/).
## [v0.3.5] - 2017-08-13
### Added
- WINDOWS support (tested for conda)
### Changed
- trajectory support switched from simpletraj to MDTraj with MDAnalysis as extra requirement
- NGL update to version 0.10.5-18 including bugfixes related to superpositioning, updated remove trajectory formats, IE11 workaround
## [v0.3.4] - 2017-07-31
### Added
- conda support via ngl channel
- delta time and time offset as CMD variables
## [v0.3.3] - 2017-07-27
### Changed
- NGL update to version 0.10.5-15 incl. bugfixes related to interpolation, change superpositioning on initial frame, add bounce direction for trajectory player, animations
- script examples according to new NGL version
### Added
- conda support
- netcdf, gro, lammpstrj, hdf5, dtr, arc tng trajectory support from mdtraj
## [v0.3.2] - 2017-07-03
### Changed
- ngl update to version 0.10.5-2 incl. prmtop parser, traj time (delta time) settings, debug
## [v0.3.1] - 2017-07-02
### Changed
- major ngl update to version 0.10.5-1 incl. psf, netcdf, xtc (speedup), movable gui, ...
## [v0.3] - 2016-01-11
### Added
- --script arguments
- versioneer
- DOC: installation & deployment, usage, scripting
### Changed
- major ngl update to version 0.10.0-dev5
## [v0.2] - 2016-02-12
### Added
- --host and --port arguments
- DOC: described arguments of the command line tool
[](http://dx.doi.org/10.5281/zenodo.45961)
## [v0.1.1] - 2016-01-02
### Added
- Initial release
[](http://dx.doi.org/10.5281/zenodo.44286)
| PypiClean |
/robot_grpc-0.0.4.tar.gz/robot_grpc-0.0.4/robot_grpc/node_modules/over/README.md | # over
JavaScript function overloading framework.
## Installation
```bash
$ npm install over
```
## Quick Examples
```javascript
var over = require('over');
var myfn = over([
[over.string, function (str) { console.log('got a string' + str); }],
[over.string, over.numberOptionalWithDefault(5), over.callbackOptional, function (str, number, callback) {
console.log('got a string and a number and a callback');
callback(str, number);
}],
function() {
// default function if nothing else matches
}
]);
```
## Builtin Test functions
* func
* funcOptional
* funcOptionalWithDefault
* callbackOptional // Will return an empty function if the parameter is not given
* string
* stringOptional
* stringOptionalWithDefault
* number
* numberOptional
* numberOptionalWithDefault
* array
* arrayOptional
* arrayOptionalWithDefault
* object
* objectOptional
* objectOptionalWithDefault
The built-in optional functions with a suffix of "WithDefault" take a default value, which will be used if
the argument is not passed in.
```javascript
var myfn = over([
[over.stringOptionalWithDefault('default value'), function (str) { console.log('got a string' + str); }],
]);
```
## Write your own test functions
Simple optional test
```javascript
function greaterThan5Optional(arg) {
return arg > 5;
}
greaterThan5Optional.optional = true; // mark it as an optional parameter
var myfn = over([
[greaterThan5Optional, function (v) { console.log('got a value' + v); }]
]);
```
Optional test with default
```javascript
function greaterThan5OptionalWithDefault(def) {
return function greaterThan5OptionalWithDefault2(arg) {
if (arg === undefined) {
return { defaultValue: def };
}
return arg > 5;
}
}
greaterThan5OptionalWithDefault.optional = true; // mark it as an optional parameter
var myfn = over([
     [greaterThan5OptionalWithDefault(10), function (v) { console.log('got a value' + v); }]
]);
```
## License
(The MIT License)
Copyright (c) 2012 Near Infinity Corporation
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| PypiClean |
/sparkfun-circuitpython-serlcd-1.0.4.tar.gz/sparkfun-circuitpython-serlcd-1.0.4/CODE_OF_CONDUCT.md | <!--
SPDX-FileCopyrightText: 2014 Coraline Ada Ehmke
SPDX-FileCopyrightText: 2019-2021 Kattni Rembor for Adafruit Industries
SPDX-License-Identifier: CC-BY-4.0
-->
# Adafruit Community Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and leaders pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level or type of
experience, education, socio-economic status, nationality, personal appearance,
race, religion, or sexual identity and orientation.
## Our Standards
We are committed to providing a friendly, safe and welcoming environment for
all.
Examples of behavior that contributes to creating a positive environment
include:
* Be kind and courteous to others
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Collaborating with other community members
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and sexual attention or advances
* The use of inappropriate images, including in a community member's avatar
* The use of inappropriate language, including in a community member's nickname
* Any spamming, flaming, baiting or other attention-stealing behavior
* Excessive or unwelcome helping; answering outside the scope of the question
asked
* Discussion or promotion of activities or projects that intend or pose a risk of
significant harm
* Trolling, insulting/derogatory comments, and personal or political attacks
* Promoting or spreading disinformation, lies, or conspiracy theories against
a person, group, organisation, project, or community
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate
The goal of the standards and moderation guidelines outlined here is to build
and maintain a respectful community. We ask that you don’t just aim to be
"technically unimpeachable", but rather try to be your best self.
We value many things beyond technical expertise, including collaboration and
supporting others within our community. Providing a positive experience for
other community members can have a much more significant impact than simply
providing the correct answer.
## Our Responsibilities
Project leaders are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project leaders have the right and responsibility to remove, edit, or
reject messages, comments, commits, code, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any community member for other behaviors that they deem
inappropriate, threatening, offensive, or harmful.
## Moderation
Instances of behaviors that violate the Adafruit Community Code of Conduct
may be reported by any member of the community. Community members are
encouraged to report these situations, including situations they witness
involving other community members.
You may report in the following ways:
In any situation, you may email <support@adafruit.com>.
On the Adafruit Discord, you may send an open message from any channel
to all Community Moderators by tagging @community moderators. You may
also send an open message from any channel, or a direct message to
any Community Moderator.
Email and direct message reports will be kept confidential.
In situations on Discord where the issue is particularly offensive, possibly
illegal, requires immediate action, or violates the Discord terms of service,
you should also report the message directly to [Discord](https://discord.com/safety).
These are the steps for upholding our community’s standards of conduct.
1. Any member of the community may report any situation that violates the
CircuitPython Community Code of Conduct. All reports will be reviewed and
investigated.
2. If the behavior is a severe violation, the community member who
committed the violation may be banned immediately, without warning.
3. Otherwise, moderators will first respond to such behavior with a warning.
4. Moderators follow a soft "three strikes" policy - the community member may
be given another chance, if they are receptive to the warning and change their
behavior.
5. If the community member is unreceptive or unreasonable when warned by a
moderator, or the warning goes unheeded, they may be banned for a first or
second offense. Repeated offenses will result in the community member being
banned.
6. Disciplinary actions (warnings, bans, etc) for Code of Conduct violations apply
to the platform where the violation occurred. However, depending on the severity
of the violation, the disciplinary action may be applied across Adafruit's other
community platforms. For example, a severe violation on the Adafruit Discord
server may result in a ban on not only the Adafruit Discord server, but also on
the Adafruit GitHub organisation, Adafruit Forums, Adafruit Twitter, etc.
## Scope
This Code of Conduct and the enforcement policies listed above apply to all
Adafruit Community venues. This includes but is not limited to any community
spaces (both public and private), the entire Adafruit Discord server, and
Adafruit GitHub repositories. Examples of Adafruit Community spaces include
but are not limited to meet-ups, audio chats on the Adafruit Discord, or
interaction at a conference.
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. As a community
member, you are representing our community, and are expected to behave
accordingly.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/),
version 1.4, available on [contributor-covenant.org](https://www.contributor-covenant.org/version/1/4/code-of-conduct.html),
and the [Rust Code of Conduct](https://www.rust-lang.org/en-US/conduct.html).
For other projects adopting the Adafruit Community Code of
Conduct, please contact the maintainers of those projects for enforcement.
If you wish to use this code of conduct for your own project, consider
explicitly mentioning your moderation policy or making a copy with your
own moderation policy so as to avoid confusion.
| PypiClean |
/convo_nlu2-2.0.0.tar.gz/convo_nlu2-2.0.0/convo/nlu/classifiers/keyword_intent_classifier.py | import os
import logging
import re
from typing import Any, Dict, Optional, Text
from convo.shared.constants import DOCS_URL_COMPONENTS
from convo.nlu import utils
from convo.nlu.classifiers.classifier import IntentClassifier
from convo.shared.nlu.constants import INTENT, TEXT
import convo.shared.utils.io
from convo.nlu.config import ConvoNLUModelConfig
from convo.shared.nlu.training_data.training_data import TrainingData
from convo.shared.nlu.training_data.message import Message
from convo.nlu.model import Metadata
logger = logging.getLogger(__name__)
class KeywordIntentClassifier(IntentClassifier):
"""Intent classifier using simple keyword matching.
The classifier takes a list of keywords and associated intents as an input.
An input sentence is checked for the keywords and the intent is returned.
"""
defaults = {"case_sensitive": True}
def __init__(
self,
component_config: Optional[Dict[Text, Any]] = None,
intent_keyword_map: Optional[Dict] = None,
) -> None:
super(KeywordIntentClassifier, self).__init__(component_config)
self.case_sensitive = self.component_config.get("case_sensitive")
self.intent_keyword_map = intent_keyword_map or {}
def train(
self,
training_data: TrainingData,
config: Optional[ConvoNLUModelConfig] = None,
**kwargs: Any,
) -> None:
duplicate_examples = set()
for ex in training_data.training_examples:
if (
ex.get(TEXT) in self.intent_keyword_map.keys()
and ex.get(INTENT) != self.intent_keyword_map[ex.get(TEXT)]
):
duplicate_examples.add(ex.get(TEXT))
convo.shared.utils.io.raise_warning(
f"Keyword '{ex.get(TEXT)}' is a keyword to trigger intent "
f"'{self.intent_keyword_map[ex.get(TEXT)]}' and also "
f"intent '{ex.get(INTENT)}', it will be removed "
f"from the list of keywords for both of them. "
f"Remove (one of) the duplicates from the training data.",
docs=DOCS_URL_COMPONENTS + "#keyword-intent-classifier",
)
else:
self.intent_keyword_map[ex.get(TEXT)] = ex.get(INTENT)
for keyword in duplicate_examples:
self.intent_keyword_map.pop(keyword)
logger.debug(
f"Removed '{keyword}' from the list of keywords because it was "
"a keyword for more than one intent."
)
self._validate_keyword_map()
def _validate_keyword_map(self) -> None:
re_flag = 0 if self.case_sensitive else re.IGNORECASE
ambiguous_mappings = []
for keyword1, intent1 in self.intent_keyword_map.items():
for keyword2, intent2 in self.intent_keyword_map.items():
if (
re.search(r"\b" + keyword1 + r"\b", keyword2, flags=re_flag)
and intent1 != intent2
):
ambiguous_mappings.append((intent1, keyword1))
convo.shared.utils.io.raise_warning(
f"Keyword '{keyword1}' is a keyword of intent '{intent1}', "
f"but also a substring of '{keyword2}', which is a "
f"keyword of intent '{intent2}."
f" '{keyword1}' will be removed from the list of keywords.\n"
f"Remove (one of) the conflicting keywords from the"
f" training data.",
docs=DOCS_URL_COMPONENTS + "#keyword-intent-classifier",
)
for intent, keyword in ambiguous_mappings:
self.intent_keyword_map.pop(keyword)
logger.debug(
f"Removed keyword '{keyword}' from intent '{intent}' because it matched a "
"keyword of another intent."
)
def process(self, message: Message, **kwargs: Any) -> None:
intent_name = self._map_keyword_to_intent(message.get(TEXT))
confidence = 0.0 if intent_name is None else 1.0
intent = {"name": intent_name, "confidence": confidence}
if message.get(INTENT) is None or intent is not None:
message.set(INTENT, intent, add_to_output=True)
def _map_keyword_to_intent(self, text: Text) -> Optional[Text]:
re_flag = 0 if self.case_sensitive else re.IGNORECASE
for keyword, intent in self.intent_keyword_map.items():
if re.search(r"\b" + keyword + r"\b", text, flags=re_flag):
logger.debug(
f"KeywordClassifier matched keyword '{keyword}' to"
f" intent '{intent}'."
)
return intent
logger.debug("KeywordClassifier did not find any keywords in the message.")
return None
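    # Illustrative note on the matching above (example map and texts are made
    # up): keywords are matched as whole words via ``\b`` boundaries, so with
    # intent_keyword_map == {"hello": "greet"} the text "hello there" maps to
    # "greet", while "othello" does not match; with case_sensitive=False,
    # re.IGNORECASE also lets "HELLO there" match.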
def persist(self, file_name: Text, model_dir: Text) -> Dict[Text, Any]:
"""Persist this model into the passed directory.
Return the metadata necessary to load the model again.
"""
file_name = file_name + ".json"
keyword_file = os.path.join(model_dir, file_name)
utils.write_json_to_file(keyword_file, self.intent_keyword_map)
return {"file": file_name}
@classmethod
def load(
cls,
meta: Dict[Text, Any],
model_dir: Optional[Text] = None,
model_metadata: Metadata = None,
cached_component: Optional["KeywordIntentClassifier"] = None,
**kwargs: Any,
) -> "KeywordIntentClassifier":
if model_dir and meta.get("file"):
file_name = meta.get("file")
keyword_file = os.path.join(model_dir, file_name)
if os.path.exists(keyword_file):
intent_keyword_map = convo.shared.utils.io.read_json_file(keyword_file)
else:
                convo.shared.utils.io.raise_warning(
                    f"Failed to load keyword file for `KeywordIntentClassifier`, "
f"maybe {keyword_file} does not exist?"
)
intent_keyword_map = None
return cls(meta, intent_keyword_map)
else:
raise Exception(
f"Failed to load keyword intent classifier model. "
f"Path {os.path.abspath(meta.get('file'))} doesn't exist."
) | PypiClean |
/ensmallen_graph-0.6.0-cp37-cp37m-manylinux2010_x86_64.whl/ensmallen_graph/datasets/networkrepository/miscjungcodedep.py | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def MiscJungCodeDep(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/networkrepository",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the misc-jung-code-dep graph.
The graph is automatically retrieved from the NetworkRepository repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
        Instance of the misc-jung-code-dep graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-06 11:17:34.418845
The undirected graph misc-jung-code-dep has 6120 nodes and 50290 weighted
edges, of which none are self-loops. The graph is sparse as it has a density
of 0.00269 and is connected, as it has a single component. The graph median
node degree is 7, the mean node degree is 16.43, and the node degree mode
is 4. The top 5 most central nodes are 5 (degree 5655), 13 (degree 5557),
21 (degree 5380), 15 (degree 643) and 14 (degree 625).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
@inproceedings{jung-code-dep,
    author = {\v{S}ubelj, Lovro and Bajec, Marko},
title = {Software Systems through Complex Networks Science: Review, Analysis and Applications},
booktitle = {Proc. Int. Workshop on Software Mining},
year = {2012},
isbn = {978-1-4503-1560-9},
pages = {9--16},
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import MiscJungCodeDep
# Then load the graph
graph = MiscJungCodeDep()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="MiscJungCodeDep",
dataset="networkrepository",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)() | PypiClean |
/django-servee-uploadify-0.1.tar.gz/django-servee-uploadify-0.1/servee_uploadify/static/uploadify/swfobject.js | var swfobject=function(){var D="undefined",r="object",S="Shockwave Flash",W="ShockwaveFlash.ShockwaveFlash",q="application/x-shockwave-flash",R="SWFObjectExprInst",x="onreadystatechange",O=window,j=document,t=navigator,T=false,U=[h],o=[],N=[],I=[],l,Q,E,B,J=false,a=false,n,G,m=true,M=function(){var aa=typeof j.getElementById!=D&&typeof j.getElementsByTagName!=D&&typeof j.createElement!=D,ah=t.userAgent.toLowerCase(),Y=t.platform.toLowerCase(),ae=Y?/win/.test(Y):/win/.test(ah),ac=Y?/mac/.test(Y):/mac/.test(ah),af=/webkit/.test(ah)?parseFloat(ah.replace(/^.*webkit\/(\d+(\.\d+)?).*$/,"$1")):false,X=!+"\v1",ag=[0,0,0],ab=null;if(typeof t.plugins!=D&&typeof t.plugins[S]==r){ab=t.plugins[S].description;if(ab&&!(typeof t.mimeTypes!=D&&t.mimeTypes[q]&&!t.mimeTypes[q].enabledPlugin)){T=true;X=false;ab=ab.replace(/^.*\s+(\S+\s+\S+$)/,"$1");ag[0]=parseInt(ab.replace(/^(.*)\..*$/,"$1"),10);ag[1]=parseInt(ab.replace(/^.*\.(.*)\s.*$/,"$1"),10);ag[2]=/[a-zA-Z]/.test(ab)?parseInt(ab.replace(/^.*[a-zA-Z]+(.*)$/,"$1"),10):0}}else{if(typeof O.ActiveXObject!=D){try{var ad=new ActiveXObject(W);if(ad){ab=ad.GetVariable("$version");if(ab){X=true;ab=ab.split(" ")[1].split(",");ag=[parseInt(ab[0],10),parseInt(ab[1],10),parseInt(ab[2],10)]}}}catch(Z){}}}return{w3:aa,pv:ag,wk:af,ie:X,win:ae,mac:ac}}(),k=function(){if(!M.w3){return}if((typeof j.readyState!=D&&j.readyState=="complete")||(typeof j.readyState==D&&(j.getElementsByTagName("body")[0]||j.body))){f()}if(!J){if(typeof j.addEventListener!=D){j.addEventListener("DOMContentLoaded",f,false)}if(M.ie&&M.win){j.attachEvent(x,function(){if(j.readyState=="complete"){j.detachEvent(x,arguments.callee);f()}});if(O==top){(function(){if(J){return}try{j.documentElement.doScroll("left")}catch(X){setTimeout(arguments.callee,0);return}f()})()}}if(M.wk){(function(){if(J){return}if(!/loaded|complete/.test(j.readyState)){setTimeout(arguments.callee,0);return}f()})()}s(f)}}();function f(){if(J){return}try{var Z=j.getElementsByTagName("body")[0].appendChild(C("span"));Z.parentNode.removeChild(Z)}catch(aa){return}J=true;var X=U.length;for(var Y=0;Y<X;Y++){U[Y]()}}function K(X){if(J){X()}else{U[U.length]=X}}function s(Y){if(typeof O.addEventListener!=D){O.addEventListener("load",Y,false)}else{if(typeof j.addEventListener!=D){j.addEventListener("load",Y,false)}else{if(typeof O.attachEvent!=D){i(O,"onload",Y)}else{if(typeof O.onload=="function"){var X=O.onload;O.onload=function(){X();Y()}}else{O.onload=Y}}}}}function h(){if(T){V()}else{H()}}function V(){var X=j.getElementsByTagName("body")[0];var aa=C(r);aa.setAttribute("type",q);var Z=X.appendChild(aa);if(Z){var Y=0;(function(){if(typeof Z.GetVariable!=D){var ab=Z.GetVariable("$version");if(ab){ab=ab.split(" ")[1].split(",");M.pv=[parseInt(ab[0],10),parseInt(ab[1],10),parseInt(ab[2],10)]}}else{if(Y<10){Y++;setTimeout(arguments.callee,10);return}}X.removeChild(aa);Z=null;H()})()}else{H()}}function H(){var ag=o.length;if(ag>0){for(var af=0;af<ag;af++){var Y=o[af].id;var ab=o[af].callbackFn;var aa={success:false,id:Y};if(M.pv[0]>0){var ae=c(Y);if(ae){if(F(o[af].swfVersion)&&!(M.wk&&M.wk<312)){w(Y,true);if(ab){aa.success=true;aa.ref=z(Y);ab(aa)}}else{if(o[af].expressInstall&&A()){var 
ai={};ai.data=o[af].expressInstall;ai.width=ae.getAttribute("width")||"0";ai.height=ae.getAttribute("height")||"0";if(ae.getAttribute("class")){ai.styleclass=ae.getAttribute("class")}if(ae.getAttribute("align")){ai.align=ae.getAttribute("align")}var ah={};var X=ae.getElementsByTagName("param");var ac=X.length;for(var ad=0;ad<ac;ad++){if(X[ad].getAttribute("name").toLowerCase()!="movie"){ah[X[ad].getAttribute("name")]=X[ad].getAttribute("value")}}P(ai,ah,Y,ab)}else{p(ae);if(ab){ab(aa)}}}}}else{w(Y,true);if(ab){var Z=z(Y);if(Z&&typeof Z.SetVariable!=D){aa.success=true;aa.ref=Z}ab(aa)}}}}}function z(aa){var X=null;var Y=c(aa);if(Y&&Y.nodeName=="OBJECT"){if(typeof Y.SetVariable!=D){X=Y}else{var Z=Y.getElementsByTagName(r)[0];if(Z){X=Z}}}return X}function A(){return !a&&F("6.0.65")&&(M.win||M.mac)&&!(M.wk&&M.wk<312)}function P(aa,ab,X,Z){a=true;E=Z||null;B={success:false,id:X};var ae=c(X);if(ae){if(ae.nodeName=="OBJECT"){l=g(ae);Q=null}else{l=ae;Q=X}aa.id=R;if(typeof aa.width==D||(!/%$/.test(aa.width)&&parseInt(aa.width,10)<310)){aa.width="310"}if(typeof aa.height==D||(!/%$/.test(aa.height)&&parseInt(aa.height,10)<137)){aa.height="137"}j.title=j.title.slice(0,47)+" - Flash Player Installation";var ad=M.ie&&M.win?"ActiveX":"PlugIn",ac="MMredirectURL="+O.location.toString().replace(/&/g,"%26")+"&MMplayerType="+ad+"&MMdoctitle="+j.title;if(typeof ab.flashvars!=D){ab.flashvars+="&"+ac}else{ab.flashvars=ac}if(M.ie&&M.win&&ae.readyState!=4){var Y=C("div");X+="SWFObjectNew";Y.setAttribute("id",X);ae.parentNode.insertBefore(Y,ae);ae.style.display="none";(function(){if(ae.readyState==4){ae.parentNode.removeChild(ae)}else{setTimeout(arguments.callee,10)}})()}u(aa,ab,X)}}function p(Y){if(M.ie&&M.win&&Y.readyState!=4){var X=C("div");Y.parentNode.insertBefore(X,Y);X.parentNode.replaceChild(g(Y),X);Y.style.display="none";(function(){if(Y.readyState==4){Y.parentNode.removeChild(Y)}else{setTimeout(arguments.callee,10)}})()}else{Y.parentNode.replaceChild(g(Y),Y)}}function g(ab){var aa=C("div");if(M.win&&M.ie){aa.innerHTML=ab.innerHTML}else{var Y=ab.getElementsByTagName(r)[0];if(Y){var ad=Y.childNodes;if(ad){var X=ad.length;for(var Z=0;Z<X;Z++){if(!(ad[Z].nodeType==1&&ad[Z].nodeName=="PARAM")&&!(ad[Z].nodeType==8)){aa.appendChild(ad[Z].cloneNode(true))}}}}}return aa}function u(ai,ag,Y){var X,aa=c(Y);if(M.wk&&M.wk<312){return X}if(aa){if(typeof ai.id==D){ai.id=Y}if(M.ie&&M.win){var ah="";for(var ae in ai){if(ai[ae]!=Object.prototype[ae]){if(ae.toLowerCase()=="data"){ag.movie=ai[ae]}else{if(ae.toLowerCase()=="styleclass"){ah+=' class="'+ai[ae]+'"'}else{if(ae.toLowerCase()!="classid"){ah+=" "+ae+'="'+ai[ae]+'"'}}}}}var af="";for(var ad in ag){if(ag[ad]!=Object.prototype[ad]){af+='<param name="'+ad+'" value="'+ag[ad]+'" />'}}aa.outerHTML='<object classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"'+ah+">"+af+"</object>";N[N.length]=ai.id;X=c(ai.id)}else{var Z=C(r);Z.setAttribute("type",q);for(var ac in ai){if(ai[ac]!=Object.prototype[ac]){if(ac.toLowerCase()=="styleclass"){Z.setAttribute("class",ai[ac])}else{if(ac.toLowerCase()!="classid"){Z.setAttribute(ac,ai[ac])}}}}for(var ab in ag){if(ag[ab]!=Object.prototype[ab]&&ab.toLowerCase()!="movie"){e(Z,ab,ag[ab])}}aa.parentNode.replaceChild(Z,aa);X=Z}}return X}function e(Z,X,Y){var aa=C("param");aa.setAttribute("name",X);aa.setAttribute("value",Y);Z.appendChild(aa)}function y(Y){var 
X=c(Y);if(X&&X.nodeName=="OBJECT"){if(M.ie&&M.win){X.style.display="none";(function(){if(X.readyState==4){b(Y)}else{setTimeout(arguments.callee,10)}})()}else{X.parentNode.removeChild(X)}}}function b(Z){var Y=c(Z);if(Y){for(var X in Y){if(typeof Y[X]=="function"){Y[X]=null}}Y.parentNode.removeChild(Y)}}function c(Z){var X=null;try{X=j.getElementById(Z)}catch(Y){}return X}function C(X){return j.createElement(X)}function i(Z,X,Y){Z.attachEvent(X,Y);I[I.length]=[Z,X,Y]}function F(Z){var Y=M.pv,X=Z.split(".");X[0]=parseInt(X[0],10);X[1]=parseInt(X[1],10)||0;X[2]=parseInt(X[2],10)||0;return(Y[0]>X[0]||(Y[0]==X[0]&&Y[1]>X[1])||(Y[0]==X[0]&&Y[1]==X[1]&&Y[2]>=X[2]))?true:false}function v(ac,Y,ad,ab){if(M.ie&&M.mac){return}var aa=j.getElementsByTagName("head")[0];if(!aa){return}var X=(ad&&typeof ad=="string")?ad:"screen";if(ab){n=null;G=null}if(!n||G!=X){var Z=C("style");Z.setAttribute("type","text/css");Z.setAttribute("media",X);n=aa.appendChild(Z);if(M.ie&&M.win&&typeof j.styleSheets!=D&&j.styleSheets.length>0){n=j.styleSheets[j.styleSheets.length-1]}G=X}if(M.ie&&M.win){if(n&&typeof n.addRule==r){n.addRule(ac,Y)}}else{if(n&&typeof j.createTextNode!=D){n.appendChild(j.createTextNode(ac+" {"+Y+"}"))}}}function w(Z,X){if(!m){return}var Y=X?"visible":"hidden";if(J&&c(Z)){c(Z).style.visibility=Y}else{v("#"+Z,"visibility:"+Y)}}function L(Y){var Z=/[\\\"<>\.;]/;var X=Z.exec(Y)!=null;return X&&typeof encodeURIComponent!=D?encodeURIComponent(Y):Y}var d=function(){if(M.ie&&M.win){window.attachEvent("onunload",function(){var ac=I.length;for(var ab=0;ab<ac;ab++){I[ab][0].detachEvent(I[ab][1],I[ab][2])}var Z=N.length;for(var aa=0;aa<Z;aa++){y(N[aa])}for(var Y in M){M[Y]=null}M=null;for(var X in swfobject){swfobject[X]=null}swfobject=null})}}();return{registerObject:function(ab,X,aa,Z){if(M.w3&&ab&&X){var Y={};Y.id=ab;Y.swfVersion=X;Y.expressInstall=aa;Y.callbackFn=Z;o[o.length]=Y;w(ab,false)}else{if(Z){Z({success:false,id:ab})}}},getObjectById:function(X){if(M.w3){return z(X)}},embedSWF:function(ab,ah,ae,ag,Y,aa,Z,ad,af,ac){var X={success:false,id:ah};if(M.w3&&!(M.wk&&M.wk<312)&&ab&&ah&&ae&&ag&&Y){w(ah,false);K(function(){ae+="";ag+="";var aj={};if(af&&typeof af===r){for(var al in af){aj[al]=af[al]}}aj.data=ab;aj.width=ae;aj.height=ag;var am={};if(ad&&typeof ad===r){for(var ak in ad){am[ak]=ad[ak]}}if(Z&&typeof Z===r){for(var ai in Z){if(typeof am.flashvars!=D){am.flashvars+="&"+ai+"="+Z[ai]}else{am.flashvars=ai+"="+Z[ai]}}}if(F(Y)){var an=u(aj,am,ah);if(aj.id==ah){w(ah,true)}X.success=true;X.ref=an}else{if(aa&&A()){aj.data=aa;P(aj,am,ah,ac);return}else{w(ah,true)}}if(ac){ac(X)}})}else{if(ac){ac(X)}}},switchOffAutoHideShow:function(){m=false},ua:M,getFlashPlayerVersion:function(){return{major:M.pv[0],minor:M.pv[1],release:M.pv[2]}},hasFlashPlayerVersion:F,createSWF:function(Z,Y,X){if(M.w3){return u(Z,Y,X)}else{return undefined}},showExpressInstall:function(Z,aa,X,Y){if(M.w3&&A()){P(Z,aa,X,Y)}},removeSWF:function(X){if(M.w3){y(X)}},createCSS:function(aa,Z,Y,X){if(M.w3){v(aa,Z,Y,X)}},addDomLoadEvent:K,addLoadEvent:s,getQueryParamValue:function(aa){var Z=j.location.search||j.location.hash;if(Z){if(/\?/.test(Z)){Z=Z.split("?")[1]}if(aa==null){return L(Z)}var Y=Z.split("&");for(var X=0;X<Y.length;X++){if(Y[X].substring(0,Y[X].indexOf("="))==aa){return L(Y[X].substring((Y[X].indexOf("=")+1)))}}}return""},expressInstallCallback:function(){if(a){var X=c(R);if(X&&l){X.parentNode.replaceChild(l,X);if(Q){w(Q,true);if(M.ie&&M.win){l.style.display="block"}}if(E){E(B)}}a=false}}}}(); | PypiClean |