from django import forms
from django.contrib import admin, messages
from django.contrib.admin.helpers import ActionForm
from base.admin import ImportExportTimeStampedAdmin
from .aws_utils import (
delete_workers,
restart_workers,
scale_workers,
start_workers,
stop_workers,
)
from .admin_filters import ChallengeFilter
from .models import (
Challenge,
ChallengeConfiguration,
ChallengeEvaluationCluster,
ChallengePhase,
ChallengePhaseSplit,
ChallengeTemplate,
DatasetSplit,
Leaderboard,
LeaderboardData,
PWCChallengeLeaderboard,
StarChallenge,
UserInvitation,
)
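# Extra form rendered next to the admin action dropdown; only the
# scale_selected_workers action below reads num_of_tasks, the other
# worker actions ignore it.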
class UpdateNumOfWorkersForm(ActionForm):
label = "Number of workers. (Enter a whole number while scaling. Otherwise, ignore.)"
num_of_tasks = forms.IntegerField(initial=-1, label=label, required=False)
@admin.register(Challenge)
class ChallengeAdmin(ImportExportTimeStampedAdmin):
readonly_fields = ("created_at",)
list_display = (
"id",
"title",
"start_date",
"end_date",
"creator",
"published",
"is_registration_open",
"enable_forum",
"anonymous_leaderboard",
"featured",
"created_at",
"is_docker_based",
"is_static_dataset_code_upload",
"slug",
"submission_time_limit",
"banned_email_ids",
"workers",
"task_def_arn",
"github_repository",
)
list_filter = (
ChallengeFilter,
"published",
"is_registration_open",
"enable_forum",
"anonymous_leaderboard",
"featured",
"start_date",
"end_date",
)
search_fields = (
"id",
"title",
"creator__team_name",
"slug",
"github_repository",
)
actions = [
"start_selected_workers",
"stop_selected_workers",
"scale_selected_workers",
"restart_selected_workers",
"delete_selected_workers",
]
action_form = UpdateNumOfWorkersForm
def start_selected_workers(self, request, queryset):
response = start_workers(queryset)
count, failures = response["count"], response["failures"]
if count == queryset.count():
message = "All selected challenge workers successfully started."
messages.success(request, message)
else:
messages.success(
request,
"{} challenge workers were succesfully started.".format(count),
)
for fail in failures:
challenge_pk, message = fail["challenge_pk"], fail["message"]
display_message = "Challenge {}: {}".format(
challenge_pk, message
)
messages.error(request, display_message)
start_selected_workers.short_description = (
"Start all selected challenge workers."
)
def stop_selected_workers(self, request, queryset):
response = stop_workers(queryset)
count, failures = response["count"], response["failures"]
if count == queryset.count():
message = "All selected challenge workers successfully stopped."
messages.success(request, message)
else:
messages.success(
request,
"{} challenge workers were succesfully stopped.".format(count),
)
for fail in failures:
challenge_pk, message = fail["challenge_pk"], fail["message"]
display_message = "Challenge {}: {}".format(
challenge_pk, message
)
messages.error(request, display_message)
stop_selected_workers.short_description = (
"Stop all selected challenge workers."
)
def scale_selected_workers(self, request, queryset):
num_of_tasks = int(request.POST["num_of_tasks"])
if num_of_tasks >= 0 and num_of_tasks % 1 == 0:
response = scale_workers(queryset, num_of_tasks)
count, failures = response["count"], response["failures"]
if count == queryset.count():
message = "All selected challenge workers successfully scaled."
messages.success(request, message)
else:
messages.success(
request,
"{} challenge workers were succesfully scaled.".format(
count
),
)
for fail in failures:
challenge_pk, message = (
fail["challenge_pk"],
fail["message"],
)
display_message = "Challenge {}: {}".format(
challenge_pk, message
)
messages.error(request, display_message)
else:
messages.warning(
request, "Please enter a valid whole number to scale."
)
scale_selected_workers.short_description = (
"Scale all selected challenge workers to a given number."
)
def restart_selected_workers(self, request, queryset):
response = restart_workers(queryset)
count, failures = response["count"], response["failures"]
if count == queryset.count():
message = "All selected challenge workers successfully restarted."
messages.success(request, message)
else:
messages.success(
request,
"{} challenge workers were succesfully restarted.".format(
count
),
)
for fail in failures:
challenge_pk, message = fail["challenge_pk"], fail["message"]
display_message = "Challenge {}: {}".format(
challenge_pk, message
)
messages.error(request, display_message)
restart_selected_workers.short_description = (
"Restart all selected challenge workers."
)
def delete_selected_workers(self, request, queryset):
response = delete_workers(queryset)
count, failures = response["count"], response["failures"]
if count == queryset.count():
message = "All selected challenge workers successfully deleted."
messages.success(request, message)
else:
messages.success(
request,
"{} challenge workers were succesfully deleted.".format(count),
)
for fail in failures:
challenge_pk, message = fail["challenge_pk"], fail["message"]
display_message = "Challenge {}: {}".format(
challenge_pk, message
)
messages.error(request, display_message)
delete_selected_workers.short_description = (
"Delete all selected challenge workers."
)
@admin.register(ChallengeConfiguration)
class ChallengeConfigurationAdmin(ImportExportTimeStampedAdmin):
list_display = (
"id",
"user",
"challenge",
"created_at",
"is_created",
"zip_configuration",
)
list_filter = ("is_created", "created_at")
search_fields = ("id", "challenge__title")
@admin.register(ChallengePhase)
class ChallengePhaseAdmin(ImportExportTimeStampedAdmin):
list_display = (
"id",
"name",
"get_challenge_name_and_id",
"start_date",
"end_date",
"test_annotation",
"is_public",
"is_submission_public",
"leaderboard_public",
)
list_filter = ("leaderboard_public", "start_date", "end_date")
search_fields = ("name", "challenge__title")
def get_challenge_name_and_id(self, obj):
"""Return challenge name corresponding to phase"""
return "%s - %s" % (obj.challenge.title, obj.challenge.id)
get_challenge_name_and_id.short_description = "Challenge"
get_challenge_name_and_id.admin_order_field = "challenge"
@admin.register(ChallengePhaseSplit)
class ChallengePhaseSplitAdmin(ImportExportTimeStampedAdmin):
raw_id_fields = ["challenge_phase", "leaderboard", "dataset_split"]
list_display = (
"id",
"get_challenge",
"challenge_phase",
"dataset_split",
"leaderboard",
"visibility",
"leaderboard_decimal_precision",
"is_leaderboard_order_descending",
"show_execution_time",
)
list_filter = ("visibility",)
search_fields = (
"id",
"challenge_phase__name",
"dataset_split__name",
"leaderboard__id",
"dataset_split__codename",
)
def get_challenge(self, obj):
"""Returns challenge name corresponding to phase-split"""
return obj.challenge_phase.challenge
get_challenge.short_description = "Challenge"
get_challenge.admin_order_field = "challenge_phase__challenge"
@admin.register(ChallengeTemplate)
class ChallengeTemplateAdmin(ImportExportTimeStampedAdmin):
list_display = (
"id",
"title",
"image",
"dataset",
"eval_metrics",
"phases",
"splits",
)
list_filter = ("title", "dataset", "phases", "splits", "eval_metrics")
search_fields = (
"id",
"title",
"dataset",
"phases",
"splits",
"eval_metrics",
)
@admin.register(DatasetSplit)
class DatasetSplitAdmin(ImportExportTimeStampedAdmin):
list_display = ("name", "codename")
list_filter = ("name", "codename")
search_fields = ("id", "name", "codename")
@admin.register(Leaderboard)
class LeaderboardAdmin(ImportExportTimeStampedAdmin):
list_display = ("id", "schema")
search_fields = ("id", "schema")
@admin.register(LeaderboardData)
class LeaderboardDataAdmin(ImportExportTimeStampedAdmin):
list_display = (
"id",
"get_challenge",
"challenge_phase_split",
"submission",
"leaderboard",
"result",
)
list_filter = ("challenge_phase_split", "created_at", "modified_at")
search_fields = (
"id",
"challenge_phase_split__challenge_phase__name",
"submission__participant_team__team_name",
"leaderboard__schema",
"result",
)
def get_challenge(self, obj):
"""Returns challenge name corresponding to leaderboard data entry"""
return obj.challenge_phase_split.challenge_phase.challenge
get_challenge.short_description = "Challenge"
    get_challenge.admin_order_field = "challenge_phase_split__challenge_phase__challenge"
@admin.register(StarChallenge)
class StarChallengeAdmin(ImportExportTimeStampedAdmin):
list_display = ("user", "challenge", "is_starred")
list_filter = ("is_starred",)
search_fields = ("id", "user__username", "challenge__title")
@admin.register(UserInvitation)
class UserInvitationAdmin(ImportExportTimeStampedAdmin):
list_display = (
"email",
"invitation_key",
"status",
"get_challenge_name_and_id",
"get_username_and_id",
"get_host_team_and_member_name",
)
list_filter = ("status", "challenge__title")
search_fields = ("email",)
def get_challenge_name_and_id(self, obj):
"""Return challenge name and id for a challenge"""
return "%s - %s" % (obj.challenge.title, obj.challenge.id)
get_challenge_name_and_id.short_description = "Challenge"
get_challenge_name_and_id.admin_order_field = "challenge"
def get_username_and_id(self, obj):
"""Return username and id of a user"""
return "%s - %s" % (obj.user.username, obj.user.id)
get_username_and_id.short_description = "User"
    get_username_and_id.admin_order_field = "user__username"
def get_host_team_and_member_name(self, obj):
"""Returns the host team name and the member name"""
return "%s - %s" % (
obj.invited_by.team_name.team_name,
obj.invited_by.user.username,
)
get_host_team_and_member_name.short_description = "Invited by"
get_host_team_and_member_name.admin_order_field = "invited_by"
@admin.register(ChallengeEvaluationCluster)
class ChallengeEvaluationClusterAdmin(ImportExportTimeStampedAdmin):
readonly_fields = ("created_at",)
list_display = ("id", "name", "cluster_yaml", "kube_config")
list_filter = ("name",)
search_fields = ("id", "name")
@admin.register(PWCChallengeLeaderboard)
class PWCChallengeLeaderboardAdmin(ImportExportTimeStampedAdmin):
raw_id_fields = ["phase_split"]
readonly_fields = ("created_at",)
list_display = (
"id",
"get_challenge_name_and_id",
"phase_split",
"get_challenge_phase_split_id",
"area",
"task",
"dataset",
"enable_sync",
)
list_filter = ("area", "enable_sync")
search_fields = ("id", "area", "task", "dataset")
def get_challenge_phase_split_id(self, obj):
"""Return challenge phase split id for a challenge phase and split"""
return "%s" % (obj.phase_split.id)
get_challenge_phase_split_id.short_description = "Challenge Phase Split ID"
get_challenge_phase_split_id.admin_order_field = "phase_split"
def get_challenge_name_and_id(self, obj):
"""Return challenge name and id for a challenge"""
return "%s - %s" % (
obj.phase_split.challenge_phase.challenge.title,
obj.phase_split.challenge_phase.challenge.id,
)
get_challenge_name_and_id.short_description = "Challenge Name - ID"
    get_challenge_name_and_id.admin_order_field = "phase_split__challenge_phase__challenge"
|
from fid import fid
from kid import kid_kid, kid_is
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-m", "--metric", dest="metric", default="all",
help="Set batch size to use for InceptionV3 network",
type=str)
parser.add_option("--p1", "--path1", dest="path1", default=None,
help="Path to directory containing the real images")
parser.add_option("--p2", "--path2", dest="path2", default=None,
help="Path to directory containing the generated images")
parser.add_option("-b", "--batch-size", dest="batch_size", default=1,
help="Set batch size to use for InceptionV3 network",
type=int)
print ('------------------------options-------------------------')
options, _ = parser.parse_args()
train_A_path = 'dataset/selfie2anime_64_64/trainA'
train_B_path = 'dataset/selfie2anime_64_64/trainB'
print ('here')
if options.metric == 'all':
print ('calculating is now...')
print ('is score trainA vs output_B2A:', kid_is(options.path1, 16))
print ('is score trainB vs output_A2B:', kid_is(options.path2, 16))
print ('calculating fid now...')
print ('fid score trainA vs output_B2A:', fid(train_A_path, options.path1, 8))
print ('fid score trainB vs output_A2B:', fid(train_B_path, options.path2, 8))
print ('calculating kid now...')
print ('kid score trainA vs output_B2A:', kid_kid(train_A_path, options.path1, 16))
print ('kid score trainB vs output_A2B:', kid_kid(train_B_path, options.path2, 16))
if options.metric == 'fid':
print ('calculating fid now...')
print ('fid score trainA vs output_B2A:', fid(train_A_path, options.path1, 8))
print ('fid score trainB vs output_A2B:', fid(train_B_path, options.path2, 8))
if options.metric == 'is':
print ('calculating is now...')
print ('is score trainA vs output_B2A:', kid_is(options.path1, 16))
print ('is score trainB vs output_A2B:', kid_is(options.path2, 16))
if options.metric == 'kid':
print ('calculating kid now...')
print ('kid score trainA vs output_B2A:', kid_kid(train_A_path, options.path1, 16))
print ('kid score trainB vs output_A2B:', kid_kid(train_B_path, options.path2, 16))
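    # Example invocation (script name and paths are illustrative, not from the
    # original project); note that the parsed --batch-size is never referenced,
    # the calls above hard-code batch sizes of 8 and 16:
    #   python evaluate.py --metric fid --p1 results/output_B2A --p2 results/output_A2B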
|
import sys
import os
sys.path.append(os.getcwd())
import torch
from training_structures.Contrastive_Learning import train, test
from fusions.common_fusions import Concat
from datasets.imdb.get_data import get_dataloader
from unimodals.common_models import MLP, VGG16, MaxOut_MLP, Linear
traindata, validdata, testdata = get_dataloader('../video/multimodal_imdb.hdf5', vgg=True, batch_size=128)
encoders=[MaxOut_MLP(512, 512, 300, linear_layer=False), MaxOut_MLP(512, 1024, 4096, 512, False)]
#encoders=[MLP(300, 512, 512), MLP(4096, 1000, 512)]
#encoders=[MLP(300, 512, 512), VGG16(512)]
#head=MLP(1024,512,23).cuda()
head=Linear(1024,23).cuda()
refiner = MLP(1024,3072,4396).cuda()
#refiner = MLP(1024,2048,1024).cuda()
fusion=Concat().cuda()
train(encoders,fusion,head,refiner,traindata,validdata,1000, early_stop=True,task="multilabel",\
save="best_contrast.pt", optimtype=torch.optim.AdamW,lr=1e-2,weight_decay=0.01, criterion=torch.nn.BCEWithLogitsLoss())
print("Testing:")
model=torch.load('best_contrast.pt').cuda()
test(model,testdata,criterion=torch.nn.BCEWithLogitsLoss(),task="multilabel")
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""EdgeQL to IR compiler context."""
from __future__ import annotations
from typing import *
import collections
import dataclasses
import enum
import uuid
import weakref
from edb.common import compiler
from edb.common import parsing
from edb.edgeql import ast as qlast
from edb.edgeql import qltypes
from edb.ir import ast as irast
from edb.ir import typeutils as irtyputils
from edb.schema import expraliases as s_aliases
from edb.schema import functions as s_func
from edb.schema import name as s_name
from edb.schema import objects as s_obj
from edb.schema import pointers as s_pointers
from edb.schema import schema as s_schema
from edb.schema import types as s_types
from .options import GlobalCompilerOptions
if TYPE_CHECKING:
from edb.schema import objtypes as s_objtypes
from edb.schema import sources as s_sources
class ContextSwitchMode(enum.Enum):
NEW = enum.auto()
SUBQUERY = enum.auto()
NEWSCOPE = enum.auto()
NEWFENCE = enum.auto()
DETACHED = enum.auto()
class ViewRPtr:
def __init__(
self,
source: s_sources.Source,
*,
ptrcls: Optional[s_pointers.Pointer],
ptrcls_name: Optional[s_name.QualName] = None,
base_ptrcls: Optional[s_pointers.Pointer] = None,
ptrcls_is_linkprop: bool = False,
ptrcls_is_alias: bool = False,
rptr: Optional[irast.Pointer] = None,
is_insert: bool = False,
is_update: bool = False,
) -> None:
self.source = source
self.ptrcls = ptrcls
self.base_ptrcls = base_ptrcls
self.ptrcls_name = ptrcls_name
self.ptrcls_is_linkprop = ptrcls_is_linkprop
self.ptrcls_is_alias = ptrcls_is_alias
self.rptr = rptr
self.is_insert = is_insert
self.is_update = is_update
@dataclasses.dataclass
class StatementMetadata:
is_unnest_fence: bool = False
iterator_target: bool = False
@dataclasses.dataclass
class ScopeInfo:
path_scope: irast.ScopeTreeNode
pinned_path_id_ns: Optional[FrozenSet[str]] = None
class PointerRefCache(Dict[irtyputils.PtrRefCacheKey, irast.BasePointerRef]):
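    # In addition to the forward (cache key -> BasePointerRef) dict mapping,
    # keep a reverse map from each built ref back to the schema pointer it was
    # derived from (key[0]), so get_ptrcls_for_ref() can recover it later.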
_rcache: Dict[irast.BasePointerRef, s_pointers.PointerLike]
def __init__(self) -> None:
super().__init__()
self._rcache = {}
def __setitem__(
self,
key: irtyputils.PtrRefCacheKey,
val: irast.BasePointerRef,
) -> None:
super().__setitem__(key, val)
self._rcache[val] = key[0]
def get_ptrcls_for_ref(
self,
ref: irast.BasePointerRef,
) -> Optional[s_pointers.PointerLike]:
return self._rcache.get(ref)
# Volatility inference computes two volatility results:
# A basic one, and one for consumption by materialization
InferredVolatility = Union[
qltypes.Volatility, Tuple[qltypes.Volatility, qltypes.Volatility]]
class Environment:
"""Compilation environment."""
schema: s_schema.Schema
"""A Schema instance to use for class resolution."""
orig_schema: s_schema.Schema
"""A Schema as it was at the start of the compilation."""
options: GlobalCompilerOptions
"""Compiler options."""
path_scope: irast.ScopeTreeNode
"""Overrall expression path scope tree."""
schema_view_cache: Dict[s_types.Type, s_types.Type]
"""Type cache used by schema-level views."""
query_parameters: Dict[str, irast.Param]
"""A mapping of query parameters to their types. Gets populated during
the compilation."""
set_types: Dict[irast.Set, s_types.Type]
"""A dictionary of all Set instances and their schema types."""
type_origins: Dict[s_types.Type, parsing.ParserContext]
"""A dictionary of notable types and their source origins.
This is used to trace where a particular type instance originated in
order to provide useful diagnostics for type errors.
"""
inferred_types: Dict[irast.Base, s_types.Type]
"""A dictionary of all expressions and their inferred schema types."""
inferred_volatility: Dict[
irast.Base,
InferredVolatility]
"""A dictionary of expressions and their inferred volatility."""
inferred_multiplicity: Dict[
irast.Base,
qltypes.Multiplicity]
"""A dictionary of expressions and their inferred multiplicity."""
view_shapes: Dict[
Union[s_types.Type, s_pointers.PointerLike],
List[Tuple[s_pointers.Pointer, qlast.ShapeOp]]
]
"""Object output or modification shapes."""
pointer_derivation_map: Dict[
s_pointers.Pointer,
List[s_pointers.Pointer],
]
"""A parent: children mapping of derived pointer classes."""
pointer_specified_info: Dict[
s_pointers.Pointer,
Tuple[Optional[qltypes.SchemaCardinality], bool,
Optional[parsing.ParserContext]],
]
"""Cardinality/source context for pointers with unclear cardinality."""
view_shapes_metadata: Dict[s_types.Type, irast.ViewShapeMetadata]
schema_refs: Set[s_obj.Object]
"""A set of all schema objects referenced by an expression."""
schema_ref_exprs: Optional[Dict[s_obj.Object, Set[qlast.Base]]]
"""Map from all schema objects referenced to the ast referants.
This is used for rewriting expressions in the schema after a rename. """
created_schema_objects: Set[s_obj.Object]
"""A set of all schema objects derived by this compilation."""
# Caches for costly operations in edb.ir.typeutils
ptr_ref_cache: PointerRefCache
type_ref_cache: Dict[irtyputils.TypeRefCacheKey, irast.TypeRef]
dml_exprs: List[qlast.Base]
"""A list of DML expressions (statements and DML-containing
functions) that appear in a function body.
"""
#: A list of bindings that should be assumed to be singletons.
singletons: List[irast.PathId]
scope_tree_nodes: MutableMapping[int, irast.ScopeTreeNode]
"""Map from unique_id to nodes."""
materialized_sets: Dict[
Union[s_types.Type, s_pointers.PointerLike],
qlast.Statement,
]
"""A mapping of computed sets that must be computed only once."""
compiled_stmts: Dict[qlast.Statement, irast.Stmt]
"""A mapping of from input edgeql to compiled IR"""
def __init__(
self,
*,
schema: s_schema.Schema,
path_scope: Optional[irast.ScopeTreeNode] = None,
options: Optional[GlobalCompilerOptions] = None,
) -> None:
if options is None:
options = GlobalCompilerOptions()
if path_scope is None:
path_scope = irast.new_scope_tree()
self.options = options
self.schema = schema
self.orig_schema = schema
self.path_scope = path_scope
self.schema_view_cache = {}
self.query_parameters = {}
self.set_types = {}
self.type_origins = {}
self.inferred_types = {}
self.inferred_volatility = {}
self.inferred_multiplicity = {}
self.view_shapes = collections.defaultdict(list)
self.view_shapes_metadata = collections.defaultdict(
irast.ViewShapeMetadata)
self.schema_refs = set()
self.schema_ref_exprs = {} if options.track_schema_ref_exprs else None
self.created_schema_objects = set()
self.ptr_ref_cache = PointerRefCache()
self.type_ref_cache = {}
self.dml_exprs = []
self.pointer_derivation_map = collections.defaultdict(list)
self.pointer_specified_info = {}
self.singletons = []
self.scope_tree_nodes = weakref.WeakValueDictionary()
self.materialized_sets = {}
self.compiled_stmts = {}
def add_schema_ref(
self, sobj: s_obj.Object, expr: Optional[qlast.Base]) -> None:
self.schema_refs.add(sobj)
if self.schema_ref_exprs is not None and expr:
self.schema_ref_exprs.setdefault(sobj, set()).add(expr)
@overload
def get_track_schema_object( # NoQA: F811
self,
name: s_name.Name,
expr: Optional[qlast.Base],
*,
modaliases: Optional[Mapping[Optional[str], str]] = None,
type: Optional[Type[s_obj.Object]] = None,
default: Union[s_obj.Object, s_obj.NoDefaultT] = s_obj.NoDefault,
label: Optional[str] = None,
condition: Optional[Callable[[s_obj.Object], bool]] = None,
) -> s_obj.Object:
...
@overload
def get_track_schema_object( # NoQA: F811
self,
name: s_name.Name,
expr: Optional[qlast.Base],
*,
modaliases: Optional[Mapping[Optional[str], str]] = None,
type: Optional[Type[s_obj.Object]] = None,
default: Union[s_obj.Object, s_obj.NoDefaultT, None] = s_obj.NoDefault,
label: Optional[str] = None,
condition: Optional[Callable[[s_obj.Object], bool]] = None,
) -> Optional[s_obj.Object]:
...
def get_track_schema_object( # NoQA: F811
self,
name: s_name.Name,
expr: Optional[qlast.Base],
*,
modaliases: Optional[Mapping[Optional[str], str]] = None,
type: Optional[Type[s_obj.Object]] = None,
default: Union[s_obj.Object, s_obj.NoDefaultT, None] = s_obj.NoDefault,
label: Optional[str] = None,
condition: Optional[Callable[[s_obj.Object], bool]] = None,
) -> Optional[s_obj.Object]:
sobj = self.schema.get(
name, module_aliases=modaliases, type=type,
condition=condition, label=label,
default=default)
if sobj is not None and sobj is not default:
self.add_schema_ref(sobj, expr)
if (
isinstance(sobj, s_types.Type)
and sobj.get_expr(self.schema) is not None
):
# If the type is derived from an ALIAS declaration,
# make sure we record the reference to the Alias object
# as well for correct delta ordering.
alias_objs = self.schema.get_referrers(
sobj,
scls_type=s_aliases.Alias,
field_name='type',
)
for obj in alias_objs:
self.add_schema_ref(obj, expr)
return sobj
def get_track_schema_type(
self,
name: s_name.Name,
expr: Optional[qlast.Base]=None,
*,
modaliases: Optional[Mapping[Optional[str], str]] = None,
default: Union[None, s_obj.Object, s_obj.NoDefaultT] = s_obj.NoDefault,
label: Optional[str]=None,
condition: Optional[Callable[[s_obj.Object], bool]]=None,
) -> s_types.Type:
stype = self.get_track_schema_object(
name, expr, modaliases=modaliases, default=default, label=label,
condition=condition, type=s_types.Type,
)
return cast(s_types.Type, stype)
class ContextLevel(compiler.ContextLevel):
env: Environment
"""Compilation environment common for all context levels."""
derived_target_module: Optional[str]
"""The name of the module for classes derived by views."""
anchors: Dict[
Union[str, Type[qlast.SpecialAnchor]],
irast.Set,
]
"""A mapping of anchor variables (aliases to path expressions passed
to the compiler programmatically).
"""
modaliases: Dict[Optional[str], str]
"""A combined list of module name aliases declared in the WITH block,
or passed to the compiler programmatically.
"""
func: Optional[s_func.Function]
"""Schema function object required when compiling functions bodies."""
stmt_metadata: Dict[qlast.Statement, StatementMetadata]
"""Extra statement metadata needed by the compiler, but not in AST."""
source_map: Dict[s_pointers.PointerLike, irast.ComputableInfo]
"""A mapping of computable pointers to QL source AST and context."""
view_nodes: Dict[s_name.Name, s_types.Type]
"""A dictionary of newly derived Node classes representing views."""
view_sets: Dict[s_types.Type, irast.Set]
"""A dictionary of IR expressions for views declared in the query."""
type_rewrites: Dict[s_types.Type, irast.Set]
"""Access policy rewrites for schema-level types."""
aliased_views: ChainMap[s_name.Name, Optional[s_types.Type]]
"""A dictionary of views aliased in a statement body."""
must_use_views: Dict[
s_types.Type,
Tuple[s_name.Name, parsing.ParserContext],
]
"""A set of views that *must* be used in an expression."""
expr_view_cache: Dict[Tuple[qlast.Base, s_name.Name], irast.Set]
"""Type cache used by expression-level views."""
shape_type_cache: Dict[
Tuple[
s_objtypes.ObjectType,
bool, bool, bool,
Tuple[qlast.ShapeElement, ...],
],
s_objtypes.ObjectType,
]
"""Type cache for shape expressions."""
class_view_overrides: Dict[uuid.UUID, s_types.Type]
"""Object mapping used by implicit view override in SELECT."""
clause: Optional[str]
"""Statement clause the compiler is currently in."""
toplevel_stmt: Optional[irast.Stmt]
"""Top-level statement."""
stmt: Optional[irast.Stmt]
"""Statement node currently being built."""
qlstmt: Optional[qlast.Statement]
"""Statement source node currently being built."""
path_id_namespace: FrozenSet[str]
"""A namespace to use for all path ids."""
pending_stmt_own_path_id_namespace: FrozenSet[str]
"""A path id namespace to add to the fence of the next statement."""
pending_stmt_full_path_id_namespace: FrozenSet[str]
"""A set of path id namespaces to use in path ids in the next statement."""
banned_paths: Set[irast.PathId]
"""A set of path ids that are considered invalid in this context."""
view_map: ChainMap[irast.PathId, Tuple[irast.PathId, irast.Set]]
"""Set translation map. Used for mapping computable sources..
When compiling a computable, we need to be able to map references to
the source back to the correct source set.
This maps from a namespace-stripped source path_id to the expected
computable-internal path_id and the actual source set.
The namespace stripping is necessary to handle the case where
bindings have added more namespaces to the source set reference.
(See test_edgeql_scope_computables_13.)
"""
path_scope: irast.ScopeTreeNode
"""Path scope tree, with per-lexical-scope levels."""
path_scope_map: Dict[irast.Set, ScopeInfo]
"""A dictionary of scope info that are appropriate for a given view."""
iterator_ctx: Optional[ContextLevel]
"""The context of the statement where all iterators should be placed."""
iterator_path_ids: FrozenSet[irast.PathId]
"""The path ids of all in scope iterator variables"""
scope_id_ctr: compiler.SimpleCounter
"""Path scope id counter."""
view_rptr: Optional[ViewRPtr]
"""Pointer information for the top-level view of the substatement."""
view_scls: Optional[s_types.Type]
"""Schema class for the top-level set of the substatement."""
toplevel_result_view_name: Optional[s_name.QualName]
"""The name to use for the view that is the result of the top statement."""
partial_path_prefix: Optional[irast.Set]
"""The set used as a prefix for partial paths."""
implicit_id_in_shapes: bool
"""Whether to include the id property in object shapes implicitly."""
implicit_tid_in_shapes: bool
"""Whether to include the type id property in object shapes implicitly."""
implicit_tname_in_shapes: bool
"""Whether to include the type name property in object shapes
implicitly."""
implicit_limit: int
"""Implicit LIMIT clause in SELECT statements."""
inhibit_implicit_limit: bool
"""Whether implicit limit injection should be inhibited."""
special_computables_in_mutation_shape: FrozenSet[str]
"""A set of "special" computable pointers allowed in mutation shape."""
empty_result_type_hint: Optional[s_types.Type]
"""Type to use if the statement result expression is an empty set ctor."""
defining_view: Optional[s_types.Type]
"""Whether a view is currently being defined (as opposed to be compiled)"""
recompiling_schema_alias: bool
"""Whether we are currently recompiling a schema-level expression alias."""
compiling_update_shape: bool
"""Whether an UPDATE shape is currently being compiled."""
in_conditional: Optional[parsing.ParserContext]
"""Whether currently in a conditional branch."""
disable_shadowing: Set[Union[s_obj.Object, s_pointers.PseudoPointer]]
"""A set of schema objects for which the shadowing rewrite should be
disabled."""
path_log: Optional[List[irast.PathId]]
"""An optional list of path ids added to the scope tree in this context."""
def __init__(
self,
prevlevel: Optional[ContextLevel],
mode: ContextSwitchMode,
*,
env: Optional[Environment] = None,
) -> None:
self.mode = mode
if prevlevel is None:
assert env is not None
self.env = env
self.derived_target_module = None
self.aliases = compiler.AliasGenerator()
self.anchors = {}
self.modaliases = {}
self.stmt_metadata = {}
self.source_map = {}
self.view_nodes = {}
self.view_sets = {}
self.type_rewrites = {}
self.aliased_views = collections.ChainMap()
self.must_use_views = {}
self.expr_view_cache = {}
self.shape_type_cache = {}
self.class_view_overrides = {}
self.toplevel_stmt = None
self.stmt = None
self.qlstmt = None
self.path_id_namespace = frozenset()
self.pending_stmt_own_path_id_namespace = frozenset()
self.pending_stmt_full_path_id_namespace = frozenset()
self.banned_paths = set()
self.view_map = collections.ChainMap()
self.path_scope = env.path_scope
self.path_scope_map = {}
self.iterator_ctx = None
self.iterator_path_ids = frozenset()
self.scope_id_ctr = compiler.SimpleCounter()
self.view_scls = None
self.expr_exposed = False
self.partial_path_prefix = None
self.view_rptr = None
self.toplevel_result_view_name = None
self.implicit_id_in_shapes = False
self.implicit_tid_in_shapes = False
self.implicit_tname_in_shapes = False
self.implicit_limit = 0
self.inhibit_implicit_limit = False
self.special_computables_in_mutation_shape = frozenset()
self.empty_result_type_hint = None
self.defining_view = None
self.compiling_update_shape = False
self.in_conditional = None
self.disable_shadowing = set()
self.path_log = None
self.recompiling_schema_alias = False
else:
self.env = prevlevel.env
self.derived_target_module = prevlevel.derived_target_module
self.aliases = prevlevel.aliases
self.stmt_metadata = prevlevel.stmt_metadata
self.source_map = prevlevel.source_map
self.view_nodes = prevlevel.view_nodes
self.view_sets = prevlevel.view_sets
self.type_rewrites = prevlevel.type_rewrites
self.must_use_views = prevlevel.must_use_views
self.expr_view_cache = prevlevel.expr_view_cache
self.shape_type_cache = prevlevel.shape_type_cache
self.iterator_ctx = prevlevel.iterator_ctx
self.iterator_path_ids = prevlevel.iterator_path_ids
self.path_id_namespace = prevlevel.path_id_namespace
self.pending_stmt_own_path_id_namespace = \
prevlevel.pending_stmt_own_path_id_namespace
self.pending_stmt_full_path_id_namespace = \
prevlevel.pending_stmt_full_path_id_namespace
self.banned_paths = prevlevel.banned_paths
self.view_map = prevlevel.view_map
if prevlevel.path_scope is None:
prevlevel.path_scope = self.env.path_scope
self.path_scope = prevlevel.path_scope
self.path_scope_map = prevlevel.path_scope_map
self.scope_id_ctr = prevlevel.scope_id_ctr
self.view_scls = prevlevel.view_scls
self.expr_exposed = prevlevel.expr_exposed
self.partial_path_prefix = prevlevel.partial_path_prefix
self.toplevel_stmt = prevlevel.toplevel_stmt
self.implicit_id_in_shapes = prevlevel.implicit_id_in_shapes
self.implicit_tid_in_shapes = prevlevel.implicit_tid_in_shapes
self.implicit_tname_in_shapes = prevlevel.implicit_tname_in_shapes
self.implicit_limit = prevlevel.implicit_limit
self.inhibit_implicit_limit = prevlevel.inhibit_implicit_limit
self.special_computables_in_mutation_shape = \
prevlevel.special_computables_in_mutation_shape
self.empty_result_type_hint = prevlevel.empty_result_type_hint
self.defining_view = prevlevel.defining_view
self.compiling_update_shape = prevlevel.compiling_update_shape
self.in_conditional = prevlevel.in_conditional
self.disable_shadowing = prevlevel.disable_shadowing
self.path_log = prevlevel.path_log
self.recompiling_schema_alias = prevlevel.recompiling_schema_alias
if mode == ContextSwitchMode.SUBQUERY:
self.anchors = prevlevel.anchors.copy()
self.modaliases = prevlevel.modaliases.copy()
self.aliased_views = prevlevel.aliased_views.new_child()
self.class_view_overrides = \
prevlevel.class_view_overrides.copy()
self.pending_stmt_own_path_id_namespace = frozenset()
self.pending_stmt_full_path_id_namespace = frozenset()
self.banned_paths = prevlevel.banned_paths.copy()
self.view_rptr = None
self.view_scls = None
self.stmt = None
self.qlstmt = None
self.view_rptr = None
self.toplevel_result_view_name = None
elif mode == ContextSwitchMode.DETACHED:
self.anchors = prevlevel.anchors.copy()
self.modaliases = prevlevel.modaliases.copy()
self.aliased_views = collections.ChainMap()
self.class_view_overrides = {}
self.expr_exposed = prevlevel.expr_exposed
self.view_nodes = {}
self.view_sets = {}
self.path_id_namespace = frozenset({self.aliases.get('ns')})
self.pending_stmt_own_path_id_namespace = frozenset()
self.pending_stmt_full_path_id_namespace = frozenset()
self.banned_paths = set()
self.iterator_ctx = None
self.iterator_path_ids = frozenset()
self.view_rptr = None
self.view_scls = None
self.stmt = prevlevel.stmt
self.qlstmt = prevlevel.qlstmt
self.partial_path_prefix = None
self.view_rptr = None
self.toplevel_result_view_name = None
self.disable_shadowing = prevlevel.disable_shadowing.copy()
else:
self.anchors = prevlevel.anchors
self.modaliases = prevlevel.modaliases
self.aliased_views = prevlevel.aliased_views
self.class_view_overrides = prevlevel.class_view_overrides
self.stmt = prevlevel.stmt
self.qlstmt = prevlevel.qlstmt
self.view_rptr = prevlevel.view_rptr
self.toplevel_result_view_name = \
prevlevel.toplevel_result_view_name
if mode == ContextSwitchMode.NEWFENCE:
self.path_scope = self.path_scope.attach_fence()
if mode == ContextSwitchMode.NEWSCOPE:
self.path_scope = self.path_scope.attach_branch()
def subquery(self) -> compiler.CompilerContextManager[ContextLevel]:
return self.new(ContextSwitchMode.SUBQUERY)
def newscope(
self,
*,
fenced: bool = False,
) -> compiler.CompilerContextManager[ContextLevel]:
if fenced:
mode = ContextSwitchMode.NEWFENCE
else:
mode = ContextSwitchMode.NEWSCOPE
return self.new(mode)
def detached(self) -> compiler.CompilerContextManager[ContextLevel]:
return self.new(ContextSwitchMode.DETACHED)
class CompilerContext(compiler.CompilerContext[ContextLevel]):
ContextLevelClass = ContextLevel
default_mode = ContextSwitchMode.NEW
|
"""Library for exceptions in RTSPtoWebRTC Client."""
class ClientError(Exception):
"""Exception communicating with the server."""
class ResponseError(ClientError):
"""Exception after receiving a response from the server."""
|
import numpy as np
from .div1D import div1D
from scipy.sparse import spdiags
def div1DNonUniform(k, ticks, dx=1.):
"""Computes a m+2 by m+1 one-dimensional non-uniform mimetic divergence
operator
Arguments:
k (int): Order of accuracy
ticks (:obj:`ndarray`): Edges' ticks e.g. [0 0.1 0.15 0.2 0.3 0.4 0.45]
dx (float): Grid step size
Returns:
:obj:`ndarray` containing discrete divergence operator
"""
"""
Get uniform operator without scaling
"""
D = div(k, ticks.size-1, dx)
m = D.shape[0]
"""
Compute the Jacobian using the uniform operator and the ticks
"""
J = spdiags(np.power(np.dot(D.toarray(), ticks), -1), 0, m, m)
return np.dot(J, D)
if __name__ == '__main__':
print(div1DNonUniform(2, np.array([0, 0.1, 0.15, 0.2, 0.3, 0.4, 0.45]), 1))
|
{
"targets": [
{
"target_name": "hx711",
"sources": [
"source/binding.cpp",
"source/hx711.cpp"
],
"include_dirs": [
"<!@(node -p \"require('node-addon-api').include\")",
"./headers"
],
"defines": ["NAPI_DISABLE_CPP_EXCEPTIONS"],
"conditions":[
["target_arch=='arm'", {
"dependencies": ["<!(node -p \"require('node-addon-api').gyp\")"],
"libraries": ["-lwiringPi"],
}],
],
},
],
}
|
"""Support for KNX/IP weather station."""
from xknx.devices import Weather as XknxWeather
from homeassistant.components.weather import WeatherEntity
from homeassistant.const import TEMP_CELSIUS
from .const import DOMAIN
from .knx_entity import KnxEntity
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the scenes for KNX platform."""
entities = []
for device in hass.data[DOMAIN].xknx.devices:
if isinstance(device, XknxWeather):
entities.append(KNXWeather(device))
async_add_entities(entities)
class KNXWeather(KnxEntity, WeatherEntity):
"""Representation of a KNX weather device."""
def __init__(self, device: XknxWeather):
"""Initialize of a KNX sensor."""
super().__init__(device)
@property
def temperature(self):
"""Return current temperature."""
return self._device.temperature
@property
def temperature_unit(self):
"""Return temperature unit."""
return TEMP_CELSIUS
@property
def pressure(self):
"""Return current air pressure."""
        # KNX provides pressure in Pa - HA expects hPa
return (
self._device.air_pressure / 100
if self._device.air_pressure is not None
else None
)
@property
def condition(self):
"""Return current weather condition."""
return self._device.ha_current_state().value
@property
def humidity(self):
"""Return current humidity."""
return self._device.humidity
@property
def wind_bearing(self):
"""Return current wind bearing in degrees."""
return self._device.wind_bearing
@property
def wind_speed(self):
"""Return current wind speed in km/h."""
# KNX only supports wind speed in m/s
return (
self._device.wind_speed * 3.6
if self._device.wind_speed is not None
else None
)
|
# Copyright (c) 2015-2019 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0, which is in the LICENSE file.
"""
Defines model components to describe transmission dispatch for the
Switch model.
"""
from pyomo.environ import *
dependencies = 'switch_model.timescales', 'switch_model.balancing.load_zones',\
'switch_model.financials', 'switch_model.transmission.transport.build'
def define_components(mod):
"""
Adds components to a Pyomo abstract model object to describe the
dispatch of transmission resources in an electric grid. This
includes parameters, dispatch decisions and constraints. Unless
otherwise stated, all power capacity is specified in units of MW,
all energy amounts are specified in units of MWh, and all sets and
parameters are mandatory.
TRANS_TIMEPOINTS describes the scope that transmission dispatch
decisions must be made over. It is defined as the set of
DIRECTIONAL_TX crossed with TIMEPOINTS. It is indexed as
(load_zone_from, load_zone_to, timepoint) and may be abbreviated as
[z_from, zone_to, tp] for brevity.
DispatchTx[z_from, zone_to, tp] is the decision of how much power
to send along each transmission line in a particular direction in
each timepoint.
Maximum_DispatchTx is a constraint that forces DispatchTx to
stay below the bounds of installed capacity.
TxPowerSent[z_from, zone_to, tp] is an expression that describes the
power sent down a transmission line. This is completely determined by
DispatchTx[z_from, zone_to, tp].
    TxPowerReceived[z_from, zone_to, tp] is an expression that describes the
    power received from a transmission line after losses. This is completely
    determined by DispatchTx[z_from, zone_to, tp] and trans_efficiency[tx].
TXPowerNet[z, tp] is an expression that returns the net power from
transmission for a load zone. This is the sum of TxPowerReceived by
the load zone minus the sum of TxPowerSent by the load zone.
"""
mod.TRANS_TIMEPOINTS = Set(
dimen=3,
initialize=lambda m: m.DIRECTIONAL_TX * m.TIMEPOINTS
)
mod.DispatchTx = Var(mod.TRANS_TIMEPOINTS, within=NonNegativeReals)
mod.Maximum_DispatchTx = Constraint(
mod.TRANS_TIMEPOINTS,
rule=lambda m, zone_from, zone_to, tp: (
m.DispatchTx[zone_from, zone_to, tp] <=
m.TxCapacityNameplateAvailable[m.trans_d_line[zone_from, zone_to],
m.tp_period[tp]]))
mod.TxPowerSent = Expression(
mod.TRANS_TIMEPOINTS,
rule=lambda m, zone_from, zone_to, tp: (
m.DispatchTx[zone_from, zone_to, tp]))
mod.TxPowerReceived = Expression(
mod.TRANS_TIMEPOINTS,
rule=lambda m, zone_from, zone_to, tp: (
m.DispatchTx[zone_from, zone_to, tp] *
m.trans_efficiency[m.trans_d_line[zone_from, zone_to]]))
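    # Worked example: dispatching 100 MW on a line with trans_efficiency = 0.96
    # gives TxPowerSent = 100 MW at the sending end and TxPowerReceived = 96 MW
    # at the receiving end; the 4 MW difference is the line loss.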
def TXPowerNet_calculation(m, z, tp):
return (
sum(m.TxPowerReceived[zone_from, z, tp]
for zone_from in m.TX_CONNECTIONS_TO_ZONE[z]) -
sum(m.TxPowerSent[z, zone_to, tp]
for zone_to in m.TX_CONNECTIONS_TO_ZONE[z]))
mod.TXPowerNet = Expression(
mod.LOAD_ZONES, mod.TIMEPOINTS,
rule=TXPowerNet_calculation)
# Register net transmission as contributing to zonal energy balance
mod.Zone_Power_Injections.append('TXPowerNet')
|
# organize imports
import cv2
import numpy as np
from pygame import mixer
# color range (BGR) to detect - drum stick
lower = [17, 15, 100]
upper = [80, 76, 220]
# initialize mixer
mixer.init()
# region coordinates
k_top, k_bottom, k_right, k_left = 180, 280, 540, 640
h_top, h_bottom, h_right, h_left = 140, 240, 300, 400
s_top, s_bottom, s_right, s_left = 140, 240, 750, 850
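# each region above is (top, bottom, right, left) pixel bounds in the flipped
# 1280x720 frame; rows are sliced as [top:bottom] and columns as [right:left]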
#----------------------
# play sounds
#----------------------
def playKick():
mixer.music.load('kick.mp3')
mixer.music.play()
def playHihat():
mixer.music.load('hihat.mp3')
mixer.music.play()
def playSnare():
mixer.music.load('snare.mp3')
mixer.music.play()
#----------------------
# find contours
#----------------------
def findContours(image):
img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresholded = cv2.threshold(img, 15, 255, cv2.THRESH_BINARY)[1]
    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x but
    # (contours, hierarchy) in 2.x/4.x; [-2] picks the contour list either way.
    cnts = cv2.findContours(thresholded.copy(),
                            cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
return len(cnts)
# bool for each drum
e_snare = 0
e_kick = 0
e_hihat = 0
#----------------------
# main function
#----------------------
if __name__ == "__main__":
# accumulated weight
aWeight = 0.5
# get reference to camera
cam = cv2.VideoCapture(0)
# camera related tuning
cam.set(3, 1280)
cam.set(4, 720)
cam.set(cv2.CAP_PROP_FPS, 60)
# loop till user presses "q"
while True:
# read a frame from the camera
status, frame = cam.read()
# take a clone
clone = frame.copy()
clone = cv2.flip(clone, 1)
clone = cv2.resize(clone, (1280,720))
# get the three drum regions
reg_kick = clone[k_top:k_bottom, k_right:k_left]
reg_hihat = clone[h_top:h_bottom, h_right:h_left]
reg_snare = clone[s_top:s_bottom, s_right:s_left]
# blur the regions
reg_kick = cv2.GaussianBlur(reg_kick, (7, 7), 0)
reg_hihat = cv2.GaussianBlur(reg_hihat, (7, 7), 0)
reg_snare = cv2.GaussianBlur(reg_snare, (7, 7), 0)
l = np.array(lower, dtype="uint8")
u = np.array(upper, dtype="uint8")
mask_kick = cv2.inRange(reg_kick, l, u)
mask_hihat = cv2.inRange(reg_hihat, l, u)
mask_snare = cv2.inRange(reg_snare, l, u)
out_kick = cv2.bitwise_and(reg_kick, reg_kick, mask=mask_kick)
out_hihat = cv2.bitwise_and(reg_hihat, reg_hihat, mask=mask_hihat)
out_snare = cv2.bitwise_and(reg_snare, reg_snare, mask=mask_snare)
cnts_kick = findContours(out_kick)
cnts_hihat = findContours(out_hihat)
cnts_snare = findContours(out_snare)
if (cnts_kick > 0) and (e_kick == 0):
playKick()
e_kick = 1
elif (cnts_kick == 0):
e_kick = 0
if (cnts_hihat > 0) and (e_hihat == 0):
playHihat()
e_hihat = 1
elif (cnts_hihat == 0):
e_hihat = 0
if (cnts_snare > 0) and (e_snare == 0):
playSnare()
e_snare = 1
elif (cnts_snare == 0):
e_snare = 0
# draw the drum regions
cv2.rectangle(clone, (k_left,k_top), (k_right,k_bottom), (0,255,0,0.5), 2)
cv2.rectangle(clone, (h_left,h_top), (h_right,h_bottom), (255,0,0,0.5), 2)
cv2.rectangle(clone, (s_left,s_top), (s_right,s_bottom), (0,0,255,0.5), 2)
# display the frame
cv2.namedWindow("video", cv2.WINDOW_AUTOSIZE)
cv2.imshow("video", clone)
# if user presses 'q', quit the program
if cv2.waitKey(1) & 0XFF == ord('q'):
break
# release the camera
cam.release()
# destroy all windows
cv2.destroyAllWindows()
|
from flask import Flask, request, jsonify, render_template
import pickle
app = Flask(__name__)
model = pickle.load(open('nlp.pkl', 'rb'))
@app.route('/')
def home():
return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
'''
For rendering results on HTML GUI
'''
from spacy import displacy
text_features = [x for x in request.form.values() ]
example = ''.join(text_features)
doc = model(example)
ENTITY = []
for ent in doc.ents:
ENTITY.append((ent.text, ent.label_))
# output = displacy.render(doc, style='ent')
return render_template('index.html', prediction_text = 'NER for the given text is {}'.format(ENTITY))
if __name__ == "__main__":
app.run(debug=True)
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/YACC/live-check-output-cleaned.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test that .output file is cleaned
"""
import TestSCons
_exe = TestSCons._exe
test = TestSCons.TestSCons()
yacc = test.where_is('yacc') or test.where_is('bison')
if not yacc:
test.skip_test('No yacc or bison found; skipping test.\n')
test.write('SConstruct', """
foo = Environment(YACCFLAGS='-v -d')
foo.CFile(source = 'foo.y')
""" % locals())
yacc = r"""
%%{
#include <stdio.h>
main()
{
yyparse();
}
yyerror(s)
char *s;
{
fprintf(stderr, "%%s\n", s);
return 0;
}
yylex()
{
int c;
c = fgetc(stdin);
return (c == EOF) ? 0 : c;
}
%%}
%%%%
input: letter newline { printf("%s\n"); };
letter: 'a' | 'b';
newline: '\n';
"""
test.write('foo.y', yacc % 'foo.y')
test.run('.', stderr = None)
test.up_to_date(arguments = 'foo.c')
test.must_exist(test.workpath('foo.output'))
test.must_exist(test.workpath('foo.c'))
test.run(arguments = '-c .')
test.must_not_exist(test.workpath('foo.output'))
test.must_not_exist(test.workpath('foo.c'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
# coding=utf-8
'''
Created on 2016-10-26
@author: Jennifer
Project: read data from a MySQL database and convert it to JSON format
'''
import json
import pymysql
#import pymysql
# baidulocalower(30.667580,104.072539)
# baidulocalhigh(30.678855,104.085511)
# gaodelocallower(30.6614534471,104.0660738426)
# gaodelocalhigh(30.6730225889,104.0789416320)
def TableToJson():
    try:
        # Steps 1-7: the standard flow for accessing a database via the Python DB API
        # 1. Create the MySQL connection object
        # The connection object supports cursor(), commit(), rollback() and close()
        # Test host: 114.55.104.77
        conn = pymysql.connect("localhost", "root", "bwc123", "bw", charset="utf8")
        print("connect success")
        # 2. Create the MySQL cursor object
        # The cursor object supports execute(sql), fetchone(), fetchmany(size), fetchall(), rowcount and close()
        cur = conn.cursor()
        # 3. Build the SQL statement
        # Baidu coordinates
        #sql="SELECT * FROM newpoi WHERE latBD>30.659192 and latBD<30.662237 and lngBD>104.067293 and lngBD<104.072216"
        # Gaode (Amap) coordinates
        # sql = "SELECT * FROM busstoppoi WHERE bdmaplat>'30.653016' AND bdmaplat<'30.667887' AND bdmaplng>'104.041627' AND bdmaplng<'104.067597'"
        sql = "SELECT * FROM unitpoi"
        # SQL Server variant
        # sql = "SELECT POL.POID,DI1.ItemValue,P.Name,BL.PickupLongitude,BL.PickupLatitude,T.TakeOffDate,T.TakeOffTime" \
        # "FROM dbo.PickupOrderList AS POLLEFT JOIN dbo.BookingList AS BL ON POL.BLID = BL.BLIDLEFT JOIN dbo.Trips AS T ON BL.TID = T.TID" \
        # "LEFT JOIN dbo.BW_DictItems AS DI1 ON DI1.ItemCode = T.TimeTable LEFT JOIN dbo.Passengers AS P ON P.PID = T.PID"
        # 4. Execute the SQL command
        # execute() handles SELECT queries as well as INSERT, DELETE and UPDATE (the latter three need commit() or rollback())
        cur.execute(sql)
        # 5. Fetch the data
        # fetchall() walks the result set buffered by execute() and returns every row.
        # The return type is a tuple of tuples, one inner tuple per row: ((row1_field1, row1_field2, ..., row1_fieldN), (row 2), ..., (row N))
        data = cur.fetchall()
        print("query succeeded")
        # 6. Close the cursor
        cur.close()
        # 7. Close the connection
        conn.close()
        jsonData = []
        # Loop over the tuple rows and convert them to a list of dicts, one dict per row:
        # [{'field1': value1, 'field2': value2, ..., 'fieldN': valueN}, {row 2}, ..., {row N}]
        # Gaode (Amap)
        tmpIdx = 1
for row in data:
result = {}
result['name'] = row[0]
result['lat'] = row[3]
result['lng'] = row[4]
result['idx'] = tmpIdx
jsonData.append(result)
            print("converting...")
            tmpIdx += 1
        # print('raw data as a list of dicts:', jsonData)
        # Baidu
        # for row in data:
        #     result = {}
        #     result['name'] = row[1]
        #     result['lat'] = row[3]
        #     result['lng'] = row[4]
        #     jsonData.append(result)
        #     print("converting...")
        # print('raw data as a list of dicts:', jsonData)
    except:
        print("MySQL connect fail...")
    else:
        # json.dumps converts the data to JSON; by default non-ASCII text is escaped as "\u5377\u76ae\u6298\u6263",
        # so pass ensure_ascii=False to keep the Chinese characters readable.
        # JSON is a language-independent text format supported in some form by most modern languages,
        # which makes it a convenient interchange format between them.
        # json.dumps() turns Python data into JSON (single quotes become double quotes); json.loads() does the reverse.
        jsondatar = json.dumps(jsonData, ensure_ascii=False, separators=(',', ':')) + '\n'
        # Return the JSON string as a single newline-terminated line
        return jsondatar
if __name__ == '__main__':
    # Call the conversion function
    jsonData = TableToJson()
    print("conversion finished, writing to file")
    # print('data converted to JSON:', jsonData)
    # Open the file for reading and writing ('w+'); prefix the path with r to avoid escape sequences
    f = open('unpoi.json', 'w+', encoding='utf-8')
    # Write the data
    f.write(jsonData)
    # Close the file
    f.close()
    print("write finished")
|
# fmt: off
"""
A version module managed by the invoke-release task
See also tasks.py
"""
from __future__ import unicode_literals
__version_info__ = (0, 2, 0)
__version__ = '-'.join(filter(None, ['.'.join(map(str, __version_info__[:3])), (__version_info__[3:] or [None])[0]]))
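# e.g. (0, 2, 0) renders as '0.2.0'; a fourth element such as (1, 2, 3, 'beta')
# would render as '1.2.3-beta'.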
# fmt: on
|
"""There are some discrepancies between the fna and faa files. What are they?"""
import numpy
from fasta import FASTA
names = ['2236446587', '2236446227', '2236446226', '2236446115', '2236446114', '2236445762', '2236446248', '2236446050', '2236446154', '2236446074', '2236445702', '2236446303', '2236446345', '2236446625', '2236446051', '2236446049', '2236446581', '2236445824', '2236446355', '2236446351', '2236446370', '2236446339', '2236446062', '2236445915', '2236445916', '2236445669', '2236446147', '2236446048', '2236446182']
path = "/proj/b2013274/mcl/2236347014.genes.fna"
fasta = FASTA(path)
print "all:", numpy.mean(fasta.lengths)
print "missing:", numpy.mean([len(fasta[name]) for name in names])
|
#!/usr/bin/env python
import sys
import time
import os.path
import datetime
import logging
from operator import attrgetter
from functools import partial
import click
from click_datetime import Datetime
from finam import (Exporter,
Timeframe,
Market,
FinamExportError,
FinamObjectNotFoundError)
from finam.utils import click_validate_enum
"""
Helper script to download a set of assets
"""
logger = logging.getLogger(__name__)
def _arg_split(ctx, param, value):
if value is None:
return value
try:
items = value.split(',')
except ValueError:
raise click.BadParameter('comma-separated {} is required, got {}'
.format(param, value))
return items
@click.command()
@click.option('--contracts',
help='Contracts to lookup',
required=False,
callback=_arg_split)
@click.option('--market',
help='Market to lookup',
callback=partial(click_validate_enum, Market),
required=False)
@click.option('--timeframe',
help='Timeframe to use (DAILY, HOURLY, MINUTES30 etc)',
default=Timeframe.DAILY.name,
callback=partial(click_validate_enum, Timeframe),
required=False)
@click.option('--destdir',
help='Destination directory name',
required=True,
type=click.Path(exists=True, file_okay=False, writable=True,
resolve_path=True))
@click.option('--skiperr',
              help='Continue if a download error occurs. True by default',
              required=False,
              default=True,
              type=bool)
@click.option('--lineterm',
help='Line terminator',
default='\r\n')
@click.option('--delay',
help='Seconds to sleep between requests',
type=click.IntRange(0, 600),
default=1)
@click.option('--startdate', help='Start date',
type=Datetime(format='%Y-%m-%d'),
default='2007-01-01',
required=False)
@click.option('--enddate', help='End date',
type=Datetime(format='%Y-%m-%d'),
default=datetime.date.today().strftime('%Y-%m-%d'),
required=False)
@click.option('--ext',
help='Resulting file extension',
default='csv')
def main(contracts, market, timeframe, destdir, lineterm,
delay, startdate, enddate, skiperr, ext):
exporter = Exporter()
if not any((contracts, market)):
raise click.BadParameter('Neither contracts nor market is specified')
market_filter = dict()
if market:
market_filter.update(market=Market[market])
if not contracts:
contracts = exporter.lookup(**market_filter)['code'].tolist()
for contract_code in contracts:
        logger.info('Handling {}'.format(contract_code))
try:
contracts = exporter.lookup(code=contract_code, **market_filter)
except FinamObjectNotFoundError:
logger.error('unknown contract "{}"'.format(contract_code))
sys.exit(1)
else:
contract = contracts.reset_index().iloc[0]
logger.info(u'Downloading contract {}'.format(contract))
try:
data = exporter.download(contract.id,
start_date=startdate,
end_date=enddate,
timeframe=Timeframe[timeframe],
market=Market(contract.market))
except FinamExportError as e:
if skiperr:
logger.error(repr(e))
continue
else:
raise
destpath = os.path.join(destdir, '{}-{}.{}'
.format(contract.code, timeframe, ext))
data.to_csv(destpath, index=False, line_terminator=lineterm)
if delay > 0:
logger.info('Sleeping for {} second(s)'.format(delay))
time.sleep(delay)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('image32.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image('B2', self.image_dir + 'red.png', {'x_offset': -100, 'y_offset': -100})
workbook.close()
self.assertExcelEqual()
|
from dataclasses import dataclass
from typing import Optional
@dataclass
class MotorState:
    position: Optional[float] = None
    velocity: Optional[float] = None
    torque: Optional[float] = None
    temperature: Optional[float] = None
    position_goal: Optional[float] = None
|
from django.conf import settings
from django.conf.urls import url
from django.urls import include, path
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from django.views.static import serve
from rest_framework_jwt.views import obtain_jwt_token
from rookie import views
urlpatterns = [
url(settings.ADMIN_URL, admin.site.urls),
url(r'^api-token-auth/', obtain_jwt_token),
url(r'^rest-auth/', include('rest_auth.urls')),
url(r'^rest-auth/registration/', include('rest_auth.registration.urls')),
url(r'^eclass/', include("rookie.eclass.urls", namespace="eclass")),
url(r'^users/', include("rookie.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
url(r'^download/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),
# Your stuff: custom urls includes go here
] + static(
settings.MEDIA_URL, document_root=settings.MEDIA_ROOT,
)
urlpatterns += [
url(r'^', views.ReactAppView.as_view()),
]
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
|
# 890. Find and Replace Pattern
class Solution:
def findAndReplacePattern2(self, words, pattern: str):
ans = []
for w in words:
d = {}
flag = True
if len(w) == len(pattern):
for i in range(len(w)):
if w[i] in d and d[w[i]] != pattern[i] \
or pattern[i] in d and d[pattern[i]] != w[i]:
flag = False
break
else:
d[w[i]] = pattern[i]
d[pattern[i]] = w[i]
if flag:
ans.append(w)
return ans
def findAndReplacePattern3(self, words, pattern: str):
def match(word):
dw, dp = {}, {}
for w, p in zip(word, pattern):
if w not in dw: dw[w] = p
if p not in dp: dp[p] = w
if (dw[w], dp[p]) != (p, w): return False
return True
return list(filter(match, words))
    def findAndReplacePattern(self, words, pattern: str):
        # A word matches iff its characters and the pattern's characters are in bijection.
        # d.setdefault() enforces a consistent word -> pattern mapping; the final length
        # check rejects mappings where two different word characters map to the same
        # pattern character (i.e. the mapping would not be injective).
        def match(word):
            d = {}
            for w, p in zip(word, pattern):
                if d.setdefault(w, p) != p:
                    return False
            return len(set(d.values())) == len(d.values())
        return list(filter(match, words))
print(Solution().findAndReplacePattern(["abc","deq","mee","aqq","dkd","ccc"], "abb"))
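# Alternative sketch (not one of the solutions above): compare canonical forms, where each
# character is replaced by the index of its first occurrence, so "mee" and "abb" both
# normalise to [0, 1, 1].
def find_and_replace_pattern_canonical(words, pattern):
    def canon(s):
        first = {}
        return [first.setdefault(c, len(first)) for c in s]
    target = canon(pattern)
    return [w for w in words if canon(w) == target]
print(find_and_replace_pattern_canonical(["abc", "deq", "mee", "aqq", "dkd", "ccc"], "abb"))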
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
from setuptools import setup, find_packages
if __name__ == '__main__':
print('Sopel does not correctly load modules installed with setup.py '
'directly. Please use "pip install .", or add {}/sopel_modules to '
'core.extra in your config.'.format(
os.path.dirname(os.path.abspath(__file__))),
file=sys.stderr)
with open('README.md') as readme_file:
readme = readme_file.read()
with open('NEWS') as history_file:
history = history_file.read()
with open('requirements.txt') as requirements_file:
    requirements = requirements_file.readlines()
with open('tests/requirements.txt') as dev_requirements_file:
    dev_requirements = dev_requirements_file.readlines()
setup(
name='sopel_modules.urban',
version='1.1.0',
description='Urban Dictionary module for Sopel',
long_description=readme + '\n\n' + history,
author='Rusty Bower',
author_email='rusty@rustybower.com',
url='http://github.com/rustybower/sopel-urban',
packages=find_packages('.'),
namespace_packages=['sopel_modules'],
include_package_data=True,
install_requires=requirements,
tests_require=dev_requirements,
test_suite='tests',
license='MIT License',
)
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar, Union
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
resource_group_name: str,
cloud_service_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=_url,
params=_query_parameters,
headers=_header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request_initial(
resource_group_name: str,
cloud_service_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=_url,
params=_query_parameters,
headers=_header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
resource_group_name: str,
cloud_service_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_get_request(
resource_group_name: str,
cloud_service_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_get_instance_view_request(
resource_group_name: str,
cloud_service_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/instanceView") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_list_all_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/cloudServices")
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_list_request(
resource_group_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_start_request_initial(
resource_group_name: str,
cloud_service_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/start") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_power_off_request_initial(
resource_group_name: str,
cloud_service_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/poweroff") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_restart_request_initial(
resource_group_name: str,
cloud_service_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/restart") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
headers=_header_parameters,
json=json,
content=content,
**kwargs
)
def build_reimage_request_initial(
resource_group_name: str,
cloud_service_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/reimage") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
headers=_header_parameters,
json=json,
content=content,
**kwargs
)
def build_rebuild_request_initial(
resource_group_name: str,
cloud_service_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/rebuild") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
headers=_header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_instances_request_initial(
resource_group_name: str,
cloud_service_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/delete") # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"cloudServiceName": _SERIALIZER.url("cloud_service_name", cloud_service_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
headers=_header_parameters,
json=json,
content=content,
**kwargs
)
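# Illustrative sketch (not part of the generated code): the request builders above are normally
# consumed indirectly through the operations class below, which is itself reached through a
# ComputeManagementClient. The credential/client wiring here reflects typical azure-mgmt usage
# and is an assumption, not something this module defines.
def _example_begin_create_or_update(resource_group_name, cloud_service_name, cloud_service):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.compute import ComputeManagementClient
    client = ComputeManagementClient(DefaultAzureCredential(), subscription_id="<subscription-id>")
    poller = client.cloud_services.begin_create_or_update(
        resource_group_name, cloud_service_name, cloud_service)
    return poller.result()  # blocks until the long-running PUT completes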
class CloudServicesOperations(object): # pylint: disable=too-many-public-methods
"""CloudServicesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_10_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name: str,
cloud_service_name: str,
parameters: Optional["_models.CloudService"] = None,
**kwargs: Any
) -> "_models.CloudService":
cls = kwargs.pop('cls', None) # type: ClsType["_models.CloudService"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'CloudService')
else:
_json = None
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('CloudService', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('CloudService', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}"} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
cloud_service_name: str,
parameters: Optional["_models.CloudService"] = None,
**kwargs: Any
) -> LROPoller["_models.CloudService"]:
"""Create or update a cloud service. Please note some properties can be set only during cloud
service creation.
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param cloud_service_name: Name of the cloud service.
:type cloud_service_name: str
:param parameters: The cloud service object. Default value is None.
:type parameters: ~azure.mgmt.compute.v2020_10_01_preview.models.CloudService
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either CloudService or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2020_10_01_preview.models.CloudService]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.CloudService"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('CloudService', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}"} # type: ignore
def _update_initial(
self,
resource_group_name: str,
cloud_service_name: str,
parameters: Optional["_models.CloudServiceUpdate"] = None,
**kwargs: Any
) -> "_models.CloudService":
cls = kwargs.pop('cls', None) # type: ClsType["_models.CloudService"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'CloudServiceUpdate')
else:
_json = None
request = build_update_request_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CloudService', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}"} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
cloud_service_name: str,
parameters: Optional["_models.CloudServiceUpdate"] = None,
**kwargs: Any
) -> LROPoller["_models.CloudService"]:
"""Update a cloud service.
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param cloud_service_name: Name of the cloud service.
:type cloud_service_name: str
:param parameters: The cloud service object. Default value is None.
:type parameters: ~azure.mgmt.compute.v2020_10_01_preview.models.CloudServiceUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either CloudService or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2020_10_01_preview.models.CloudService]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.CloudService"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('CloudService', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}"} # type: ignore
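    # Editorial note (comment-only): begin_create_or_update issues a PUT while begin_update
    # issues a PATCH (a partial update); compare build_create_or_update_request_initial
    # (method="PUT") and build_update_request_initial (method="PATCH") earlier in this module.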
def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cloud_service_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
request = build_delete_request_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}"} # type: ignore
@distributed_trace
def begin_delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cloud_service_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes a cloud service.
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param cloud_service_name: Name of the cloud service.
:type cloud_service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}"} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
cloud_service_name: str,
**kwargs: Any
) -> "_models.CloudService":
"""Display information about a cloud service.
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param cloud_service_name: Name of the cloud service.
:type cloud_service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CloudService, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_10_01_preview.models.CloudService
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CloudService"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
request = build_get_request(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CloudService', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}"} # type: ignore
@distributed_trace
def get_instance_view(
self,
resource_group_name: str,
cloud_service_name: str,
**kwargs: Any
) -> "_models.CloudServiceInstanceView":
"""Gets the status of a cloud service.
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param cloud_service_name: Name of the cloud service.
:type cloud_service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CloudServiceInstanceView, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_10_01_preview.models.CloudServiceInstanceView
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CloudServiceInstanceView"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
request = build_get_instance_view_request(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_instance_view.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CloudServiceInstanceView', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_instance_view.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/instanceView"} # type: ignore
@distributed_trace
def list_all(
self,
**kwargs: Any
) -> Iterable["_models.CloudServiceListResult"]:
"""Gets a list of all cloud services in the subscription, regardless of the associated resource
group. Use nextLink property in the response to get the next page of Cloud Services. Do this
till nextLink is null to fetch all the Cloud Services.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CloudServiceListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2020_10_01_preview.models.CloudServiceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.CloudServiceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_all_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_all.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_all_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("CloudServiceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/cloudServices"} # type: ignore
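    # Usage note (comment-only, illustrative): list_all() and list() return ItemPaged, so callers
    # simply iterate and the nextLink-based paging described in the docstring is handled for them,
    # e.g.  for service in client.cloud_services.list_all(): ...
    # where `client` is assumed to be a ComputeManagementClient as in the module-level sketch above.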
@distributed_trace
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.CloudServiceListResult"]:
"""Gets a list of all cloud services under a resource group. Use nextLink property in the response
to get the next page of Cloud Services. Do this till nextLink is null to fetch all the Cloud
Services.
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CloudServiceListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2020_10_01_preview.models.CloudServiceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.CloudServiceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("CloudServiceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices"} # type: ignore
def _start_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cloud_service_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
request = build_start_request_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._start_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/start"} # type: ignore
@distributed_trace
def begin_start( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cloud_service_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Starts the cloud service.
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param cloud_service_name: Name of the cloud service.
:type cloud_service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/start"} # type: ignore
def _power_off_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cloud_service_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
request = build_power_off_request_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._power_off_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_power_off_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/poweroff"} # type: ignore
@distributed_trace
def begin_power_off( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cloud_service_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Power off the cloud service. Note that resources are still attached and you are getting charged
for the resources.
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param cloud_service_name: Name of the cloud service.
:type cloud_service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._power_off_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_power_off.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/poweroff"} # type: ignore
def _restart_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cloud_service_name: str,
parameters: Optional["_models.RoleInstances"] = None,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'RoleInstances')
else:
_json = None
request = build_restart_request_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._restart_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_restart_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/restart"} # type: ignore
@distributed_trace
def begin_restart( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cloud_service_name: str,
parameters: Optional["_models.RoleInstances"] = None,
**kwargs: Any
) -> LROPoller[None]:
"""Restarts one or more role instances in a cloud service.
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param cloud_service_name: Name of the cloud service.
:type cloud_service_name: str
:param parameters: List of cloud service role instance names. Default value is None.
:type parameters: ~azure.mgmt.compute.v2020_10_01_preview.models.RoleInstances
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._restart_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_restart.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/restart"} # type: ignore
def _reimage_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cloud_service_name: str,
parameters: Optional["_models.RoleInstances"] = None,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'RoleInstances')
else:
_json = None
request = build_reimage_request_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._reimage_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reimage_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/reimage"} # type: ignore
@distributed_trace
def begin_reimage( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cloud_service_name: str,
parameters: Optional["_models.RoleInstances"] = None,
**kwargs: Any
) -> LROPoller[None]:
"""Reimage asynchronous operation reinstalls the operating system on instances of web roles or
worker roles.
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param cloud_service_name: Name of the cloud service.
:type cloud_service_name: str
:param parameters: List of cloud service role instance names. Default value is None.
:type parameters: ~azure.mgmt.compute.v2020_10_01_preview.models.RoleInstances
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._reimage_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reimage.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/reimage"} # type: ignore
def _rebuild_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cloud_service_name: str,
parameters: Optional["_models.RoleInstances"] = None,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'RoleInstances')
else:
_json = None
request = build_rebuild_request_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._rebuild_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_rebuild_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/rebuild"} # type: ignore
@distributed_trace
def begin_rebuild( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cloud_service_name: str,
parameters: Optional["_models.RoleInstances"] = None,
**kwargs: Any
) -> LROPoller[None]:
"""Rebuild Role Instances reinstalls the operating system on instances of web roles or worker
roles and initializes the storage resources that are used by them. If you do not want to
initialize storage resources, you can use Reimage Role Instances.
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param cloud_service_name: Name of the cloud service.
:type cloud_service_name: str
:param parameters: List of cloud service role instance names. Default value is None.
:type parameters: ~azure.mgmt.compute.v2020_10_01_preview.models.RoleInstances
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._rebuild_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_rebuild.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/rebuild"} # type: ignore
def _delete_instances_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cloud_service_name: str,
parameters: Optional["_models.RoleInstances"] = None,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'RoleInstances')
else:
_json = None
request = build_delete_instances_request_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._delete_instances_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_instances_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/delete"} # type: ignore
@distributed_trace
def begin_delete_instances( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
cloud_service_name: str,
parameters: Optional["_models.RoleInstances"] = None,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes role instances in a cloud service.
:param resource_group_name: Name of the resource group.
:type resource_group_name: str
:param cloud_service_name: Name of the cloud service.
:type cloud_service_name: str
:param parameters: List of cloud service role instance names. Default value is None.
:type parameters: ~azure.mgmt.compute.v2020_10_01_preview.models.RoleInstances
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_instances_initial(
resource_group_name=resource_group_name,
cloud_service_name=cloud_service_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_instances.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/delete"} # type: ignore
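# Illustrative usage sketch for the role-instance LRO operations above. This is not
# part of the generated client; the operations-group attribute (`cloud_services`) and
# the RoleInstances keyword are assumptions based on the method signatures here.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.compute import ComputeManagementClient
#     from azure.mgmt.compute.v2020_10_01_preview import models
#
#     client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     poller = client.cloud_services.begin_restart(
#         "my-resource-group",
#         "my-cloud-service",
#         parameters=models.RoleInstances(role_instances=["WebRole1_IN_0"]),
#     )
#     poller.result()  # blocks until the long-running restart completes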
|
# coding=utf-8
"""
Filename: good_solution
Author: xilepeng
Date: 2020/4/9
Description:
"""
from typing import List
class Solution:
@staticmethod
def removeDuplicates(nums: List[int]) -> int:
"""
        Same approach as mine, just written more concisely. Still worth learning from!
"""
if not nums:
return 0
i = 0
for j in range(1, len(nums)):
if nums[i] != nums[j]:
i += 1
nums[i] = nums[j]
return i + 1
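# Minimal usage sketch for the in-place two-pointer dedup above (the list must be sorted):
if __name__ == "__main__":
    sample = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]
    length = Solution.removeDuplicates(sample)
    print(length, sample[:length])  # expected: 5 [0, 1, 2, 3, 4]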
|
TRANSFER_CREATED_TEST_DATA = {
"created": 1348360173,
"data": {
"object": {
"amount": 455,
"currency": "usd",
"date": 1348876800,
"description": None,
"id": "tr_XXXXXXXXXXXX",
"object": "transfer",
"other_transfers": [],
"status": "paid",
"summary": {
"adjustment_count": 0,
"adjustment_fee_details": [],
"adjustment_fees": 0,
"adjustment_gross": 0,
"charge_count": 1,
"charge_fee_details": [
{
"amount": 45,
"application": None,
"currency": "usd",
"description": None,
"type": "stripe_fee",
}
],
"charge_fees": 45,
"charge_gross": 500,
"collected_fee_count": 0,
"collected_fee_gross": 0,
"currency": "usd",
"net": 455,
"refund_count": 0,
"refund_fees": 0,
"refund_gross": 0,
"validation_count": 0,
"validation_fees": 0,
},
}
},
"id": "evt_XXXXXXXXXXXX",
"livemode": True,
"object": "event",
"pending_webhooks": 1,
"type": "transfer.created",
}
TRANSFER_CREATED_TEST_DATA2 = {
"created": 1348360173,
"data": {
"object": {
"amount": 1455,
"currency": "usd",
"date": 1348876800,
"description": None,
"id": "tr_XXXXXXXXXXX2",
"object": "transfer",
"other_transfers": [],
"status": "paid",
"summary": {
"adjustment_count": 0,
"adjustment_fee_details": [],
"adjustment_fees": 0,
"adjustment_gross": 0,
"charge_count": 1,
"charge_fee_details": [
{
"amount": 45,
"application": None,
"currency": "usd",
"description": None,
"type": "stripe_fee",
}
],
"charge_fees": 45,
"charge_gross": 1500,
"collected_fee_count": 0,
"collected_fee_gross": 0,
"currency": "usd",
"net": 1455,
"refund_count": 0,
"refund_fees": 0,
"refund_gross": 0,
"validation_count": 0,
"validation_fees": 0,
},
}
},
"id": "evt_XXXXXXXXXXXY",
"livemode": True,
"object": "event",
"pending_webhooks": 1,
"type": "transfer.created",
}
TRANSFER_PENDING_TEST_DATA = {
"created": 1375603198,
"data": {
"object": {
"account": {
"bank_name": "BANK OF AMERICA, N.A.",
"country": "US",
"fingerprint": "xxxxxxxxxx",
"last4": "4444",
"object": "bank_account",
"validated": False,
},
"amount": 941,
"currency": "usd",
"date": 1375747200,
"description": "STRIPE TRANSFER",
"fee": 0,
"fee_details": [],
"id": "tr_adlkj2l3kj23",
"livemode": True,
"object": "transfer",
"recipient": None,
"statement_descriptor": None,
"status": "pending",
}
},
"id": "evt_2l3kj232k223",
"livemode": True,
"object": "event",
"pending_webhooks": 1,
"request": None,
"type": "transfer.created",
}
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-28 07:20
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('images', '0004_auto_20180127_2135'),
]
operations = [
migrations.AlterField(
model_name='image',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='images', to=settings.AUTH_USER_MODEL),
),
]
|
from urllib.parse import urlparse, parse_qs
import requests
import datetime
import scrapy
class PeraturanSpider(scrapy.Spider):
name = 'peraturan_wide'
allowed_domains = ['peraturan.go.id']
custom_settings = {
'FEED_URI': f"export/peraturan_wide_{datetime.date.today()}",
'FEED_FORMAT': 'json',
'FEED_EXPORTERS': {
'json': 'scrapy.exporters.JsonItemExporter',
},
'FEED_EXPORT_ENCODING': 'utf-8',
}
start_urls = [
'https://peraturan.go.id/peraturan/kategori.html?id=7',
'https://peraturan.go.id/peraturan/kategori.html?id=14',
'https://peraturan.go.id/peraturan/kategori.html?id=18',
'https://peraturan.go.id/peraturan/kategori.html?id=19',
'https://peraturan.go.id/peraturan/kategori.html?id=38',
'https://peraturan.go.id/peraturan/kategori.html?id=75',
'https://peraturan.go.id/peraturan/kategori.html?id=34',
'https://peraturan.go.id/peraturan/kategori.html?id=29',
'https://peraturan.go.id/peraturan/kategori.html?id=11',
'https://peraturan.go.id/peraturan/kategori.html?id=27',
'https://peraturan.go.id/peraturan/kategori.html?id=84',
'https://peraturan.go.id/peraturan/kategori.html?id=25',
'https://peraturan.go.id/peraturan/kategori.html?id=61',
'https://peraturan.go.id/peraturan/kategori.html?id=73'
]
@staticmethod
def snakeify(text):
return "_".join(text.lower().strip().split())
@staticmethod
def parse_id_from_url(url):
return parse_qs(urlparse(url).query)['id'][0]
def parse(self, response, **kwargs):
anchors = response.xpath("//tbody[1]/tr/td[5]/a")
for anchor in anchors:
url = 'https://peraturan.go.id' + anchor.xpath('@href').get()
yield scrapy.Request(url=url, callback=self.parse_peraturan)
def parse_peraturan(self, response, **kwargs):
url = response.url
item = {
'id': self.parse_id_from_url(url),
'source': url,
'document': [],
'document_status': []
}
# Parse metadata
rows = response.xpath('//tr')
for row in rows:
key = self.snakeify(row.xpath('th/text()').get())
item[key] = row.xpath('td/div[@class="kv-attribute"]/text()').get()
# Parse documents download
documents = response.xpath("//a[@title='Download']/@href").getall()
item['document'] = documents
# Check document head status
for doc in documents:
r = requests.head(doc)
item['document_status'].append(r.status_code)
# Parse categories
categories = response.xpath("//span[@class='badge badge-warning']/text()").getall()
item['category'] = categories
# Parse data section
headings = response.css('.panel-heading').xpath('text()').getall()
headings = [self.snakeify(heading) for heading in headings]
# Parse relation
url_list = []
if "relasi" in headings:
rows = response.css(".col-md-3 > div.panel:last-child > .panel-body > *:not(br)")
key = ""
for row in rows:
if len(row.attrib) == 0:
key = row.xpath('text()').get().replace(':', '')
key = self.snakeify(key)
if not item.get(key, False):
item[key] = []
else:
url = 'https://peraturan.go.id' + row.attrib['href']
val = self.parse_id_from_url(url)
item[key].append(val)
# Check if allowed to be crawled
if (key in [
"diubah_oleh",
"dicabut_oleh",
"mengubah",
"mencabut",
"memiliki_dasar_hukum",
"dasar_hukum_dari",
"dilaksanakan_oleh_peraturan_pelaksana",
"ditafsirkan",
"peraturan_pelaksana_dari",
"menafsirkan"
]):
yield scrapy.Request(url=url, callback=self.parse_peraturan)
if len(documents):
yield item
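# To run this spider from within a Scrapy project (project layout assumed):
#     scrapy crawl peraturan_wide
# Items are exported to export/peraturan_wide_<date> as UTF-8 JSON, per custom_settings
# above. Note that requests.head() is called synchronously for each document link, which
# blocks the Twisted reactor; acceptable for small crawls, but a bottleneck at scale.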
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Video models."""
import torch
import torch.nn as nn
import utils.weight_init_helper as init_helper
from models.batchnorm_helper import get_norm
from . import head_helper, resnet_helper, stem_helper
from .build import MODEL_REGISTRY
from bmn_models import BMN
from utils.BMN_utils import ioa_with_anchors, iou_with_anchors
from loss_function import bmn_loss_func, get_mask
# Number of blocks for different stages given the model depth.
_MODEL_STAGE_DEPTH = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
# Basis of temporal kernel sizes for each of the stage.
_TEMPORAL_KERNEL_BASIS = {
"c2d": [
[[1]], # conv1 temporal kernel.
[[1]], # res2 temporal kernel.
[[1]], # res3 temporal kernel.
[[1]], # res4 temporal kernel.
[[1]], # res5 temporal kernel.
],
"c2d_nopool": [
[[1]], # conv1 temporal kernel.
[[1]], # res2 temporal kernel.
[[1]], # res3 temporal kernel.
[[1]], # res4 temporal kernel.
[[1]], # res5 temporal kernel.
],
"i3d": [
[[5]], # conv1 temporal kernel.
[[3]], # res2 temporal kernel.
[[3, 1]], # res3 temporal kernel.
[[3, 1]], # res4 temporal kernel.
[[1, 3]], # res5 temporal kernel.
],
"i3d_nopool": [
[[5]], # conv1 temporal kernel.
[[3]], # res2 temporal kernel.
[[3, 1]], # res3 temporal kernel.
[[3, 1]], # res4 temporal kernel.
[[1, 3]], # res5 temporal kernel.
],
"slow": [
[[1]], # conv1 temporal kernel.
[[1]], # res2 temporal kernel.
[[1]], # res3 temporal kernel.
[[3]], # res4 temporal kernel.
[[3]], # res5 temporal kernel.
],
"slowfast": [
[[1], [5]], # conv1 temporal kernel for slow and fast pathway.
[[1], [3]], # res2 temporal kernel for slow and fast pathway.
[[1], [3]], # res3 temporal kernel for slow and fast pathway.
[[3], [3]], # res4 temporal kernel for slow and fast pathway.
[[3], [3]], # res5 temporal kernel for slow and fast pathway.
],
}
_POOL1 = {
"c2d": [[2, 1, 1]],
"c2d_nopool": [[1, 1, 1]],
"i3d": [[2, 1, 1]],
"i3d_nopool": [[1, 1, 1]],
"slow": [[1, 1, 1]],
"slowfast": [[1, 1, 1], [1, 1, 1]],
}
class FuseFastToSlow(nn.Module):
"""
Fuses the information from the Fast pathway to the Slow pathway. Given the
tensors from Slow pathway and Fast pathway, fuse information from Fast to
Slow, then return the fused tensors from Slow and Fast pathway in order.
"""
def __init__(
self,
dim_in,
fusion_conv_channel_ratio,
fusion_kernel,
alpha,
eps=1e-5,
bn_mmt=0.1,
inplace_relu=True,
norm_module=nn.BatchNorm3d,
):
"""
Args:
dim_in (int): the channel dimension of the input.
fusion_conv_channel_ratio (int): channel ratio for the convolution
used to fuse from Fast pathway to Slow pathway.
fusion_kernel (int): kernel size of the convolution used to fuse
from Fast pathway to Slow pathway.
alpha (int): the frame rate ratio between the Fast and Slow pathway.
eps (float): epsilon for batch norm.
bn_mmt (float): momentum for batch norm. Noted that BN momentum in
PyTorch = 1 - BN momentum in Caffe2.
inplace_relu (bool): if True, calculate the relu on the original
input without allocating new memory.
norm_module (nn.Module): nn.Module for the normalization layer. The
default is nn.BatchNorm3d.
"""
super(FuseFastToSlow, self).__init__()
self.conv_f2s = nn.Conv3d(
dim_in,
dim_in * fusion_conv_channel_ratio,
kernel_size=[fusion_kernel, 1, 1],
stride=[alpha, 1, 1],
padding=[fusion_kernel // 2, 0, 0],
bias=False,
)
self.bn = norm_module(
num_features=dim_in * fusion_conv_channel_ratio, eps=eps, momentum=bn_mmt
)
self.relu = nn.ReLU(inplace_relu)
def forward(self, x):
x_s = x[0]
x_f = x[1]
fuse = self.conv_f2s(x_f)
fuse = self.bn(fuse)
fuse = self.relu(fuse)
x_s_fuse = torch.cat([x_s, fuse], 1)
return [x_s_fuse, x_f]
@MODEL_REGISTRY.register()
class SlowFast(nn.Module):
"""
SlowFast model builder for SlowFast network.
Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
"SlowFast networks for video recognition."
https://arxiv.org/pdf/1812.03982.pdf
"""
def __init__(self, cfg):
"""
The `__init__` method of any subclass should also contain these
arguments.
Args:
cfg (CfgNode): model building configs, details are in the
comments of the config file.
"""
super(SlowFast, self).__init__()
self.norm_module = get_norm(cfg)
self.enable_detection = is_detection_enabled(cfg)
self.num_pathways = 2
self._construct_network(cfg)
init_helper.init_weights(
self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN
)
def _construct_network(self, cfg, with_head=True):
"""
Builds a SlowFast model. The first pathway is the Slow pathway and the
second pathway is the Fast pathway.
Args:
cfg (CfgNode): model building configs, details are in the
comments of the config file.
"""
assert cfg.MODEL.ARCH in _POOL1.keys()
pool_size = _POOL1[cfg.MODEL.ARCH]
assert len({len(pool_size), self.num_pathways}) == 1
assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()
(d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]
num_groups = cfg.RESNET.NUM_GROUPS
width_per_group = cfg.RESNET.WIDTH_PER_GROUP
dim_inner = num_groups * width_per_group
out_dim_ratio = cfg.SLOWFAST.BETA_INV // cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO
temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
self.s1 = stem_helper.VideoModelStem(
dim_in=cfg.DATA.INPUT_CHANNEL_NUM,
dim_out=[width_per_group, width_per_group // cfg.SLOWFAST.BETA_INV],
kernel=[temp_kernel[0][0] + [7, 7], temp_kernel[0][1] + [7, 7]],
stride=[[1, 2, 2]] * 2,
padding=[
[temp_kernel[0][0][0] // 2, 3, 3],
[temp_kernel[0][1][0] // 2, 3, 3],
],
norm_module=self.norm_module,
)
self.s1_fuse = FuseFastToSlow(
width_per_group // cfg.SLOWFAST.BETA_INV,
cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
cfg.SLOWFAST.FUSION_KERNEL_SZ,
cfg.SLOWFAST.ALPHA,
norm_module=self.norm_module,
)
self.s2 = resnet_helper.ResStage(
dim_in=[
width_per_group + width_per_group // out_dim_ratio,
width_per_group // cfg.SLOWFAST.BETA_INV,
],
dim_out=[width_per_group * 4, width_per_group * 4 // cfg.SLOWFAST.BETA_INV],
dim_inner=[dim_inner, dim_inner // cfg.SLOWFAST.BETA_INV],
temp_kernel_sizes=temp_kernel[1],
stride=cfg.RESNET.SPATIAL_STRIDES[0],
num_blocks=[d2] * 2,
num_groups=[num_groups] * 2,
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0],
nonlocal_inds=cfg.NONLOCAL.LOCATION[0],
nonlocal_group=cfg.NONLOCAL.GROUP[0],
nonlocal_pool=cfg.NONLOCAL.POOL[0],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
dilation=cfg.RESNET.SPATIAL_DILATIONS[0],
norm_module=self.norm_module,
)
self.s2_fuse = FuseFastToSlow(
width_per_group * 4 // cfg.SLOWFAST.BETA_INV,
cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
cfg.SLOWFAST.FUSION_KERNEL_SZ,
cfg.SLOWFAST.ALPHA,
norm_module=self.norm_module,
)
for pathway in range(self.num_pathways):
pool = nn.MaxPool3d(
kernel_size=pool_size[pathway],
stride=pool_size[pathway],
padding=[0, 0, 0],
)
self.add_module("pathway{}_pool".format(pathway), pool)
self.s3 = resnet_helper.ResStage(
dim_in=[
width_per_group * 4 + width_per_group * 4 // out_dim_ratio,
width_per_group * 4 // cfg.SLOWFAST.BETA_INV,
],
dim_out=[width_per_group * 8, width_per_group * 8 // cfg.SLOWFAST.BETA_INV],
dim_inner=[dim_inner * 2, dim_inner * 2 // cfg.SLOWFAST.BETA_INV],
temp_kernel_sizes=temp_kernel[2],
stride=cfg.RESNET.SPATIAL_STRIDES[1],
num_blocks=[d3] * 2,
num_groups=[num_groups] * 2,
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1],
nonlocal_inds=cfg.NONLOCAL.LOCATION[1],
nonlocal_group=cfg.NONLOCAL.GROUP[1],
nonlocal_pool=cfg.NONLOCAL.POOL[1],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
dilation=cfg.RESNET.SPATIAL_DILATIONS[1],
norm_module=self.norm_module,
)
self.s3_fuse = FuseFastToSlow(
width_per_group * 8 // cfg.SLOWFAST.BETA_INV,
cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
cfg.SLOWFAST.FUSION_KERNEL_SZ,
cfg.SLOWFAST.ALPHA,
norm_module=self.norm_module,
)
self.s4 = resnet_helper.ResStage(
dim_in=[
width_per_group * 8 + width_per_group * 8 // out_dim_ratio,
width_per_group * 8 // cfg.SLOWFAST.BETA_INV,
],
dim_out=[
width_per_group * 16,
width_per_group * 16 // cfg.SLOWFAST.BETA_INV,
],
dim_inner=[dim_inner * 4, dim_inner * 4 // cfg.SLOWFAST.BETA_INV],
temp_kernel_sizes=temp_kernel[3],
stride=cfg.RESNET.SPATIAL_STRIDES[2],
num_blocks=[d4] * 2,
num_groups=[num_groups] * 2,
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2],
nonlocal_inds=cfg.NONLOCAL.LOCATION[2],
nonlocal_group=cfg.NONLOCAL.GROUP[2],
nonlocal_pool=cfg.NONLOCAL.POOL[2],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
dilation=cfg.RESNET.SPATIAL_DILATIONS[2],
norm_module=self.norm_module,
)
self.s4_fuse = FuseFastToSlow(
width_per_group * 16 // cfg.SLOWFAST.BETA_INV,
cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,
cfg.SLOWFAST.FUSION_KERNEL_SZ,
cfg.SLOWFAST.ALPHA,
norm_module=self.norm_module,
)
self.s5 = resnet_helper.ResStage(
dim_in=[
width_per_group * 16 + width_per_group * 16 // out_dim_ratio,
width_per_group * 16 // cfg.SLOWFAST.BETA_INV,
],
dim_out=[
width_per_group * 32,
width_per_group * 32 // cfg.SLOWFAST.BETA_INV,
],
dim_inner=[dim_inner * 8, dim_inner * 8 // cfg.SLOWFAST.BETA_INV],
temp_kernel_sizes=temp_kernel[4],
stride=cfg.RESNET.SPATIAL_STRIDES[3],
num_blocks=[d5] * 2,
num_groups=[num_groups] * 2,
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3],
nonlocal_inds=cfg.NONLOCAL.LOCATION[3],
nonlocal_group=cfg.NONLOCAL.GROUP[3],
nonlocal_pool=cfg.NONLOCAL.POOL[3],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
dilation=cfg.RESNET.SPATIAL_DILATIONS[3],
norm_module=self.norm_module,
)
if not with_head:
return
if self.enable_detection:
head = head_helper.ResNetRoIHead(
dim_in=[
width_per_group * 32,
width_per_group * 32 // cfg.SLOWFAST.BETA_INV,
],
num_classes=cfg.MODEL.NUM_CLASSES[0],
pool_size=[
[
cfg.DATA.NUM_FRAMES // cfg.SLOWFAST.ALPHA // pool_size[0][0],
1,
1,
],
[cfg.DATA.NUM_FRAMES // pool_size[1][0], 1, 1],
],
resolution=[[cfg.DETECTION.ROI_XFORM_RESOLUTION] * 2] * 2,
scale_factor=[cfg.DETECTION.SPATIAL_SCALE_FACTOR] * 2,
dropout_rate=cfg.MODEL.DROPOUT_RATE,
act_func=cfg.MODEL.HEAD_ACT,
aligned=cfg.DETECTION.ALIGNED,
)
else:
head = head_helper.ResNetBasicHead(
dim_in=[
width_per_group * 32,
width_per_group * 32 // cfg.SLOWFAST.BETA_INV,
],
num_classes=cfg.MODEL.NUM_CLASSES[0],
pool_size=[
[
cfg.DATA.NUM_FRAMES // cfg.SLOWFAST.ALPHA // pool_size[0][0],
cfg.DATA.CROP_SIZE // 32 // pool_size[0][1],
cfg.DATA.CROP_SIZE // 32 // pool_size[0][2],
],
[
cfg.DATA.NUM_FRAMES // pool_size[1][0],
cfg.DATA.CROP_SIZE // 32 // pool_size[1][1],
cfg.DATA.CROP_SIZE // 32 // pool_size[1][2],
],
],
dropout_rate=cfg.MODEL.DROPOUT_RATE,
act_func=cfg.MODEL.HEAD_ACT,
)
self.head_name = "head"
self.add_module(self.head_name, head)
def forward(self, x, bboxes=None):
x = self.s1(x)
x = self.s1_fuse(x)
x = self.s2(x)
x = self.s2_fuse(x)
for pathway in range(self.num_pathways):
pool = getattr(self, "pathway{}_pool".format(pathway))
x[pathway] = pool(x[pathway])
x = self.s3(x)
x = self.s3_fuse(x)
x = self.s4(x)
x = self.s4_fuse(x)
x = self.s5(x)
head = getattr(self, self.head_name)
if self.enable_detection:
x = head(x, bboxes)
else:
x = head(x)
return x
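# Input convention sketch (inferred from the two-pathway design above, not stated in the
# config files): `x` is a list of two tensors [slow_frames, fast_frames], each of shape
# (batch, channels, time, height, width), where the fast pathway carries
# cfg.SLOWFAST.ALPHA times more frames than the slow pathway.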
@MODEL_REGISTRY.register()
class ResNet(nn.Module):
"""
ResNet model builder. It builds a ResNet like network backbone without
lateral connection (C2D, I3D, Slow).
Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He.
"SlowFast networks for video recognition."
https://arxiv.org/pdf/1812.03982.pdf
Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He.
"Non-local neural networks."
https://arxiv.org/pdf/1711.07971.pdf
"""
def __init__(self, cfg):
"""
The `__init__` method of any subclass should also contain these
arguments.
Args:
cfg (CfgNode): model building configs, details are in the
comments of the config file.
"""
super(ResNet, self).__init__()
self.norm_module = get_norm(cfg)
self.enable_detection = is_detection_enabled(cfg)
self.num_pathways = 1
self._construct_network(cfg)
init_helper.init_weights(
self, cfg.MODEL.FC_INIT_STD, cfg.RESNET.ZERO_INIT_FINAL_BN
)
def _construct_network(self, cfg, with_head=True):
"""
Builds a single pathway ResNet model.
Args:
cfg (CfgNode): model building configs, details are in the
comments of the config file.
"""
assert cfg.MODEL.ARCH in _POOL1.keys()
pool_size = _POOL1[cfg.MODEL.ARCH]
assert len({len(pool_size), self.num_pathways}) == 1
assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()
(d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]
num_groups = cfg.RESNET.NUM_GROUPS
width_per_group = cfg.RESNET.WIDTH_PER_GROUP
dim_inner = num_groups * width_per_group
temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]
self.s1 = stem_helper.VideoModelStem(
dim_in=cfg.DATA.INPUT_CHANNEL_NUM,
dim_out=[width_per_group],
kernel=[temp_kernel[0][0] + [7, 7]],
stride=[[1, 2, 2]],
padding=[[temp_kernel[0][0][0] // 2, 3, 3]],
norm_module=self.norm_module,
)
self.s2 = resnet_helper.ResStage(
dim_in=[width_per_group],
dim_out=[width_per_group * 4],
dim_inner=[dim_inner],
temp_kernel_sizes=temp_kernel[1],
stride=cfg.RESNET.SPATIAL_STRIDES[0],
num_blocks=[d2],
num_groups=[num_groups],
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0],
nonlocal_inds=cfg.NONLOCAL.LOCATION[0],
nonlocal_group=cfg.NONLOCAL.GROUP[0],
nonlocal_pool=cfg.NONLOCAL.POOL[0],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
stride_1x1=cfg.RESNET.STRIDE_1X1,
inplace_relu=cfg.RESNET.INPLACE_RELU,
dilation=cfg.RESNET.SPATIAL_DILATIONS[0],
norm_module=self.norm_module,
)
for pathway in range(self.num_pathways):
pool = nn.MaxPool3d(
kernel_size=pool_size[pathway],
stride=pool_size[pathway],
padding=[0, 0, 0],
)
self.add_module("pathway{}_pool".format(pathway), pool)
self.s3 = resnet_helper.ResStage(
dim_in=[width_per_group * 4],
dim_out=[width_per_group * 8],
dim_inner=[dim_inner * 2],
temp_kernel_sizes=temp_kernel[2],
stride=cfg.RESNET.SPATIAL_STRIDES[1],
num_blocks=[d3],
num_groups=[num_groups],
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1],
nonlocal_inds=cfg.NONLOCAL.LOCATION[1],
nonlocal_group=cfg.NONLOCAL.GROUP[1],
nonlocal_pool=cfg.NONLOCAL.POOL[1],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
stride_1x1=cfg.RESNET.STRIDE_1X1,
inplace_relu=cfg.RESNET.INPLACE_RELU,
dilation=cfg.RESNET.SPATIAL_DILATIONS[1],
norm_module=self.norm_module,
)
self.s4 = resnet_helper.ResStage(
dim_in=[width_per_group * 8],
dim_out=[width_per_group * 16],
dim_inner=[dim_inner * 4],
temp_kernel_sizes=temp_kernel[3],
stride=cfg.RESNET.SPATIAL_STRIDES[2],
num_blocks=[d4],
num_groups=[num_groups],
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2],
nonlocal_inds=cfg.NONLOCAL.LOCATION[2],
nonlocal_group=cfg.NONLOCAL.GROUP[2],
nonlocal_pool=cfg.NONLOCAL.POOL[2],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
stride_1x1=cfg.RESNET.STRIDE_1X1,
inplace_relu=cfg.RESNET.INPLACE_RELU,
dilation=cfg.RESNET.SPATIAL_DILATIONS[2],
norm_module=self.norm_module,
)
self.s5 = resnet_helper.ResStage(
dim_in=[width_per_group * 16],
dim_out=[width_per_group * 32],
dim_inner=[dim_inner * 8],
temp_kernel_sizes=temp_kernel[4],
stride=cfg.RESNET.SPATIAL_STRIDES[3],
num_blocks=[d5],
num_groups=[num_groups],
num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3],
nonlocal_inds=cfg.NONLOCAL.LOCATION[3],
nonlocal_group=cfg.NONLOCAL.GROUP[3],
nonlocal_pool=cfg.NONLOCAL.POOL[3],
instantiation=cfg.NONLOCAL.INSTANTIATION,
trans_func_name=cfg.RESNET.TRANS_FUNC,
stride_1x1=cfg.RESNET.STRIDE_1X1,
inplace_relu=cfg.RESNET.INPLACE_RELU,
dilation=cfg.RESNET.SPATIAL_DILATIONS[3],
norm_module=self.norm_module,
)
if not with_head:
return
if self.enable_detection:
head = head_helper.ResNetRoIHead(
dim_in=[width_per_group * 32],
num_classes=cfg.MODEL.NUM_CLASSES[0],
pool_size=[[cfg.DATA.NUM_FRAMES // pool_size[0][0], 1, 1]],
resolution=[[cfg.DETECTION.ROI_XFORM_RESOLUTION] * 2],
scale_factor=[cfg.DETECTION.SPATIAL_SCALE_FACTOR],
dropout_rate=cfg.MODEL.DROPOUT_RATE,
act_func=cfg.MODEL.HEAD_ACT,
aligned=cfg.DETECTION.ALIGNED,
)
else:
head = head_helper.ResNetBasicHead(
dim_in=[width_per_group * 32],
num_classes=cfg.MODEL.NUM_CLASSES[0],
pool_size=[
[
cfg.DATA.NUM_FRAMES // pool_size[0][0],
cfg.DATA.CROP_SIZE // 32 // pool_size[0][1],
cfg.DATA.CROP_SIZE // 32 // pool_size[0][2],
]
],
dropout_rate=cfg.MODEL.DROPOUT_RATE,
act_func=cfg.MODEL.HEAD_ACT,
)
self.head_name = "head"
self.add_module(self.head_name, head)
def forward(self, x, bboxes=None):
x = self.s1(x)
x = self.s2(x)
for pathway in range(self.num_pathways):
pool = getattr(self, "pathway{}_pool".format(pathway))
x[pathway] = pool(x[pathway])
x = self.s3(x)
x = self.s4(x)
x = self.s5(x)
head = getattr(self, self.head_name)
if self.enable_detection:
x = head(x, bboxes)
else:
x = head(x)
return x
@MODEL_REGISTRY.register()
class DualHeadResNet(ResNet):
def _construct_network(self, cfg):
super()._construct_network(cfg, with_head=False)
width_per_group = cfg.RESNET.WIDTH_PER_GROUP
pool_size = _POOL1[cfg.MODEL.ARCH]
head_1 = head_helper.ResNetKeyframeLocalizationHead(
dim_in=[width_per_group * 32],
num_classes=cfg.MODEL.NUM_CLASSES[0],
pool_size=[
[
1,
cfg.DATA.CROP_SIZE // 32 // pool_size[0][1],
cfg.DATA.CROP_SIZE // 32 // pool_size[0][2],
]
],
dropout_rate=cfg.MODEL.DROPOUT_RATE,
act_func=cfg.MODEL.KEYFRAME_DETECTION_ACT,
)
self.head_1_name = 'Keyframe_localisation_head'
self.add_module(self.head_1_name, head_1)
num_frames = cfg.DATA.CLIP_LEN_SEC * cfg.DATA.SAMPLING_FPS
head_2 = head_helper.ResNetKeyframeLocalizationHead(
dim_in=[width_per_group * 32],
num_classes=cfg.MODEL.NUM_STATE_CLASSES[0],
pool_size=[
[
num_frames // pool_size[0][0],
cfg.DATA.CROP_SIZE // 32 // pool_size[0][1],
cfg.DATA.CROP_SIZE // 32 // pool_size[0][2],
]
],
dropout_rate=cfg.MODEL.DROPOUT_RATE,
act_func=cfg.MODEL.STATE_CHANGE_ACT,
)
self.head_2_name = 'State_detection_head'
self.add_module(self.head_2_name, head_2)
def forward(self, x):
x = self.s1(x)
x = self.s2(x)
for pathway in range(self.num_pathways):
pool = getattr(self, "pathway{}_pool".format(pathway))
x[pathway] = pool(x[pathway])
x = self.s3(x)
x = self.s4(x)
x = self.s5(x)
#head_1 = getattr(self, self.head_1_name)
#head_2 = getattr(self, self.head_2_name)
#keyframe_output = head_1(x.copy())
#state_change_output = head_2(x.copy())
return x #keyframe_output, state_change_output
@MODEL_REGISTRY.register()
class ActionProposal(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.feature_extract = DualHeadResNet(cfg)
self.model = BMN()
def forward(self, x):
f = self.feature_extract(x)
confidence_map, start, end = self.model(f[0].permute(0, 1, 3, 4, 2).reshape(-1, self.cfg.BMN.FEATURE_SIZE, self.cfg.BMN.TEMPORAL_SCALE))
return confidence_map, start, end
def is_detection_enabled(cfg):
try:
        detection_enabled = cfg.DATA.TASK == "detection" or cfg.DATA.TASK == "short_term_anticipation"
except Exception:
# Temporary default config while still using old models without this config option
detection_enabled = False
return detection_enabled
|
import uuid
from fastapi import status
#
# INVALID TESTS
#
def test_get_invalid_uuid(client_valid_access_token):
get = client_valid_access_token.get("/api/event/source/1")
assert get.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
def test_get_nonexistent_uuid(client_valid_access_token):
get = client_valid_access_token.get(f"/api/event/source/{uuid.uuid4()}")
assert get.status_code == status.HTTP_404_NOT_FOUND
#
# VALID TESTS
#
def test_get_all(client_valid_access_token):
# Create some objects
client_valid_access_token.post("/api/event/source/", json={"value": "test"})
client_valid_access_token.post("/api/event/source/", json={"value": "test2"})
# Read them back
get = client_valid_access_token.get("/api/event/source/")
assert get.status_code == status.HTTP_200_OK
assert len(get.json()) == 2
def test_get_all_empty(client_valid_access_token):
get = client_valid_access_token.get("/api/event/source/")
assert get.status_code == status.HTTP_200_OK
assert get.json() == []
|
size = int(input())
array = [int(i) for i in input().split()]
for i in range(1, size):
temp = array[i]
index = i - 1
    while index >= 0 and temp < array[index]:
array[index + 1] = array[index]
index -= 1
array[index + 1] = temp
print(' '.join([str(i) for i in array]))
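# Example session for the insertion sort above (input format assumed: first line is the
# element count, second line the space-separated values):
#     input:  5
#             5 2 4 6 1
#     output: 1 2 4 5 6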
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for google3.image.understanding.detection_tools.utils.json_utils."""
import os
import tensorflow.compat.v1 as tf
from detection_tools.utils import json_utils
class JsonUtilsTest(tf.test.TestCase):
def testDumpReasonablePrecision(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump(1.0, f, float_digits=2)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '1.00')
def testDumpPassExtraParams(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump([1.12345], f, float_digits=2, indent=3)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '[\n 1.12\n]')
def testDumpZeroPrecision(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump(1.0, f, float_digits=0, indent=3)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '1')
def testDumpUnspecifiedPrecision(self):
output_path = os.path.join(tf.test.get_temp_dir(), 'test.json')
with tf.gfile.GFile(output_path, 'w') as f:
json_utils.Dump(1.012345, f)
with tf.gfile.GFile(output_path, 'r') as f:
self.assertEqual(f.read(), '1.012345')
def testDumpsReasonablePrecision(self):
s = json_utils.Dumps(1.12545, float_digits=2)
self.assertEqual(s, '1.13')
def testDumpsPassExtraParams(self):
s = json_utils.Dumps([1.0], float_digits=2, indent=3)
self.assertEqual(s, '[\n 1.00\n]')
def testDumpsZeroPrecision(self):
s = json_utils.Dumps(1.0, float_digits=0)
self.assertEqual(s, '1')
def testDumpsUnspecifiedPrecision(self):
s = json_utils.Dumps(1.012345)
self.assertEqual(s, '1.012345')
def testPrettyParams(self):
s = json_utils.Dumps({'v': 1.012345, 'n': 2}, **json_utils.PrettyParams())
self.assertEqual(s, '{\n "n": 2,\n "v": 1.0123\n}')
def testPrettyParamsExtraParamsInside(self):
s = json_utils.Dumps(
{'v': 1.012345,
'n': float('nan')}, **json_utils.PrettyParams(allow_nan=True))
self.assertEqual(s, '{\n "n": NaN,\n "v": 1.0123\n}')
with self.assertRaises(ValueError):
s = json_utils.Dumps(
{'v': 1.012345,
'n': float('nan')}, **json_utils.PrettyParams(allow_nan=False))
def testPrettyParamsExtraParamsOutside(self):
s = json_utils.Dumps(
{'v': 1.012345,
'n': float('nan')}, allow_nan=True, **json_utils.PrettyParams())
self.assertEqual(s, '{\n "n": NaN,\n "v": 1.0123\n}')
with self.assertRaises(ValueError):
s = json_utils.Dumps(
{'v': 1.012345,
'n': float('nan')}, allow_nan=False, **json_utils.PrettyParams())
if __name__ == '__main__':
tf.test.main()
|
# -*- coding: utf-8 -*-
""" GPML hyper parameter treatment.
Provide iterator, getter and setter.
Created: Mon Jan 13 11:01:19 2014 by Hannes Nickisch, Philips Research Hamburg.
Modified: $Id: hyper.py 1263 2013-12-13 13:36:13Z hn $
"""
__version__ = "$Id: hyper.py 913 2013-08-15 12:54:33Z hn $"
import numpy as np
class HyperIter:
def __init__(self):
self.chil = []
def __iter__(self):
""" Iterate over hyperparameters.
"""
        for s, h in self.hyp.items():
if isinstance(h,float) or isinstance(h,int): # scalar parameter
yield s,None,h,self
else: # vector parameter
for i,hi in enumerate(h): yield s,i,hi,self
for c in self.chil: # recursion over children
for y in c: yield y
def get_hyp(self):
""" Obtain hyperparameters as a vector.
"""
hyp = []
for s,i,hi,k in self: hyp.append(hi)
return np.array(hyp)
def set_hyp(self,hyp,j=None):
""" Set all hyperparameters jointly or individually.
"""
ii = 0
for s,i,hi,k in self:
            if j is None:
                if i is None: k.hyp[s] = hyp[ii]
                else: k.hyp[s][i] = hyp[ii]
            elif j == ii:
                if i is None: k.hyp[s] = hyp
                else: k.hyp[s][i] = hyp
ii += 1
return self
def get_nhyp(self):
""" Obtain total number of hyperparameters.
"""
return sum(1 for _ in self)
def get_hyp_tree(self):
""" Construct the tree of hyperparameters.
"""
hyp_tree = []
def dfs(k,s): # depth first search over the tree
s += k.name
            if k.chil is not None:
for i,ki in enumerate(k.chil):
dfs(ki,'%s%d/'%(s,i+1))
for v,h in k.hyp.items():
hyp_tree.append( (s,h) )
dfs(self,'')
return hyp_tree
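# Minimal usage sketch (hypothetical subclass; HyperIter itself defines neither `hyp`
# nor `name`, so concrete mean/covariance classes are expected to supply them):
#
#     class Kernel(HyperIter):
#         def __init__(self):
#             HyperIter.__init__(self)                 # sets self.chil = []
#             self.name = 'kernel/'
#             self.hyp = {'log_ell': 0.0, 'log_sf': np.array([0.5, 1.0])}
#
#     k = Kernel()
#     k.get_nhyp()                  # -> 3 (one scalar + a 2-vector)
#     k.set_hyp(k.get_hyp() + 0.1)  # shift all hyperparameters jointly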
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/config_change.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/api/config_change.proto",
package="google.api",
syntax="proto3",
serialized_options=b"\n\016com.google.apiB\021ConfigChangeProtoP\001ZCgoogle.golang.org/genproto/googleapis/api/configchange;configchange\242\002\004GAPI",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1egoogle/api/config_change.proto\x12\ngoogle.api"\x97\x01\n\x0c\x43onfigChange\x12\x0f\n\x07\x65lement\x18\x01 \x01(\t\x12\x11\n\told_value\x18\x02 \x01(\t\x12\x11\n\tnew_value\x18\x03 \x01(\t\x12+\n\x0b\x63hange_type\x18\x04 \x01(\x0e\x32\x16.google.api.ChangeType\x12#\n\x07\x61\x64vices\x18\x05 \x03(\x0b\x32\x12.google.api.Advice"\x1d\n\x06\x41\x64vice\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t*O\n\nChangeType\x12\x1b\n\x17\x43HANGE_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05\x41\x44\x44\x45\x44\x10\x01\x12\x0b\n\x07REMOVED\x10\x02\x12\x0c\n\x08MODIFIED\x10\x03\x42q\n\x0e\x63om.google.apiB\x11\x43onfigChangeProtoP\x01ZCgoogle.golang.org/genproto/googleapis/api/configchange;configchange\xa2\x02\x04GAPIb\x06proto3',
)
_CHANGETYPE = _descriptor.EnumDescriptor(
name="ChangeType",
full_name="google.api.ChangeType",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="CHANGE_TYPE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="ADDED",
index=1,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="REMOVED",
index=2,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="MODIFIED",
index=3,
number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=231,
serialized_end=310,
)
_sym_db.RegisterEnumDescriptor(_CHANGETYPE)
ChangeType = enum_type_wrapper.EnumTypeWrapper(_CHANGETYPE)
CHANGE_TYPE_UNSPECIFIED = 0
ADDED = 1
REMOVED = 2
MODIFIED = 3
_CONFIGCHANGE = _descriptor.Descriptor(
name="ConfigChange",
full_name="google.api.ConfigChange",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="element",
full_name="google.api.ConfigChange.element",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="old_value",
full_name="google.api.ConfigChange.old_value",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="new_value",
full_name="google.api.ConfigChange.new_value",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="change_type",
full_name="google.api.ConfigChange.change_type",
index=3,
number=4,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="advices",
full_name="google.api.ConfigChange.advices",
index=4,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=47,
serialized_end=198,
)
_ADVICE = _descriptor.Descriptor(
name="Advice",
full_name="google.api.Advice",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="description",
full_name="google.api.Advice.description",
index=0,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=200,
serialized_end=229,
)
_CONFIGCHANGE.fields_by_name["change_type"].enum_type = _CHANGETYPE
_CONFIGCHANGE.fields_by_name["advices"].message_type = _ADVICE
DESCRIPTOR.message_types_by_name["ConfigChange"] = _CONFIGCHANGE
DESCRIPTOR.message_types_by_name["Advice"] = _ADVICE
DESCRIPTOR.enum_types_by_name["ChangeType"] = _CHANGETYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ConfigChange = _reflection.GeneratedProtocolMessageType(
"ConfigChange",
(_message.Message,),
{
"DESCRIPTOR": _CONFIGCHANGE,
"__module__": "google.api.config_change_pb2"
# @@protoc_insertion_point(class_scope:google.api.ConfigChange)
},
)
_sym_db.RegisterMessage(ConfigChange)
Advice = _reflection.GeneratedProtocolMessageType(
"Advice",
(_message.Message,),
{
"DESCRIPTOR": _ADVICE,
"__module__": "google.api.config_change_pb2"
# @@protoc_insertion_point(class_scope:google.api.Advice)
},
)
_sym_db.RegisterMessage(Advice)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
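# Illustrative usage sketch (not part of the generated output; assumes this module is
# importable under the package path recorded above, google.api.config_change_pb2):
#
#     from google.api import config_change_pb2
#
#     change = config_change_pb2.ConfigChange(
#         element="logging.rules",
#         old_value="ERROR",
#         new_value="WARNING",
#         change_type=config_change_pb2.MODIFIED,
#     )
#     payload = change.SerializeToString()
#     roundtrip = config_change_pb2.ConfigChange.FromString(payload)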
|
import logging
from django.contrib.auth import get_user_model
from django.shortcuts import redirect
from django.urls import reverse, reverse_lazy
from django.views.generic import UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from rest_framework import permissions
from rest_framework import viewsets
from rest_framework import generics
from django_filters.rest_framework import DjangoFilterBackend
from .forms import UserProfileForm
from .models import Profile
from .serializers import ProfileSerializer
from ... import app_settings
from ...permissions import is_staff
from ...mixins import WebpackBuiltTemplateViewMixin
from ...views import DjangoSsoAppBaseViewMixin
from .filters import ProfileFilter
User = get_user_model()
logger = logging.getLogger('django_sso_app')
class ProfileView(LoginRequiredMixin, WebpackBuiltTemplateViewMixin):
template_name = "account/profile.html"
def get_object(self, queryset=None):
request_user = getattr(self.request, 'user', None)
return request_user.get_sso_app_profile()
def get_context_data(self, *args, **kwargs):
context = super(ProfileView, self).get_context_data(*args, **kwargs)
profile = self.get_object()
if profile is not None:
user_fields = []
for field in app_settings.PROFILE_FIELDS:
user_fields.append((field, getattr(profile, field)))
context['user_fields'] = user_fields
return context
def get(self, request, *args, **kwargs):
logger.info('Getting profile')
try:
return super(ProfileView, self).get(request, *args, **kwargs)
except:
logger.exception('Error getting profile')
raise
class ProfileUpdateView(LoginRequiredMixin, UpdateView, WebpackBuiltTemplateViewMixin,
DjangoSsoAppBaseViewMixin):
model = Profile
template_name = "account/profile_update.html"
form_class = UserProfileForm
success_url = reverse_lazy('profile')
def get_object(self, queryset=None):
request_user = getattr(self.request, 'user', None)
return request_user.get_sso_app_profile()
def get_context_data(self, *args, **kwargs):
self.object = self.get_object()
context = super(WebpackBuiltTemplateViewMixin, self).get_context_data(*args, **kwargs)
return context
def get(self, request, *args, **kwargs):
logger.info('Getting profile update')
try:
profile = self.get_object()
return super(ProfileUpdateView, self).get(request, profile.pk, *args, **kwargs)
except:
logger.exception('Error getting profile update')
raise
def post(self, request, *args, **kwargs):
logger.info('Posting profile update')
try:
profile = self.get_object()
response = super(ProfileUpdateView, self).post(request, profile.pk, *args, **kwargs)
except:
logger.exception('Error posting profile update')
raise
else:
user = profile.user
self.update_response_jwt(user, response)
return response
class ProfileCompleteView(ProfileUpdateView):
template_name = "account/profile_complete.html"
def get(self, request, *args, **kwargs):
logger.info('Getting profile complete')
if not request.user.sso_app_profile.is_incomplete:
return redirect(reverse('profile.update'))
return super(ProfileCompleteView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
logger.info('Posting profile create')
if not request.user.sso_app_profile.is_incomplete:
return redirect(reverse('profile.update'))
return super(ProfileCompleteView, self).post(request, *args, **kwargs)
# api
class ProfileApiViewSet(viewsets.ModelViewSet,
DjangoSsoAppBaseViewMixin):
lookup_field = 'sso_id'
permission_classes = (permissions.IsAuthenticated, )
filter_backends = (DjangoFilterBackend,)
filterset_class = ProfileFilter
serializer_class = ProfileSerializer
def get_queryset(self):
requesting_user = self.request.user
queryset = Profile.objects.filter(user__is_superuser=False, is_active=True, user__is_active=True)
if is_staff(requesting_user):
return queryset
else:
return queryset.filter(user=requesting_user)
def list(self, request, *args, **kwargs):
"""
List profiles
"""
return super(ProfileApiViewSet, self).list(request, *args, **kwargs)
def retrieve(self, request, *args, **kwargs):
"""
Profile detail
"""
return super(ProfileApiViewSet, self).retrieve(request, *args, **kwargs)
def partial_update(self, request, *args, **kwargs):
"""
Profile update
"""
response = super(ProfileApiViewSet, self).partial_update(request, *args, **kwargs)
response_status_code = response.status_code
logger.info('Profile partial_update status code is {}, {}'.format(response_status_code, request.data))
if response_status_code == 200:
profile = self.get_object()
user = profile.user
self.update_response_jwt(user, response)
return response
class ProfileDetailApiView(generics.RetrieveAPIView):
"""
Profile detail entrypoint
"""
lookup_field = 'sso_id'
serializer_class = ProfileSerializer
def get_queryset(self):
return Profile.objects.filter(is_active=True, user__is_active=True)
def get_object(self, *args, **kwargs):
user = getattr(self.request, 'user', None)
return user.get_sso_app_profile()
|
import numpy as np
from phimal_utilities.data import Dataset
from phimal_utilities.data.diffusion import DiffusionGaussian
from phimal_utilities.data.burgers import BurgersDelta, BurgersCos, BurgersSawtooth
from phimal_utilities.data.kdv import KdVSoliton
x = np.linspace(-5, 5, 1000)
t = np.linspace(0.0, 2.0, 100)
x_grid, t_grid = np.meshgrid(x, t)
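# Alternative example datasets; each assignment below overrides the previous
# one, so only the last uncommented Dataset(...) call is actually used.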
dataset = Dataset(BurgersDelta, v=0.1, A=1.0)
dataset = Dataset(BurgersCos, v=0.1, a=0.1, b=0.1, k=2)
dataset = Dataset(BurgersSawtooth, v=0.1)
#dataset = Dataset(KdVSoliton, c=5.0, a = -1.0, b=1)
dataset.generate_solution(x_grid, t_grid).shape
dataset.parameters
dataset.time_deriv(x_grid, t_grid).shape
theta = dataset.library(x_grid.reshape(-1, 1), t_grid.reshape(-1, 1), poly_order=2, deriv_order=2)
dt = dataset.time_deriv(x_grid.reshape(-1, 1), t_grid.reshape(-1, 1))
theta.shape
np.linalg.lstsq(theta, dt, rcond=None)[0]
X_train, y_train = dataset.create_dataset(x_grid, t_grid, n_samples=0, noise=0.05)
y_train.shape
from phimal_utilities.analysis import load_tensorboard
|
from __future__ import division
import numpy as np
import matplotlib
import scipy.integrate as sint
import scipy.constants
import matplotlib.pyplot as plt
import math
import constants
q = constants.cgs_constants['q']
c = constants.cgs_constants['c']
m_e = constants.cgs_constants['m_e']
m_p = constants.cgs_constants['m_p']
def compute_system_energy(particles, fields):
h_drift = np.sum(np.sqrt((particles.beta * particles.p_xi)**2 - particles.px ** 2
- particles.py ** 2 - (particles.weight * particles.m_0 * c) ** 2)) * 9.9
fields.solver.compute_psi_particles(fields, particles)
h_sc = -np.sum( particles.weight * particles.charge / (particles.beta * c) * fields.psi_vals)
#es_energy = fields.mode_coefficients * np.conj(fields.mode_coefficients) * fields.k_matrix_sq
return np.abs(h_drift + h_sc) # + np.sum(es_energy))
def plot_beam(beam):
plt.figure(figsize = (12,12))
plt.subplot(2,2,1)
plt.plot(beam.x, beam.px / beam.pz, 'o')
plt.subplot(2,2,2)
plt.plot(beam.y, beam.py / beam.pz, 'o')
plt.subplot(2,2,3)
plt.plot(beam.x, beam.y, 'o')
plt.subplot(2,2,4)
plt.plot(beam.px / beam.pz, beam.py / beam.pz, 'o')
plt.show()
def plot_rs_beam(points):
x = points[:,0]
xp = points[:,1]
y = points[:,2]
yp = points[:,3]
plt.figure(figsize = (16,16))
plt.subplot(3,3,1)
plt.hexbin(x, xp, gridsize = 40)#, 'o')
plt.title('x-xp')
plt.subplot(3,3,2)
plt.hexbin(y, yp, gridsize = 40)#,'o')
plt.title('y-yp')
plt.subplot(3,3,3)
plt.hexbin(x, y, gridsize = 40)#,'o')
plt.title('x-y')
plt.subplot(3,3,4)
plt.hexbin(xp, yp, gridsize = 40)#, 'o')
plt.title('xp-yp')
plt.subplot(3,3,5)
plt.hexbin(x, yp, gridsize = 40)#,'o')
plt.title('x-yp')
plt.subplot(3,3,6)
plt.hexbin(y, xp, gridsize = 40)#, 'o')
plt.title('y-xp')
plt.show()
return
def round_beam_expansion(z, e_x, e_y, x0, y0, q, gamma, m_0):
beta = np.sqrt(1. - 1./ gamma **2)
I = q * beta * c / 100.
I0 = 4 * np.pi * 8.85e-12 * (c/100.) ** 3 * (m_0 / 1000.) / (1.602e-19)
K = I * 2. / (I0 * (beta **3) * (gamma **3))
def func(E, z):
x = E[0]
xp = E[1]
y = E[2]
yp = E[3]
xpp = 2 * K / (x + y) + e_x**2 / x**3
ypp = 2 * K / (x + y) + e_y**2 / y**3
return np.asarray([xp, xpp, yp, ypp])
init = np.asarray([x0, 0, y0, 0])
output = sint.odeint(func, init, z)
return output
def calc_perveance(I, beam_beta, beam_gamma, cn=0):
'''Calculate the perveance for a proton beam of a given current and particle energy.
Arguments
- I - current in A
        - beam_beta, beam_gamma - relativistic beta and gamma of the beam
        - (optional) cn - charge neutralization factor - default 0 (currently unused in the calculation)
'''
I0 = 3.13e7 #characteristic current
beta = beam_beta
gamma = beam_gamma
return (I/I0)*(2/beta**3)*(1/gamma**3)
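# Worked example (illustrative numbers): for I = 0.1 A, beta = 0.1, gamma ~ 1.005,
#   K = (0.1 / 3.13e7) * (2 / 0.1**3) * (1 / 1.005**3) ~ 6.3e-6
# i.e. a dimensionless generalized perveance of a few times 1e-6.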
def calc_characteristic_current():
'''Return characteristics current for proton beam'''
return 4*np.pi*scipy.constants.epsilon_0*scipy.constants.m_p*(scipy.constants.c**3)/scipy.constants.e
#Introduce numerical integrators
#2nd Order RK - Ralston Method
def Ralston(r,z,h,f):
k1 = h*f(r)
return 0.25*k1 + 0.75*h*f(r+(2/3)*k1)
#4th Order Runge-Kutta
def RungeKutta4(r,z,h,f):
k1 = f(r)
k2 = f(r + (h/2)*k1)
k3 = f(r + (h/2)*k2)
k4 = f(r + h*k3)
return h/6*(k1 + 2*k2 +2*k3 + k4)
#function here, which is a function of r and z
def rprime(K,emit,r0,rp0,rm):
'''
    Returns the slope of the beam envelope (dr/dz) for a given emittance, perveance K,
    initial conditions, and the current envelope radius rm. This equation follows from Reiser.
    Arguments:
        - K - perveance
        - emit - geometric emittance
        - r0 - initial envelope radius (or RMS)
        - rp0 - initial slope of envelope (or RMS)
        - rm - current envelope radius (or RMS) at which the slope is evaluated
'''
first = rp0**2 #first term
second = (emit**2)*((1./r0**2)-(1./rm**2)) #second term
third = 2*K* np.log(rm/r0) / 4
return np.sqrt(first + second + third)
def calculate_expansion(current, beam_beta, beam_gamma, r0, rprime0,
emit = 1.0e-6, N = 1000, zf = 1.0):
    '''Evaluate the expansion of a KV beam envelope in a drift along the z-axis, beginning at z = 0.
    Arguments:
        - current - beam current in A
        - beam_beta, beam_gamma - relativistic beta and gamma of the beam
        - r0 - initial envelope value (provide RMS for RMS expansion, a for envelope expansion, etc.)
        - rprime0 - initial slope of envelope (must be non-zero, but calculation is not sensitive to small values)
        - (optional) emit - geometric emittance of beam - default 1.0e-6
        - (optional) N - number of steps for integration - default 1000
        - (optional) zf - final z value (e.g. length of expansion) - default 1.0
    '''
z0 = 0.0 #start
ss = (zf-z0)/N #step size
zpoints = np.linspace(0.0, zf, num=N) #define z values
rpoints = [] #empty array for r values
#calculate perveance
Kp = calc_perveance(current, beam_beta, beam_gamma)
#x is r
#z is t (what we step up)
#f is our function describing the relationship between r and z
f = lambda r: rprime(Kp,emit,r0,rprime0,r)
r,z,dz = r0,z0,ss
points = []
while z < zf:
points.append((z,r))
        z, r = z+dz, r + Ralston(r,z,dz,f)  # increment
return points
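if __name__ == '__main__':
    # Minimal usage sketch with illustrative parameters (0.1 A beam, beta = 0.1,
    # gamma ~ 1.005, 5 mm initial envelope, small non-zero initial slope).
    envelope = calculate_expansion(0.1, 0.1, 1.005, 5.0e-3, 1.0e-6,
                                   emit=1.0e-6, N=1000, zf=1.0)
    z_vals = [p[0] for p in envelope]
    r_vals = [p[1] for p in envelope]
    plt.plot(z_vals, r_vals)
    plt.xlabel('z')
    plt.ylabel('envelope radius')
    plt.show()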
|
#
# Copyright (c) 2019 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import json
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.urls import reverse
from bridge.vars import JOB_ROLES, JOB_STATUS
from bridge.utils import KleverTestCase
# populate_users is assumed to live in bridge.populate (as in other bridge tests)
from bridge.populate import populate_users
from users.models import User, PreferableView, View
from jobs.models import Job, JobFile, FileSystem, JobHistory, RunHistory
class TestJobs(KleverTestCase):
def setUp(self):
super(TestJobs, self).setUp()
User.objects.create_superuser('superuser', '', 'top_secret')
populate_users(
admin={'username': 'admin', 'password': 'admin'},
manager={'username': 'manager', 'password': 'manager'},
service={'username': 'service', 'password': 'service'}
)
self.client.post(reverse('users:login'), {'username': 'manager', 'password': 'manager'})
self.client.post(reverse('population'))
self.test_filename = 'test_jobfile.txt'
self.test_archive = 'test_jobarchive.zip'
self.test_conf = 'test.conf'
def test_tree_and_views(self):
# Check jobs tree
response = self.client.get(reverse('jobs:tree'))
self.assertEqual(response.status_code, 200)
# Creating view
tree_view = {
"columns": ['name', 'role', 'author', 'date', 'status', 'resource',
'unsafe:total', 'problem:total', 'safe:total'],
"order": ["up", "date"],
"filters": {
"title": {"type": "istartswith", "value": "Testing"},
"format": {"type": "is", "value": "1"}
}
}
response = self.client.post('/users/ajax/save_view/', {
'view': json.dumps(tree_view), 'view_type': '1', 'title': 'My view'
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
try:
view_id = int(res['view_id'])
        except (KeyError, ValueError):
self.fail('Response must contain integer "view_id"')
response = self.client.get(reverse('jobs:tree'))
self.assertEqual(response.status_code, 200)
try:
view = View.objects.get(author__username='manager', type='1', name='My view', pk=view_id)
except ObjectDoesNotExist:
self.fail('The view was not saved')
self.assertEqual(tree_view, json.loads(view.view))
# Changing view
tree_view = {"columns": ["role", "author", "status", "problem", "safe"], "order": [], "filters": {}}
response = self.client.post('/users/ajax/save_view/', {
'view': json.dumps(tree_view), 'view_type': '1', 'view_id': view_id
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
try:
view_id = int(res['view_id'])
        except (KeyError, ValueError):
self.fail('Response must contain integer "view_id"')
response = self.client.get(reverse('jobs:tree'))
self.assertEqual(response.status_code, 200)
try:
view = View.objects.get(author__username='manager', type='1', name='My view', pk=view_id)
except ObjectDoesNotExist:
self.fail('The view was not saved')
self.assertEqual(tree_view, json.loads(view.view))
        # Making view preferred
response = self.client.post('/users/ajax/preferable_view/', {'view_type': '1', 'view_id': view_id})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
self.assertIn('message', res)
self.assertEqual(len(PreferableView.objects.filter(user__username='manager', view_id=view_id)), 1)
response = self.client.get(reverse('jobs:tree'))
self.assertEqual(response.status_code, 200)
# Share view
self.assertFalse(view.shared)
response = self.client.post('/users/ajax/share_view/', {'view_type': '1', 'view_id': view_id})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
self.assertIn('message', res)
view = View.objects.get(id=view_id)
self.assertTrue(view.shared)
# Testing view name check
response = self.client.post('/users/ajax/check_view_name/', {'view_type': '1', 'view_title': 'Default'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertIn('error', json.loads(str(response.content, encoding='utf8')))
response = self.client.post('/users/ajax/check_view_name/', {'view_type': '1', 'view_title': ''})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertIn('error', json.loads(str(response.content, encoding='utf8')))
response = self.client.post('/users/ajax/check_view_name/', {'view_type': '1', 'view_title': 'My view'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertIn('error', json.loads(str(response.content, encoding='utf8')))
response = self.client.post('/users/ajax/check_view_name/', {'view_type': '1', 'view_title': 'New view'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
# Check view deletion
response = self.client.post('/users/ajax/remove_view/', {'view_type': '1', 'view_id': view_id})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertIn('message', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(len(PreferableView.objects.filter(user__username='manager')), 0)
self.assertEqual(len(View.objects.filter(author__username='manager')), 0)
def test_create_edit_job(self):
job_template = Job.objects.order_by('name').first()
self.assertIsNotNone(job_template)
# Requests for template job's page and data for autoupdate
response = self.client.get(reverse('jobs:job', args=[job_template.pk]))
self.assertEqual(response.status_code, 200)
response = self.client.post('/jobs/decision_results/%s/' % job_template.pk)
self.assertEqual(response.status_code, 200)
response = self.client.get('/jobs/progress/%s/' % job_template.pk)
self.assertEqual(response.status_code, 200)
response = self.client.post('/jobs/status/%s/' % job_template.pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
# Job template shouldn't have children now
response = self.client.get('/jobs/do_job_has_children/%s/' % job_template.pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertIn('children', res)
self.assertFalse(res['children'])
# Create job page
response = self.client.get(reverse('jobs:form', args=[job_template.pk, 'copy']))
self.assertEqual(response.status_code, 200)
self.assertTrue(len(response.content) > 0)
tmpl_filedata = json.dumps(LoadFilesTree(job_template.id, job_template.version).as_json()['children'])
# Save new job
response = self.client.post(reverse('jobs:form', args=[job_template.pk, 'copy']), {
'name': 'New job title',
'description': 'Description of new job',
'global_role': JOB_ROLES[0][0],
'user_roles': json.dumps([{'user': User.objects.get(username='superuser').pk, 'role': JOB_ROLES[2][0]}]),
'parent': job_template.identifier,
'file_data': tmpl_filedata
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
try:
newjob_pk = int(res['job_id'])
        except (ValueError, KeyError):
self.fail('Integer job id is expected')
# Job page
response = self.client.get(reverse('jobs:job', args=[newjob_pk]))
self.assertEqual(response.status_code, 200)
# Job autoupdate data
response = self.client.post('/jobs/decision_results/%s/' % newjob_pk)
self.assertEqual(response.status_code, 200)
response = self.client.get('/jobs/progress/%s/' % newjob_pk)
self.assertEqual(response.status_code, 200)
response = self.client.post('/jobs/status/%s/' % newjob_pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
resp = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', resp)
self.assertIn('status', resp)
# Check if job and its versions exist
try:
newjob = Job.objects.get(pk=newjob_pk)
except ObjectDoesNotExist:
self.fail('New job was not created')
try:
JobHistory.objects.get(job=newjob)
except ObjectDoesNotExist:
self.fail('New job was created without version')
except MultipleObjectsReturned:
self.fail('New job has too many versions')
# Edit job page
response = self.client.get(reverse('jobs:form', args=[newjob.pk, 'edit']))
self.assertEqual(response.status_code, 200)
self.assertTrue(len(response.content) > 0)
# Version data (description) for edit job page
response = self.client.get('/jobs/get_version_data/%s/%s/' % (newjob.pk, newjob.version))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
resp = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', resp)
self.assertIn('description', resp)
# Version roles for edit job page
response = self.client.get('/jobs/get_version_roles/%s/%s/' % (newjob.pk, newjob.version))
self.assertEqual(response.status_code, 200)
# Version files for edit job page
response = self.client.post('/jobs/get_version_files/%s/%s/' % (newjob.pk, newjob.version))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
resp = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', resp)
for i in range(1, 5):
response = self.client.post(reverse('jobs:form', args=[newjob.pk, 'edit']), {
'name': 'New job title',
'description': 'New description of new job',
'global_role': JOB_ROLES[1][0],
'user_roles': '[]',
'job_id': newjob.pk,
'file_data': tmpl_filedata,
'parent': job_template.identifier,
'comment': 'Comment %s' % i,
'last_version': Job.objects.get(pk=newjob.pk).version
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertIn('job_id', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(len(JobHistory.objects.filter(job=newjob)), 1 + i)
response = self.client.get(reverse('jobs:job', args=[newjob_pk]))
self.assertEqual(response.status_code, 200)
# Removing versions
response = self.client.post('/jobs/remove_versions/%s/' % newjob.pk, {'versions': '["2","3"]'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertIn('message', json.loads(str(response.content, encoding='utf8')))
# Try to remove first version
response = self.client.post('/jobs/remove_versions/%s/' % newjob.pk, {'versions': '["1"]'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertIn('error', json.loads(str(response.content, encoding='utf8')))
# Remove job
response = self.client.post('/jobs/api/%s/remove/' % newjob.parent_id)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(Job.objects.filter(id__in={newjob.id, newjob.parent_id}).count(), 0)
def test_files(self):
job_template = Job.objects.all().first()
file_data = [{"type": "root", "text": "Files", "children": []}]
response = self.client.post(reverse('jobs:form', args=[job_template.pk, 'copy']), {
'name': 'New job title', 'description': 'Description of new job', 'parent': job_template.identifier,
'global_role': JOB_ROLES[0][0], 'user_roles': '[]',
'file_data': json.dumps(file_data)
})
newjob_pk = int(json.loads(str(response.content, encoding='utf8'))['job_id'])
self.assertEqual(Job.objects.filter(id=newjob_pk).count(), 1)
with open(os.path.join(settings.MEDIA_ROOT, self.test_filename), mode='wb') as fp:
fp.write(b'My test text')
fp.close()
with open(os.path.join(settings.MEDIA_ROOT, self.test_filename), mode='rb') as fp:
response = self.client.post('/jobs/upload_file/', {'file': fp})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
self.assertIn('hashsum', res)
try:
newfile = JobFile.objects.get(hash_sum=res['hashsum'])
except ObjectDoesNotExist:
self.fail('The file was not uploaded')
file_data[0]['children'].append({
"data": {"hashsum": newfile.hash_sum}, 'text': 'filename.txt', 'type': 'file'
})
# Add new file to job and check result and DB changes
response = self.client.post(reverse('jobs:form', args=[newjob_pk, 'edit']), {
'name': 'New job title', 'description': 'New description of new job',
'parent': job_template.identifier, 'global_role': JOB_ROLES[1][0],
'user_roles': json.dumps([{'user': User.objects.get(username='superuser').pk, 'role': JOB_ROLES[2][0]}]),
'file_data': json.dumps(file_data),
'comment': 'Add file', 'last_version': 1
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
self.assertIn('job_id', res)
self.assertEqual(len(JobHistory.objects.filter(job_id=newjob_pk)), 2)
try:
job_file = FileSystem.objects.get(job__job_id=newjob_pk, job__version=2)
except ObjectDoesNotExist:
self.fail('File was not saved')
except MultipleObjectsReturned:
self.fail('Too many files for new job (only 1 expected)')
self.assertEqual(job_file.name, 'filename.txt')
self.assertIsNone(job_file.parent)
self.assertEqual(job_file.file_id, newfile.id)
# Try to download new file
response = self.client.get(reverse('jobs:download_file', args=[newfile.hash_sum]))
self.assertEqual(response.status_code, 200)
# Try to download new job and one of the defaults
response = self.client.post('/jobs/downloadjobs/', {
'job_ids': json.dumps([newjob_pk, Job.objects.filter(parent=None).first().pk])
})
self.assertEqual(response.status_code, 200)
# Check access to download job
response = self.client.post('/jobs/check_download_access/', {
'jobs': json.dumps([newjob_pk, Job.objects.filter(parent=None).first().pk])
})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
# Try to download new job
response = self.client.get('/jobs/downloadjob/%s/' % newjob_pk)
self.assertEqual(response.status_code, 200)
with open(os.path.join(settings.MEDIA_ROOT, self.test_archive), mode='wb') as fp:
for content in response.streaming_content:
fp.write(content)
# We have to remove job before uploading new one with the same identifier
response = self.client.post('/jobs/api/%s/remove/' % newjob_pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
# Upload downloaded job
with open(os.path.join(settings.MEDIA_ROOT, self.test_archive), mode='rb') as fp:
response = self.client.post('/jobs/upload_jobs/%s/' % job_template.identifier, {'file': fp})
fp.close()
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertJSONEqual(str(response.content, encoding='utf8'), '{}')
try:
uploaded_job = Job.objects.get(parent__identifier=job_template.identifier, name='New job title')
except ObjectDoesNotExist:
self.fail('The job was not found after upload')
self.assertEqual(len(FileSystem.objects.filter(job__job=uploaded_job, job__version=2)), 1)
self.assertEqual(len(FileSystem.objects.filter(job__job=uploaded_job, job__version=1)), 0)
self.assertEqual(len(JobHistory.objects.filter(job=uploaded_job)), 2)
# Check file content of uploaded job
response = self.client.get('/jobs/api/file/{0}/'.format(
FileSystem.objects.get(job__job=uploaded_job, job__version=2).file.hash_sum
))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
self.assertIn('content', res)
self.assertEqual(res['content'], 'My test text')
def test_run_decision(self):
file_data = [{"type": "root", "text": "Files", "children": []}]
job_template = Job.objects.all().first()
response = self.client.post(reverse('jobs:form', args=[job_template.id, 'copy']), {
'name': 'New job title', 'description': 'Description of new job',
'global_role': JOB_ROLES[0][0], 'user_roles': '[]',
'parent': job_template.identifier, 'file_data': json.dumps(file_data)
})
job_pk = int(json.loads(str(response.content, encoding='utf8'))['job_id'])
# Fast run
response = self.client.post('/jobs/run_decision/%s/' % job_pk, {'mode': 'fast'})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(Job.objects.filter(pk=job_pk, status=JOB_STATUS[1][0])), 1)
self.assertEqual(len(RunHistory.objects.filter(
job_id=job_pk, operator__username='manager', status=JOB_STATUS[1][0]
)), 1)
# Get progress (console request)
response = self.client.get('/jobs/get_job_progress_json/%s/' % job_pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
self.assertIn('data', res)
# Stop decision
response = self.client.post('/jobs/stop_decision/%s/' % job_pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(Job.objects.get(pk=job_pk).status, JOB_STATUS[6][0])
self.assertEqual(len(RunHistory.objects.filter(job_id=job_pk, operator__username='manager')), 1)
Job.objects.filter(status=JOB_STATUS[6][0]).update(status=JOB_STATUS[7][0])
# Start decision page
response = self.client.get(reverse('jobs:prepare_run', args=[job_pk]))
self.assertEqual(response.status_code, 200)
# Get KLEVER_CORE_LOG_FORMATTERS value
response = self.client.post('/jobs/get_def_start_job_val/', {'name': 'console_log_formatter', 'value': 'brief'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
self.assertIn('console_log_formatter', res)
# Start decision with settings
run_conf = json.dumps([
["HIGH", "0", 100, '1'], ["1", "1.0", "2"], [1, 1, 100, ''],
[
"INFO", "%(asctime)s (%(filename)s:%(lineno)03d) %(name)s %(levelname)5s> %(message)s",
"NOTSET", "%(name)s %(levelname)5s> %(message)s"
],
[False, True, True, True, False, True]
])
response = self.client.post('/jobs/run_decision/%s/' % job_pk, {'mode': 'data', 'data': run_conf})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(Job.objects.get(pk=job_pk).status, JOB_STATUS[1][0])
self.assertEqual(len(RunHistory.objects.filter(job_id=job_pk, operator__username='manager')), 2)
self.client.post('/jobs/stop_decision/%s/' % job_pk)
Job.objects.filter(status=JOB_STATUS[6][0]).update(status=JOB_STATUS[7][0])
response = self.client.post('/jobs/run_decision/%s/' % job_pk, {'mode': 'lastconf'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(Job.objects.get(pk=job_pk).status, JOB_STATUS[1][0])
self.assertEqual(len(RunHistory.objects.filter(job_id=job_pk, operator__username='manager')), 3)
self.client.post('/jobs/stop_decision/%s/' % job_pk)
Job.objects.filter(status=JOB_STATUS[6][0]).update(status=JOB_STATUS[7][0])
response = self.client.get(
'/jobs/download_configuration/%s/' % RunHistory.objects.filter(job_id=job_pk).order_by('-date').first().pk
)
self.assertEqual(response.status_code, 200)
with open(os.path.join(settings.MEDIA_ROOT, self.test_conf), mode='wb') as fp:
for content in response.streaming_content:
fp.write(content)
with open(os.path.join(settings.MEDIA_ROOT, self.test_conf), mode='rb') as fp:
response = self.client.post('/jobs/run_decision/%s/' % job_pk, {'mode': 'file_conf', 'file_conf': fp})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(Job.objects.get(pk=job_pk).status, JOB_STATUS[1][0])
def test_console_requests(self):
job_template = Job.objects.all().first()
if not job_template:
return
# Copy the job with autofilled name
response = self.client.post('/jobs/api/duplicate-job/', data={'parent': job_template.pk})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
self.assertIn('id', res)
self.assertIn('identifier', res)
job_pk = res['id']
        job_identifier = res['identifier']
try:
job = Job.objects.get(id=job_pk)
except ObjectDoesNotExist:
self.fail("The job wasn't copied")
# Copy job version
response = self.client.patch('/jobs/api/duplicate/{}/'.format(job_pk))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
# Get job fields: status (by id)
        response = self.client.post('/jobs/get_job_field/', {'job': job_identifier, 'field': 'status'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertIn('status', res)
self.assertEqual(res['status'], JOB_STATUS[0][0])
# Get job fields: identifier (by name)
response = self.client.post('/jobs/get_job_field/', {'job': job.name, 'field': 'identifier'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertIn('identifier', res)
        self.assertEqual(res['identifier'], job_identifier)
def test_comparison_and_trees(self):
jobs = Job.objects.all()
job1 = jobs.first()
if not job1:
return
file_data = [{"type": "root", "text": "Files"}]
# Add file for the first job (job1)
with open(os.path.join(settings.MEDIA_ROOT, self.test_filename), mode='wb') as fp:
fp.write(b'My test text 1')
fp.close()
with open(os.path.join(settings.MEDIA_ROOT, self.test_filename), mode='rb') as fp:
response = self.client.post('/jobs/upload_file/', {'file': fp})
f1_hashsum = json.loads(str(response.content, encoding='utf8'))['hashsum']
file_data[0]['children'] = [{"data": {"hashsum": f1_hashsum}, 'text': 'filename.txt', 'type': 'file'}]
self.client.post(reverse('jobs:form', args=[job1.id, 'edit']), {
'name': job1.name, 'description': job1.versions.get(version=job1.version).description,
'global_role': JOB_ROLES[0][0], 'user_roles': '[]',
'parent': job1.parent.identifier if job1.parent else '', 'file_data': json.dumps(file_data),
'comment': 'Replace all files with one', 'last_version': job1.version
})
# Save job copy and copy new version
response = self.client.post('/jobs/api/duplicate-job/', data={'parent': job1.pk})
job2 = Job.objects.get(id=json.loads(str(response.content, encoding='utf8'))['id'])
self.client.patch('/jobs/api/duplicate/{}/'.format(job2.id))
self.assertEqual(JobHistory.objects.filter(job=job2).count(), 2)
# Replace file for job2
with open(os.path.join(settings.MEDIA_ROOT, self.test_filename), mode='wb') as fp:
fp.write(b'My test text 2')
fp.close()
with open(os.path.join(settings.MEDIA_ROOT, self.test_filename), mode='rb') as fp:
response = self.client.post('/jobs/replace_job_file/%s/' % job2.id, {'name': 'filename.txt', 'file': fp})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
try:
fs = FileSystem.objects.select_related('file').get(job__job=job2, name='filename.txt', job__version=2)
except ObjectDoesNotExist:
self.fail('Job2 file was not copied with version copy')
f2_hashsum = fs.file.hash_sum
# Compare versions of job2
response = self.client.post('/jobs/compare_versions/%s/' % job2.id, {'v1': 1, 'v2': 2})
self.assertEqual(response.status_code, 200)
# Check compare access
response = self.client.post('/jobs/check_compare_access/', {'job1': job1.id, 'job2': job2.id})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
# Compare jobs' files
response = self.client.get(reverse('jobs:comparison', args=[job1.id, job2.id]))
self.assertEqual(response.status_code, 200)
# get files difference
response = self.client.post('/jobs/get_files_diff/%s/%s/' % (f1_hashsum, f2_hashsum))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
res = json.loads(str(response.content, encoding='utf8'))
self.assertNotIn('error', res)
self.assertIn('content', res)
        # Download tree (should download at least job1 and job2)
response = self.client.post('/jobs/downloadtrees/', {'job_ids': json.dumps([job1.id])})
self.assertEqual(response.status_code, 200)
with open(os.path.join(settings.MEDIA_ROOT, self.test_archive), mode='wb') as fp:
for content in response.streaming_content:
fp.write(content)
        # Remove the jobs that were just downloaded
self.client.post('/jobs/api/%s/remove/' % job1.id)
# Upload tree
with open(os.path.join(settings.MEDIA_ROOT, self.test_archive), mode='rb') as fp:
response = self.client.post('/jobs/upload_jobs_tree/', {'parent_id': 'null', 'file': fp})
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/json')
self.assertNotIn('error', json.loads(str(response.content, encoding='utf8')))
self.assertEqual(Job.objects.filter(identifier=job1.identifier).count(), 1)
self.assertEqual(Job.objects.filter(identifier=job2.identifier).count(), 1)
def tearDown(self):
if os.path.exists(os.path.join(settings.MEDIA_ROOT, self.test_filename)):
os.remove(os.path.join(settings.MEDIA_ROOT, self.test_filename))
if os.path.exists(os.path.join(settings.MEDIA_ROOT, self.test_archive)):
os.remove(os.path.join(settings.MEDIA_ROOT, self.test_archive))
if os.path.exists(os.path.join(settings.MEDIA_ROOT, self.test_conf)):
os.remove(os.path.join(settings.MEDIA_ROOT, self.test_conf))
super().tearDown()
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .utils.utils import get_paddle_version
pd_ver = get_paddle_version()
import paddle
if pd_ver == 185:
Layer = paddle.fluid.dygraph.Layer
else:
Layer = paddle.nn.Layer
_cnt = 0
def counter():
global _cnt
_cnt += 1
return _cnt
class BaseBlock(Layer):
def __init__(self, key=None):
super(BaseBlock, self).__init__()
if key is not None:
self._key = str(key)
else:
self._key = self.__class__.__name__ + str(counter())
# set SuperNet class
def set_supernet(self, supernet):
self.__dict__['supernet'] = supernet
@property
def key(self):
return self._key
class Block(BaseBlock):
"""
    Model is composed of nested blocks.
Parameters:
fn(paddle.nn.Layer): instance of super layers, such as: SuperConv2D(3, 5, 3).
fixed(bool, optional): whether to fix the shape of the weight in this layer. Default: False.
key(str, optional): key of this layer, one-to-one correspondence between key and candidate config. Default: None.
"""
def __init__(self, fn, fixed=False, key=None):
super(Block, self).__init__(key)
self.fn = fn
self.fixed = fixed
self.candidate_config = self.fn.candidate_config
def forward(self, *inputs, **kwargs):
out = self.supernet.layers_forward(self, *inputs, **kwargs)
return out
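# Usage sketch (illustrative): wrap a paddleslim super layer so the supernet can
# dispatch its forward pass. `SuperConv2D` and the candidate_config values are
# assumptions taken from the docstring above, not imports made by this module.
#
#   conv = SuperConv2D(3, 5, 3, candidate_config={'kernel_size': [3, 5]})
#   block = Block(conv, key='conv_block_0')
#   # The surrounding OFA/supernet machinery is expected to call
#   # block.set_supernet(...) before block(x) is invoked.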
|
from uuid import uuid4
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from .mixins import MPAwareModel
treebeard = True
try:
from treebeard.mp_tree import MP_Node
except ImportError:
treebeard = False
class UniqueTokenModel(models.Model):
token = models.CharField(max_length=32, unique=True, blank=True)
class Meta:
abstract = True
def get_token(self):
return str(uuid4().hex)
def save(self, **kwargs):
if not self.token:
self.token = self.get_token()
super().save(**kwargs)
if treebeard:
class MaterializedPathNode(MPAwareModel, MP_Node):
slug = models.SlugField(max_length=255, db_index=True, unique=False, blank=True)
        node_order_by = ['numval', 'strval']
class Meta:
abstract = True
class MutableModelManager(models.QuerySet):
def by_type(self, model_class):
return self.filter(specific_type=ContentType.objects.get_for_model(model_class))
class MutableModel(models.Model):
"""A Model that if inherited from will store the specific class reference in self."""
specific_type = models.ForeignKey(
ContentType,
verbose_name=_('specific type'),
related_name='+',
editable=False,
on_delete=models.PROTECT)
class Meta:
abstract = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.pk and not self.specific_type_id:
# this model is being newly created rather than retrieved from the db;
# set content type to correctly represent the model class that this was
# created as
self.specific_type = ContentType.objects.get_for_model(self)
@cached_property
def specific(self):
"""Return this page in its most specific subclassed form."""
specific_type = ContentType.objects.get_for_id(self.specific_type_id)
model_class = specific_type.model_class()
if model_class is None:
return self
elif isinstance(self, model_class):
return self
else:
return specific_type.get_object_for_this_type(id=self.id)
@cached_property
def specific_class(self):
"""Return the class that this page would be if instantiated in its most specific form."""
specific_type = ContentType.objects.get_for_id(self.specific_type_id)
return specific_type.model_class()
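# Usage sketch (illustrative model names, not defined here): querying the base
# class still lets you recover the most specific subclass via `.specific`.
#
#   class Animal(MutableModel):
#       name = models.CharField(max_length=50)
#
#   class Dog(Animal):
#       pass
#
#   dog = Dog.objects.create(name='Rex')
#   Animal.objects.get(pk=dog.pk).specific  # -> <Dog: ...>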
|
import numpy as np
from keras.applications.inception_v3 import InceptionV3, decode_predictions
def predict(image):
    # Note: the model is rebuilt on every call; cache it at module level if this
    # function is called frequently. `image` is expected to already be prepared
    # for InceptionV3 (299x299, batch dimension, preprocess_input applied).
    model = InceptionV3()
    pred = model.predict(image)
    decoded_predictions = decode_predictions(pred, top=10)
    response = 'InceptionV3 predictions: ' + str(decoded_predictions[0][0:5])
    print(response)
    np.argmax(pred[0])  # index of the top-scoring class (currently unused)
    return response
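if __name__ == '__main__':
    # Minimal usage sketch; 'example.jpg' is a placeholder path.
    from keras.preprocessing import image as keras_image
    from keras.applications.inception_v3 import preprocess_input
    img = keras_image.load_img('example.jpg', target_size=(299, 299))
    x = keras_image.img_to_array(img)
    x = preprocess_input(np.expand_dims(x, axis=0))
    predict(x)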
|
EMAIL_DOMAINS = [
'10minute-email.com',
'10minutemail.co.uk',
'probl.info',
'fakemailgenerator.com',
'drdrb.net',
'armyspy.com',
'cuvox.de',
'dayrep.com',
    'spam4.me',
    'einrot.com',
'maildrop.cc',
'fleckens.hu',
'gustr.com',
'grr.la',
'jourrapide.com',
'rhyta.com',
'superrito.com',
'teleworm.us',
'0-mail.com',
'0815.ru',
'0clickemail.com',
'0wnd.net',
'0wnd.org',
'10minutemail.com',
'20minutemail.com',
'2prong.com',
'30minutemail.com',
'3d-painting.com',
'4warding.com',
'4warding.net',
'4warding.org',
'60minutemail.com',
'675hosting.com',
'675hosting.net',
'675hosting.org',
'6url.com',
'75hosting.com',
'75hosting.net',
'75hosting.org',
'7tags.com',
'9ox.net',
'a-bc.net',
'afrobacon.com',
'ajaxapp.net',
'amilegit.com',
'amiri.net',
'amiriindustries.com',
'anonbox.net',
'anonymbox.com',
'antichef.com',
'antichef.net',
'antispam.de',
'baxomale.ht.cx',
'beefmilk.com',
'binkmail.com',
'bio-muesli.net',
'bobmail.info',
'bodhi.lawlita.com',
'bofthew.com',
'brefmail.com',
'broadbandninja.com',
'bsnow.net',
'bugmenot.com',
'bumpymail.com',
'casualdx.com',
'centermail.com',
'centermail.net',
'chogmail.com',
'choicemail1.com',
'cool.fr.nf',
'correo.blogos.net',
'cosmorph.com',
'courriel.fr.nf',
'courrieltemporaire.com',
'cubiclink.com',
'curryworld.de',
'cust.in',
'dacoolest.com',
'dandikmail.com',
'dayrep.com',
'deadaddress.com',
'deadspam.com',
'despam.it',
'despammed.com',
'devnullmail.com',
'dfgh.net',
'digitalsanctuary.com',
'discardmail.com',
'discardmail.de',
'disposableaddress.com',
'disposeamail.com',
'disposemail.com',
'dispostable.com',
    'dm.w3internet.co.uk',
'dodgeit.com',
'dodgit.com',
'dodgit.org',
'donemail.ru',
'dontreg.com',
'dontsendmespam.de',
'dump-email.info',
'dumpandjunk.com',
'dumpmail.de',
'dumpyemail.com',
'e4ward.com',
'email60.com',
'emaildienst.de',
'emailias.com',
'emailigo.de',
'emailinfive.com',
'emailmiser.com',
'emailsensei.com',
'emailtemporario.com.br',
'emailto.de',
'emailwarden.com',
'emailx.at.hm',
'emailxfer.com',
'emz.net',
'enterto.com',
'ephemail.net',
'etranquil.com',
'etranquil.net',
'etranquil.org',
'explodemail.com',
'fakeinbox.com',
'fakeinformation.com',
'fastacura.com',
'fastchevy.com',
'fastchrysler.com',
'fastkawasaki.com',
'fastmazda.com',
'fastmitsubishi.com',
'fastnissan.com',
'fastsubaru.com',
'fastsuzuki.com',
'fasttoyota.com',
'fastyamaha.com',
'filzmail.com',
'fizmail.com',
'fr33mail.info',
'frapmail.com',
'front14.org',
'fux0ringduh.com',
'garliclife.com',
'get1mail.com',
'get2mail.fr',
'getonemail.com',
'getonemail.net',
'ghosttexter.de',
'girlsundertheinfluence.com',
'gishpuppy.com',
'gowikibooks.com',
'gowikicampus.com',
'gowikicars.com',
'gowikifilms.com',
'gowikigames.com',
'gowikimusic.com',
'gowikinetwork.com',
'gowikitravel.com',
'gowikitv.com',
'great-host.in',
'greensloth.com',
'gsrv.co.uk',
'guerillamail.biz',
'guerillamail.de',
'guerillamail.com',
'guerillamail.net',
'guerillamail.org',
'guerrillamail.biz',
'guerrillamail.com',
'guerrillamail.de',
'guerrillamail.net',
'guerrillamail.org',
'guerrillamailblock.com',
'h.mintemail.com',
'h8s.org',
'haltospam.com',
'hatespam.org',
'hidemail.de',
'hochsitze.com',
'hotpop.com',
'hulapla.de',
'ieatspam.eu',
'ieatspam.info',
'ihateyoualot.info',
'iheartspam.org',
'imails.info',
'inboxclean.com',
'inboxclean.org',
'incognitomail.com',
'incognitomail.net',
'incognitomail.org',
'insorg-mail.info',
'ipoo.org',
'irish2me.com',
'iwi.net',
'jetable.com',
'jetable.fr.nf',
'jetable.net',
'jetable.org',
'jnxjn.com',
'junk1e.com',
'kasmail.com',
'kaspop.com',
'keepmymail.com',
'killmail.com',
'killmail.net',
'kir.ch.tc',
'klassmaster.com',
'klassmaster.net',
'klzlk.com',
'kulturbetrieb.info',
'kurzepost.de',
'letthemeatspam.com',
'lhsdv.com',
'lifebyfood.com',
'link2mail.net',
'litedrop.com',
'lol.ovpn.to',
'lookugly.com',
'lopl.co.cc',
'lortemail.dk',
'lr78.com',
'm4ilweb.info',
'maboard.com',
'mail-temporaire.fr',
'mail.by',
'mail.mezimages.net',
'mail2rss.org',
'mail333.com',
'mail4trash.com',
'mailbidon.com',
'mailblocks.com',
'mailcatch.com',
'maileater.com',
'mailexpire.com',
'mailfreeonline.com',
'mailin8r.com',
'mailinater.com',
'mailinator.com',
'mailinator.net',
'mailinator2.com',
'mailincubator.com',
'mailme.ir',
'mailme.lv',
'mailmetrash.com',
'mailmoat.com',
'mailnator.com',
'mailnesia.com',
'mailnull.com',
'mailshell.com',
'mailsiphon.com',
'mailslite.com',
'mailzilla.com',
'mailzilla.org',
'mbx.cc',
'mega.zik.dj',
'meinspamschutz.de',
'meltmail.com',
'messagebeamer.de',
'mierdamail.com',
'mintemail.com',
'moburl.com',
'moncourrier.fr.nf',
'monemail.fr.nf',
'monmail.fr.nf',
'msa.minsmail.com',
'mt2009.com',
'mx0.wwwnew.eu',
'mycleaninbox.net',
'mypartyclip.de',
'myphantomemail.com',
'myspaceinc.com',
'myspaceinc.net',
'myspaceinc.org',
'myspacepimpedup.com',
'myspamless.com',
'mytrashmail.com',
'neomailbox.com',
'nepwk.com',
'nervmich.net',
'nervtmich.net',
'netmails.com',
'netmails.net',
'netzidiot.de',
'neverbox.com',
'no-spam.ws',
'nobulk.com',
'noclickemail.com',
'nogmailspam.info',
'nomail.xl.cx',
'nomail2me.com',
'nomorespamemails.com',
'nospam.ze.tc',
'nospam4.us',
'nospamfor.us',
'nospamthanks.info',
'notmailinator.com',
'nowmymail.com',
'nurfuerspam.de',
'nus.edu.sg',
'nwldx.com',
'objectmail.com',
'obobbo.com',
'oneoffemail.com',
'onewaymail.com',
'online.ms',
'oopi.org',
'ordinaryamerican.net',
'otherinbox.com',
'ourklips.com',
'outlawspam.com',
'ovpn.to',
'owlpic.com',
'pancakemail.com',
'pimpedupmyspace.com',
'pjjkp.com',
'politikerclub.de',
'poofy.org',
'pookmail.com',
'privacy.net',
'proxymail.eu',
'prtnx.com',
'punkass.com',
'PutThisInYourSpamDatabase.com',
'qq.com',
'quickinbox.com',
'rcpt.at',
'recode.me',
'recursor.net',
'regbypass.com',
'rejectmail.com',
'rklips.com',
'rmqkr.net',
'rppkn.com',
'rtrtr.com',
's0ny.net',
'safe-mail.net',
'safersignup.de',
'safetymail.info',
'safetypost.de',
'sandelf.de',
'saynotospams.com',
'selfdestructingmail.com',
'SendSpamHere.com',
'sharklasers.com',
'shiftmail.com',
'shitmail.me',
'shortmail.net',
'sibmail.com',
'skeefmail.com',
'slaskpost.se',
'slopsbox.com',
'smellfear.com',
'snakemail.com',
'sneakemail.com',
'sofimail.com',
'sofort-mail.de',
'sogetthis.com',
'soodonims.com',
'spam.la',
'spam.su',
'spamavert.com',
'spambob.com',
'spambob.net',
'spambob.org',
'spambog.com',
'spambog.de',
'spambog.ru',
'spambox.info',
'spambox.irishspringrealty.com',
'spambox.us',
'spamcannon.com',
'spamcannon.net',
'spamcero.com',
'spamcon.org',
'spamcorptastic.com',
'spamcowboy.com',
'spamcowboy.net',
'spamcowboy.org',
'spamday.com',
'spamex.com',
'spamfree24.com',
'spamfree24.de',
'spamfree24.eu',
'spamfree24.info',
'spamfree24.net',
'spamfree24.org',
'spamgourmet.com',
'spamgourmet.net',
'spamgourmet.org',
'SpamHereLots.com',
'SpamHerePlease.com',
'spamhole.com',
'spamify.com',
'spaminator.de',
'spamkill.info',
'spaml.com',
'spaml.de',
'spammotel.com',
'spamobox.com',
'spamoff.de',
'spamslicer.com',
'spamspot.com',
'spamthis.co.uk',
'spamthisplease.com',
'spamtrail.com',
'speed.1s.fr',
'supergreatmail.com',
'supermailer.jp',
'suremail.info',
'teewars.org',
'teleworm.com',
'tempalias.com',
'tempe-mail.com',
'tempemail.biz',
'tempemail.com',
'TempEMail.net',
'tempinbox.co.uk',
'tempinbox.com',
'tempmail.it',
'tempmail2.com',
'tempomail.fr',
'temporarily.de',
'temporarioemail.com.br',
'temporaryemail.net',
'temporaryforwarding.com',
'temporaryinbox.com',
'thanksnospam.info',
'thankyou2010.com',
'thisisnotmyrealemail.com',
'throwawayemailaddress.com',
'tilien.com',
'tmailinator.com',
'tradermail.info',
'trash-amil.com',
'trash-mail.at',
'trash-mail.com',
'trash-mail.de',
'trash2009.com',
'trashemail.de',
'trashmail.at',
'trashmail.com',
'trashmail.de',
'trashmail.me',
'trashmail.net',
'trashmail.org',
'trashmail.ws',
'trashmailer.com',
'trashymail.com',
'trashymail.net',
'trillianpro.com',
'turual.com',
'twinmail.de',
'tyldd.com',
'uggsrock.com',
'upliftnow.com',
'uplipht.com',
'venompen.com',
'veryrealemail.com',
'viditag.com',
'viewcastmedia.com',
'viewcastmedia.net',
'viewcastmedia.org',
'webm4il.info',
'wegwerfadresse.de',
'wegwerfemail.de',
'wegwerfmail.de',
'wegwerfmail.net',
'wegwerfmail.org',
'wetrainbayarea.com',
'wetrainbayarea.org',
'wh4f.org',
'whyspam.me',
'willselfdestruct.com',
'winemaven.info',
'wronghead.com',
'wuzup.net',
'wuzupmail.net',
'www.e4ward.com',
'www.gishpuppy.com',
'www.mailinator.com',
'wwwnew.eu',
'xagloo.com',
'xemaps.com',
'xents.com',
'xmaily.com',
'xoxy.net',
'yep.it',
'yogamaven.com',
'yopmail.com',
'yopmail.fr',
'yopmail.net',
'ypmail.webarnak.fr.eu.org',
'yuurok.com',
'zehnminutenmail.de',
'zippymail.info',
'zoaxe.com',
'zoemail.org',
# manual additions:
'mziqo.com',
'lakqs.com',
'london2.space',
    'mail2.space',
'vpslists.com',
'o3enzyme.com',
'trimsj.com'
]
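# Illustrative helper (not part of the original list): check whether an address
# uses one of the disposable domains above. Name and behaviour are a sketch.
_DISPOSABLE_DOMAINS = {d.lower() for d in EMAIL_DOMAINS}

def is_disposable_email(address):
    """Return True if the address's domain appears in EMAIL_DOMAINS (case-insensitive)."""
    domain = address.rsplit('@', 1)[-1].strip().lower()
    return domain in _DISPOSABLE_DOMAINS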
|
import tables
import nptdms
import numpy as np
import os
from ecogdata.util import mkdir_p, Bunch
import ecogdata.filt.time as ft
import ecogdata.devices.electrode_pinouts as epins
import ecogdata.parallel.sharedmem as shm
from ecogdata.parallel.split_methods import filtfilt
from ..units import convert_dyn_range, convert_scale
# different ranges code for dynamic range of charge
range_lookup = {0: 0.13,
1: 0.25,
2: 0.5,
3: 1.2,
4: 2.4,
5: 4.8,
6: 7.2,
7: 9.6}
def prepare_afe_primary_file(tdms_file, write_path=None):
tdms_path, tdms_name = os.path.split(tdms_file)
tdms_name = os.path.splitext(tdms_name)[0]
if write_path is None:
write_path = tdms_path
if not os.path.exists(write_path):
mkdir_p(write_path)
new_primary_file = os.path.join(write_path, tdms_name + '.h5')
with tables.open_file(new_primary_file, 'w') as hdf:
tdms = nptdms.TdmsFile(tdms_file)
# Translate metadata
t_group = tdms.groups()[0]
group = tdms.object(t_group)
rename_arrays = dict(
SamplingRate='sampRate', nrRows='numRow', nrColumns='numCol',
OverSampling='OSR'
)
h5_info = hdf.create_group('/', 'info')
for (key, val) in group.properties.items():
if isinstance(val, str):
# pytables doesn't support strings as arrays
arr = hdf.create_vlarray(h5_info, key, atom=tables.ObjectAtom())
arr.append(val)
else:
hdf.create_array(h5_info, key, obj=val)
if key in rename_arrays:
hdf.create_array('/', rename_arrays[key], obj=val)
# Parse channels
chans = tdms.group_channels(t_group)
elec_chans = [c for c in chans if 'CH_' in c.channel]
bnc_chans = [c for c in chans if 'BNC_' in c.channel]
# For now, files only have 1 row and all channels relate to that row
num_per_electrode_row = len(elec_chans)
# do extra extra conversions
fs = float(group.properties['SamplingRate']) / (num_per_electrode_row * group.properties['OverSampling'])
hdf.create_array(hdf.root, 'Fs', fs)
# ensure that channels are ordered correctly
elec_mapping = dict([(ch.properties['NI_ArrayColumn'], ch) for ch in elec_chans])
# This value is currently always 1 -- but leave this factor in for future flexibility
num_electrode_rows = len(elec_chans) // num_per_electrode_row
if num_per_electrode_row * num_electrode_rows < len(elec_chans):
print('There were excess TDMS channels: {}'.format(len(elec_chans)))
channels_per_row = 32
sampling_offset = 6
hdf_array = hdf.create_carray('/', 'data', atom=tables.Float64Atom(),
shape=(channels_per_row * num_electrode_rows, len(elec_chans[0].data)))
for elec_group in range(num_electrode_rows):
for c in range(channels_per_row):
chan_a = elec_mapping[2 * c + sampling_offset]
chan_b = elec_mapping[2 * c + 1 + sampling_offset]
hdf_array[elec_group * channels_per_row + c, :] = 0.5 * (chan_a.data + chan_b.data)
sampling_offset += num_per_electrode_row
bnc_array = hdf.create_carray('/', 'bnc', atom=tables.Float64Atom(),
shape=(len(bnc_chans), len(bnc_chans[0].data)))
bnc_mapping = dict([(ch.properties['NI_ArrayColumn'] - len(elec_chans), ch) for ch in bnc_chans])
for n in range(len(bnc_mapping)):
bnc_array[n, :] = bnc_mapping[n].data
return
# ---- Old code ----
# valid units: (micro)Volts, (pico)Coulombs, (nano)Amps
# encoding: 'uv', 'v', 'pc', 'c', 'na', 'a'
def load_afe_aug21(
exp_pth, test, electrode, n_data, range_code, cycle_rate, units='nA',
bandpass=(), save=False, notches=(), snip_transient=True, **extra
):
h5 = tables.open_file(os.path.join(exp_pth, test+'.h5'))
Fs = h5.root.Fs.read()
n_row = h5.root.numRow.read()
n_data_col = h5.root.numCol.read()
n_col = h5.root.numChan.read()
# data rows are 4:67 -- acquiring AFE chans 31:0 and 63:32 on two columns
data_rows = slice(4, 67, 2)
full_data = h5.root.data[:].reshape(n_col, n_row, -1)
#data_chans = shm.shared_ndarray( (32*n_data_col, full_data.shape[-1]) )
data_chans = full_data[:n_data_col, data_rows].reshape(-1, full_data.shape[-1])
trig_chans = full_data[-10:, -1]
del full_data
trig = np.any( trig_chans > 1, axis = 0 ).astype('i')
pos_edge = np.where( np.diff(trig) > 0 )[0] + 1
# convert dynamic range to charge or current
if 'v' not in units.lower():
pico_coulombs = range_lookup[range_code]
convert_dyn_range(
data_chans, (-1.4, 1.4), pico_coulombs, out=data_chans
)
if 'a' in units.lower():
i_period = 563e-6
data_chans /= i_period
convert_scale(data_chans, 'pa', units)
elif units.lower() != 'v':
convert_scale(data_chans, 'v', units)
    # only use this one electrode (for now)
chan_map, disconnected = epins.get_electrode_map('psv_61_afe')[:2]
connected = np.setdiff1d(np.arange(n_data), disconnected)
disconnected = disconnected[ disconnected < n_data ]
data = shm.shared_ndarray( (len(connected), data_chans.shape[-1]) )
data[:, :] = data_chans[connected]
ground_chans = data_chans[disconnected]
del data_chans
# do a little extra to kill DC
data -= data.mean(axis=1)[:,None]
if bandpass:
(b, a) = ft.butter_bp(lo=bandpass[0], hi=bandpass[1], Fs=Fs)
filtfilt(data, b, a)
if notches:
ft.notch_all(
data, Fs, lines=notches, inplace=True, filtfilt=True
)
if snip_transient:
snip_len = min(10000, pos_edge[0]) if len(pos_edge) else 10000
data = data[..., snip_len:].copy()
if ground_chans is not None:
ground_chans = ground_chans[..., snip_len:].copy()
if len(pos_edge):
trig = trig[..., snip_len:]
pos_edge -= snip_len
    dset = Bunch()
dset.data = data
dset.ground_chans = ground_chans
dset.chan_map = chan_map
dset.Fs = Fs
dset.pos_edge = pos_edge
dset.bandpass = bandpass
dset.trig = trig
dset.transient_snipped = snip_transient
dset.units = units
dset.notches = notches
return dset
def load_afe(
exp_pth, test, electrode, n_data, range_code, cycle_rate,
units='nA', bandpass=(), save=True, notches=(),
snip_transient=True, **extra
):
h5 = tables.open_file(os.path.join(exp_pth, test+'.h5'))
data = h5.root.data[:]
Fs = h5.root.Fs[0,0]
if data.shape[1] > n_data:
trig_chans = data[:,n_data:]
trig = np.any( trig_chans > 1, axis = 1 ).astype('i')
pos_edge = np.where( np.diff(trig) > 0 )[0] + 1
else:
trig = None
pos_edge = ()
data_chans = data[:,:n_data].T.copy(order='C')
# convert dynamic range to charge or current
if 'v' not in units.lower():
pico_coulombs = range_lookup[range_code]
convert_dyn_range(
data_chans, (-1.4, 1.4), pico_coulombs, out=data_chans
)
if 'a' in units.lower():
# To convert to amps, need to divide coulombs by the
# integration period. This is found approximately by
# finding out how many cycles in a scan period were spent
# integrating. A scan period is now hard coded to be 500
# cycles. The cycling rate is given as an argument.
# The integration period for channel i should be:
# 500 - 2*(n_data - i)
# That is, the 1st channel is clocked out over two cycles
# immediately after the integration period. Meanwhile other
# channels still acquire until they are clocked out.
n_cycles = 500
#i_cycles = n_cycles - 2*(n_data - np.arange(n_data))
i_cycles = n_cycles - 2*n_data
i_period = i_cycles / cycle_rate
data_chans /= i_period #[:,None]
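            # Worked example (illustrative numbers): with n_data = 61 channels,
            # i_cycles = 500 - 2*61 = 378 cycles; for a cycle_rate of ~670 kHz
            # this gives i_period ~ 378 / 6.7e5 ~ 5.6e-4 s, comparable to the
            # hard-coded 563 us used in load_afe_aug21 above.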
convert_scale(data_chans, 'pa', units)
elif units.lower() != 'v':
        convert_scale(data_chans, 'v', units)
# only use this one electrode (for now)
chan_map, disconnected = epins.get_electrode_map('psv_61_afe')[:2]
connected = np.setdiff1d(np.arange(n_data), disconnected)
disconnected = disconnected[ disconnected < n_data ]
chan_map = chan_map.subset(list(range(len(connected))))
data = shm.shared_ndarray( (len(connected), data_chans.shape[-1]) )
data[:,:] = data_chans[connected]
ground_chans = data_chans[disconnected].copy()
del data_chans
if bandpass:
# do a little extra to kill DC
data -= data.mean(axis=1)[:,None]
(b, a) = ft.butter_bp(lo=bandpass[0], hi=bandpass[1], Fs=Fs)
filtfilt(data, b, a)
if notches:
for freq in notches:
(b, a) = ft.notch(freq, Fs=Fs, ftype='cheby2')
filtfilt(data, b, a)
## detrend_window = int(round(0.750*Fs))
## ft.bdetrend(data, bsize=detrend_window, type='linear', axis=-1)
else:
data -= data.mean(axis=1)[:,None]
if snip_transient:
snip_len = min(10000, pos_edge[0]) if len(pos_edge) else 10000
data = data[..., snip_len:].copy()
ground_chans = ground_chans[..., snip_len:].copy()
if len(pos_edge):
trig = trig[..., snip_len:]
pos_edge -= snip_len
dset = Bunch()
dset.data = data
dset.ground_chans = ground_chans
dset.chan_map = chan_map
dset.Fs = Fs
dset.pos_edge = pos_edge
dset.bandpass = bandpass
dset.trig = trig
dset.transient_snipped = snip_transient
dset.units = units
dset.notches = notches
return dset
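# Illustrative call (paths, test name and acquisition parameters are placeholders):
#
#   dset = load_afe('/data/afe_sessions', 'test_001', 'psv_61_afe', n_data=61,
#                   range_code=3, cycle_rate=6.7e5, units='nA',
#                   bandpass=(2, 100), notches=(60,))
#   dset.data.shape, dset.Fs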
|
import pytest
from ospweb.users.models import User
from ospweb.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
return UserFactory()
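# Example usage (illustrative test, not part of these fixtures):
#
#   @pytest.mark.django_db
#   def test_user_factory(user):
#       assert user.pk is not None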
|
import json
import os, subprocess, time, signal
import gym
from gym import error, spaces
from gym import utils
from gym.utils import seeding
from pyrosetta.teaching import *
from pyrosetta import init, pose_from_pdb, pose_from_sequence
from pyrosetta.toolbox import cleanATOM
from pyrosetta.rosetta.core.id import AtomID
from enum import Enum
from pyrosetta.rosetta.core.scoring import CA_rmsd
import numpy as np
ANGLE_MOVE = [-90, -45, -10, -1, 1, 10, 45, 90]
import logging
logger = logging.getLogger(__name__)
RESIDUE_LETTERS = [
'R', 'H', 'K',
'D', 'E',
'S', 'T', 'N', 'Q',
'C', 'G', 'P',
'A', 'V', 'I', 'L', 'M', 'F', 'Y', 'W', 'empty'
]
MAX_LENGTH = 64
class ProteinFoldEnv(gym.Env, utils.EzPickle):
def __init__(self, max_move_amount=1000):
super(ProteinFoldEnv, self).__init__()
self.reward = 0.0
self.prev_ca_rmsd = None
self.protein_pose = None
self._configure_environment()
self.is_finished = False
self.move_counter = 0
self.max_move_amount = max_move_amount
self.name = None
# self.observation_space = spaces.Box(low=-180, high=180, shape=(MAX_LENGTH, 3))
self.observation_space = spaces.Dict({
"energy": spaces.Box(low=np.array([-np.inf]), high=np.array([np.inf]), dtype=np.float32),
"backbone": spaces.Box(low=-180, high=180, shape=(MAX_LENGTH, 3)),
"amino_acids": spaces.Box(low=0, high=1, shape=(MAX_LENGTH, len(RESIDUE_LETTERS))),
"backbone_current": spaces.Box(low=-180, high=180, shape=(MAX_LENGTH, 3)),
})
self.action_space = spaces.MultiDiscrete([64, 3, 8])
# self.action_space = spaces.Dict({
# "angles": spaces.Box(low=-1, high=1, shape=(3,)),
# "torsion": spaces.Discrete(3),
# "residue": spaces.Discrete(MAX_LENGTH)
# })
self.encoded_residue_sequence = None
self.scorefxn = get_fa_scorefxn()
self.best_distance = np.inf
self.validation = False
self.best_conformations_dict = {}
self.start_distance = 1000
self.residue_mask = None
self.shuffle = False
self.achieved_goal_counter = 0
def save_best_matches(self):
with open('data.json', 'w') as fp:
json.dump(self.best_conformations_dict, fp)
def _configure_environment(self):
"""
Provides a chance for subclasses to override this method and supply
a different server configuration. By default, we initialize one
offense agent against no defenders.
"""
self._start_rosetta()
def _start_rosetta(self):
init()
def _get_ca_metric(self, coordinates, target):
return CA_rmsd(coordinates, target)
def _move_on_torsion(self, residue_number, move_pose, get_angle, set_angle):
new_torsion_position = get_angle(residue_number) + move_pose
set_angle(residue_number, new_torsion_position)
def _move(self, action):
if action is not None:
self.move_counter += 1
residue_number = action[0] + 1
torsion_number = action[1]
move_pose_index = action[2]
move_pose = ANGLE_MOVE[move_pose_index]
if residue_number < self.protein_pose.total_residue():
if torsion_number == 0:
self._move_on_torsion(residue_number, move_pose, self.protein_pose.phi, self.protein_pose.set_phi)
if torsion_number == 1:
self._move_on_torsion(residue_number, move_pose, self.protein_pose.omega, self.protein_pose.set_omega)
if torsion_number == 2:
self._move_on_torsion(residue_number, move_pose, self.protein_pose.psi, self.protein_pose.set_psi)
return 0.0
else:
return -0.5
return 0.0
def _encode_residues(self, sequence):
encoded_residues = []
for res in sequence:
temp = np.zeros(len(RESIDUE_LETTERS))
temp[RESIDUE_LETTERS.index(res)] = 1
encoded_residues.append(temp)
for zero in range(MAX_LENGTH - len(sequence)):
temp = np.zeros(len(RESIDUE_LETTERS))
temp[len(RESIDUE_LETTERS) - 1] = 1
encoded_residues.append(temp)
return encoded_residues
def create_residue_mask(self, sequence):
residues_mask = np.ones(len(sequence) - 2)
zero = np.zeros(MAX_LENGTH - len(sequence) + 1)
return np.concatenate(([0], residues_mask, zero),)
def write_best_conformation(self, distance):
if self.name not in self.best_conformations_dict.keys():
self.best_conformations_dict[self.name] = [distance]
else:
self.best_conformations_dict[self.name].append(distance)
def step(self, action):
penalty = self._move(action)
ob = self._get_state()
done = False
reward = penalty
distance = self._get_ca_metric(self.protein_pose, self.target_protein_pose)
if self.prev_ca_rmsd:
reward += self.prev_ca_rmsd - distance
if self.best_distance > distance:
# if distance < 3.0:
# reward += 1
self.best_distance = distance
print("best distance: {} {}".format(self.best_distance, self.name))
self.prev_ca_rmsd = distance
if distance < self.start_distance * 0.1:
self.achieved_goal_counter += 1
reward += 5
if self.achieved_goal_counter > 25:
done = True
if self.move_counter >= 256:
reward -= 10
done = True
return [ob, reward, done, {}]
def reset(self):
if self.start_distance > self.best_distance:
self.write_best_conformation(self.best_distance)
if self.validation:
data_dir = 'protein_data/short_valid'
else:
data_dir = 'protein_data/short'
self.name = np.random.choice(os.listdir(data_dir))
print('chosen protein: ' + self.name)
self.target_protein_pose = pose_from_pdb(os.path.join(data_dir, self.name))
if not self.shuffle:
self.protein_pose = pose_from_sequence(self.target_protein_pose.sequence())
# else :
self.move_counter = 0
self.reward = 0.0
self.prev_ca_rmsd = None
self.achieved_goal_counter = 0
self.best_distance = self._get_ca_metric(self.protein_pose, self.target_protein_pose)
self.start_distance = self._get_ca_metric(self.protein_pose, self.target_protein_pose)
self.encoded_residue_sequence = self._encode_residues(self.target_protein_pose.sequence())
self.residue_mask = self.create_residue_mask(self.target_protein_pose.sequence())
self.save_best_matches()
return self._get_state()
def difference_energy(self):
target_score = self.scorefxn(self.target_protein_pose)
return (self.scorefxn(self.protein_pose) - target_score ) / target_score
def _get_state(self):
psis = np.divide([self.target_protein_pose.psi(i + 1) - self.protein_pose.psi(i + 1) for i in range(self.protein_pose.total_residue())], 180.0)
omegas = np.divide([self.target_protein_pose.omega(i + 1) - self.protein_pose.omega(i + 1) for i in range(self.protein_pose.total_residue())], 180.0)
phis = np.divide([self.target_protein_pose.phi(i + 1) - self.protein_pose.phi(i + 1) for i in range(self.protein_pose.total_residue())], 180)
psis_current = np.divide([self.protein_pose.psi(i + 1) for i in
range(self.protein_pose.total_residue())], 180.0)
omegas_current = np.divide([self.protein_pose.omega(i + 1) for i in
range(self.protein_pose.total_residue())], 180.0)
phis_current = np.divide([self.protein_pose.phi(i + 1) for i in
range(self.protein_pose.total_residue())], 180)
rest_zeros = MAX_LENGTH - len(self.target_protein_pose.sequence())
psis = np.concatenate((psis, np.zeros(rest_zeros)))
omegas = np.concatenate((omegas, np.zeros(rest_zeros)))
phis = np.concatenate((phis, np.zeros(rest_zeros)))
psis_current = np.concatenate((psis_current, np.zeros(rest_zeros)))
omegas_current = np.concatenate((omegas_current, np.zeros(rest_zeros)))
phis_current = np.concatenate((phis_current, np.zeros(rest_zeros)))
backbone_geometry = [list(a) for a in zip(psis, omegas, phis)]
backbone_geometry_current = [list(a) for a in zip(psis_current, omegas_current, phis_current)]
# return backbone_geometry
return {
"backbone": backbone_geometry,
"amino_acids": self.encoded_residue_sequence,
"energy": [self.difference_energy()],
"backbone_current": backbone_geometry_current,
}
def set_validation_mode(self, is_valid):
self.validation = is_valid
def seed(self, seed=None):
np.random.seed(seed)
NUM_ACTIONS = 3
END_GAME, ROTATE, TURN = range(NUM_ACTIONS)
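# Hedged usage sketch for the environment above. It assumes PyRosetta is installed and
# licensed and that a 'protein_data/short' directory of PDB files exists, as reset()
# expects; kept as comments so importing this module stays side-effect free.
#   env = ProteinFoldEnv(max_move_amount=1000)
#   obs = env.reset()
#   action = env.action_space.sample()   # [residue index, torsion id, angle-move id]
#   obs, reward, done, info = env.step(action)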
|
# -*- coding: utf-8 -*-
import tensorflow as tf
# Dimension of the embedding matrices
EMBED_DIM = 32
USER_ID_COUNT = 6041
GENDER_COUNT = 2
AGE_COUNT = 7
JOB_COUNT = 21
MOVIE_ID_COUNT = 3953
MOVIE_GENRES_COUNT = 18
MOVIE_TITLE_WORDS_COUNT = 5217
BATCH_SIZE = 256
LSTM_UNIT_NUM = 128
# Core code of the user feature network
def user_feature_network(user_id, user_gender, user_age, user_job, dropout_keep_prob, train):
with tf.variable_scope('user_id_embed'):
user_id_embed_matrix = tf.get_variable('id_embed_matrix', [USER_ID_COUNT, EMBED_DIM],
initializer=tf.truncated_normal_initializer(stddev=0.1))
user_embed_layer = tf.nn.embedding_lookup(user_id_embed_matrix, user_id, name='id_lookup')
with tf.variable_scope('user_gender_embed'):
gender_embed_matrix = tf.get_variable('gender_embed_matrix', [GENDER_COUNT, EMBED_DIM // 2],
initializer=tf.truncated_normal_initializer(stddev=0.1))
gender_embed_layer = tf.nn.embedding_lookup(gender_embed_matrix, user_gender, name='gender_lookup')
with tf.variable_scope('user_age_embed'):
age_embed_matrix = tf.get_variable('age_embed_matrix', [AGE_COUNT, EMBED_DIM // 2],
initializer=tf.truncated_normal_initializer(stddev=0.1))
age_embed_layer = tf.nn.embedding_lookup(age_embed_matrix, user_age, name='age_lookup')
with tf.variable_scope('user_job_embed'):
job_embed_matrix = tf.get_variable('job_embed_matrix', [JOB_COUNT, EMBED_DIM // 2],
initializer=tf.truncated_normal_initializer(stddev=0.1))
job_embed_layer = tf.nn.embedding_lookup(job_embed_matrix, user_job, name='job_lookup')
# user_conv1 = tf.layers.conv1d(user_embed_layer, EMBED_DIM * 2, kernel_size=3, strides=1, padding='same', data_format='channels_first')
# user_bn1 = tf.layers.batch_normalization(user_conv1, training=train)
# user_relu1 = tf.nn.relu(user_bn1)
# user_conv2 = tf.layers.conv1d(user_relu1, EMBED_DIM, kernel_size=3, strides=1, padding='same', data_format='channels_first')
# user_bn2 = tf.layers.batch_normalization(user_conv2, training=train)
# user_relu2 = tf.nn.relu(user_bn2)
user_id_fc_layer = tf.layers.dense(user_embed_layer, EMBED_DIM,
activation=tf.nn.relu,
kernel_regularizer=tf.nn.l2_loss,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
name='user_id_fc')
user_id_fc_dropout_layer = tf.layers.dropout(user_id_fc_layer, dropout_keep_prob, name='user_id_dropout')
# gender_conv1 = tf.layers.conv1d(gender_embed_layer, EMBED_DIM * 2, kernel_size=3, strides=1, padding='same',
# data_format='channels_first')
# gender_bn1 = tf.layers.batch_normalization(gender_conv1, training=train)
# gender_relu1 = tf.nn.relu(gender_bn1)
# gender_conv2 = tf.layers.conv1d(gender_relu1, EMBED_DIM, kernel_size=3, strides=1, padding='same',
# data_format='channels_first')
# gender_bn2 = tf.layers.batch_normalization(gender_conv2, training=train)
# gender_relu2 = tf.nn.relu(gender_bn2)
gender_fc_layer = tf.layers.dense(gender_embed_layer, EMBED_DIM,
activation=tf.nn.relu,
kernel_regularizer=tf.nn.l2_loss,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
name='user_gender_fc')
gender_fc_dropout_layer = tf.layers.dropout(gender_fc_layer, dropout_keep_prob, name='user_gender_dropout')
# age_conv1 = tf.layers.conv1d(age_embed_layer, EMBED_DIM * 2, kernel_size=3, strides=1, padding='same',
# data_format='channels_first')
# age_bn1 = tf.layers.batch_normalization(age_conv1, training=train)
# age_relu1 = tf.nn.relu(age_bn1)
# age_conv2 = tf.layers.conv1d(age_relu1, EMBED_DIM, kernel_size=3, strides=1, padding='same',
# data_format='channels_first')
# age_bn2 = tf.layers.batch_normalization(age_conv2, training=train)
# age_relu2 = tf.nn.relu(age_bn2)
age_fc_layer = tf.layers.dense(age_embed_layer, EMBED_DIM,
activation=tf.nn.relu,
kernel_regularizer=tf.nn.l2_loss,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
name='user_age_fc')
age_fc_dropout_layer = tf.layers.dropout(age_fc_layer, dropout_keep_prob, name='user_age_dropout')
# job_conv1 = tf.layers.conv1d(job_embed_layer, EMBED_DIM * 2, kernel_size=3, strides=1, padding='same',
# data_format='channels_first')
# job_bn1 = tf.layers.batch_normalization(job_conv1, training=train)
# job_relu1 = tf.nn.relu(job_bn1)
# job_conv2 = tf.layers.conv1d(job_relu1, EMBED_DIM, kernel_size=3, strides=1, padding='same',
# data_format='channels_first')
# job_bn2 = tf.layers.batch_normalization(job_conv2, training=train)
# job_relu2 = tf.nn.relu(job_bn2)
job_fc_layer = tf.layers.dense(job_embed_layer, EMBED_DIM,
activation=tf.nn.relu,
kernel_regularizer=tf.nn.l2_loss,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
name='user_job_fc')
job_fc_dropout_layer = tf.layers.dropout(job_fc_layer, dropout_keep_prob, name='user_job_dropout')
with tf.name_scope('user_fc'):
user_combine_feature = tf.concat(
[user_id_fc_dropout_layer, gender_fc_dropout_layer, age_fc_dropout_layer, job_fc_dropout_layer], 2)
# user_combine_feature = tf.transpose(user_combine_feature, perm=[0, 2, 1])
user_combine_conv1 = tf.layers.conv1d(user_combine_feature, EMBED_DIM * 4, kernel_size=3, strides=1, padding='same',
data_format='channels_first')
user_combine_bn1 = tf.layers.batch_normalization(user_combine_conv1, training=train)
user_combine_relu1 = tf.nn.relu(user_combine_bn1)
user_combine_conv2 = tf.layers.conv1d(user_combine_relu1, 1, kernel_size=3, strides=1, padding='same',
data_format='channels_first')
user_combine_bn2 = tf.layers.batch_normalization(user_combine_conv2, training=train)
user_combine_relu2 = tf.nn.relu(user_combine_bn2)
user_combine_fc_layer = tf.layers.dense(user_combine_relu2, 200,
activation=tf.nn.relu,
kernel_regularizer=tf.nn.l2_loss,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
name='user_fc')
user_combine_layer_flat = tf.reshape(user_combine_fc_layer, [-1, 200])
return user_combine_layer_flat
# Core code of the movie feature network
def movie_feature_embed_network(movie_id, movie_genres):
with tf.variable_scope('movie_id_embed'):
movie_id_embed_matrix = tf.get_variable('id_embed_matrix', [MOVIE_ID_COUNT, EMBED_DIM],
initializer=tf.truncated_normal_initializer(stddev=0.1))
movie_id_embed_layer = tf.nn.embedding_lookup(movie_id_embed_matrix, movie_id, name='id_lookup')
with tf.name_scope('genres_embed'):
movie_genres_embed_matrix = tf.Variable(tf.random_uniform([MOVIE_GENRES_COUNT, EMBED_DIM], -1, 1),
name='genres_embed_matrix')
movie_genres_embed_layer = tf.matmul(movie_genres, movie_genres_embed_matrix)
movie_genres_embed_layer = tf.expand_dims(movie_genres_embed_layer, 1)
return movie_id_embed_layer, movie_genres_embed_layer
def movie_title_lstm_layer(movie_titles, movie_title_length, dropout_keep_prob):
with tf.variable_scope('movie_title_embed'):
movie_title_embed_matrix = tf.get_variable('title_embed_matrix', [MOVIE_TITLE_WORDS_COUNT, EMBED_DIM],
initializer=tf.truncated_normal_initializer(stddev=0.1))
movie_title_embed_layer = tf.nn.embedding_lookup(movie_title_embed_matrix, movie_titles,
name='title_lookup')
lstm_cell = tf.contrib.rnn.BasicLSTMCell(LSTM_UNIT_NUM, forget_bias=0.0)
with tf.name_scope("movie_title_dropout"):
lstm_cell_dropout = tf.contrib.rnn.DropoutWrapper(lstm_cell, output_keep_prob=dropout_keep_prob)
# Determine the batch size dynamically from the input
batch_size_ = tf.shape(movie_titles)[0]
init_state = lstm_cell_dropout.zero_state(batch_size_, dtype=tf.float32)
# The number of steps follows the title length; dynamic_rnn zeroes the outputs of padded positions
lstm_output, final_state = tf.nn.dynamic_rnn(lstm_cell_dropout,
movie_title_embed_layer,
sequence_length=movie_title_length,
initial_state=init_state,
scope='movie_title_rnn')
# Average the LSTM outputs over time; the divisor is the true (unpadded) title length
with tf.name_scope('movie_title_avg_pool'):
lstm_output = tf.reduce_sum(lstm_output, 1) / tf.cast(movie_title_length[:, None], tf.float32)
return lstm_output
def movie_feature_network(movie_id, movie_genres, movie_titles, movie_title_length, dropout_keep_prob, train):
movie_id_embed_layer, movie_genres_embed_layer = movie_feature_embed_network(movie_id, movie_genres)
# movie_id_conv1 = tf.layers.conv1d(movie_id_embed_layer, EMBED_DIM * 2, kernel_size=3, strides=1, padding='same',
# data_format='channels_first')
# movie_id_bn1 = tf.layers.batch_normalization(movie_id_conv1, training=train)
# movie_id_relu1 = tf.nn.relu(movie_id_bn1)
# movie_id_conv2 = tf.layers.conv1d(movie_id_relu1, EMBED_DIM, kernel_size=3, strides=1, padding='same',
# data_format='channels_first')
# movie_id_bn2 = tf.layers.batch_normalization(movie_id_conv2, training=train)
# movie_id_relu2 = tf.nn.relu(movie_id_bn2)
movie_id_fc_layer = tf.layers.dense(movie_id_embed_layer, EMBED_DIM,
activation=tf.nn.relu,
kernel_regularizer=tf.nn.l2_loss,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
name='movie_id_fc')
movie_id_dropout_layer = tf.layers.dropout(movie_id_fc_layer, dropout_keep_prob, name='movie_id_dropout')
# movie_genres_conv1 = tf.layers.conv1d(movie_genres_embed_layer, EMBED_DIM * 2, kernel_size=3, strides=1, padding='same',
# data_format='channels_first')
# movie_genres_bn1 = tf.layers.batch_normalization(movie_genres_conv1, training=train)
# movie_genres_relu1 = tf.nn.relu(movie_genres_bn1)
# movie_genres_conv2 = tf.layers.conv1d(movie_genres_relu1, EMBED_DIM, kernel_size=3, strides=1, padding='same',
# data_format='channels_first')
# movie_genres_bn2 = tf.layers.batch_normalization(movie_genres_conv2, training=train)
# movie_genres_relu2 = tf.nn.relu(movie_genres_bn2)
movie_genres_fc_layer = tf.layers.dense(movie_genres_embed_layer, EMBED_DIM,
activation=tf.nn.relu,
kernel_regularizer=tf.nn.l2_loss,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
name='movie_genres_fc')
movie_genres_dropout_layer = tf.layers.dropout(movie_genres_fc_layer, dropout_keep_prob,
name='movie_genres_dropout')
# Get the feature vector of the movie title
movie_title_output_layer = movie_title_lstm_layer(movie_titles, movie_title_length, dropout_keep_prob)
movie_title_output_layer = tf.expand_dims(movie_title_output_layer, 1)
with tf.name_scope('movie_fc_layer'):
# movie_id_dropout_layer = tf.reduce_sum(movie_id_dropout_layer, 1)
movie_combine_feature = tf.concat(
[movie_id_dropout_layer, movie_genres_dropout_layer, movie_title_output_layer], 2)
movie_combine_conv1 = tf.layers.conv1d(movie_combine_feature, EMBED_DIM * 4, kernel_size=3, strides=1,
padding='same',
data_format='channels_first')
movie_combine_bn1 = tf.layers.batch_normalization(movie_combine_conv1, training=train)
movie_combine_relu1 = tf.nn.relu(movie_combine_bn1)
movie_combine_conv2 = tf.layers.conv1d(movie_combine_relu1, 1, kernel_size=3, strides=1, padding='same',
data_format='channels_first')
movie_combine_bn2 = tf.layers.batch_normalization(movie_combine_conv2, training=train)
movie_combine_relu2 = tf.nn.relu(movie_combine_bn2)
movie_combine_layer = tf.layers.dense(movie_combine_relu2, 200,
activation=tf.nn.relu,
kernel_regularizer=tf.nn.l2_loss,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
name='movie_fc_layer')
movie_combine_layer = tf.reshape(movie_combine_layer, [-1, 200])
return movie_combine_layer
# Core code of the prediction (loss) layer
def full_network(uid, user_gender, user_age, user_job, movie_id, movie_genres, movie_titles, movie_title_length,
dropout_keep_prob, train):
# Get the user features
user_combine_layer_flat = user_feature_network(uid, user_gender, user_age, user_job, dropout_keep_prob, train)
# Get the movie features
movie_combine_layer = movie_feature_network(movie_id, movie_genres, movie_titles, movie_title_length,
dropout_keep_prob, train)
# Take the user and movie features as input and output a single value through a fully connected layer
with tf.name_scope('user_movie_fc'):
input_layer = tf.concat([user_combine_layer_flat, movie_combine_layer], 1)  # (?, 400)
predicted = tf.layers.dense(input_layer, 1,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
kernel_regularizer=tf.nn.l2_loss,
name='user_movie_fc')
return user_combine_layer_flat, movie_combine_layer, predicted
def get_inputX(uid, user_gender, user_age, user_job, movie_id, movie_genres, movie_titles, movie_title_length,
dropout_keep_prob, train):
# Get the user features
user_combine_layer_flat = user_feature_network(uid, user_gender, user_age, user_job, dropout_keep_prob, train)
# Get the movie features
movie_combine_layer = movie_feature_network(movie_id, movie_genres, movie_titles, movie_title_length,
dropout_keep_prob, train)
# Concatenate the user features and movie features to form the model input
with tf.name_scope('user_movie_fc'):
input_X = tf.concat([user_combine_layer_flat, movie_combine_layer], 1)
return input_X
def trainable_variable_summaries():
for variable in tf.trainable_variables():
name = variable.name.split(':')[0]
tf.summary.histogram(name, variable)
mean = tf.reduce_mean(variable)
tf.summary.scalar('mean/' + name, mean)
stddev = tf.sqrt(tf.reduce_mean(tf.square(variable - mean)))
tf.summary.scalar('stddev/' + name, stddev)
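# Hedged sketch of wiring the networks above into a TF1 graph. The placeholder shapes
# are inferred from how the tensors are used here (ids of shape [batch, 1], multi-hot
# genres, padded title ids); the `ratings` target and MSE loss are illustrative only.
#   uid = tf.placeholder(tf.int32, [None, 1])
#   user_gender = tf.placeholder(tf.int32, [None, 1])
#   user_age = tf.placeholder(tf.int32, [None, 1])
#   user_job = tf.placeholder(tf.int32, [None, 1])
#   movie_id = tf.placeholder(tf.int32, [None, 1])
#   movie_genres = tf.placeholder(tf.float32, [None, MOVIE_GENRES_COUNT])
#   movie_titles = tf.placeholder(tf.int32, [None, 15])
#   movie_title_length = tf.placeholder(tf.float32, [None])
#   dropout_keep_prob = tf.placeholder(tf.float32)
#   train = tf.placeholder(tf.bool)
#   _, _, predicted = full_network(uid, user_gender, user_age, user_job, movie_id,
#                                  movie_genres, movie_titles, movie_title_length,
#                                  dropout_keep_prob, train)
#   ratings = tf.placeholder(tf.float32, [None, 1])
#   loss = tf.losses.mean_squared_error(ratings, predicted)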
|
class Infinite_memory(list):
def __init__(self,*args):
list.__init__(self,*args)
def __getitem__(self, index):
if index>=len(self):
for _ in range((index-len(self))+1):
self.append(0)
return super().__getitem__(index)
def __setitem__(self, key, value):
if key>=len(self):
for _ in range((key-len(self))+1):
self.append(0)
return super().__setitem__(key, value)
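# Quick demonstration of the auto-growing list above: indexing past the current end
# pads the list with zeros instead of raising IndexError.
_demo = Infinite_memory([1, 2, 3])
_demo[10] = 99
assert _demo[5] == 0 and _demo[10] == 99 and len(_demo) == 11
del _demo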
def run_intcode(memory,input_stream=None,output_l=None):
global relative_base
relative_base=0
def suma(args):
memory[args[2]]=args[0]+args[1]
def mult(args):
memory[args[2]]=args[0]*args[1]
def inpt(args):
if input_stream:
memory[args[0]]=input_stream.pop(0)
else:
memory[args[0]]=int(input(">"))
def output(args):
if output_l==None:
print(args[0])
#print(str(chr(args[0])),end='')
else:
output_l.append(args[0])
def j_if_t(args):
if args[0]!=0:
return args[1]
def j_if_f(args):
if args[0]==0:
return args[1]
def less_than(args):
memory[args[2]]=int(args[0]<args[1])
def equals(args):
memory[args[2]]=int(args[0]==args[1])
def inc_rel_base(args):
global relative_base
relative_base+=args[0]
func_dict={1:suma,2:mult,3:inpt,4:output,5:j_if_t,6:j_if_f,7:less_than,8:equals,9:inc_rel_base}
writes_to_mem=[1,2,3,7,8]#instructions that write to memory
inc_dict={1:4,2:4,3:2,4:2,5:3,6:3,7:4,8:4,9:2}
i=0
while True:
opcode=str(memory[i])
param_bits=list(opcode[:-2])
opcode=int(opcode[-2:])
if opcode==99:
break
params=[]
for p in range(1,inc_dict[opcode]):
bit=0
if len(param_bits)>0:
bit=int(param_bits.pop())
if bit==2:
if (p==inc_dict[opcode]-1 and opcode in writes_to_mem): #write relative
params.append(relative_base+memory[i+p])
else: #read relative
params.append(memory[relative_base+memory[i+p]])
elif (p==inc_dict[opcode]-1 and opcode in writes_to_mem) or (bit==1): #read immediate or write positional
params.append(memory[i+p])
else: #default read positional
params.append(memory[memory[i+p]])
instruction=func_dict[opcode](params)
if instruction!=None:
i=instruction
else:
i+=inc_dict[opcode]
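# Small self-contained check of the interpreter above: opcode 104 outputs an immediate
# value and 99 halts, so this program should emit exactly one number into the list.
_out = []
run_intcode(Infinite_memory([104, 1125899906842624, 99]), output_l=_out)
assert _out == [1125899906842624]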
with open("input.txt","r") as f:
code=Infinite_memory(map(int,f.readline().split(",")))
code[0]=2
out=[]
mapa=[[]]
inp=list(map(ord,"A,B,A,C,B,C,A,B,A,C\nR,10,L,8,R,10,R,4\nL,6,L,6,R,10\nL,6,R,12,R,12,R,10\nn\n"))
print(inp)
run_intcode(code,input_stream=inp)
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import sys
import argparse
import numpy as np
import pandas as pd
from sklearn import linear_model, preprocessing, cluster
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
from scipy.stats import pearsonr
import datasets
import data_utils as data
import defenses
parser = argparse.ArgumentParser()
parser.add_argument('dataset_name', help='One of: imdb, enron, dogfish, mnist_17')
args = parser.parse_args()
dataset_name = args.dataset_name
assert dataset_name in ['imdb', 'enron', 'dogfish', 'mnist_17']
print('=== Dataset: %s ===' % dataset_name)
epsilons = datasets.DATASET_EPSILONS[dataset_name]
X_train, Y_train, X_test, Y_test = datasets.load_dataset(dataset_name)
random_seed = 1
np.random.seed(random_seed)
class_map, centroids, centroid_vec, sphere_radii, slab_radii = data.get_data_params(
X_train, Y_train, percentile=70)
sphere_dists_flip = defenses.compute_dists_under_Q(
X_train, -Y_train,
Q=None,
subtract_from_l2=False,
centroids=centroids,
class_map=class_map,
norm=2)
slab_dists_flip = defenses.compute_dists_under_Q(
X_train, -Y_train,
Q=centroid_vec,
subtract_from_l2=False,
centroids=centroids,
class_map=class_map,
norm=2)
feasible_flipped_mask = np.zeros(X_train.shape[0], dtype=bool)
for y in set(Y_train):
class_idx_flip = class_map[-y]
sphere_radius_flip = sphere_radii[class_idx_flip]
slab_radius_flip = slab_radii[class_idx_flip]
feasible_flipped_mask[Y_train == y] = (
(sphere_dists_flip[Y_train == y] <= sphere_radius_flip) &
(slab_dists_flip[Y_train == y] <= slab_radius_flip))
print('Num positive points: %s' % np.sum(Y_train == 1))
print('Num negative points: %s' % np.sum(Y_train == -1))
print('Fraction of feasible positive points that can be flipped: %s' % np.mean(feasible_flipped_mask[Y_train == 1]))
print('Fraction of feasible negative points that can be flipped: %s' % np.mean(feasible_flipped_mask[Y_train == -1]))
for epsilon in epsilons:
if epsilon == 0: continue
num_copies = int(np.round(epsilon * X_train.shape[0]))
idx_to_copy = np.random.choice(
np.where(feasible_flipped_mask)[0],
size=num_copies,
replace=True)
if sparse.issparse(X_train):
X_modified = sparse.vstack((X_train, X_train[idx_to_copy, :]))
else:
X_modified = np.append(X_train, X_train[idx_to_copy, :], axis=0)
Y_modified = np.append(Y_train, -Y_train[idx_to_copy])
# Sanity check
sphere_dists = defenses.compute_dists_under_Q(
X_modified, Y_modified,
Q=None,
subtract_from_l2=False,
centroids=centroids,
class_map=class_map,
norm=2)
slab_dists = defenses.compute_dists_under_Q(
X_modified, Y_modified,
Q=centroid_vec,
subtract_from_l2=False,
centroids=centroids,
class_map=class_map,
norm=2)
feasible_mask = np.zeros(X_modified.shape[0], dtype=bool)
for y in set(Y_modified):
class_idx = class_map[y]
sphere_radius = sphere_radii[class_idx]
slab_radius = slab_radii[class_idx]
feasible_mask[Y_modified == y] = (
(sphere_dists[Y_modified == y] <= sphere_radius) &
(slab_dists[Y_modified == y] <= slab_radius))
print('Fraction of feasible points in attack: %s' % np.mean(feasible_mask[X_train.shape[0]:]))
np.savez(
datasets.get_labelflip_attack_npz_path(dataset_name, epsilon, norm_sq_constraint=None),
poisoned_X_train=X_modified,
Y_train=Y_modified)
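# Hedged note on consuming the saved files: np.savez stores the (possibly sparse)
# poisoned design matrix under 'poisoned_X_train' and the flipped labels under
# 'Y_train'. If X_modified was a scipy sparse matrix it gets wrapped in an object
# array, so it must be loaded with allow_pickle=True and unwrapped with .item(), e.g.
#   f = np.load(npz_path, allow_pickle=True)
#   X_poisoned = f['poisoned_X_train']
#   Y_poisoned = f['Y_train']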
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Scaffold connection and channel."""
import asyncio
from asyncio import Task
from collections import deque
from typing import Any, Deque, Dict, List, Optional, cast
from aea.connections.base import Connection, ConnectionStates
from aea.mail.base import Envelope
from aea.protocols.base import Message
from packages.fetchai.connections.ledger.base import CONNECTION_ID, RequestDispatcher
from packages.fetchai.connections.ledger.contract_dispatcher import (
ContractApiRequestDispatcher,
)
from packages.fetchai.connections.ledger.ledger_dispatcher import (
LedgerApiRequestDispatcher,
)
from packages.fetchai.protocols.contract_api import ContractApiMessage
from packages.fetchai.protocols.ledger_api import LedgerApiMessage
class LedgerConnection(Connection):
"""Proxy to the functionality of the SDK or API."""
connection_id = CONNECTION_ID
def __init__(self, **kwargs: Any):
"""Initialize a connection to interact with a ledger APIs."""
super().__init__(**kwargs)
self._ledger_dispatcher: Optional[LedgerApiRequestDispatcher] = None
self._contract_dispatcher: Optional[ContractApiRequestDispatcher] = None
self._event_new_receiving_task: Optional[asyncio.Event] = None
self.receiving_tasks: List[asyncio.Future] = []
self.task_to_request: Dict[asyncio.Future, Envelope] = {}
self.done_tasks: Deque[asyncio.Future] = deque()
self.api_configs = self.configuration.config.get(
"ledger_apis", {}
) # type: Dict[str, Dict[str, str]]
@property
def event_new_receiving_task(self) -> asyncio.Event:
"""Get the event to notify the 'receive' method of new receiving tasks."""
return cast(asyncio.Event, self._event_new_receiving_task)
async def connect(self) -> None:
"""Set up the connection."""
if self.is_connected: # pragma: nocover
return
self.state = ConnectionStates.connecting
self._ledger_dispatcher = LedgerApiRequestDispatcher(
self._state,
loop=self.loop,
api_configs=self.api_configs,
logger=self.logger,
)
self._contract_dispatcher = ContractApiRequestDispatcher(
self._state,
loop=self.loop,
api_configs=self.api_configs,
logger=self.logger,
)
self._event_new_receiving_task = asyncio.Event(loop=self.loop)
self.state = ConnectionStates.connected
async def disconnect(self) -> None:
"""Tear down the connection."""
if self.is_disconnected: # pragma: nocover
return
self.state = ConnectionStates.disconnecting
for task in self.receiving_tasks:
if not task.cancelled(): # pragma: nocover
task.cancel()
self._ledger_dispatcher = None
self._contract_dispatcher = None
self._event_new_receiving_task = None
self.state = ConnectionStates.disconnected
async def send(self, envelope: "Envelope") -> None:
"""
Send an envelope.
:param envelope: the envelope to send.
"""
task = self._schedule_request(envelope)
self.receiving_tasks.append(task)
self.task_to_request[task] = envelope
self.event_new_receiving_task.set()
def _schedule_request(self, envelope: Envelope) -> Task:
"""
Schedule a ledger API request.
:param envelope: the message.
:return: task
"""
dispatcher: RequestDispatcher
if (
envelope.protocol_specification_id
== LedgerApiMessage.protocol_specification_id
):
if self._ledger_dispatcher is None: # pragma: nocover
raise ValueError("No ledger dispatcher set.")
dispatcher = self._ledger_dispatcher
elif (
envelope.protocol_specification_id
== ContractApiMessage.protocol_specification_id
):
if self._contract_dispatcher is None: # pragma: nocover
raise ValueError("No contract dispatcher set.")
dispatcher = self._contract_dispatcher
else:
raise ValueError("Protocol not supported")
task = dispatcher.dispatch(envelope)
return task
async def receive(self, *args: Any, **kwargs: Any) -> Optional["Envelope"]:
"""
Receive an envelope. Blocking.
:param args: positional arguments
:param kwargs: keyword arguments
:return: the envelope received, or None.
"""
# if there are done tasks, return the result
if len(self.done_tasks) > 0: # pragma: nocover
done_task = self.done_tasks.pop()
return self._handle_done_task(done_task)
if len(self.receiving_tasks) == 0:
self.event_new_receiving_task.clear()
await self.event_new_receiving_task.wait()
# wait for completion of at least one receiving task
done, _ = await asyncio.wait(
self.receiving_tasks, return_when=asyncio.FIRST_COMPLETED
)
# pick one done task
done_task = done.pop()
# update done tasks
self.done_tasks.extend([*done])
return self._handle_done_task(done_task)
def _handle_done_task(self, task: asyncio.Future) -> Optional[Envelope]:
"""
Process a done receiving task.
:param task: the done task.
:return: the response envelope.
"""
request = self.task_to_request.pop(task)
self.receiving_tasks.remove(task)
response_message: Optional[Message] = task.result()
response_envelope = None
if response_message is not None:
response_envelope = Envelope(
to=request.sender,
sender=request.to,
message=response_message,
context=request.context,
)
return response_envelope
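# Hedged usage sketch (the connection is normally constructed by the AEA framework
# from its configuration rather than by hand); once built it is driven as:
#   await connection.connect()
#   await connection.send(envelope)        # schedules a dispatcher task
#   response = await connection.receive()  # waits for the first completed task
#   await connection.disconnect()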
|
import autoarray as aa
import numpy as np
from autoarray.mock.mock import MockPixelizationGrid, MockRegMapper
class TestRegularizationinstance:
def test__regularization_matrix__compare_to_regularization_util(self):
pixel_neighbors = np.array(
[
[1, 3, 7, 2],
[4, 2, 0, -1],
[1, 5, 3, -1],
[4, 6, 0, -1],
[7, 1, 5, 3],
[4, 2, 8, -1],
[7, 3, 0, -1],
[4, 8, 6, -1],
[7, 5, -1, -1],
]
)
pixel_neighbors_size = np.array([4, 3, 3, 3, 4, 3, 3, 3, 2])
pixelization_grid = MockPixelizationGrid(
pixel_neighbors=pixel_neighbors, pixel_neighbors_size=pixel_neighbors_size
)
mapper = MockRegMapper(source_pixelization_grid=pixelization_grid)
reg = aa.reg.Constant(coefficient=1.0)
regularization_matrix = reg.regularization_matrix_from_mapper(mapper=mapper)
regularization_matrix_util = aa.util.regularization.constant_regularization_matrix_from(
coefficient=1.0,
pixel_neighbors=pixel_neighbors,
pixel_neighbors_size=pixel_neighbors_size,
)
assert (regularization_matrix == regularization_matrix_util).all()
class TestRegularizationWeighted:
def test__weight_list__compare_to_regularization_util(self):
reg = aa.reg.AdaptiveBrightness(inner_coefficient=10.0, outer_coefficient=15.0)
pixel_signals = np.array([0.21, 0.586, 0.45])
mapper = MockRegMapper(pixel_signals=pixel_signals)
weight_list = reg.regularization_weight_list_from_mapper(mapper=mapper)
weight_list_util = aa.util.regularization.adaptive_regularization_weight_list_from(
inner_coefficient=10.0, outer_coefficient=15.0, pixel_signals=pixel_signals
)
assert (weight_list == weight_list_util).all()
def test__regularization_matrix__compare_to_regularization_util(self):
reg = aa.reg.AdaptiveBrightness(
inner_coefficient=1.0, outer_coefficient=2.0, signal_scale=1.0
)
pixel_neighbors = np.array(
[
[1, 4, -1, -1],
[2, 4, 0, -1],
[3, 4, 5, 1],
[5, 2, -1, -1],
[5, 0, 1, 2],
[2, 3, 4, -1],
]
)
pixel_neighbors_size = np.array([2, 3, 4, 2, 4, 3])
pixel_signals = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
pixelization_grid = MockPixelizationGrid(
pixel_neighbors=pixel_neighbors, pixel_neighbors_size=pixel_neighbors_size
)
mapper = MockRegMapper(
source_pixelization_grid=pixelization_grid, pixel_signals=pixel_signals
)
regularization_matrix = reg.regularization_matrix_from_mapper(mapper=mapper)
regularization_weight_list = aa.util.regularization.adaptive_regularization_weight_list_from(
pixel_signals=pixel_signals, inner_coefficient=1.0, outer_coefficient=2.0
)
regularization_matrix_util = aa.util.regularization.weighted_regularization_matrix_from(
regularization_weight_list=regularization_weight_list,
pixel_neighbors=pixel_neighbors,
pixel_neighbors_size=pixel_neighbors_size,
)
assert (regularization_matrix == regularization_matrix_util).all()
|
# Copyright 2018 Timothy M. Shead
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import shutil
import subprocess
import nbconvert.preprocessors
import nbformat
import traitlets.config
parser = argparse.ArgumentParser()
parser.add_argument("command", nargs="?", default="html", choices=["clean", "convert", "html"], help="Command to run.")
arguments = parser.parse_args()
root_dir = os.path.abspath(os.path.join(__file__, "..", ".."))
docs_dir = os.path.join(root_dir, "docs")
build_dir = os.path.join(docs_dir, "_build")
test_dir = os.path.join(docs_dir, "_test")
class SkipCells(nbconvert.preprocessors.Preprocessor):
def preprocess(self, nb, resources):
cells = []
for cell in nb.cells:
if cell["cell_type"] == "code" and cell["source"].startswith("# nbconvert: hide"):
continue
if cell["cell_type"] == "code" and cell["source"].startswith("# nbconvert: stop"):
break
cells.append(cell)
nb.cells = cells
return nb, resources
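# The markers recognized by the preprocessor above (taken directly from the string
# prefixes checked in preprocess()):
#   a code cell starting with "# nbconvert: hide" is dropped from the converted output;
#   a code cell starting with "# nbconvert: stop" ends conversion, dropping it and
#   every cell after it.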
def convert_notebook(name, force):
print("Converting %s" % name)
# If the Sphinx source is up-to-date, we're done.
source = os.path.join(docs_dir, "%s.ipynb" % name)
target = os.path.join(docs_dir, "%s.rst" % name)
if os.path.exists(target) and os.path.getmtime(target) >= os.path.getmtime(source) and not force:
return
nbconvert_version = subprocess.check_output(["jupyter", "nbconvert", "--version"]).strip().decode("utf-8")
if nbconvert_version not in ["4.0.0", "4.1.0", "4.2.0"]:
raise Exception("Unsupported nbconvert version: %s" % nbconvert_version)
# Some installations of ipython don't properly configure the hooks for Pygments lexers, which leads to missing
# source code cells when the documentation is built on readthedocs.org.
import pygments.plugin
if not list(pygments.plugin.find_plugin_lexers()):
raise Exception("It appears that ipython isn't configured correctly. This is a known issue with the stock conda ipython package. Try `conda update ipython -c conda-forge` instead.")
# Convert the notebook into restructured text suitable for the
# documentation.
with open(source) as f:
notebook = nbformat.read(f, as_version=4)
# Execute the notebook to update its contents and ensure it runs
# without error.
execute = nbconvert.preprocessors.ExecutePreprocessor()
execute.preprocess(notebook, {"metadata": {"path": "."}})
# Setup the RST exporter.
config = traitlets.config.Config()
config.RSTExporter.preprocessors = [SkipCells]
rst_exporter = nbconvert.RSTExporter(config=config)
(body, resources) = rst_exporter.from_notebook_node(notebook)
# Unmangle Sphinx cross-references in the tutorial that get mangled by
# markdown.
body = re.sub(":([^:]+):``([^`]+)``", ":\\1:`\\2`", body)
body = re.sub("[.][.].*\\\\(_[^:]+):", ".. \\1:", body)
body = """
.. image:: ../artwork/buildcat.png
:width: 200px
:align: right
""" + body
with open(target, "wb") as file:
file.write(body)
# Always build the documentation from scratch.
if os.path.exists(build_dir):
shutil.rmtree(build_dir)
notebooks = [
#"battery-chargers",
#"gps-receivers",
]
# Clean the build.
if arguments.command == "clean":
for name in notebooks:
if os.path.exists("%s.rst" % name):
os.remove("%s.rst" % name)
# Convert notebooks.
if arguments.command == "convert":
for name in notebooks:
convert_notebook(name, force=True)
# Generate the HTML documentation.
if arguments.command in ["html"]:
for name in notebooks:
convert_notebook(name, force=False)
subprocess.check_call(["make", arguments.command], cwd=docs_dir)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Metalcraft and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class QAP(Document):
pass
|
"""Burn functionality."""
import datetime
import pytest
from ethereum.tester import TransactionFailed
from web3.contract import Contract
@pytest.fixture
def token(chain, team_multisig):
args = [
team_multisig,
"Token",
"TKN",
1000000,
0,
int((datetime.datetime(2017, 4, 22, 16, 0) - datetime.datetime(1970, 1, 1)).total_seconds())
]
contract, hash = chain.provider.deploy_contract('CentrallyIssuedToken', deploy_args=args)
assert contract.call().balanceOf(team_multisig) == 1000000
contract.transact({"from": team_multisig}).releaseTokenTransfer()
return contract
@pytest.fixture
def token_with_customer_balance(chain, team_multisig, token, customer) -> Contract:
"""Create a Crowdsale token where transfer restrictions have been lifted."""
# Make sure customer 1 has some token balance
token.transact({"from": team_multisig}).transfer(customer, 10000)
return token
def test_burn(token_with_customer_balance: Contract, customer: str):
"""Burn tokens."""
token = token_with_customer_balance
initial_balance = token.call().balanceOf(customer)
initial_supply = token.call().totalSupply()
amount = 1000
token.transact({"from": customer}).burn(amount)
assert token.call().balanceOf(customer) == initial_balance - amount
assert token.call().totalSupply() == initial_supply - amount
events = token.pastEvents("Burned").get()
assert len(events) == 1
e = events[-1]
assert e["args"]["burner"] == customer
assert e["args"]["burnedAmount"] == amount
|
from enum import Enum
import re
import secrets
from app.config import oapi, nlp
no_clone_replies = ['No clone Found.', 'E be like say clone no dey.', 'The tweet looks original.',
'Error 404: Clone not found.', 'No copies yet.', 'Nothing in sight.',
'I couldn\'t find clones.', 'Clean for now, but the future is not certain.',
'It\'s as clean as Arsenal\'s trophy cabinet.', 'I found nothing.', 'I\'m at peace with this tweet.',
'No clones at this time.', 'I\'ve not seen any tweet of this kind.', 'No clone in sight.',
'Aye! Master! It looks clean.'
]
clone_list_introductions = ['Possible clones are: \n\n', 'These tweets look similar: \n\n',
'I find these results interesting: \n\n', 'The results: \n\n', 'Clones found: \n\n',
'I don see some clones: \n\n', 'There are some matches: \n\n',
'I think I have seen this before. \n\nLook: \n\n', 'I found something: \n\n',
'Some copies found: \n\n', 'Finding these clones gave me joy: \n\n',
'These tweets look like clones: \n\n', 'Aye! Master! \n\nLook what I found: \n\n',
'Sorry I kept you waiting. \n\nHere you go: \n\n', 'These are possible duplicates: \n\n'
]
clone_list_conclusions = ['\n\nThanks for trusting me', '\n\nThe results may not be very accurate',
'\n\nThat\'s all I have for now', '\n\nI\'m glad I was helpful',
'\n\nI feel rusty, but those results look "good"', '\n\nI love my job',
'\n\nOriginality counts', '\n\nCredits to @HAKSOAT',
'\n\nOff-topic, but I want to work at Netflix someday',
'\n\nLife is too short, not to be original', '\n\nThe results are nothing but approximates',
'\n\nMy AI is just a bunch of if statements', '\n\nKindly, don\'t reply this tweet',
'\n\nIt\'s time to go back to sleep'
]
no_reference_replies = ['I can\'t find a reference tweet', 'Wrong usage. There\'s no tweet to find clones for',
'There\'s no target tweet here']
def get_action(mention_text):
directive = re.search(r'(go|old|new)[ ]*@findclones', mention_text.lower())
if directive and 'go' in directive.group():
return ActionType.go.value
elif directive and 'old' in directive.group():
return ActionType.old.value
elif directive and 'new' in directive.group():
return ActionType.new.value
else:
return None
def remove_emojis(text):
return text.encode('ascii', 'ignore').decode('ascii').replace('\n', ' ')
def remove_links(text):
return re.sub(r'http(s)*[^ ]+', '', text)
def process_tweet_text(text):
tweet_text = remove_emojis(text)
tweet_text = remove_links(tweet_text)
tweet_text = nlp(tweet_text.lower())
return tweet_text
def compile_tweet_link(tweet):
url = "https://twitter.com/"
link = url + tweet.author.screen_name + "/status/" + str(tweet.id)
return link
def send_no_reference_tweet(mentioner, mention_id):
tweet_text = secrets.choice(no_reference_replies)
oapi.update_status(f'@{mentioner} {tweet_text}', in_reply_to_status_id=mention_id)
def send_tweet(mentioner, mention_id, links, sent_tweet=None):
if sent_tweet:
tweet_text = sent_tweet
if isinstance(tweet_text, bytes):
tweet_text = tweet_text.decode('utf-8')
oapi.update_status(f'@{mentioner} ' + tweet_text, in_reply_to_status_id=mention_id)
return tweet_text
if links:
link_count = len(links)
tweet_text = secrets.choice(clone_list_introductions)
for link in links[::-1]:
tweet_text += f"{link_count}: {link}\n"
link_count -= 1
tweet_text += secrets.choice(clone_list_conclusions)
else:
tweet_text = secrets.choice(no_clone_replies)
combo = secrets.choice(no_clone_replies)
while combo == tweet_text:
combo = secrets.choice(no_clone_replies)
tweet_text = f'{tweet_text} {combo}'
if isinstance(tweet_text, bytes):
tweet_text = tweet_text.decode('utf-8')
oapi.update_status(f'@{mentioner} ' + tweet_text, in_reply_to_status_id=mention_id)
return tweet_text
def get_most_similar_tweets(fetched_tweets, tweet_details, amount):
tweets_by_rank = []
ids = [tweet_details['id']]
boundary = 15
for tweet in fetched_tweets:
if tweet.id in ids:
continue
fetched_tweet_text = nlp(tweet.full_text.lower())
if abs(len(tweet_details['text']) - len(fetched_tweet_text)) > boundary:
continue
similarity = tweet_details['text'].similarity(fetched_tweet_text)
if similarity > 0.7:
tweets_by_rank.append((tweet, similarity))
ids.append(tweet.id)
tweets_by_rank.sort(key=lambda x: x[1], reverse=True)
top_x_tweets_by_rank = [tweet for tweet, score in tweets_by_rank[:amount]]
return top_x_tweets_by_rank
class ActionType(Enum):
go = 'go'
old = 'old'
new = 'new'
class SearchType(Enum):
mixed = 'mixed'
recent = 'recent'
popular = 'popular'
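# Hedged examples of the directive parsing above (pure regex on the lowercased
# mention text, so these hold whenever the module imports cleanly):
#   get_action('go @findclones')          -> 'go'
#   get_action('please OLD @FindClones')  -> 'old'
#   get_action('hello @findclones')       -> None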
|
from typing import Dict
from botocore.paginate import Paginator
class DescribeSchedule(Paginator):
def paginate(self, ChannelId: str, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`MediaLive.Client.describe_schedule`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeSchedule>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
ChannelId='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'ScheduleActions': [
{
'ActionName': 'string',
'ScheduleActionSettings': {
'HlsTimedMetadataSettings': {
'Id3': 'string'
},
'InputSwitchSettings': {
'InputAttachmentNameReference': 'string'
},
'PauseStateSettings': {
'Pipelines': [
{
'PipelineId': 'PIPELINE_0'|'PIPELINE_1'
},
]
},
'Scte35ReturnToNetworkSettings': {
'SpliceEventId': 123
},
'Scte35SpliceInsertSettings': {
'Duration': 123,
'SpliceEventId': 123
},
'Scte35TimeSignalSettings': {
'Scte35Descriptors': [
{
'Scte35DescriptorSettings': {
'SegmentationDescriptorScte35DescriptorSettings': {
'DeliveryRestrictions': {
'ArchiveAllowedFlag': 'ARCHIVE_NOT_ALLOWED'|'ARCHIVE_ALLOWED',
'DeviceRestrictions': 'NONE'|'RESTRICT_GROUP0'|'RESTRICT_GROUP1'|'RESTRICT_GROUP2',
'NoRegionalBlackoutFlag': 'REGIONAL_BLACKOUT'|'NO_REGIONAL_BLACKOUT',
'WebDeliveryAllowedFlag': 'WEB_DELIVERY_NOT_ALLOWED'|'WEB_DELIVERY_ALLOWED'
},
'SegmentNum': 123,
'SegmentationCancelIndicator': 'SEGMENTATION_EVENT_NOT_CANCELED'|'SEGMENTATION_EVENT_CANCELED',
'SegmentationDuration': 123,
'SegmentationEventId': 123,
'SegmentationTypeId': 123,
'SegmentationUpid': 'string',
'SegmentationUpidType': 123,
'SegmentsExpected': 123,
'SubSegmentNum': 123,
'SubSegmentsExpected': 123
}
}
},
]
},
'StaticImageActivateSettings': {
'Duration': 123,
'FadeIn': 123,
'FadeOut': 123,
'Height': 123,
'Image': {
'PasswordParam': 'string',
'Uri': 'string',
'Username': 'string'
},
'ImageX': 123,
'ImageY': 123,
'Layer': 123,
'Opacity': 123,
'Width': 123
},
'StaticImageDeactivateSettings': {
'FadeOut': 123,
'Layer': 123
}
},
'ScheduleActionStartSettings': {
'FixedModeScheduleActionStartSettings': {
'Time': 'string'
},
'FollowModeScheduleActionStartSettings': {
'FollowPoint': 'END'|'START',
'ReferenceActionName': 'string'
}
}
},
]
}
**Response Structure**
- *(dict) --* An array of channel schedule actions.
- **ScheduleActions** *(list) --* The list of actions in the schedule.
- *(dict) --* Contains information on a single schedule action.
- **ActionName** *(string) --* The name of the action, must be unique within the schedule. This name provides the main reference to an action once it is added to the schedule. A name is unique if it is no longer in the schedule. The schedule is automatically cleaned up to remove actions with a start time of more than 1 hour ago (approximately) so at that point a name can be reused.
- **ScheduleActionSettings** *(dict) --* Settings for this schedule action.
- **HlsTimedMetadataSettings** *(dict) --* Action to insert HLS metadata
- **Id3** *(string) --* Base64 string formatted according to the ID3 specification: http://id3.org/id3v2.4.0-structure
- **InputSwitchSettings** *(dict) --* Action to switch the input
- **InputAttachmentNameReference** *(string) --* The name of the input attachment that should be switched to by this action.
- **PauseStateSettings** *(dict) --* Action to pause or unpause one or both channel pipelines
- **Pipelines** *(list) --* Placeholder documentation for __listOfPipelinePauseStateSettings
- *(dict) --* Settings for pausing a pipeline.
- **PipelineId** *(string) --* Pipeline ID to pause ("PIPELINE_0" or "PIPELINE_1").
- **Scte35ReturnToNetworkSettings** *(dict) --* Action to insert SCTE-35 return_to_network message
- **SpliceEventId** *(integer) --* The splice_event_id for the SCTE-35 splice_insert, as defined in SCTE-35.
- **Scte35SpliceInsertSettings** *(dict) --* Action to insert SCTE-35 splice_insert message
- **Duration** *(integer) --* Optional, the duration for the splice_insert, in 90 KHz ticks. To convert seconds to ticks, multiple the seconds by 90,000. If you enter a duration, there is an expectation that the downstream system can read the duration and cue in at that time. If you do not enter a duration, the splice_insert will continue indefinitely and there is an expectation that you will enter a return_to_network to end the splice_insert at the appropriate time.
- **SpliceEventId** *(integer) --* The splice_event_id for the SCTE-35 splice_insert, as defined in SCTE-35.
- **Scte35TimeSignalSettings** *(dict) --* Action to insert SCTE-35 time_signal message
- **Scte35Descriptors** *(list) --* The list of SCTE-35 descriptors accompanying the SCTE-35 time_signal.
- *(dict) --* Holds one set of SCTE-35 Descriptor Settings.
- **Scte35DescriptorSettings** *(dict) --* SCTE-35 Descriptor Settings.
- **SegmentationDescriptorScte35DescriptorSettings** *(dict) --* SCTE-35 Segmentation Descriptor.
- **DeliveryRestrictions** *(dict) --* Holds the four SCTE-35 delivery restriction parameters.
- **ArchiveAllowedFlag** *(string) --* Corresponds to SCTE-35 archive_allowed_flag.
- **DeviceRestrictions** *(string) --* Corresponds to SCTE-35 device_restrictions parameter.
- **NoRegionalBlackoutFlag** *(string) --* Corresponds to SCTE-35 no_regional_blackout_flag parameter.
- **WebDeliveryAllowedFlag** *(string) --* Corresponds to SCTE-35 web_delivery_allowed_flag parameter.
- **SegmentNum** *(integer) --* Corresponds to SCTE-35 segment_num. A value that is valid for the specified segmentation_type_id.
- **SegmentationCancelIndicator** *(string) --* Corresponds to SCTE-35 segmentation_event_cancel_indicator.
- **SegmentationDuration** *(integer) --* Corresponds to SCTE-35 segmentation_duration. Optional. The duration for the time_signal, in 90 KHz ticks. To convert seconds to ticks, multiple the seconds by 90,000. Enter time in 90 KHz clock ticks. If you do not enter a duration, the time_signal will continue until you insert a cancellation message.
- **SegmentationEventId** *(integer) --* Corresponds to SCTE-35 segmentation_event_id.
- **SegmentationTypeId** *(integer) --* Corresponds to SCTE-35 segmentation_type_id. One of the segmentation_type_id values listed in the SCTE-35 specification. On the console, enter the ID in decimal (for example, "52"). In the CLI, API, or an SDK, enter the ID in hex (for example, "0x34") or decimal (for example, "52").
- **SegmentationUpid** *(string) --* Corresponds to SCTE-35 segmentation_upid. Enter a string containing the hexadecimal representation of the characters that make up the SCTE-35 segmentation_upid value. Must contain an even number of hex characters. Do not include spaces between each hex pair. For example, the ASCII "ADS Information" becomes hex "41445320496e666f726d6174696f6e".
- **SegmentationUpidType** *(integer) --* Corresponds to SCTE-35 segmentation_upid_type. On the console, enter one of the types listed in the SCTE-35 specification, converted to a decimal. For example, "0x0C" hex from the specification is "12" in decimal. In the CLI, API, or an SDK, enter one of the types listed in the SCTE-35 specification, in either hex (for example, "0x0C" ) or in decimal (for example, "12").
- **SegmentsExpected** *(integer) --* Corresponds to SCTE-35 segments_expected. A value that is valid for the specified segmentation_type_id.
- **SubSegmentNum** *(integer) --* Corresponds to SCTE-35 sub_segment_num. A value that is valid for the specified segmentation_type_id.
- **SubSegmentsExpected** *(integer) --* Corresponds to SCTE-35 sub_segments_expected. A value that is valid for the specified segmentation_type_id.
- **StaticImageActivateSettings** *(dict) --* Action to activate a static image overlay
- **Duration** *(integer) --* The duration in milliseconds for the image to remain on the video. If omitted or set to 0 the duration is unlimited and the image will remain until it is explicitly deactivated.
- **FadeIn** *(integer) --* The time in milliseconds for the image to fade in. The fade-in starts at the start time of the overlay. Default is 0 (no fade-in).
- **FadeOut** *(integer) --* Applies only if a duration is specified. The time in milliseconds for the image to fade out. The fade-out starts when the duration time is hit, so it effectively extends the duration. Default is 0 (no fade-out).
- **Height** *(integer) --* The height of the image when inserted into the video, in pixels. The overlay will be scaled up or down to the specified height. Leave blank to use the native height of the overlay.
- **Image** *(dict) --* The location and filename of the image file to overlay on the video. The file must be a 32-bit BMP, PNG, or TGA file, and must not be larger (in pixels) than the input video.
- **PasswordParam** *(string) --* key used to extract the password from EC2 Parameter store
- **Uri** *(string) --* Uniform Resource Identifier - This should be a path to a file accessible to the Live system (e.g. a http:// URI) depending on the output type. For example, an RTMP destination should have a uri similar to: "rtmp://fmsserver/live".
- **Username** *(string) --* Documentation update needed
- **ImageX** *(integer) --* Placement of the left edge of the overlay relative to the left edge of the video frame, in pixels. 0 (the default) is the left edge of the frame. If the placement causes the overlay to extend beyond the right edge of the underlying video, then the overlay is cropped on the right.
- **ImageY** *(integer) --* Placement of the top edge of the overlay relative to the top edge of the video frame, in pixels. 0 (the default) is the top edge of the frame. If the placement causes the overlay to extend beyond the bottom edge of the underlying video, then the overlay is cropped on the bottom.
- **Layer** *(integer) --* The number of the layer, 0 to 7. There are 8 layers that can be overlaid on the video, each layer with a different image. The layers are in Z order, which means that overlays with higher values of layer are inserted on top of overlays with lower values of layer. Default is 0.
- **Opacity** *(integer) --* Opacity of image where 0 is transparent and 100 is fully opaque. Default is 100.
- **Width** *(integer) --* The width of the image when inserted into the video, in pixels. The overlay will be scaled up or down to the specified width. Leave blank to use the native width of the overlay.
- **StaticImageDeactivateSettings** *(dict) --* Action to deactivate a static image overlay
- **FadeOut** *(integer) --* The time in milliseconds for the image to fade out. Default is 0 (no fade-out).
- **Layer** *(integer) --* The image overlay layer to deactivate, 0 to 7. Default is 0.
- **ScheduleActionStartSettings** *(dict) --* The time for the action to start in the channel.
- **FixedModeScheduleActionStartSettings** *(dict) --* Holds the start time for the action.
- **Time** *(string) --* Start time for the action to start in the channel. (Not the time for the action to be added to the schedule: actions are always added to the schedule immediately.) UTC format: yyyy-mm-ddThh:mm:ss.nnnZ. All the letters are digits (for example, mm might be 01) except for the two constants "T" for time and "Z" for "UTC format".
- **FollowModeScheduleActionStartSettings** *(dict) --* Specifies an action to follow for scheduling this action.
- **FollowPoint** *(string) --* Identifies whether this action starts relative to the start or relative to the end of the reference action.
- **ReferenceActionName** *(string) --* The action name of another action that this one refers to.
:type ChannelId: string
:param ChannelId: **[REQUIRED]** Id of the channel whose schedule is being updated.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
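# Hedged usage sketch for the stub above; the real paginator is obtained from a boto3
# client rather than instantiated from this class:
#   import boto3
#   client = boto3.client('medialive')
#   paginator = client.get_paginator('describe_schedule')
#   for page in paginator.paginate(ChannelId='1234567'):
#       for action in page['ScheduleActions']:
#           print(action['ActionName'])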
class ListChannels(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`MediaLive.Client.list_channels`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListChannels>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Channels': [
{
'Arn': 'string',
'ChannelClass': 'STANDARD'|'SINGLE_PIPELINE',
'Destinations': [
{
'Id': 'string',
'MediaPackageSettings': [
{
'ChannelId': 'string'
},
],
'Settings': [
{
'PasswordParam': 'string',
'StreamName': 'string',
'Url': 'string',
'Username': 'string'
},
]
},
],
'EgressEndpoints': [
{
'SourceIp': 'string'
},
],
'Id': 'string',
'InputAttachments': [
{
'InputAttachmentName': 'string',
'InputId': 'string',
'InputSettings': {
'AudioSelectors': [
{
'Name': 'string',
'SelectorSettings': {
'AudioLanguageSelection': {
'LanguageCode': 'string',
'LanguageSelectionPolicy': 'LOOSE'|'STRICT'
},
'AudioPidSelection': {
'Pid': 123
}
}
},
],
'CaptionSelectors': [
{
'LanguageCode': 'string',
'Name': 'string',
'SelectorSettings': {
'AribSourceSettings': {},
'DvbSubSourceSettings': {
'Pid': 123
},
'EmbeddedSourceSettings': {
'Convert608To708': 'DISABLED'|'UPCONVERT',
'Scte20Detection': 'AUTO'|'OFF',
'Source608ChannelNumber': 123,
'Source608TrackNumber': 123
},
'Scte20SourceSettings': {
'Convert608To708': 'DISABLED'|'UPCONVERT',
'Source608ChannelNumber': 123
},
'Scte27SourceSettings': {
'Pid': 123
},
'TeletextSourceSettings': {
'PageNumber': 'string'
}
}
},
],
'DeblockFilter': 'DISABLED'|'ENABLED',
'DenoiseFilter': 'DISABLED'|'ENABLED',
'FilterStrength': 123,
'InputFilter': 'AUTO'|'DISABLED'|'FORCED',
'NetworkInputSettings': {
'HlsInputSettings': {
'Bandwidth': 123,
'BufferSegments': 123,
'Retries': 123,
'RetryInterval': 123
},
'ServerValidation': 'CHECK_CRYPTOGRAPHY_AND_VALIDATE_NAME'|'CHECK_CRYPTOGRAPHY_ONLY'
},
'SourceEndBehavior': 'CONTINUE'|'LOOP',
'VideoSelector': {
'ColorSpace': 'FOLLOW'|'REC_601'|'REC_709',
'ColorSpaceUsage': 'FALLBACK'|'FORCE',
'SelectorSettings': {
'VideoSelectorPid': {
'Pid': 123
},
'VideoSelectorProgramId': {
'ProgramId': 123
}
}
}
}
},
],
'InputSpecification': {
'Codec': 'MPEG2'|'AVC'|'HEVC',
'MaximumBitrate': 'MAX_10_MBPS'|'MAX_20_MBPS'|'MAX_50_MBPS',
'Resolution': 'SD'|'HD'|'UHD'
},
'LogLevel': 'ERROR'|'WARNING'|'INFO'|'DEBUG'|'DISABLED',
'Name': 'string',
'PipelinesRunningCount': 123,
'RoleArn': 'string',
'State': 'CREATING'|'CREATE_FAILED'|'IDLE'|'STARTING'|'RUNNING'|'RECOVERING'|'STOPPING'|'DELETING'|'DELETED',
'Tags': {
'string': 'string'
}
},
],
}
**Response Structure**
- *(dict) --* An array of channels
- **Channels** *(list) --* Placeholder documentation for __listOfChannelSummary
- *(dict) --* Placeholder documentation for ChannelSummary
- **Arn** *(string) --* The unique arn of the channel.
- **ChannelClass** *(string) --* The class for this channel. STANDARD for a channel with two pipelines or SINGLE_PIPELINE for a channel with one pipeline.
- **Destinations** *(list) --* A list of destinations of the channel. For UDP outputs, there is one destination per output. For other types (HLS, for example), there is one destination per packager.
- *(dict) --* Placeholder documentation for OutputDestination
- **Id** *(string) --* User-specified id. This is used in an output group or an output.
- **MediaPackageSettings** *(list) --* Destination settings for a MediaPackage output; one destination for both encoders.
- *(dict) --* Media Package Output Destination Settings
- **ChannelId** *(string) --* ID of the channel in MediaPackage that is the destination for this output group. You do not need to specify the individual inputs in MediaPackage; MediaLive will handle the connection of the two MediaLive pipelines to the two MediaPackage inputs. The MediaPackage channel and MediaLive channel must be in the same region.
- **Settings** *(list) --* Destination settings for a standard output; one destination for each redundant encoder.
- *(dict) --* Placeholder documentation for OutputDestinationSettings
- **PasswordParam** *(string) --* key used to extract the password from EC2 Parameter store
- **StreamName** *(string) --* Stream name for RTMP destinations (URLs of type rtmp://)
- **Url** *(string) --* A URL specifying a destination
- **Username** *(string) --* username for destination
- **EgressEndpoints** *(list) --* The endpoints where outgoing connections initiate from
- *(dict) --* Placeholder documentation for ChannelEgressEndpoint
- **SourceIp** *(string) --* Public IP of where a channel's output comes from
- **Id** *(string) --* The unique id of the channel.
- **InputAttachments** *(list) --* List of input attachments for channel.
- *(dict) --* Placeholder documentation for InputAttachment
- **InputAttachmentName** *(string) --* User-specified name for the attachment. This is required if the user wants to use this input in an input switch action.
- **InputId** *(string) --* The ID of the input
- **InputSettings** *(dict) --* Settings of an input (caption selector, etc.)
- **AudioSelectors** *(list) --* Used to select the audio stream to decode for inputs that have multiple available.
- *(dict) --* Audio Selector
- **Name** *(string) --* The name of this AudioSelector. AudioDescriptions will use this name to uniquely identify this Selector. Selector names should be unique per input.
- **SelectorSettings** *(dict) --* The audio selector settings.
- **AudioLanguageSelection** *(dict) --* Audio Language Selection
- **LanguageCode** *(string) --* Selects a specific three-letter language code from within an audio source.
- **LanguageSelectionPolicy** *(string) --* When set to "strict", the transport stream demux strictly identifies audio streams by their language descriptor. If a PMT update occurs such that an audio stream matching the initially selected language is no longer present then mute will be encoded until the language returns. If "loose", then on a PMT update the demux will choose another audio stream in the program with the same stream type if it can't find one with the same language.
- **AudioPidSelection** *(dict) --* Audio Pid Selection
- **Pid** *(integer) --* Selects a specific PID from within a source.
- **CaptionSelectors** *(list) --* Used to select the caption input to use for inputs that have multiple available.
- *(dict) --* Caption Selector. Identifies a specific caption track to extract from the input.
- **LanguageCode** *(string) --* When specified this field indicates the three letter language code of the caption track to extract from the source.
- **Name** *(string) --* Name identifier for a caption selector. This name is used to associate this caption selector with one or more caption descriptions. Names must be unique within an event.
- **SelectorSettings** *(dict) --* Caption selector settings.
- **AribSourceSettings** *(dict) --* Arib Source Settings
- **DvbSubSourceSettings** *(dict) --* Dvb Sub Source Settings
- **Pid** *(integer) --* When using DVB-Sub with Burn-In or SMPTE-TT, use this PID for the source content. Unused for DVB-Sub passthrough. All DVB-Sub content is passed through, regardless of selectors.
- **EmbeddedSourceSettings** *(dict) --* Embedded Source Settings
- **Convert608To708** *(string) --* If upconvert, 608 data is both passed through via the "608 compatibility bytes" fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded.
- **Scte20Detection** *(string) --* Set to "auto" to handle streams with intermittent and/or non-aligned SCTE-20 and Embedded captions.
- **Source608ChannelNumber** *(integer) --* Specifies the 608/708 channel number within the video track from which to extract captions. Unused for passthrough.
- **Source608TrackNumber** *(integer) --* This field is unused and deprecated.
- **Scte20SourceSettings** *(dict) --* Scte20 Source Settings
- **Convert608To708** *(string) --* If upconvert, 608 data is both passed through via the "608 compatibility bytes" fields of the 708 wrapper as well as translated into 708. 708 data present in the source content will be discarded.
- **Source608ChannelNumber** *(integer) --* Specifies the 608/708 channel number within the video track from which to extract captions. Unused for passthrough.
- **Scte27SourceSettings** *(dict) --* Scte27 Source Settings
- **Pid** *(integer) --* The pid field is used in conjunction with the caption selector languageCode field as follows: - Specify PID and Language: Extracts captions from that PID; the language is "informational". - Specify PID and omit Language: Extracts the specified PID. - Omit PID and specify Language: Extracts the specified language, whichever PID that happens to be. - Omit PID and omit Language: Valid only if source is DVB-Sub that is being passed through; all languages will be passed through.
- **TeletextSourceSettings** *(dict) --* Teletext Source Settings
- **PageNumber** *(string) --* Specifies the teletext page number within the data stream from which to extract captions. Range of 0x100 (256) to 0x8FF (2303). Unused for passthrough. Should be specified as a hexadecimal string with no "0x" prefix.
- **DeblockFilter** *(string) --* Enable or disable the deblock filter when filtering.
- **DenoiseFilter** *(string) --* Enable or disable the denoise filter when filtering.
- **FilterStrength** *(integer) --* Adjusts the magnitude of filtering from 1 (minimal) to 5 (strongest).
- **InputFilter** *(string) --* Turns on the filter for this input. MPEG-2 inputs have the deblocking filter enabled by default. 1) auto - filtering will be applied depending on input type/quality 2) disabled - no filtering will be applied to the input 3) forced - filtering will be applied regardless of input type
- **NetworkInputSettings** *(dict) --* Input settings.
- **HlsInputSettings** *(dict) --* Specifies HLS input settings when the uri is for a HLS manifest.
- **Bandwidth** *(integer) --* When specified the HLS stream with the m3u8 BANDWIDTH that most closely matches this value will be chosen, otherwise the highest bandwidth stream in the m3u8 will be chosen. The bitrate is specified in bits per second, as in an HLS manifest.
- **BufferSegments** *(integer) --* When specified, reading of the HLS input will begin this many buffer segments from the end (most recently written segment). When not specified, the HLS input will begin with the first segment specified in the m3u8.
- **Retries** *(integer) --* The number of consecutive times that attempts to read a manifest or segment must fail before the input is considered unavailable.
- **RetryInterval** *(integer) --* The number of seconds between retries when an attempt to read a manifest or segment fails.
- **ServerValidation** *(string) --* Check HTTPS server certificates. When set to checkCryptographyOnly, cryptography in the certificate will be checked, but not the server's name. Certain subdomains (notably S3 buckets that use dots in the bucket name) do not strictly match the corresponding certificate's wildcard pattern and would otherwise cause the event to error. This setting is ignored for protocols that do not use https.
- **SourceEndBehavior** *(string) --* Loop input if it is a file. This allows a file input to be streamed indefinitely.
- **VideoSelector** *(dict) --* Informs which video elementary stream to decode for input types that have multiple available.
- **ColorSpace** *(string) --* Specifies the colorspace of an input. This setting works in tandem with colorSpaceConversion to determine if any conversion will be performed.
- **ColorSpaceUsage** *(string) --* Applies only if colorSpace is a value other than follow. This field controls how the value in the colorSpace field will be used. fallback means that when the input does include color space data, that data will be used, but when the input has no color space data, the value in colorSpace will be used. Choose fallback if your input is sometimes missing color space data, but when it does have color space data, that data is correct. force means to always use the value in colorSpace. Choose force if your input usually has no color space data or might have unreliable color space data.
- **SelectorSettings** *(dict) --* The video selector settings.
- **VideoSelectorPid** *(dict) --* Video Selector Pid
- **Pid** *(integer) --* Selects a specific PID from within a video source.
- **VideoSelectorProgramId** *(dict) --* Video Selector Program Id
- **ProgramId** *(integer) --* Selects a specific program from within a multi-program transport stream. If the program doesn't exist, the first program within the transport stream will be selected by default.
- **InputSpecification** *(dict) --* Placeholder documentation for InputSpecification
- **Codec** *(string) --* Input codec
- **MaximumBitrate** *(string) --* Maximum input bitrate, categorized coarsely
- **Resolution** *(string) --* Input resolution, categorized coarsely
- **LogLevel** *(string) --* The log level being written to CloudWatch Logs.
- **Name** *(string) --* The name of the channel. (user-mutable)
- **PipelinesRunningCount** *(integer) --* The number of currently healthy pipelines.
- **RoleArn** *(string) --* The Amazon Resource Name (ARN) of the role assumed when running the Channel.
- **State** *(string) --* Placeholder documentation for ChannelState
- **Tags** *(dict) --* A collection of key-value pairs.
- *(string) --* Placeholder documentation for __string
- *(string) --* Placeholder documentation for __string
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListInputSecurityGroups(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`MediaLive.Client.list_input_security_groups`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputSecurityGroups>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'InputSecurityGroups': [
{
'Arn': 'string',
'Id': 'string',
'Inputs': [
'string',
],
'State': 'IDLE'|'IN_USE'|'UPDATING'|'DELETED',
'Tags': {
'string': 'string'
},
'WhitelistRules': [
{
'Cidr': 'string'
},
]
},
],
}
**Response Structure**
- *(dict) --* An array of Input Security Groups
- **InputSecurityGroups** *(list) --* List of input security groups
- *(dict) --* An Input Security Group
- **Arn** *(string) --* Unique ARN of Input Security Group
- **Id** *(string) --* The Id of the Input Security Group
- **Inputs** *(list) --* The list of inputs currently using this Input Security Group.
- *(string) --* Placeholder documentation for __string
- **State** *(string) --* The current state of the Input Security Group.
- **Tags** *(dict) --* A collection of key-value pairs.
- *(string) --* Placeholder documentation for __string
- *(string) --* Placeholder documentation for __string
- **WhitelistRules** *(list) --* Whitelist rules and their sync status
- *(dict) --* Whitelist rule
- **Cidr** *(string) --* The IPv4 CIDR that's whitelisted.
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListInputs(Paginator):
def paginate(self, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`MediaLive.Client.list_inputs`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputs>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Inputs': [
{
'Arn': 'string',
'AttachedChannels': [
'string',
],
'Destinations': [
{
'Ip': 'string',
'Port': 'string',
'Url': 'string',
'Vpc': {
'AvailabilityZone': 'string',
'NetworkInterfaceId': 'string'
}
},
],
'Id': 'string',
'InputClass': 'STANDARD'|'SINGLE_PIPELINE',
'MediaConnectFlows': [
{
'FlowArn': 'string'
},
],
'Name': 'string',
'RoleArn': 'string',
'SecurityGroups': [
'string',
],
'Sources': [
{
'PasswordParam': 'string',
'Url': 'string',
'Username': 'string'
},
],
'State': 'CREATING'|'DETACHED'|'ATTACHED'|'DELETING'|'DELETED',
'Tags': {
'string': 'string'
},
'Type': 'UDP_PUSH'|'RTP_PUSH'|'RTMP_PUSH'|'RTMP_PULL'|'URL_PULL'|'MP4_FILE'|'MEDIACONNECT'
},
],
}
**Response Structure**
- *(dict) --* An array of inputs
- **Inputs** *(list) --* Placeholder documentation for __listOfInput
- *(dict) --* Placeholder documentation for Input
- **Arn** *(string) --* The Unique ARN of the input (generated, immutable).
- **AttachedChannels** *(list) --* A list of channel IDs that this input is attached to (currently an input can only be attached to one channel).
- *(string) --* Placeholder documentation for __string
- **Destinations** *(list) --* A list of the destinations of the input (PUSH-type).
- *(dict) --* The settings for a PUSH type input.
- **Ip** *(string) --* The system-generated static IP address of endpoint. It remains fixed for the lifetime of the input.
- **Port** *(string) --* The port number for the input.
- **Url** *(string) --* This represents the endpoint that the customer stream will be pushed to.
- **Vpc** *(dict) --* The properties for a VPC type input destination.
- **AvailabilityZone** *(string) --* The availability zone of the Input destination.
- **NetworkInterfaceId** *(string) --* The network interface ID of the Input destination in the VPC.
- **Id** *(string) --* The generated ID of the input (unique for user account, immutable).
- **InputClass** *(string) --* STANDARD - MediaLive expects two sources to be connected to this input. If the channel is also STANDARD, both sources will be ingested. If the channel is SINGLE_PIPELINE, only the first source will be ingested; the second source will always be ignored, even if the first source fails. SINGLE_PIPELINE - You can connect only one source to this input. If the ChannelClass is also SINGLE_PIPELINE, this value is valid. If the ChannelClass is STANDARD, this value is not valid because the channel requires two sources in the input.
- **MediaConnectFlows** *(list) --* A list of MediaConnect Flows for this input.
- *(dict) --* The settings for a MediaConnect Flow.
- **FlowArn** *(string) --* The unique ARN of the MediaConnect Flow being used as a source.
- **Name** *(string) --* The user-assigned name (This is a mutable value).
- **RoleArn** *(string) --* The Amazon Resource Name (ARN) of the role this input assumes during and after creation.
- **SecurityGroups** *(list) --* A list of IDs for all the Input Security Groups attached to the input.
- *(string) --* Placeholder documentation for __string
- **Sources** *(list) --* A list of the sources of the input (PULL-type).
- *(dict) --* The settings for a PULL type input.
- **PasswordParam** *(string) --* The key used to extract the password from EC2 Parameter store.
- **Url** *(string) --* This represents the customer's source URL where stream is pulled from.
- **Username** *(string) --* The username for the input source.
- **State** *(string) --* Placeholder documentation for InputState
- **Tags** *(dict) --* A collection of key-value pairs.
- *(string) --* Placeholder documentation for __string
- *(string) --* Placeholder documentation for __string
- **Type** *(string) --* Placeholder documentation for InputType
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
class ListOfferings(Paginator):
def paginate(self, ChannelClass: str = None, ChannelConfiguration: str = None, Codec: str = None, MaximumBitrate: str = None, MaximumFramerate: str = None, Resolution: str = None, ResourceType: str = None, SpecialFeature: str = None, VideoQuality: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`MediaLive.Client.list_offerings`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListOfferings>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
ChannelClass='string',
ChannelConfiguration='string',
Codec='string',
MaximumBitrate='string',
MaximumFramerate='string',
Resolution='string',
ResourceType='string',
SpecialFeature='string',
VideoQuality='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Offerings': [
{
'Arn': 'string',
'CurrencyCode': 'string',
'Duration': 123,
'DurationUnits': 'MONTHS',
'FixedPrice': 123.0,
'OfferingDescription': 'string',
'OfferingId': 'string',
'OfferingType': 'NO_UPFRONT',
'Region': 'string',
'ResourceSpecification': {
'ChannelClass': 'STANDARD'|'SINGLE_PIPELINE',
'Codec': 'MPEG2'|'AVC'|'HEVC'|'AUDIO',
'MaximumBitrate': 'MAX_10_MBPS'|'MAX_20_MBPS'|'MAX_50_MBPS',
'MaximumFramerate': 'MAX_30_FPS'|'MAX_60_FPS',
'Resolution': 'SD'|'HD'|'UHD',
'ResourceType': 'INPUT'|'OUTPUT'|'CHANNEL',
'SpecialFeature': 'ADVANCED_AUDIO'|'AUDIO_NORMALIZATION',
'VideoQuality': 'STANDARD'|'ENHANCED'|'PREMIUM'
},
'UsagePrice': 123.0
},
]
}
**Response Structure**
- *(dict) --* List of offerings
- **Offerings** *(list) --* List of offerings
- *(dict) --* Reserved resources available for purchase
- **Arn** *(string) --* Unique offering ARN, e.g. 'arn:aws:medialive:us-west-2:123456789012:offering:87654321'
- **CurrencyCode** *(string) --* Currency code for usagePrice and fixedPrice in ISO-4217 format, e.g. 'USD'
- **Duration** *(integer) --* Lease duration, e.g. '12'
- **DurationUnits** *(string) --* Units for duration, e.g. 'MONTHS'
- **FixedPrice** *(float) --* One-time charge for each reserved resource, e.g. '0.0' for a NO_UPFRONT offering
- **OfferingDescription** *(string) --* Offering description, e.g. 'HD AVC output at 10-20 Mbps, 30 fps, and standard VQ in US West (Oregon)'
- **OfferingId** *(string) --* Unique offering ID, e.g. '87654321'
- **OfferingType** *(string) --* Offering type, e.g. 'NO_UPFRONT'
- **Region** *(string) --* AWS region, e.g. 'us-west-2'
- **ResourceSpecification** *(dict) --* Resource configuration details
- **ChannelClass** *(string) --* Channel class, e.g. 'STANDARD'
- **Codec** *(string) --* Codec, e.g. 'AVC'
- **MaximumBitrate** *(string) --* Maximum bitrate, e.g. 'MAX_20_MBPS'
- **MaximumFramerate** *(string) --* Maximum framerate, e.g. 'MAX_30_FPS' (Outputs only)
- **Resolution** *(string) --* Resolution, e.g. 'HD'
- **ResourceType** *(string) --* Resource type, 'INPUT', 'OUTPUT', or 'CHANNEL'
- **SpecialFeature** *(string) --* Special feature, e.g. 'AUDIO_NORMALIZATION' (Channels only)
- **VideoQuality** *(string) --* Video quality, e.g. 'STANDARD' (Outputs only)
- **UsagePrice** *(float) --* Recurring usage charge for each reserved resource, e.g. '157.0'
:type ChannelClass: string
:param ChannelClass: Filter by channel class, \'STANDARD\' or \'SINGLE_PIPELINE\'
:type ChannelConfiguration: string
:param ChannelConfiguration: Filter to offerings that match the configuration of an existing channel, e.g. \'2345678\' (a channel ID)
:type Codec: string
:param Codec: Filter by codec, \'AVC\', \'HEVC\', \'MPEG2\', or \'AUDIO\'
:type MaximumBitrate: string
:param MaximumBitrate: Filter by bitrate, \'MAX_10_MBPS\', \'MAX_20_MBPS\', or \'MAX_50_MBPS\'
:type MaximumFramerate: string
:param MaximumFramerate: Filter by framerate, \'MAX_30_FPS\' or \'MAX_60_FPS\'
:type Resolution: string
:param Resolution: Filter by resolution, \'SD\', \'HD\', or \'UHD\'
:type ResourceType: string
:param ResourceType: Filter by resource type, \'INPUT\', \'OUTPUT\', or \'CHANNEL\'
:type SpecialFeature: string
:param SpecialFeature: Filter by special feature, \'ADVANCED_AUDIO\' or \'AUDIO_NORMALIZATION\'
:type VideoQuality: string
:param VideoQuality: Filter by video quality, \'STANDARD\', \'ENHANCED\', or \'PREMIUM\'
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
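# Usage sketch for the offerings paginator documented above (not part of the
# generated stubs); the filter values are placeholders drawn from the
# enumerations listed in the request parameters.
import boto3
medialive = boto3.client('medialive')
for page in medialive.get_paginator('list_offerings').paginate(
        ChannelClass='STANDARD',
        Codec='AVC',
        Resolution='HD',
        PaginationConfig={'PageSize': 20}):
    for offering in page.get('Offerings', []):
        print(offering.get('OfferingId'), offering.get('OfferingDescription'))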
class ListReservations(Paginator):
def paginate(self, ChannelClass: str = None, Codec: str = None, MaximumBitrate: str = None, MaximumFramerate: str = None, Resolution: str = None, ResourceType: str = None, SpecialFeature: str = None, VideoQuality: str = None, PaginationConfig: Dict = None) -> Dict:
"""
Creates an iterator that will paginate through responses from :py:meth:`MediaLive.Client.list_reservations`.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListReservations>`_
**Request Syntax**
::
response_iterator = paginator.paginate(
ChannelClass='string',
Codec='string',
MaximumBitrate='string',
MaximumFramerate='string',
Resolution='string',
ResourceType='string',
SpecialFeature='string',
VideoQuality='string',
PaginationConfig={
'MaxItems': 123,
'PageSize': 123,
'StartingToken': 'string'
}
)
**Response Syntax**
::
{
'Reservations': [
{
'Arn': 'string',
'Count': 123,
'CurrencyCode': 'string',
'Duration': 123,
'DurationUnits': 'MONTHS',
'End': 'string',
'FixedPrice': 123.0,
'Name': 'string',
'OfferingDescription': 'string',
'OfferingId': 'string',
'OfferingType': 'NO_UPFRONT',
'Region': 'string',
'ReservationId': 'string',
'ResourceSpecification': {
'ChannelClass': 'STANDARD'|'SINGLE_PIPELINE',
'Codec': 'MPEG2'|'AVC'|'HEVC'|'AUDIO',
'MaximumBitrate': 'MAX_10_MBPS'|'MAX_20_MBPS'|'MAX_50_MBPS',
'MaximumFramerate': 'MAX_30_FPS'|'MAX_60_FPS',
'Resolution': 'SD'|'HD'|'UHD',
'ResourceType': 'INPUT'|'OUTPUT'|'CHANNEL',
'SpecialFeature': 'ADVANCED_AUDIO'|'AUDIO_NORMALIZATION',
'VideoQuality': 'STANDARD'|'ENHANCED'|'PREMIUM'
},
'Start': 'string',
'State': 'ACTIVE'|'EXPIRED'|'CANCELED'|'DELETED',
'Tags': {
'string': 'string'
},
'UsagePrice': 123.0
},
]
}
**Response Structure**
- *(dict) --* List of reservations
- **Reservations** *(list) --* List of reservations
- *(dict) --* Reserved resources available to use
- **Arn** *(string) --* Unique reservation ARN, e.g. 'arn:aws:medialive:us-west-2:123456789012:reservation:1234567'
- **Count** *(integer) --* Number of reserved resources
- **CurrencyCode** *(string) --* Currency code for usagePrice and fixedPrice in ISO-4217 format, e.g. 'USD'
- **Duration** *(integer) --* Lease duration, e.g. '12'
- **DurationUnits** *(string) --* Units for duration, e.g. 'MONTHS'
- **End** *(string) --* Reservation UTC end date and time in ISO-8601 format, e.g. '2019-03-01T00:00:00'
- **FixedPrice** *(float) --* One-time charge for each reserved resource, e.g. '0.0' for a NO_UPFRONT offering
- **Name** *(string) --* User specified reservation name
- **OfferingDescription** *(string) --* Offering description, e.g. 'HD AVC output at 10-20 Mbps, 30 fps, and standard VQ in US West (Oregon)'
- **OfferingId** *(string) --* Unique offering ID, e.g. '87654321'
- **OfferingType** *(string) --* Offering type, e.g. 'NO_UPFRONT'
- **Region** *(string) --* AWS region, e.g. 'us-west-2'
- **ReservationId** *(string) --* Unique reservation ID, e.g. '1234567'
- **ResourceSpecification** *(dict) --* Resource configuration details
- **ChannelClass** *(string) --* Channel class, e.g. 'STANDARD'
- **Codec** *(string) --* Codec, e.g. 'AVC'
- **MaximumBitrate** *(string) --* Maximum bitrate, e.g. 'MAX_20_MBPS'
- **MaximumFramerate** *(string) --* Maximum framerate, e.g. 'MAX_30_FPS' (Outputs only)
- **Resolution** *(string) --* Resolution, e.g. 'HD'
- **ResourceType** *(string) --* Resource type, 'INPUT', 'OUTPUT', or 'CHANNEL'
- **SpecialFeature** *(string) --* Special feature, e.g. 'AUDIO_NORMALIZATION' (Channels only)
- **VideoQuality** *(string) --* Video quality, e.g. 'STANDARD' (Outputs only)
- **Start** *(string) --* Reservation UTC start date and time in ISO-8601 format, e.g. '2018-03-01T00:00:00'
- **State** *(string) --* Current state of reservation, e.g. 'ACTIVE'
- **Tags** *(dict) --* A collection of key-value pairs
- *(string) --* Placeholder documentation for __string
- *(string) --* Placeholder documentation for __string
- **UsagePrice** *(float) --* Recurring usage charge for each reserved resource, e.g. '157.0'
:type ChannelClass: string
:param ChannelClass: Filter by channel class, \'STANDARD\' or \'SINGLE_PIPELINE\'
:type Codec: string
:param Codec: Filter by codec, \'AVC\', \'HEVC\', \'MPEG2\', or \'AUDIO\'
:type MaximumBitrate: string
:param MaximumBitrate: Filter by bitrate, \'MAX_10_MBPS\', \'MAX_20_MBPS\', or \'MAX_50_MBPS\'
:type MaximumFramerate: string
:param MaximumFramerate: Filter by framerate, \'MAX_30_FPS\' or \'MAX_60_FPS\'
:type Resolution: string
:param Resolution: Filter by resolution, \'SD\', \'HD\', or \'UHD\'
:type ResourceType: string
:param ResourceType: Filter by resource type, \'INPUT\', \'OUTPUT\', or \'CHANNEL\'
:type SpecialFeature: string
:param SpecialFeature: Filter by special feature, \'ADVANCED_AUDIO\' or \'AUDIO_NORMALIZATION\'
:type VideoQuality: string
:param VideoQuality: Filter by video quality, \'STANDARD\', \'ENHANCED\', or \'PREMIUM\'
:type PaginationConfig: dict
:param PaginationConfig:
A dictionary that provides parameters to control pagination.
- **MaxItems** *(integer) --*
The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination.
- **PageSize** *(integer) --*
The size of each page.
- **StartingToken** *(string) --*
A token to specify where to start paginating. This is the ``NextToken`` from a previous response.
:rtype: dict
:returns:
"""
pass
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-01-25 07:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('landing', '0006_lead'),
]
operations = [
migrations.AddField(
model_name='lead',
name='name',
field=models.CharField(blank=True, max_length=64, null=True),
),
]
|
from scipy.spatial import distance
# k-mer length; the relative-frequency files compared here were built for 6-mers
# (files named like "6_mercount_relative.txt").
kmer = 6
# Read the list of k-mer count files to compare.
files = []
with open("filelist_6.txt", "r") as file_list:
    for line in file_list:
        files.append(line.rstrip())
dirsize = len(files)
def read_vector(path):
    # Each input file holds one k-mer per line as "<kmer>\t<relative count>",
    # e.g. "AAAAAA\t0.0153" (illustrative values); only the numeric column is used.
    vector = []
    with open(path, "r") as handle:
        for line in handle:
            vector.append(float(line.rstrip().split("\t")[1]))
    return vector
# Pairwise Euclidean distances between the k-mer frequency vectors,
# one row/column per input file.
dist = [[0.0 for _ in range(dirsize)] for _ in range(dirsize)]
for i in range(dirsize):
    vector1 = read_vector(files[i])
    for j in range(dirsize):
        vector2 = read_vector(files[j])
        dist[i][j] = float(distance.euclidean(vector1, vector2))
print(dist)
|
# -*- coding: utf-8 -*-
from .context import bot
from bot.strategy import circle
if __name__ == '__main__':
circle.test()
|
r"""
Subcrystals
These are the crystals that are subsets of a larger ambient crystal.
AUTHORS:
- Travis Scrimshaw (2013-10-16): Initial implementation
"""
#*****************************************************************************
# Copyright (C) 2013 Travis Scrimshaw <tscrim at ucdavis.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#****************************************************************************
from sage.misc.lazy_attribute import lazy_attribute
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.element import parent
from sage.structure.parent import Parent
from sage.structure.element_wrapper import ElementWrapper
from sage.categories.crystals import Crystals
from sage.categories.finite_crystals import FiniteCrystals
from sage.combinat.root_system.cartan_type import CartanType
from sage.rings.integer import Integer
from sage.rings.infinity import infinity
class Subcrystal(UniqueRepresentation, Parent):
"""
A subcrystal `X` of an ambient crystal `Y` is a crystal formed by taking a
subset of `Y` and whose crystal structure is induced by `Y`.
INPUT:
- ``ambient`` -- the ambient crystal
- ``contained`` -- (optional) a set (or function) which specifies when an
element is contained in the subcrystal; by default, every element of the
ambient crystal is included
- ``generators`` -- (optional) the generators for the subcrystal; the
default is the generators for the ambient crystal
- ``virtualization``, ``scaling_factors`` -- (optional)
dictionaries whose key `i` corresponds to the sets `\sigma_i`
and `\gamma_i` respectively used to define virtual crystals; see
:class:`~sage.combinat.crystals.virtual_crystal.VirtualCrystal`
- ``cartan_type`` -- (optional) the Cartan type for the subcrystal; the
default is the Cartan type for the ambient crystal
- ``index_set`` -- (optional) the index set for the subcrystal; the
default is the index set for the Cartan type
- ``category`` -- (optional) the category for the subcrystal; the
default is the :class:`~sage.categories.crystals.Crystals` category
.. SEEALSO::
:meth:`~sage.categories.crystals.Crystals.ParentMethods.subcrystal`
EXAMPLES:
We build out a subcrystal starting from an element and only going
to the lowest weight::
sage: B = crystals.Tableaux(['A',3], shape=[2,1])
sage: S = B.subcrystal(generators=[B(3,1,2)], direction='lower')
sage: S.cardinality()
11
Here we build out in both directions starting from an element, but we
also have restricted ourselves to type `A_2`::
sage: T = B.subcrystal(index_set=[1,2], generators=[B(3,1,1)])
sage: T.cardinality()
8
sage: list(T)
[[[1, 1], [3]],
[[1, 2], [3]],
[[1, 1], [2]],
[[1, 2], [2]],
[[2, 2], [3]],
[[2, 3], [3]],
[[1, 3], [2]],
[[1, 3], [3]]]
Now we take the crystal corresponding to the intersection of
the previous two subcrystals::
sage: U = B.subcrystal(contained=lambda x: x in S and x in T, generators=B)
sage: list(U)
[[[2, 3], [3]], [[1, 2], [3]], [[2, 2], [3]]]
.. TODO::
Include support for subcrystals which only contains certain arrows.
"""
@staticmethod
def __classcall_private__(cls, ambient, contained=None, generators=None,
virtualization=None, scaling_factors=None,
cartan_type=None, index_set=None, category=None):
"""
Normalize arguments to ensure a (relatively) unique representation.
EXAMPLES::
sage: B = crystals.Tableaux(['A',4], shape=[2,1])
sage: S1 = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
sage: S2 = B.subcrystal(generators=[B(2,1,1), B(5,2,4)], cartan_type=['A',4], index_set=(1,2))
sage: S1 is S2
True
"""
if isinstance(contained, (list, tuple, set, frozenset)):
contained = frozenset(contained)
#elif contained in Sets():
if cartan_type is None:
cartan_type = ambient.cartan_type()
else:
cartan_type = CartanType(cartan_type)
if index_set is None:
index_set = cartan_type.index_set()
if generators is None:
generators = ambient.module_generators
category = Crystals().or_subcategory(category)
if ambient in FiniteCrystals() or isinstance(contained, frozenset):
category = category.Finite()
if virtualization is not None:
if scaling_factors is None:
scaling_factors = {i:1 for i in index_set}
from sage.combinat.crystals.virtual_crystal import VirtualCrystal
return VirtualCrystal(ambient, virtualization, scaling_factors, contained,
generators, cartan_type, index_set, category)
if scaling_factors is not None:
# virtualization must be None
virtualization = {i:(i,) for i in index_set}
from sage.combinat.crystals.virtual_crystal import VirtualCrystal
return VirtualCrystal(ambient, virtualization, scaling_factors, contained,
generators, cartan_type, index_set, category)
# We need to give these as optional arguments so it unpickles correctly
return super(Subcrystal, cls).__classcall__(cls, ambient, contained,
tuple(generators),
cartan_type=cartan_type,
index_set=tuple(index_set),
category=category)
def __init__(self, ambient, contained, generators, cartan_type, index_set, category):
"""
Initialize ``self``.
EXAMPLES::
sage: B = crystals.Tableaux(['A',4], shape=[2,1])
sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
sage: TestSuite(S).run()
"""
self._ambient = ambient
self._contained = contained
self._cardinality = None # ``None`` means currently unknown
self._cartan_type = cartan_type
self._index_set = tuple(index_set)
Parent.__init__(self, category=category)
self.module_generators = tuple(self.element_class(self, g) for g in generators
if self._containing(g))
if isinstance(contained, frozenset):
self._cardinality = Integer(len(contained))
self._list = [self.element_class(self, x) for x in contained]
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: B = crystals.Tableaux(['A',4], shape=[2,1])
sage: B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
Subcrystal of The crystal of tableaux of type ['A', 4] and shape(s) [[2, 1]]
"""
return "Subcrystal of {}".format(self._ambient)
@lazy_attribute
def _containing(self):
"""
Check if ``x`` is contained in ``self``.
EXAMPLES::
sage: B = crystals.Tableaux(['A',4], shape=[2,1])
sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
sage: S._containing(B(5,2,4))
True
sage: S._containing(B(4,2,4))
True
"""
if self._contained is None:
return lambda x: True
if isinstance(self._contained, frozenset):
return self._contained.__contains__
return self._contained # Otherwise it should be a function
def __contains__(self, x):
"""
Check if ``x`` is in ``self``.
EXAMPLES::
sage: B = crystals.Tableaux(['A',4], shape=[2,1])
sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
sage: B(5,2,4) in S
True
sage: mg = B.module_generators[0]
sage: mg in S
True
sage: mg.f(2).f(3) in S
False
"""
if isinstance(x, Subcrystal.Element) and x.parent() == self:
return True
if x in self._ambient:
if not self._containing(x):
return False
x = self.element_class(self, x)
if self in FiniteCrystals():
return x in self.list()
# TODO: make this work for infinite crystals
import warnings
warnings.warn("Testing containment in an infinite crystal"
" defaults to returning True")
return True
def cardinality(self):
"""
Return the cardinality of ``self``.
EXAMPLES::
sage: B = crystals.Tableaux(['A',4], shape=[2,1])
sage: S = B.subcrystal(generators=[B(2,1,1)], index_set=[1,2])
sage: S.cardinality()
8
sage: B = crystals.infinity.Tableaux(['A',2])
sage: S = B.subcrystal(max_depth=4)
sage: S.cardinality()
22
TESTS:
Check that :trac:`19481` is fixed::
sage: from sage.combinat.crystals.virtual_crystal import VirtualCrystal
sage: A = crystals.infinity.Tableaux(['A',3])
sage: V = VirtualCrystal(A, {1:(1,3), 2:(2,)}, {1:1, 2:2}, cartan_type=['C',2])
sage: V.cardinality()
Traceback (most recent call last):
...
NotImplementedError: unknown cardinality
"""
if self._cardinality is not None:
return self._cardinality
try:
card = Integer(len(self._list))
self._cardinality = card
return self._cardinality
except AttributeError:
if self in FiniteCrystals():
return Integer(len(self.list()))
try:
card = super(Subcrystal, self).cardinality()
except AttributeError:
raise NotImplementedError("unknown cardinality")
if card == infinity:
self._cardinality = card
return card
self._cardinality = Integer(len(self.list()))
return self._cardinality
def index_set(self):
"""
Return the index set of ``self``.
EXAMPLES::
sage: B = crystals.Tableaux(['A',4], shape=[2,1])
sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
sage: S.index_set()
(1, 2)
"""
return self._index_set
class Element(ElementWrapper):
"""
An element of a subcrystal. Wraps an element in the ambient crystal.
"""
def __eq__(self, other):
"""
Check equality of ``self`` and ``other``. The doctest below sorts a subcrystal, which exercises the element comparison methods.
EXAMPLES::
sage: A = crystals.KirillovReshetikhin(['C',2,1], 1,2).affinization()
sage: S = A.subcrystal(max_depth=2)
sage: sorted(S)
[[[1, 1]](-1),
[[1, 2]](-1),
[](0),
[[1, 1]](0),
[[1, 2]](0),
[[1, -2]](0),
[[2, 2]](0),
[](1),
[[2, -1]](1),
[[-2, -1]](1),
[[-1, -1]](1),
[[-1, -1]](2)]
"""
return parent(self) is parent(other) and self.value == other.value
def __ne__(self, other):
"""
TESTS::
sage: A = crystals.KirillovReshetikhin(['C',2,1], 1,2).affinization()
sage: S = A.subcrystal(max_depth=2)
sage: ([(i,j) for i in range(len(S)) for j in range(len(S)) if S[i]!=S[j]]
....: == [(i,j) for i in range(len(S)) for j in range(len(S)) if
....: S[i].value!=S[j].value])
True
"""
return parent(self) is not parent(other) or self.value != other.value
def __lt__(self, other):
"""
TESTS::
sage: A = crystals.KirillovReshetikhin(['C',2,1], 1,2).affinization()
sage: S = A.subcrystal(max_depth=2)
sage: ([(i,j) for i in range(len(S)) for j in range(len(S)) if S[i]<S[j]]
....: == [(i,j) for i in range(len(S)) for j in range(len(S)) if
....: S[i].value<S[j].value])
True
"""
return parent(self) is parent(other) and self.value < other.value
def __le__(self, other):
"""
TESTS::
sage: A = crystals.KirillovReshetikhin(['C',2,1], 1,2).affinization()
sage: S = A.subcrystal(max_depth=2)
sage: ([(i,j) for i in range(len(S)) for j in range(len(S)) if S[i]<=S[j]]
....: == [(i,j) for i in range(len(S)) for j in range(len(S)) if
....: S[i].value<=S[j].value])
True
"""
return parent(self) is parent(other) and self.value <= other.value
def __gt__(self, other):
"""
TESTS::
sage: A = crystals.KirillovReshetikhin(['C',2,1], 1,2).affinization()
sage: S = A.subcrystal(max_depth=2)
sage: ([(i,j) for i in range(len(S)) for j in range(len(S)) if S[i]>S[j]]
....: == [(i,j) for i in range(len(S)) for j in range(len(S)) if
....: S[i].value>S[j].value])
True
"""
return parent(self) is parent(other) and self.value > other.value
def __ge__(self, other):
"""
TESTS::
sage: A = crystals.KirillovReshetikhin(['C',2,1], 1,2).affinization()
sage: S = A.subcrystal(max_depth=2)
sage: ([(i,j) for i in range(len(S)) for j in range(len(S)) if S[i]>=S[j]]
....: == [(i,j) for i in range(len(S)) for j in range(len(S)) if
....: S[i].value>=S[j].value])
True
"""
return parent(self) is parent(other) and self.value >= other.value
def __cmp__(self, other):
"""
TESTS::
sage: A = crystals.KirillovReshetikhin(['C',2,1], 1,2).affinization()
sage: S = A.subcrystal(max_depth=2)
sage: ([(i,j,cmp(S[i],S[j])) for i in range(len(S)) for j in range(len(S))]
....: == [(i,j,cmp(S[i].value,S[j].value)) for i in range(len(S)) for j in range(len(S))])
True
"""
if parent(self) is parent(other):
return cmp(self.value, other.value)
else:
return cmp(parent(self), parent(other))
def e(self, i):
"""
Return `e_i` of ``self``.
EXAMPLES::
sage: B = crystals.Tableaux(['A',4], shape=[2,1])
sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
sage: mg = S.module_generators[1]
sage: mg.e(2)
sage: mg.e(1)
[[1, 4], [5]]
"""
ret = self.value.e(i)
if ret is None or not self.parent()._containing(ret):
return None
return self.__class__(self.parent(), ret)
def f(self, i):
"""
Return `f_i` of ``self``.
EXAMPLES::
sage: B = crystals.Tableaux(['A',4], shape=[2,1])
sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
sage: mg = S.module_generators[1]
sage: mg.f(1)
sage: mg.f(2)
[[3, 4], [5]]
"""
ret = self.value.f(i)
if ret is None or not self.parent()._containing(ret):
return None
return self.__class__(self.parent(), ret)
def epsilon(self, i):
r"""
Return `\varepsilon_i` of ``self``.
EXAMPLES::
sage: B = crystals.Tableaux(['A',4], shape=[2,1])
sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
sage: mg = S.module_generators[1]
sage: mg.epsilon(1)
1
sage: mg.epsilon(2)
0
"""
return self.value.epsilon(i)
def phi(self, i):
r"""
Return `\varphi_i` of ``self``.
EXAMPLES::
sage: B = crystals.Tableaux(['A',4], shape=[2,1])
sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
sage: mg = S.module_generators[1]
sage: mg.phi(1)
0
sage: mg.phi(2)
1
"""
return self.value.phi(i)
def weight(self):
"""
Return the weight of ``self``.
EXAMPLES::
sage: B = crystals.Tableaux(['A',4], shape=[2,1])
sage: S = B.subcrystal(generators=(B(2,1,1), B(5,2,4)), index_set=[1,2])
sage: mg = S.module_generators[1]
sage: mg.weight()
(0, 1, 0, 1, 1)
"""
return self.value.weight()
|
import configparser
import sys
from datetime import datetime
import requests
from requests.auth import HTTPBasicAuth
def print_usage():
    print('1.) configure the [BASE] section (user, password, prometheus_uri) in application.properties\n'
          '2.) call with prometheusChamp <QUERY> <STARTISOTIME> <ENDISOTIME> <STEPINSECONDS>\n'
          '    example: prometheusChamp <QUERY> 2020-09-22T14:20 2020-09-23T14:20 60')
def extract(value):
    # Prometheus range vectors are [timestamp, "value"] pairs; keep the value as a float.
    return float(value[1])
def extract_list_of_values(response):
    values = response.json()['data']['result'][0]['values']
    return list(map(extract, values))
def parseIsoToUnix(param):
result = datetime.fromisoformat(param)
return str(result.timestamp())
if __name__ == '__main__':
    if len(sys.argv) < 5 or sys.argv[1] == "help":
        print_usage()
else:
config = configparser.RawConfigParser()
config.read("application.properties")
details_config = dict(config.items("BASE"))
username = details_config['user']
password = details_config['password']
start = parseIsoToUnix(sys.argv[2])
end = parseIsoToUnix(sys.argv[3])
queryUri = details_config['prometheus_uri'] + sys.argv[1] + "&start=" + start + "&end=" + end + "&step=" + sys.argv[4]
print(queryUri)
r = requests.get(queryUri, auth=HTTPBasicAuth(username, password))
        if r.status_code == 200:
            data_for_calculation = extract_list_of_values(r)
            print("MAX", max(data_for_calculation))
            print("MIN", min(data_for_calculation))
            print("AVG", sum(data_for_calculation) / len(data_for_calculation))
        else:
            print("Request failed with HTTP status", r.status_code)
|
#!/usr/bin/env python3
# Copyright (c) 2019-2020 The Wazzle Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test label RPCs.
RPCs tested are:
- getaddressesbylabel
- listaddressgroupings
- setlabel
"""
from collections import defaultdict
from test_framework.test_framework import WazzleTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class WalletLabelsTest(WazzleTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Check that there's no UTXO on the node
node = self.nodes[0]
assert_equal(len(node.listunspent()), 0)
# Note each time we call generate, all generated coins go into
# the same address, so we call twice to get two addresses w/50 each
node.generatetoaddress(nblocks=1, address=node.getnewaddress(label='coinbase'))
node.generatetoaddress(nblocks=101, address=node.getnewaddress(label='coinbase'))
assert_equal(node.getbalance(), 100)
# there should be 2 address groups
# each with 1 address with a balance of 50 Wazzles
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 2)
# the addresses aren't linked now, but will be after we send to the
# common address
linked_addresses = set()
for address_group in address_groups:
assert_equal(len(address_group), 1)
assert_equal(len(address_group[0]), 3)
assert_equal(address_group[0][1], 50)
assert_equal(address_group[0][2], 'coinbase')
linked_addresses.add(address_group[0][0])
# send 50 from each address to a third address not in this wallet
common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr"
node.sendmany(
amounts={common_address: 100},
subtractfeefrom=[common_address],
minconf=1,
)
# there should be 1 address group, with the previously
# unlinked addresses now linked (they both have 0 balance)
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 1)
assert_equal(len(address_groups[0]), 2)
assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
assert_equal([a[1] for a in address_groups[0]], [0, 0])
node.generate(1)
# we want to reset so that the "" label has what's expected.
# otherwise we're off by exactly the fee amount as that's mined
# and matures in the next 100 blocks
amount_to_send = 1.0
# Create labels and make sure subsequent label API calls
# recognize the label/address associations.
labels = [Label(name) for name in ("a", "b", "c", "d", "e")]
for label in labels:
address = node.getnewaddress(label.name)
label.add_receive_address(address)
label.verify(node)
# Check all labels are returned by listlabels.
assert_equal(node.listlabels(), sorted(['coinbase'] + [label.name for label in labels]))
# Send a transaction to each label.
for label in labels:
node.sendtoaddress(label.addresses[0], amount_to_send)
label.verify(node)
# Check the amounts received.
node.generate(1)
for label in labels:
assert_equal(
node.getreceivedbyaddress(label.addresses[0]), amount_to_send)
assert_equal(node.getreceivedbylabel(label.name), amount_to_send)
for i, label in enumerate(labels):
to_label = labels[(i + 1) % len(labels)]
node.sendtoaddress(to_label.addresses[0], amount_to_send)
node.generate(1)
for label in labels:
address = node.getnewaddress(label.name)
label.add_receive_address(address)
label.verify(node)
assert_equal(node.getreceivedbylabel(label.name), 2)
label.verify(node)
node.generate(101)
# Check that setlabel can assign a label to a new unused address.
for label in labels:
address = node.getnewaddress()
node.setlabel(address, label.name)
label.add_address(address)
label.verify(node)
assert_raises_rpc_error(-11, "No addresses with label", node.getaddressesbylabel, "")
# Check that addmultisigaddress can assign labels.
for label in labels:
addresses = []
for x in range(10):
addresses.append(node.getnewaddress())
multisig_address = node.addmultisigaddress(5, addresses, label.name)['address']
label.add_address(multisig_address)
label.purpose[multisig_address] = "send"
label.verify(node)
node.generate(101)
# Check that setlabel can change the label of an address from a
# different label.
change_label(node, labels[0].addresses[0], labels[0], labels[1])
# Check that setlabel can set the label of an address already
# in the label. This is a no-op.
change_label(node, labels[2].addresses[0], labels[2], labels[2])
class Label:
def __init__(self, name):
# Label name
self.name = name
# Current receiving address associated with this label.
self.receive_address = None
# List of all addresses assigned with this label
self.addresses = []
# Map of address to address purpose
self.purpose = defaultdict(lambda: "receive")
def add_address(self, address):
assert_equal(address not in self.addresses, True)
self.addresses.append(address)
def add_receive_address(self, address):
self.add_address(address)
def verify(self, node):
if self.receive_address is not None:
assert self.receive_address in self.addresses
for address in self.addresses:
assert_equal(
node.getaddressinfo(address)['labels'][0],
{"name": self.name,
"purpose": self.purpose[address]})
assert_equal(node.getaddressinfo(address)['label'], self.name)
assert_equal(
node.getaddressesbylabel(self.name),
{address: {"purpose": self.purpose[address]} for address in self.addresses})
def change_label(node, address, old_label, new_label):
assert_equal(address in old_label.addresses, True)
node.setlabel(address, new_label.name)
old_label.addresses.remove(address)
new_label.add_address(address)
old_label.verify(node)
new_label.verify(node)
if __name__ == '__main__':
WalletLabelsTest().main()
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement, unicode_literals
import functools
import os
import sys
import copy
import time
import types
import signal
import random
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
import salt.serializers.msgpack
from binascii import crc32
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
import salt.defaults.exitcodes
from salt.utils.ctx import RequestContext
# pylint: enable=no-name-in-module,redefined-builtin
import tornado
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
try:
import salt.utils.win_functions
HAS_WIN_FUNCTIONS = True
except ImportError:
HAS_WIN_FUNCTIONS = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.pillar
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.minion
import salt.utils.minions
import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
import salt.utils.dictupdate
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess,
ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
SaltMasterUnresolvableError
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts, fallback=True):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
# Since salt.log is imported below, salt.utils.network needs to be imported here as well
import salt.utils.network
if check_dns is True:
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = salt.utils.network.dns_check(
opts['master'],
int(opts['master_port']),
True,
opts['ipv6'])
except SaltClientError:
retry_dns_count = opts.get('retry_dns_count', None)
if opts['retry_dns']:
while True:
if retry_dns_count is not None:
if retry_dns_count == 0:
raise SaltMasterUnresolvableError
retry_dns_count -= 1
import salt.log
msg = ('Master hostname: \'{0}\' not found or not responsive. '
'Retrying in {1} seconds').format(opts['master'], opts['retry_dns'])
if salt.log.setup.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.network.dns_check(
opts['master'],
int(opts['master_port']),
True,
opts['ipv6'])
break
except SaltClientError:
pass
else:
if fallback:
ret['master_ip'] = '127.0.0.1'
else:
raise
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. ' \
'Set \'syndic_master\' value in minion config.'.format(master)
else:
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. ' \
'Set \'master\' value in minion config.'.format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning(
'Master ip address changed from %s to %s',
opts['master_ip'], ret['master_ip']
)
if opts['source_interface_name']:
log.trace('Custom source interface required: %s', opts['source_interface_name'])
interfaces = salt.utils.network.interfaces()
log.trace('The following interfaces are available on this Minion:')
log.trace(interfaces)
if opts['source_interface_name'] in interfaces:
if interfaces[opts['source_interface_name']]['up']:
addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\
interfaces[opts['source_interface_name']]['inet6']
ret['source_ip'] = addrs[0]['address']
log.debug('Using %s as source IP address', ret['source_ip'])
else:
log.warning('The interface %s is down so it cannot be used as source to connect to the Master',
opts['source_interface_name'])
else:
log.warning('%s is not a valid interface. Ignoring.', opts['source_interface_name'])
elif opts['source_address']:
ret['source_ip'] = salt.utils.network.dns_check(
opts['source_address'],
int(opts['source_ret_port']),
True,
opts['ipv6'])
log.debug('Using %s as source IP address', ret['source_ip'])
if opts['source_ret_port']:
ret['source_ret_port'] = int(opts['source_ret_port'])
log.debug('Using %d as source port for the ret server', ret['source_ret_port'])
if opts['source_publish_port']:
ret['source_publish_port'] = int(opts['source_publish_port'])
log.debug('Using %d as source port for the master pub', ret['source_publish_port'])
ret['master_uri'] = 'tcp://{ip}:{port}'.format(
ip=ret['master_ip'], port=opts['master_port'])
log.debug('Master URI: %s', ret['master_uri'])
return ret
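# Illustrative example of what resolve_dns() returns (addresses are made up):
# with opts containing master: 'salt.example.com', master_port: 4506 and a
# resolvable hostname, the result looks like
#     {'master_ip': '203.0.113.10', 'master_uri': 'tcp://203.0.113.10:4506'}
# plus optional 'source_ip', 'source_ret_port' and 'source_publish_port'
# entries when the corresponding source_* options are configured.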
def prep_ip_port(opts):
ret = {}
# Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without
# a port specified. The is_ipv6 check returns False if brackets are used in the IP
# definition such as master: '[::1]:1234'.
if opts['master_uri_format'] == 'ip_only' or salt.utils.network.is_ipv6(opts['master']):
ret['master'] = opts['master']
else:
ip_port = opts['master'].rsplit(':', 1)
if len(ip_port) == 1:
# e.g. master: mysaltmaster
ret['master'] = ip_port[0]
else:
# e.g. master: localhost:1234
# e.g. master: 127.0.0.1:1234
# e.g. master: [::1]:1234
# Strip off brackets for ipv6 support
ret['master'] = ip_port[0].strip('[]')
# Cast port back to an int! Otherwise a TypeError is thrown
# on some of the socket calls elsewhere in the minion and utils code.
ret['master_port'] = int(ip_port[1])
return ret
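# Illustrative examples of prep_ip_port(), derived from the parsing above:
#     master: 'mysaltmaster'      -> {'master': 'mysaltmaster'}
#     master: '127.0.0.1:4506'    -> {'master': '127.0.0.1', 'master_port': 4506}
#     master: '[::1]:4506'        -> {'master': '::1', 'master_port': 4506}
#     master_uri_format: ip_only  -> {'master': <the master value, unchanged>}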
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
    mode: anything os.makedirs would accept as mode.
    uid: the uid to set. If it is not set, is None, or is -1, no changes are
         made. The same applies if the directory is already owned by this
         uid. Must be an int. Works only on unix/unix-like systems.
    gid: the gid to set. If it is not set, is None, or is -1, no changes are
         made. The same applies if the directory is already owned by this
         gid. Must be an int. Works only on unix/unix-like systems.
'''
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
# dir mode. So lets check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
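# Illustrative usage of get_proc_dir() (path and uid are examples only):
#     get_proc_dir('/var/cache/salt/minion', mode=0o700, uid=0)
# returns '/var/cache/salt/minion/proc', creating the directory with mode
# 0o700 if it does not exist, fixing the mode if it differs, and (on
# unix-like systems) chowning it to uid 0 while leaving the group unchanged.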
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
            # if the arg is a dict with __kwarg__ == True, then it's a kwarg
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632
if string_kwarg:
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.args.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
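# Illustrative example of load_args_and_kwargs() (the function is hypothetical):
# for a function defined as ``def myfunc(name, **kwargs)`` and
#     args = ['web01', {'__kwarg__': True, 'refresh': True}]
# the call returns (['web01'], {'refresh': True}); if a publish ``data`` dict
# is passed and the function accepts **kwargs, its keys are additionally
# packed in as __pub_<key> entries.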
def eval_master_func(opts):
'''
Evaluate master function if master type is 'func'
and save it result in opts['master']
'''
if '__master_func_evaluated' not in opts:
# split module and function and try loading the module
mod_fun = opts['master']
mod, fun = mod_fun.split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise KeyError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod_fun]()
# Check for valid types
if not isinstance(opts['master'], (six.string_types, list)):
raise TypeError
opts['__master_func_evaluated'] = True
except KeyError:
log.error('Failed to load module %s', mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except TypeError:
            log.error('%s returned from %s is not a string or a list', opts['master'], mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: %s', mod_fun)
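# Illustrative minion config for master_type 'func' (the module name is an
# assumed placeholder):
#     master_type: func
#     master: my_masters.get_master
# my_masters.get_master() is loaded via salt.loader.raw_mod() and must return
# either a string or a list of masters.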
def master_event(type, master=None):
'''
Centralized master event function which will return event type based on event_map
'''
event_map = {'connected': '__master_connected',
'disconnected': '__master_disconnected',
'failback': '__master_failback',
'alive': '__master_alive'}
if type == 'alive' and master is not None:
return '{0}_{1}'.format(event_map.get(type), master)
return event_map.get(type, None)
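# Examples, following directly from event_map above:
#     master_event(type='connected')                -> '__master_connected'
#     master_event(type='alive', master='10.0.0.1') -> '__master_alive_10.0.0.1'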
def service_name():
'''
Return the proper service name based on platform
'''
return 'salt_minion' if 'bsd' in sys.platform else 'salt-minion'
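# Example: on *BSD platforms (sys.platform contains 'bsd') this returns
# 'salt_minion'; everywhere else it returns 'salt-minion'.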
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
if hasattr(minion, 'schedule'):
minion.schedule.eval()
else:
log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
return
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error('Exception %s occurred in scheduled job', exc)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
if b_conf:
return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member
return []
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False,
failback=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minions initialization
phase (for example from the minions main event-loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
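        # Illustrative failover configuration (addresses are placeholders):
        #     master_type: failover
        #     master:
        #       - master1.example.com
        #       - master2.example.com
        #     master_alive_interval: 30
        # Note that retry_dns is forced to 0 below so the minion fails over to
        # the next master on DNS errors instead of retrying resolution.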
# return early if we are not connecting to a master
if opts['master_type'] == 'disable':
log.warning('Master is set to disable, skipping connection')
self.connected = False
raise tornado.gen.Return((None, None))
        # Run master discovery over SSDP. This may modify the whole configuration,
        # depending on the networking and the sets of masters.
self._discover_masters()
# check if master_type was altered from its default
if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
eval_master_func(opts)
# if failover or distributed is set, master has to be of type list
elif opts['master_type'] in ('failover', 'distributed'):
if isinstance(opts['master'], list):
log.info(
'Got list of available master addresses: %s',
opts['master']
)
if opts['master_type'] == 'distributed':
master_len = len(opts['master'])
if master_len > 1:
secondary_masters = opts['master'][1:]
master_idx = crc32(opts['id']) % master_len
try:
preferred_masters = opts['master']
preferred_masters[0] = opts['master'][master_idx]
preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]]
opts['master'] = preferred_masters
log.info('Distributed to the master at \'{0}\'.'.format(opts['master'][0]))
except (KeyError, AttributeError, TypeError):
log.warning('Failed to distribute to a specific master.')
else:
log.warning('master_type = distributed needs more than 1 master.')
if opts['master_shuffle']:
log.warning(
'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor '
'of \'random_master\'. Please update your minion config file.'
)
opts['random_master'] = opts['master_shuffle']
opts['auth_tries'] = 0
if opts['master_failback'] and opts['master_failback_interval'] == 0:
opts['master_failback_interval'] = opts['master_alive_interval']
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'%s\'', opts['master'])
# if failed=True, the minion was previously connected
# we're probably called from the minions main-event-loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
if failback:
# failback list of masters to original config
opts['master'] = opts['master_list']
else:
log.info(
'Moving possibly failed master %s to the end of '
'the list of masters', opts['master']
)
if opts['master'] in opts['local_masters']:
# create new list of master with the possibly failed
# one moved to the end
failed_master = opts['master']
opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
opts['master'].append(failed_master)
else:
opts['master'] = opts['master_list']
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
                # If failover is set, the minion has to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns'] and opts['master_type'] == 'failover':
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        # FIXME: if SMinion doesn't define io_loop, it can't switch master; see #29088
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
tries = opts.get('master_tries', 1)
attempts = 0
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
last_exc = None
opts['master_uri_list'] = []
opts['local_masters'] = copy.copy(opts['master'])
# shuffle the masters and then loop through them
if opts['random_master']:
# master_failback is only used when master_type is set to failover
if opts['master_type'] == 'failover' and opts['master_failback']:
secondary_masters = opts['local_masters'][1:]
shuffle(secondary_masters)
opts['local_masters'][1:] = secondary_masters
else:
shuffle(opts['local_masters'])
# This sits outside of the connection loop below because it needs to set
# up a list of master URIs regardless of which masters are available
# to connect _to_. This is primarily used for masterless mode, when
# we need a list of master URIs to fire calls back to.
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts['master_uri_list'].append(resolve_dns(opts)['master_uri'])
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = copy.copy(opts['local_masters'])
self.opts = opts
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
yield pub_channel.connect()
conn = True
break
except SaltClientError as exc:
last_exc = exc
if exc.strerror.startswith('Could not access'):
msg = (
'Failed to initiate connection with Master '
'%s: check ownership/permissions. Error '
'message: %s', opts['master'], exc
)
else:
msg = ('Master %s could not be reached, trying next '
'next master (if any)', opts['master'])
log.info(msg)
continue
if not conn:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
self.opts['master'] = copy.copy(self.opts['local_masters'])
log.error(
'No master could be reached or all masters '
'denied the minion\'s connection attempt.'
)
# If the code reaches this point, 'last_exc'
# should already be set.
raise last_exc # pylint: disable=E0702
else:
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
if opts['random_master']:
log.warning('random_master is True but there is only one master specified. Ignoring.')
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
try:
if self.opts['transport'] == 'detect':
self.opts['detect_mode'] = True
for trans in ('zeromq', 'tcp'):
if trans == 'zeromq' and not zmq:
continue
self.opts['transport'] = trans
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
if not pub_channel.auth.authenticated:
continue
del self.opts['detect_mode']
break
else:
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
except SaltClientError as exc:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
raise exc
def _discover_masters(self):
'''
Discover master(s) and decide where to connect, if SSDP is around.
This modifies the configuration on the fly.
:return:
'''
if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False:
master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient()
masters = {}
for att in range(self.opts['discovery'].get('attempts', 3)):
try:
att += 1
log.info('Attempting {0} time{1} to discover masters'.format(att, (att > 1 and 's' or '')))
masters.update(master_discovery_client.discover())
if not masters:
time.sleep(self.opts['discovery'].get('pause', 5))
else:
break
except Exception as err:
log.error('SSDP discovery failure: {0}'.format(err))
break
if masters:
policy = self.opts.get('discovery', {}).get('match', 'any')
if policy not in ['any', 'all']:
log.error('SSDP configuration matcher failure: unknown value "{0}". '
'Should be "any" or "all"'.format(policy))
else:
mapping = self.opts['discovery'].get('mapping', {})
for addr, mappings in masters.items():
for proto_data in mappings:
cnt = len([key for key, value in mapping.items()
if proto_data.get('mapping', {}).get(key) == value])
if policy == 'any' and bool(cnt) or cnt == len(mapping):
self.opts['master'] = proto_data['master']
return
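    # Illustrative minion config for the SSDP discovery options read above
    # (the keys mirror the self.opts['discovery'] lookups; the mapping entries
    # are placeholders matched against what discovered masters advertise):
    #     discovery:
    #       attempts: 3
    #       pause: 5
    #       match: any          # or 'all'
    #       mapping:
    #         description: example-datacenter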
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
'''
msg = 'Minion return retry timer set to {0} seconds'
# future lint: disable=str-format-in-logging
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug(msg.format(random_retry) + ' (randomized)')
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
'Invalid value (return_retry_timer: %s or '
'return_retry_timer_max: %s). Both must be positive '
'integers.',
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg.format(self.opts.get('return_retry_timer')))
return self.opts.get('return_retry_timer')
# future lint: enable=str-format-in-logging
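    # Illustrative behaviour of _return_retry_timer() above: with
    # return_retry_timer: 5 and return_retry_timer_max: 10 in the minion
    # config, a random value between 5 and 10 seconds is used; when the max
    # is unset (or falsy) the configured return_retry_timer is returned
    # unchanged.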
class SMinion(MinionBase):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
import salt.loader
opts['grains'] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
        # Connect to the master (needed when the file client is remote or the
        # master is used while local) before generating the minion modules
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
install_zmq()
io_loop = ZMQDefaultLoop.current()
io_loop.run_sync(
lambda: self.eval_master(self.opts, failed=True)
)
self.gen_modules(initial_load=True)
# If configured, cache pillar data on the minion
if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
import salt.utils.yaml
pdir = os.path.join(self.opts['cachedir'], 'pillar')
if not os.path.isdir(pdir):
os.makedirs(pdir, 0o700)
ptop = os.path.join(pdir, 'top.sls')
if self.opts['saltenv'] is not None:
penv = self.opts['saltenv']
else:
penv = 'base'
cache_top = {penv: {self.opts['id']: ['cache']}}
with salt.utils.files.fopen(ptop, 'wb') as fp_:
salt.utils.yaml.safe_dump(cache_top, fp_)
os.chmod(ptop, 0o600)
cache_sls = os.path.join(pdir, 'cache.sls')
with salt.utils.files.fopen(cache_sls, 'wb') as fp_:
salt.utils.yaml.safe_dump(self.opts['pillar'], fp_)
os.chmod(cache_sls, 0o600)
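            # With the settings above, the cached pillar ends up as two files
            # under <cachedir>/pillar: a top.sls of the form
            #     base:
            #       <minion id>:
            #         - cache
            # (using the configured saltenv instead of 'base' when set) and a
            # cache.sls holding the serialized contents of opts['pillar'].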
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils)
self.serializers = salt.loader.serializers(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
self.rend = salt.loader.render(self.opts, self.functions)
# self.matcher = Matcher(self.opts, self.functions)
self.matchers = salt.loader.matchers(self.opts)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts)
class MasterMinion(object):
'''
    Create a fully loaded minion function object for generic use on the
    master. What makes this class different is that the pillar is
    omitted; otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None,
ignore_config_errors=True):
self.opts = salt.config.minion_config(
opts['conf_file'],
ignore_config_errors=ignore_config_errors,
role='master'
)
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
self.serializers = salt.loader.serializers(self.opts)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matchers = salt.loader.matchers(self.opts)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionManager(MinionBase):
'''
    Create a multi-minion interface: this creates as many minions as are
    defined in the master option and binds each minion object to a
    respective master.
'''
def __init__(self, opts):
super(MinionManager, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self.minions = []
self.jid_queue = []
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
self.process_manager = ProcessManager(name='MultiMinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # Tornado backward compat
def __del__(self):
self.destroy()
def _bind(self):
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
io_loop=self.io_loop,
)
self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop)
self.event.subscribe('')
self.event.set_event_handler(self.handle_event)
@tornado.gen.coroutine
def handle_event(self, package):
yield [minion.handle_event(package) for minion in self.minions]
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return Minion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
def _check_minions(self):
'''
Check the size of self.minions and raise an error if it's empty
'''
if not self.minions:
err = ('Minion unable to successfully connect to '
'a Salt Master. Exiting.')
log.error(err)
raise SaltSystemExit(code=42, msg=err)
def _spawn_minions(self, timeout=60):
'''
Spawn all the coroutines which will sign in to masters
'''
masters = self.opts['master']
if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list):
masters = [masters]
for master in masters:
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
minion = self._create_minion_object(s_opts,
s_opts['auth_timeout'],
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
jid_queue=self.jid_queue,
)
self._connect_minion(minion)
self.io_loop.call_later(timeout, self._check_minions)
@tornado.gen.coroutine
def _connect_minion(self, minion):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = minion.opts['acceptance_wait_time']
failed = False
while True:
try:
if minion.opts.get('beacons_before_connect', False):
minion.setup_beacons(before_connect=True)
if minion.opts.get('scheduler_before_connect', False):
minion.setup_scheduler(before_connect=True)
yield minion.connect_master(failed=failed)
minion.tune_in(start=False)
self.minions.append(minion)
break
except SaltClientError as exc:
failed = True
log.error(
'Error while bringing up minion for multi-master. Is '
'master at %s responding?', minion.opts['master']
)
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except SaltMasterUnresolvableError:
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. ' \
                      'Set \'master\' value in minion config.'.format(minion.opts['master'])
log.error(err)
break
except Exception as e:
failed = True
log.critical(
'Unexpected error while connecting to %s',
minion.opts['master'], exc_info=True
)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
self._bind()
# Fire off all the minion coroutines
self._spawn_minions()
# serve forever!
self.io_loop.start()
@property
def restart(self):
for minion in self.minions:
if minion.restart:
return True
return False
def stop(self, signum):
for minion in self.minions:
minion.process_manager.stop_restarting()
minion.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
minion.process_manager.kill_children()
minion.destroy()
def destroy(self):
for minion in self.minions:
minion.destroy()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.connected = False
self.restart = False
# Flag meaning minion has finished initialization including first connect to the master.
# True means the Minion is fully functional and ready to handle events.
self.ready = False
self.jid_queue = [] if jid_queue is None else jid_queue
self.periodic_callbacks = {}
if io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
# Warn if ZMQ < 3.2
if zmq:
if ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
# Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains. We do this for proxies in the
# post_master_init
if not salt.utils.platform.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
else:
if self.opts.get('beacons_before_connect', False):
log.warning(
'\'beacons_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['beacons_before_connect'] = False
if self.opts.get('scheduler_before_connect', False):
log.warning(
'\'scheduler_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['scheduler_before_connect'] = False
log.info('Creating minion process manager')
if self.opts['random_startup_delay']:
sleep_time = random.randint(0, self.opts['random_startup_delay'])
            log.info(
                'Minion sleeping for %s seconds due to configured '
                'random_startup_delay between 0 and %s seconds',
                sleep_time, self.opts['random_startup_delay']
            )
time.sleep(sleep_time)
self.process_manager = ProcessManager(name='MinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True})
# We don't have the proxy setup yet, so we can't start engines
# Engines need to be able to access __proxy__
if not salt.utils.platform.is_proxy():
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager)
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self._running = False
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
def sync_connect_master(self, timeout=None, failed=False):
'''
Block until we are connected to a master
'''
self._sync_connect_master_success = False
log.debug("sync_connect_master")
def on_connect_master_future_done(future):
self._sync_connect_master_success = True
self.io_loop.stop()
self._connect_master_future = self.connect_master(failed=failed)
# finish connecting to master
self._connect_master_future.add_done_callback(on_connect_master_future_done)
if timeout:
self.io_loop.call_later(timeout, self.io_loop.stop)
try:
self.io_loop.start()
except KeyboardInterrupt:
self.destroy()
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
if self._connect_master_future.done():
future_exception = self._connect_master_future.exception()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
if timeout and self._sync_connect_master_success is False:
raise SaltDaemonNotRunning('Failed to connect to the salt-master')
@tornado.gen.coroutine
def connect_master(self, failed=False):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check ProxyMinion._post_master_init
to see if those changes need to be propagated.
Minions and ProxyMinions need significantly different post master setups,
which is why the differences are not factored out into separate helper
functions.
'''
if self.connected:
self.opts['master'] = master
# Initialize pillar before loader to make pillar accessible in modules
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
if not self.ready:
self._setup_core()
elif self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
        # add default scheduling jobs to the minion's scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'run_on_start': True,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0 and
self.connected):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self, force_refresh=False, notify=False, grains=None):
'''
Return the functions and the returners loaded up from the loader
module
'''
# if this is a *nix system AND modules_max_memory is set, lets enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug(
'modules_max_memory set, enforcing a maximum of %s',
self.opts['modules_max_memory']
)
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
# This might be a proxy minion
if hasattr(self, 'proxy'):
proxy = self.proxy
else:
proxy = None
if grains is None:
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy)
self.utils = salt.loader.utils(self.opts, proxy=proxy)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
returners = salt.loader.returners(self.opts, functions, proxy=proxy)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
executors = salt.loader.executors(self.opts, functions, proxy=proxy)
return functions, returners, errors, executors
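    # Illustrative config for the modules_max_memory cap handled above (it is
    # only enforced when both psutil and the resource module are available,
    # i.e. on unix-like systems; the value is a placeholder):
    #     modules_max_memory: 500000000    # bytes added on top of current RSS+VMS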
def _send_req_sync(self, load, timeout):
if self.opts['minion_sign_messages']:
log.trace('Signing event to be published onto the bus.')
minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
load['sig'] = sig
channel = salt.transport.Channel.factory(self.opts)
return channel.send(load, timeout=timeout)
@tornado.gen.coroutine
def _send_req_async(self, load, timeout):
if self.opts['minion_sign_messages']:
log.trace('Signing event to be published onto the bus.')
minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
load['sig'] = sig
channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
ret = yield channel.send(load, timeout=timeout)
raise tornado.gen.Return(ret)
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
if sync:
try:
self._send_req_sync(load, timeout)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
# very likely one of the masters is dead, status.master will flush it
self.functions['status.master'](self.opts['master'])
return False
except Exception:
log.info('fire_master failed: %s', traceback.format_exc())
return False
else:
if timeout_handler is None:
def handle_timeout(*_):
log.info('fire_master failed: master could not be contacted. Request timed out.')
# very likely one of the masters is dead, status.master will flush it
self.functions['status.master'](self.opts['master'])
return True
timeout_handler = handle_timeout
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
return True
@tornado.gen.coroutine
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# Ensure payload is unicode. Disregard failure to decode binary blobs.
if six.PY2:
data = salt.utils.data.decode(data, keep=True)
if 'user' in data:
log.info(
'User %s Executing command %s with jid %s',
data['user'], data['fun'], data['jid']
)
else:
log.info(
'Executing command %s with jid %s',
data['fun'], data['jid']
)
log.debug('Command details %s', data)
# Don't duplicate jobs
log.trace('Started JIDs: %s', self.jid_queue)
if self.jid_queue is not None:
if data['jid'] in self.jid_queue:
return
else:
self.jid_queue.append(data['jid'])
if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
self.jid_queue.pop(0)
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
process_count_max = self.opts.get('process_count_max')
if process_count_max > 0:
process_count = len(salt.utils.minion.running(self.opts))
while process_count >= process_count_max:
log.warning("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid']))
yield tornado.gen.sleep(10)
process_count = len(salt.utils.minion.running(self.opts))
        # We stash an instance reference to allow for the socket
        # communication on Windows. You can't pickle functions, and thus
        # Python needs to be able to reconstruct the reference on the other
        # side.
instance = self
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
with default_signals(signal.SIGINT, signal.SIGTERM):
process = SignalHandlingMultiprocessingProcess(
target=self._target, args=(instance, self.opts, data, self.connected)
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data, self.connected),
name=data['jid']
)
if multiprocessing_enabled:
with default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
process.start()
else:
process.start()
# TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.platform.is_windows():
# we only want to join() immediately if we are daemonizing a process
process.join()
else:
self.win_proc.append(process)
def ctx(self):
'''
Return a single context manager for the minion's data
'''
if six.PY2:
return contextlib.nested(
self.functions.context_dict.clone(),
self.returners.context_dict.clone(),
self.executors.context_dict.clone(),
)
else:
exitstack = contextlib.ExitStack()
exitstack.enter_context(self.functions.context_dict.clone())
exitstack.enter_context(self.returners.context_dict.clone())
exitstack.enter_context(self.executors.context_dict.clone())
return exitstack
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.user.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
def run_func(minion_instance, opts, data):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
return Minion._thread_multi_return(minion_instance, opts, data)
else:
return Minion._thread_return(minion_instance, opts, data)
with tornado.stack_context.StackContext(functools.partial(RequestContext,
{'data': data, 'opts': opts})):
with tornado.stack_context.StackContext(minion_instance.ctx):
run_func(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
        This method should be used as a threading target; it starts the actual
        minion-side execution.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.process.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
executors = data.get('module_executors') or \
getattr(minion_instance, 'module_executors', []) or \
opts.get('module_executors', ['direct_call'])
allow_missing_funcs = any([
minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
for executor in executors
            if '{0}.allow_missing_func'.format(executor) in minion_instance.executors
])
if function_name in minion_instance.functions or allow_missing_funcs is True:
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
minion_blackout_violation = True
# use minion_blackout_whitelist from grains if it exists
if minion_instance.opts['grains'].get('minion_blackout', False):
whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
if function_name in minion_instance.functions:
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
else:
# only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
func = function_name
args, kwargs = data['arg'], data
minion_instance.functions.pack['__context__']['retcode'] = 0
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if opts.get('sudo_user', '') and executors[-1] != 'sudo':
executors[-1] = 'sudo' # replace the last one with sudo
log.trace('Executors list %s', executors) # pylint: disable=no-member
for name in executors:
fname = '{0}.execute'.format(name)
if fname not in minion_instance.executors:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
if return_data is not None:
break
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
retcode = minion_instance.functions.pack['__context__'].get(
'retcode',
salt.defaults.exitcodes.EX_OK
)
if retcode == salt.defaults.exitcodes.EX_OK:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(return_data.get(x, True)
for x in ('result', 'success'))
except Exception:
# return data is not a dict
func_result = True
if not func_result:
retcode = salt.defaults.exitcodes.EX_GENERIC
ret['retcode'] = retcode
ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except CommandExecutionError as exc:
log.error(
'A command in \'%s\' had a problem: %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except SaltInvocationError as exc:
log.error(
'Problem executing \'%s\': %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
function_name, exc, func.__doc__ or ''
)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=True)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
else:
docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
if docs:
docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
ret['return'] = docs
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
# Add default returners from minion config
        # Should have been converted to a comma-delimited string already
if isinstance(opts.get('return'), six.string_types):
if data['ret']:
data['ret'] = ','.join((data['ret'], opts['return']))
else:
data['ret'] = opts['return']
log.debug('minion return: %s', ret)
# TODO: make a list? Seems odd to split it this late :/
if data['ret'] and isinstance(data['ret'], six.string_types):
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
returner_str = '{0}.returner'.format(returner)
if returner_str in minion_instance.returners:
minion_instance.returners[returner_str](ret)
else:
returner_err = minion_instance.returners.missing_fun_string(returner_str)
log.error(
'Returner %s could not be loaded: %s',
returner_str, returner_err
)
except Exception as exc:
log.exception(
'The return failed for job %s: %s', data['jid'], exc
)
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
        This method should be used as a threading target; it starts the actual
        minion-side execution.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.process.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID %s', sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
multifunc_ordered = opts.get('multifunc_ordered', False)
num_funcs = len(data['fun'])
if multifunc_ordered:
ret = {
'return': [None] * num_funcs,
'retcode': [None] * num_funcs,
'success': [False] * num_funcs
}
else:
ret = {
'return': {},
'retcode': {},
'success': {}
}
for ind in range(0, num_funcs):
if not multifunc_ordered:
ret['success'][data['fun'][ind]] = False
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
minion_blackout_violation = True
elif minion_instance.opts['grains'].get('minion_blackout', False):
whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
key = ind if multifunc_ordered else data['fun'][ind]
ret['return'][key] = func(*args, **kwargs)
retcode = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
if retcode == 0:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(ret['return'][key].get(x, True)
for x in ('result', 'success'))
except Exception:
# return data is not a dict
func_result = True
if not func_result:
retcode = 1
ret['retcode'][key] = retcode
ret['success'][key] = retcode == 0
except Exception as exc:
trb = traceback.format_exc()
log.warning('The minion function caused an exception: %s', exc)
if multifunc_ordered:
ret['return'][ind] = trb
else:
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job %s: %s',
data['jid'], exc
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: %s', jid)
log.trace('Return data: %s', ret)
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['uid'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error(
'Invalid outputter %s. This is likely a bug.',
ret['out']
)
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
if ret['jid'] == 'req':
ret['jid'] = salt.utils.jid.gen_jid(self.opts)
salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)
if not self.opts['pub_ret']:
return ''
def timeout_handler(*_):
log.warning(
'The minion failed to return the job information for job %s. '
'This is often due to the master being shut down or '
'overloaded. If the master is running, consider increasing '
'the worker_threads value.', jid
)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
return ret_val
def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
if not isinstance(rets, list):
rets = [rets]
jids = {}
for ret in rets:
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: %s', jid)
load = jids.setdefault(jid, {})
if ret_cmd == '_syndic_return':
if not load:
load.update({'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__'),
'return': {}})
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load.update({'id': self.opts['id']})
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error(
'Invalid outputter %s. This is likely a bug.',
ret['out']
)
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
load = {'cmd': ret_cmd,
'load': list(six.itervalues(jids))}
def timeout_handler(*_):
log.warning(
'The minion failed to return the job information for job %s. '
'This is often due to the master being shut down or '
'overloaded. If the master is running, consider increasing '
'the worker_threads value.', jid
)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
if self.opts.get('master_type', 'str') == 'disable' and \
self.opts.get('file_client', 'remote') == 'remote':
log.warning(
'Cannot run startup_states when \'master_type\' is set '
'to \'disable\' and \'file_client\' is set to '
'\'remote\'. Skipping.'
)
else:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# old style event. Defaults to False in Neon Salt release
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# send name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify=%s', notify)
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
def beacons_refresh(self):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing beacons.')
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
def matchers_refresh(self):
'''
Refresh the matchers
'''
log.debug('Refreshing matchers.')
self.matchers = salt.loader.matchers(self.opts)
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
if self.connected:
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, tag, data):
'''
Refresh the functions and returners.
'''
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
if func == 'delete':
self.schedule.delete_job(name, persist)
elif func == 'add':
self.schedule.add_job(schedule, persist)
elif func == 'modify':
self.schedule.modify_job(name, schedule, persist)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, persist)
elif func == 'run_job':
self.schedule.run_job(name)
elif func == 'disable_job':
self.schedule.disable_job(name, persist)
elif func == 'postpone_job':
self.schedule.postpone_job(name, data)
elif func == 'skip_job':
self.schedule.skip_job(name, data)
elif func == 'reload':
self.schedule.reload(schedule)
elif func == 'list':
self.schedule.list(where)
elif func == 'save_schedule':
self.schedule.save_schedule()
elif func == 'get_next_fire_time':
self.schedule.get_next_fire_time(name)
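    # Example (illustrative only): a 'manage_schedule' event that adds a job
    # could carry data such as
    #
    #   {'func': 'add', 'persist': True,
    #    'schedule': {'job1': {'function': 'test.ping', 'seconds': 60}}}
    #
    # while name-based operations ('delete', 'modify', 'enable_job', ...) read
    # the job name from 'name', and 'list' reads 'where'.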
def manage_beacons(self, tag, data):
'''
Manage Beacons
'''
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
include_pillar = data.get('include_pillar', None)
include_opts = data.get('include_opts', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
elif func == 'modify':
self.beacons.modify_beacon(name, beacon_data)
elif func == 'delete':
self.beacons.delete_beacon(name)
elif func == 'enable':
self.beacons.enable_beacons()
elif func == 'disable':
self.beacons.disable_beacons()
elif func == 'enable_beacon':
self.beacons.enable_beacon(name)
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons(include_opts, include_pillar)
elif func == 'list_available':
self.beacons.list_available_beacons()
elif func == 'validate_beacon':
self.beacons.validate_beacon(name, beacon_data)
def environ_setenv(self, tag, data):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This %s was scheduled to stop. Not running %s.tune_in()',
self.__class__.__name__, self.__class__.__name__
)
return
elif self._running is True:
log.error(
'This %s is already running. Not running %s.tune_in()',
self.__class__.__name__, self.__class__.__name__
)
return
try:
log.info(
'%s is starting as user \'%s\'',
self.__class__.__name__, salt.utils.user.get_user()
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting %s',
self.__class__.__name__,
exc_info=err
)
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
if not self.ready:
raise tornado.gen.Return()
tag, data = salt.utils.event.SaltEvent.unpack(package)
log.debug(
'Minion of \'%s\' is handling event tag \'%s\'',
self.opts['master'], tag
)
if tag.startswith('module_refresh'):
self.module_refresh(
force_refresh=data.get('force_refresh', False),
notify=data.get('notify', False)
)
elif tag.startswith('pillar_refresh'):
yield self.pillar_refresh(
force_refresh=data.get('force_refresh', False)
)
elif tag.startswith('beacons_refresh'):
self.beacons_refresh()
elif tag.startswith('matchers_refresh'):
self.matchers_refresh()
elif tag.startswith('manage_schedule'):
self.manage_schedule(tag, data)
elif tag.startswith('manage_beacons'):
self.manage_beacons(tag, data)
elif tag.startswith('grains_refresh'):
if (data.get('force_refresh', False) or
self.grains_cache != self.opts['grains']):
self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif tag.startswith('environ_setenv'):
self.environ_setenv(tag, data)
elif tag.startswith('_minion_mine'):
self._mine_send(tag, data)
elif tag.startswith('fire_master'):
if self.connected:
log.debug('Forwarding master event tag=%s', data['tag'])
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif tag.startswith(master_event(type='disconnected')) or tag.startswith(master_event(type='failback')):
# if the master disconnect event is for a different master, raise an exception
if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
# not mine master, ignore
return
if tag.startswith(master_event(type='failback')):
# if the master failback event is not for the top master, raise an exception
if data['master'] != self.opts['master_list'][0]:
raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
data['master'], self.opts['master']))
# if the master failback event is for the current master, raise an exception
elif data['master'] == self.opts['master'][0]:
raise SaltException('Already connected to \'{0}\''.format(data['master']))
if self.connected:
# we are not connected anymore
self.connected = False
log.info('Connection to master %s lost', self.opts['master'])
# we can't use the config default here because the default '0' value is overloaded
# to mean 'if 0 disable the job', but when salt detects a timeout it also sets up
# these jobs
master_alive_interval = self.opts['master_alive_interval'] or 60
if self.opts['master_type'] != 'failover':
# modify the scheduled job to fire on reconnect
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': master_alive_interval,
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
else:
                    # delete the scheduled job so it does not interfere with the failover process
if self.opts['transport'] != 'tcp':
self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']),
persist=True)
log.info('Trying to tune in to next master from master-list')
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'auth'):
self.pub_channel.auth.invalidate()
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
try:
master, self.pub_channel = yield self.eval_master(
opts=self.opts,
failed=True,
failback=tag.startswith(master_event(type='failback')))
except SaltClientError:
pass
if self.connected:
self.opts['master'] = master
# re-init the subsystems to work with the new master
log.info(
'Re-initialising subsystems for new master %s',
self.opts['master']
)
# put the current schedule into the new loaders
self.opts['schedule'] = self.schedule.option('schedule')
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# make the schedule to use the new 'functions' loader
self.schedule.functions = self.functions
self.pub_channel.on_recv(self._handle_payload)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': master_alive_interval,
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
if self.opts['master_failback'] and 'master_list' in self.opts:
if self.opts['master'] != self.opts['master_list'][0]:
schedule = {
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
self.schedule.modify_job(name=master_event(type='failback'),
schedule=schedule)
else:
self.schedule.delete_job(name=master_event(type='failback'), persist=True)
else:
self.restart = True
self.io_loop.stop()
elif tag.startswith(master_event(type='connected')):
            # Handle this event only once, otherwise it will pollute the log.
            # Also, if master_type is failover, all the reconnection work is
            # done by the `disconnected` event handler and this event should
            # never happen; check it anyway, to be sure.
if not self.connected and self.opts['master_type'] != 'failover':
log.info('Connection to master %s re-established', self.opts['master'])
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
if self.opts['transport'] != 'tcp':
if self.opts['master_alive_interval'] > 0:
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
else:
self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True)
elif tag.startswith('__schedule_return'):
# reporting current connection with master
if data['schedule'].startswith(master_event(type='alive', master='')):
if data['return']:
log.debug(
'Connected to master %s',
data['schedule'].split(master_event(type='alive', master=''))[1]
)
self._return_pub(data, ret_cmd='_return', sync=False)
elif tag.startswith('_salt_error'):
if self.connected:
log.debug('Forwarding salt error event tag=%s', tag)
self._fire_master(data, tag)
elif tag.startswith('salt/auth/creds'):
key = tuple(data['key'])
log.debug(
'Updating auth data for %s: %s -> %s',
key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
)
salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']
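    # For reference, handle_event() above dispatches on these tag prefixes:
    # module_refresh, pillar_refresh, beacons_refresh, matchers_refresh,
    # manage_schedule, manage_beacons, grains_refresh, environ_setenv,
    # _minion_mine, fire_master, the master 'disconnected'/'failback'/
    # 'connected' events, __schedule_return, _salt_error and salt/auth/creds.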
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.platform.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
def _setup_core(self):
'''
Set up the core minion attributes.
This is safe to call multiple times.
'''
if not self.ready:
# First call. Initialize.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
# self.matcher = Matcher(self.opts, self.functions)
self.matchers = salt.loader.matchers(self.opts)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.grains_cache = self.opts['grains']
self.ready = True
def setup_beacons(self, before_connect=False):
'''
Set up the beacons.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'beacons' not in self.periodic_callbacks:
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons)
new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(
handle_beacons, loop_interval * 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_beacons()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
def setup_scheduler(self, before_connect=False):
'''
Set up the scheduler.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'schedule' not in self.periodic_callbacks:
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
utils=self.utils,
cleanup=[master_event(type='alive')])
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_schedule()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
log.debug('Minion \'%s\' trying to tune in', self.opts['id'])
if start:
if self.opts.get('beacons_before_connect', False):
self.setup_beacons(before_connect=True)
if self.opts.get('scheduler_before_connect', False):
self.setup_scheduler(before_connect=True)
self.sync_connect_master()
if self.connected:
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
if HAS_WIN_FUNCTIONS:
salt.utils.win_functions.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
self.setup_beacons()
self.setup_scheduler()
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0 and self.connected:
def ping_master():
try:
def ping_timeout_handler(*_):
if self.opts.get('auth_safemode', False):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay %ss', delay)
try:
self.functions['service.restart'](service_name())
except KeyError:
# Probably no init system (running in docker?)
log.warning(
'ping_interval reached without response '
'from the master, but service.restart '
'could not be run to restart the minion '
'daemon. ping_interval requires that the '
'minion is running under an init system.'
)
self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
except Exception:
log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000)
self.periodic_callbacks['ping'].start()
# add handler to subscriber
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(self._handle_payload)
elif self.opts.get('master_type') != 'disable':
log.error('No connection to master found. Scheduled jobs will not run.')
if start:
try:
self.io_loop.start()
if self.restart:
self.destroy()
except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown
self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
elif self.opts['zmq_filtering']:
                # When filtering is enabled, we'd like to know when the minion sees something it shouldn't
log.trace(
'Broadcast message received not for this minion, Load: %s',
payload['load']
)
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in load:
match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matchers['glob_match.match'](load['tgt']):
return False
return True
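    # Illustrative sketch (not Salt's actual matcher code): the default
    # 'glob_match.match' fallback behaves roughly like shell-style globbing of
    # the minion id, so, for example, fnmatch.fnmatch('web01.example.com',
    # 'web*') is True and a load with tgt='web*' and no tgt_type would be
    # accepted here.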
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
        # force auth_safemode True because the Syndic doesn't support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
self.jids = {}
self.raw_events = []
self.pub_future = None
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
def timeout_handler(*args):
log.warning('Unable to forward pub data: %s', args[1])
return True
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self.local.pub_async(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
io_loop=self.io_loop,
callback=lambda _: None,
**kwargs)
def fire_master_syndic_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# old style event. Defaults to false in Neon Salt release.
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start',
sync=False,
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
sync=False,
)
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None and payload['enc'] == 'aes':
log.trace('Handling payload')
self._handle_decoded_payload(payload['load'])
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the syndic currently has no need.
@tornado.gen.coroutine
def reconnect(self):
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
master, self.pub_channel = yield self.eval_master(opts=self.opts)
if self.connected:
self.opts['master'] = master
self.pub_channel.on_recv(self._process_cmd_socket)
log.info('Minion is ready to receive requests!')
raise tornado.gen.Return(self)
def destroy(self):
'''
Tear down the syndic minion
'''
        # We borrowed the local client's poller so give it back before
        # it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
'''
Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
    Modes (controlled by `syndic_mode`):
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, that the return will end up in a zmq buffer
in this Syndic headed to that original master.
    In addition, since these classes all seem to use a mix of blocking and
    non-blocking calls (with varying timeouts along the way), this daemon does
    not handle failure well; it will (under most circumstances) stall for ~15s
    trying to forward events to the down master.
'''
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(SyndicManager, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self.syndic_failover = self.opts.get('syndic_failover', 'random')
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
# List of events
self.raw_events = []
# Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
self.job_rets = {}
        # List of delayed job_rets which we were unable to send for some
        # reason and which will be resent to any available master
self.delayed = []
# Active pub futures: {master_id: (future, [job_ret, ...]), ...}
self.pub_futures = {}
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = OrderedDict() # mapping of opts['master'] -> syndic
masters = self.opts['master']
if not isinstance(masters, list):
masters = [masters]
for master in masters:
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
failed = False
while True:
log.debug(
'Syndic attempting to connect to %s',
opts['master']
)
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master(failed=failed)
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
# Send an event to the master that the minion is live
syndic.fire_master_syndic_start()
log.info(
'Syndic successfully connected to %s',
opts['master']
)
break
except SaltClientError as exc:
failed = True
log.error(
'Error while bringing up syndic for multi-syndic. Is the '
'master at %s responding?', opts['master']
)
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
failed = True
log.critical(
'Unexpected error while connecting to %s',
opts['master'], exc_info=True
)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
        # if it's connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result() # pylint: disable=no-member
self._syndics[master] = syndic.reconnect()
else:
# TODO: debug?
log.info(
'Attempting to mark %s as dead, although it is already '
'marked dead', master
)
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
successful = False
# Call for each master
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
successful = True
except SaltClientError:
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
if not successful:
log.critical('Unable to call %s on any masters!', func)
def _return_pub_syndic(self, values, master_id=None):
'''
        Wrapper to call '_return_pub_multi' on a syndic, best effort to get the one you asked for
'''
func = '_return_pub_multi'
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
future, data = self.pub_futures.get(master, (None, None))
if future is not None:
if not future.done():
if master == master_id:
# Targeted master previous send not done yet, call again later
return False
else:
# Fallback master is busy, try the next one
continue
elif future.exception():
# Previous execution on this master returned an error
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
del self.pub_futures[master]
# Add not sent data to the delayed list and try the next master
self.delayed.extend(data)
continue
future = getattr(syndic_future.result(), func)(values,
'_syndic_return',
timeout=self._return_retry_timer(),
sync=False)
self.pub_futures[master] = (future, values)
return True
# Loop done and didn't exit: wasn't sent, try again later
return False
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
masters = list(self._syndics.keys())
if self.opts['syndic_failover'] == 'random':
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
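    # Illustrative example: with syndic_failover='random' and masters
    # ['m1', 'm2', 'm3'], iter_master_options('m2') yields 'm2' first and then
    # the remaining masters in shuffled order; when no master_id is given (or
    # it is unknown) the first master yielded is random as well.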
def _reset_event_aggregation(self):
self.job_rets = {}
self.raw_events = []
def reconnect_event_bus(self, something):
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
self.local.event.subscribe('')
log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id'])
# register the event sub to the poller
self.job_rets = {}
self.raw_events = []
self._reset_event_aggregation()
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
log.trace('Got event %s', mtag) # pylint: disable=no-member
tag_parts = mtag.split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in data:
if 'jid' not in data:
# Not a job return
return
if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
master = data.get('master_id')
jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {})
if not jdict:
jdict['__fun__'] = data.get('fun')
jdict['__jid__'] = data['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if data['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](data['jid'])
)
self.jid_forward_cache.add(data['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if master is not None:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = master
ret = {}
for key in 'return', 'retcode', 'success':
if key in data:
ret[key] = data[key]
jdict[data['id']] = ret
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
            # if we are the top level master, don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in data:
self.raw_events.append({'data': data, 'tag': mtag})
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
events = self.raw_events
self.raw_events = []
self._call_syndic('_fire_master',
kwargs={'events': events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self._return_retry_timer(),
'sync': False,
},
)
if self.delayed:
res = self._return_pub_syndic(self.delayed)
if res:
self.delayed = []
for master in list(six.iterkeys(self.job_rets)):
values = list(six.itervalues(self.job_rets[master]))
res = self._return_pub_syndic(values, master_id=master)
if res:
del self.job_rets[master]
class ProxyMinionManager(MinionManager):
'''
Create the multi-minion interface but for proxy minions
'''
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return ProxyMinion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check Minion._post_master_init
to see if those changes need to be propagated.
ProxyMinions need a significantly different post master setup,
which is why the differences are not factored out into separate helper
functions.
'''
log.debug("subclassed _post_master_init")
if self.connected:
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = 'No proxy key found in pillar or opts for id ' + self.opts['id'] + '. ' + \
'Check your pillar/opts configuration and contents. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
if self.opts.get('proxy_merge_pillar_in_opts'):
            # Override proxy opts with pillar data when the user requires it.
self.opts = salt.utils.dictupdate.merge(self.opts,
self.opts['pillar'],
strategy=self.opts.get('proxy_merge_pillar_in_opts_strategy'),
merge_lists=self.opts.get('proxy_deep_merge_pillar_in_opts', False))
elif self.opts.get('proxy_mines_pillar'):
# Even when not required, some details such as mine configuration
# should be merged anyway whenever possible.
if 'mine_interval' in self.opts['pillar']:
self.opts['mine_interval'] = self.opts['pillar']['mine_interval']
if 'mine_functions' in self.opts['pillar']:
general_proxy_mines = self.opts.get('mine_functions', [])
specific_proxy_mines = self.opts['pillar']['mine_functions']
try:
self.opts['mine_functions'] = general_proxy_mines + specific_proxy_mines
except TypeError as terr:
log.error('Unable to merge mine functions from the pillar in the opts, for proxy {}'.format(
self.opts['id']))
fq_proxyname = self.opts['proxy']['proxytype']
# Need to load the modules so they get all the dunder variables
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])
# Pull in the utils
self.utils = salt.loader.utils(self.opts)
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts, utils=self.utils)
# And re-load the modules so the __proxy__ variable gets injected
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
        # Reload utils as well (chicken and egg: __utils__ needs __proxy__ and __proxy__ needs __utils__)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.proxy.pack['__utils__'] = self.utils
# Reload all modules so all dunder variables are injected
self.proxy.reload_modules()
# Start engines here instead of in the Minion superclass __init__
# This is because we need to inject the __proxy__ variable but
# it is not setup until now.
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager, proxy=self.proxy)
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
'Check your proxymodule. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = self.proxy[fq_proxyname + '.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matchers = salt.loader.matchers(self.opts)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
if self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')],
proxy=self.proxy)
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'run_on_start': True,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
# proxy keepalive
proxy_alive_fn = fq_proxyname+'.alive'
if (proxy_alive_fn in self.proxy
and 'status.proxy_reconnect' in self.functions
and self.opts.get('proxy_keep_alive', True)):
            # if `proxy_keep_alive` is not specified or is set to False, do not retry reconnecting
self.schedule.add_job({
'__proxy_keepalive':
{
'function': 'status.proxy_reconnect',
'minutes': self.opts.get('proxy_keep_alive_interval', 1), # by default, check once per minute
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {
'proxy_name': fq_proxyname
}
}
}, persist=True)
self.schedule.enable_schedule()
else:
self.schedule.delete_job('__proxy_keepalive', persist=True)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv='base')
self.grains_cache = self.opts['grains']
self.ready = True
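    # In short, _post_master_init() above: compiles the pillar, resolves the
    # 'proxy' configuration, loads modules/utils and the proxy module (twice,
    # so the dunder variables are injected), starts the engines, calls
    # <proxytype>.init(), loads proxy grains, wires up the scheduler jobs
    # (mine.update, master_alive, master_failback, proxy keepalive) and
    # finally runs saltutil.sync_grains (per the comment above, so the proxy
    # can communicate its grains to the master) before marking itself ready.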
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
# Need to load the modules so they get all the dunder variables
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
# Pull in the utils
minion_instance.utils = salt.loader.utils(minion_instance.opts)
# Then load the proxy module
minion_instance.proxy = salt.loader.proxy(minion_instance.opts, utils=minion_instance.utils)
# And re-load the modules so the __proxy__ variable gets injected
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
minion_instance.functions.pack['__proxy__'] = minion_instance.proxy
minion_instance.proxy.pack['__salt__'] = minion_instance.functions
minion_instance.proxy.pack['__ret__'] = minion_instance.returners
minion_instance.proxy.pack['__pillar__'] = minion_instance.opts['pillar']
            # Reload utils as well (chicken and egg: __utils__ needs __proxy__ and __proxy__ needs __utils__)
minion_instance.utils = salt.loader.utils(minion_instance.opts, proxy=minion_instance.proxy)
minion_instance.proxy.pack['__utils__'] = minion_instance.utils
# Reload all modules so all dunder variables are injected
minion_instance.proxy.reload_modules()
fq_proxyname = opts['proxy']['proxytype']
minion_instance.module_executors = minion_instance.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = minion_instance.proxy[fq_proxyname + '.init']
proxy_init_fn(opts)
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.user.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
with tornado.stack_context.StackContext(minion_instance.ctx):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
Minion._thread_multi_return(minion_instance, opts, data)
else:
Minion._thread_return(minion_instance, opts, data)
class SProxyMinion(SMinion):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SProxyMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.opts['grains'] = salt.loader.grains(self.opts)
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = (
'No "proxy" configuration key found in pillar or opts '
'dictionaries for id {id}. Check your pillar/options '
'configuration and contents. Salt-proxy aborted.'
).format(id=self.opts['id'])
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=False, proxy=self.proxy)
self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
self.matchers = salt.loader.matchers(self.opts)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts, self.functions, proxy=self.proxy)
fq_proxyname = self.opts['proxy']['proxytype']
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
        # Reload utils as well (chicken and egg: __utils__ needs __proxy__ and __proxy__ needs __utils__)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.proxy.pack['__utils__'] = self.utils
# Reload all modules so all dunder variables are injected
self.proxy.reload_modules()
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
'Check your proxymodule. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)
self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = self.proxy[fq_proxyname + '.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv='base')
self.grains_cache = self.opts['grains']
self.ready = True
|
from xevo import evo
import numpy as np
import time
import random
from trivmute import *
class trivmutetraj(trivmute):
"""trivmute, but for traj applications"""
def __init__(s,forget=0):
s.initial()
s.forget=forget
def generation(s)->None:
os=s.q.strength()
n=s.q.mutate()
ns=n.strength()
if (ns<=os) != n.shallmaximize():
s.q=n
else:s.forget+=1
def _copy(s):
return trivmutetraj(forget=s.forget)
def __str__(s):
return "trivmutetraj in "+str(s.forget)
def __repr__(s):
return str(s)
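# Illustrative note: generation() above is a single hill-climbing step. The
# mutated candidate n replaces s.q when (ns <= os) != n.shallmaximize(), i.e.
# when minimizing, n is kept if its strength is not worse (ns <= os); when
# maximizing, it is kept only if strictly better (ns > os). Otherwise the
# rejection counter s.forget is incremented. For example (hypothetical
# values), with shallmaximize() == False, os == 5.0 and ns == 3.0, the
# condition is True != False and the candidate is accepted.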
|
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_dict
from delphin_6_automation.database_interactions.db_templates import sample_entry, delphin_entry
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
# RiBuild Modules
from delphin_6_automation.logging.ribuild_logger import ribuild_logger
# Logger
logger = ribuild_logger(__name__)
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
def get_bare():
ids = delphin_entry.Delphin.objects(sample_data__design_option__name__in=['1d_bare'])
print('IDS', ids.count())
return ids
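# Note: the double-underscore lookup above is standard mongoengine query
# syntax; sample_data__design_option__name__in=['1d_bare'] traverses
# sample_data -> design_option -> name and matches any of the listed values.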
def check_ids(projects):
    for project in projects:
sample = sample_entry.Sample.objects(delphin_docs=project).first()
print(f"Project: {project.id} with sequence {project.sample_data.get('sequence')} is in sample iteration: {sample.iteration}. Data: {project.sample_data.get('exterior_climate')}")
if __name__ == '__main__':
server = mongo_setup.global_init(auth_dict)
ids = get_bare()
check_ids(ids)
mongo_setup.global_end_ssh(server)
|
import argparse
import csv
import sys
from functools import partial
import tensorflow as tf
import tensorflow_text as text
import yaml
from ..configs import DataConfig, get_model_config
from ..data import delta_accelerate, load_audio_file
from ..models import LAS, DeepSpeech2
from ..search import DeepSpeechSearcher, LAS_Searcher
from ..utils import get_device_strategy, get_logger
# fmt: off
parser = argparse.ArgumentParser("This is a script to run inference (generate sentences) with a seq2seq model")
parser.add_argument("--data-config", type=str, required=True, help="data processing config file")
parser.add_argument("--model-config", type=str, required=True, help="model config file")
parser.add_argument("--audio-files", required=True, help="an audio file or glob pattern of multiple files ex) *.pcm")
parser.add_argument("--model-path", type=str, required=True, help="pretrained model checkpoint")
parser.add_argument("--output-path", default="output.tsv", help="output tsv file path to save generated sentences")
parser.add_argument("--sp-model-path", type=str, required=True, help="sentencepiece model path")
parser.add_argument("--batch-size", type=int, default=512)
parser.add_argument("--beam-size", type=int, default=0, help="not given, use greedy search else beam search with this value as beam size")
parser.add_argument("--mixed-precision", action="store_true", help="Use mixed precision FP16")
parser.add_argument("--device", type=str, default="CPU", help="device to train model")
# fmt: on
def main(args: argparse.Namespace):
strategy = get_device_strategy(args.device)
logger = get_logger("inference")
if args.mixed_precision:
mixed_type = "mixed_bfloat16" if args.device == "TPU" else "mixed_float16"
policy = tf.keras.mixed_precision.experimental.Policy(mixed_type)
tf.keras.mixed_precision.experimental.set_policy(policy)
logger.info("[+] Use Mixed Precision FP16")
# Construct Dataset
with tf.io.gfile.GFile(args.sp_model_path, "rb") as f:
tokenizer = text.SentencepieceTokenizer(f.read(), add_bos=True, add_eos=True)
bos_id, eos_id = tokenizer.tokenize("").numpy().tolist()
dataset_files = sorted(tf.io.gfile.glob(args.audio_files))
if not dataset_files:
logger.error("[Error] Dataset path is invalid!")
sys.exit(1)
# Load Config
logger.info(f"Load Data Config from {args.data_config}")
config = DataConfig.from_yaml(args.data_config)
with strategy.scope():
dataset = (
tf.data.Dataset.from_tensor_slices(dataset_files)
.map(load_audio_file(config.sample_rate, config.file_format, config.sample_rate))
.map(config.audio_feature_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
)
# Delta Accelerate
if config.use_delta_accelerate:
logger.info("[+] Use delta and deltas accelerate")
dataset = dataset.map(delta_accelerate)
dataset = dataset.padded_batch(args.batch_size, [None, config.frequency_dim, config.feature_dim]).prefetch(
tf.data.experimental.AUTOTUNE
)
# Model Initialize & Load pretrained model
model_config = get_model_config(args.model_config)
model = model_config.create_model()
model_input, _ = model.make_example(
tf.keras.Input([None, config.frequency_dim, config.feature_dim], dtype=tf.float32),
tf.keras.Input([None], dtype=tf.int32),
)
model(model_input)
tf.train.Checkpoint(model).restore(args.model_path).expect_partial()
logger.info(f"Loaded weights of model from {args.model_path}")
model.summary()
if isinstance(model, LAS):
searcher = LAS_Searcher(model, config.max_token_length, bos_id, eos_id, model_config.pad_id)
elif isinstance(model, DeepSpeech2):
searcher = DeepSpeechSearcher(model, model_config.blank_index)
# Inference
logger.info("Start Inference")
outputs = []
for batch_input in dataset:
if args.beam_size > 0:
batch_output = searcher.beam_search(batch_input, args.beam_size)
batch_output = batch_output[0][:, 0, :].numpy()
else:
batch_output = searcher.greedy_search(batch_input)[0].numpy()
outputs.extend(batch_output)
outputs = [tokenizer.detokenize(output).numpy().decode("UTF8") for output in outputs]
logger.info("Ended Inference, Start to save...")
# Save file
with open(args.output_path, "w") as fout:
wtr = csv.writer(fout, delimiter="\t")
wtr.writerow(["AudioPath", "DecodedSentence"])
for audio_path, decoded_sentence in zip(dataset_files, outputs):
wtr.writerow((audio_path, decoded_sentence))
logger.info(f"Saved (audio path,decoded sentence) pairs to {args.output_path}")
if __name__ == "__main__":
sys.exit(main(parser.parse_args()))
|
"""EML type utils
This module works directly with EML XML objects in the lxml.etree domain, and so can
be used without having an `rid`.
"""
import csv
import datetime
import enum
import logging
import re
import dex.exc
# This module should not require cache access and so, should not import `dex.eml_cache`
# or `dex.cache`.
log = logging.getLogger(__name__)
# Default start and end datetimes used in the UI if the EML lists one or more datetime
# columns, but no datetime ranges.
FALLBACK_BEGIN_DATETIME = datetime.datetime(2000, 1, 1)
FALLBACK_END_DATETIME = datetime.datetime.now()
DEFAULT_HEADER_LINE_COUNT = 0
DEFAULT_FOOTER_LINE_COUNT = 0
DEFAULT_RECORD_DELIMITER = r'\n'
DEFAULT_FIELD_DELIMITER = ','
DEFAULT_QUOTE_CHARACTER = '"'
class PandasType(str, enum.Enum):
FLOAT = 'FLOAT' # enum.auto()
INT = 'INT' # enum.auto()
CATEGORY = 'CATEGORY' # enum.auto()
DATETIME = 'DATETIME' # enum.auto()
STRING = 'STRING' # enum.auto()
PANDAS_TO_FRIENDLY_DICT = {
PandasType.FLOAT: 'Numeric',
PandasType.INT: 'Numeric',
PandasType.CATEGORY: "Categorical",
PandasType.DATETIME: "Time series",
PandasType.STRING: "Generic",
}
ISO8601_TO_CTIME_DICT = {
# %a
# Weekday as locale’s abbreviated name.
# Sun, Mon, …, Sat (en_US);
'DDD': '%a',
'w': '%a',
# %A
# Weekday as locale’s full name.
# Sunday, Monday, …, Saturday (en_US);
'W': '%A',
# %w
# Weekday as a decimal number, where 0 is Sunday and 6 is Saturday.
# 0, 1, …, 6
'WW': '%w',
# %d
# Day of the month as a zero-padded decimal number.
# 01, 02, …, 31
'DD': '%d',
'dd': '%d',
# %b
# Month as locale’s abbreviated name.
# Jan, Feb, …, Dec (en_US);
'MON': '%b',
'mon': '%b',
# %B
# Month as locale’s full name.
# January, February, …, December (en_US);
# %y
# Year without century as a zero-padded decimal number.
# 00, 01, …, 99
'YY': '%y',
'yy': '%y',
# %Y
# Year with century as a decimal number.
# 0001, 0002, …, 2013, 2014, …, 9998, 9999
'YYYY': '%Y',
'yyyy': '%Y',
# %H
# Hour (24-hour clock) as a zero-padded decimal number.
# 00, 01, …, 23
'HH': '%H',
'HH24': '%H',
'hh': '%H',
'h': '%H',
# %I
# Hour (12-hour clock) as a zero-padded decimal number.
# 01, 02, …, 12
# %p
# Locale’s equivalent of either AM or PM.
# AM, PM (en_US);
# %M
# Minute as a zero-padded decimal number.
# 00, 01, …, 59
'MM': '%m',
'mm': '%M',
'm': '%M',
# %S
# Second as a zero-padded decimal number.
# 00, 01, …, 59
'SS': '%S',
'ss': '%S',
's': '%S',
# %f
# Microsecond as a decimal number, zero-padded on the left.
# 000000, 000001, …, 999999
#
# %z
# UTC offset in the form ±HHMM[SS[.ffffff]] (empty string if the object is naive).
# (empty), +0000, -0400, +1030, +063415, -030712.345216
#
# %Z
# Time zone name (empty string if the object is naive).
# (empty), UTC, GMT
#
# %j
# Day of the year as a zero-padded decimal number.
# 001, 002, …, 366
'DDDD': '%j',
# %U
# Week number of the year (Sunday as the first day of the week) as a zero padded decimal number. All days in a new year preceding the first Sunday are considered to be in week 0.
# 00, 01, …, 53
#
# %W
# Week number of the year (Monday as the first day of the week) as a decimal number. All days in a new year preceding the first Monday are considered to be in week 0.
# 00, 01, …, 53
#
# %c
# Locale’s appropriate date and time representation.
# Tue Aug 16 21:30:00 1988 (en_US);
#
# %x
# Locale’s appropriate date representation.
# 08/16/1988 (en_US);
#
# %X
# Locale’s appropriate time representation.
# 21:30:00 (en_US);
# %m
# Month as a zero-padded decimal number.
# 01, 02, …, 12
}
def iso8601_to_c_format(iso_str):
"""Convert an ISO8601-style datetime format spec, as used in EML, to a C-style
format spec, as used by the Python datetime formatting functions.
"""
c_list = []
for x in re.split(
r'(\W|integer|alphabetic|[T]|MST|YYYY|YY|MM|DDD|DD|HH|HH24|SS|mon)',
iso_str,
flags=re.IGNORECASE,
):
if x:
c_str = ISO8601_TO_CTIME_DICT.get(x, x)
c_list.append(c_str)
# log.debug(f'Translating datetime format string. ISO8601="{x}" C="{c_str}"')
c_fmt_str = ''.join(c_list)
log.debug(f'Translated datetime format string. ISO8601="{iso_str}" C="{c_fmt_str}"')
return c_fmt_str
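# Illustrative behaviour of the translation above (traced by hand, not taken from the original docs):
#   iso8601_to_c_format('YYYY-MM-DD hh:mm:ss')  ->  '%Y-%m-%d %H:%M:%S'
# Separators and any token missing from ISO8601_TO_CTIME_DICT pass through unchanged.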
def get_dialect(dt_el):
text_format_el = first(dt_el, './/physical/dataFormat/textFormat')
# https://docs.python.org/3/library/csv.html#csv.Dialect
class Dialect(csv.Dialect):
delimiter = first_str(text_format_el, 'fieldDelimiter', DEFAULT_FIELD_DELIMITER)
doublequote = True
escapechar = None
lineterminator = first_str(text_format_el, 'recordDelimiter', DEFAULT_RECORD_DELIMITER)
quotechar = first_str(text_format_el, 'quoteCharacter', DEFAULT_QUOTE_CHARACTER)
quoting = csv.QUOTE_MINIMAL
skipinitialspace = True
strict = False
return Dialect
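# Sketch of how the generated dialect might be used (dt_el is an lxml dataTable element,
# 'table.csv' is a placeholder path):
#   dialect = get_dialect(dt_el)
#   with open('table.csv', newline='') as f:
#       rows = list(csv.reader(f, dialect=dialect))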
def get_header_line_count(dt_el):
return first_int(
dt_el,
'.//physical/dataFormat/textFormat/numHeaderLines/text()',
DEFAULT_HEADER_LINE_COUNT,
)
def get_footer_line_count(dt_el):
return first_int(
dt_el,
'.//physical/dataFormat/textFormat/numFooterLines/text()',
DEFAULT_FOOTER_LINE_COUNT,
)
def get_col_attr_list(dt_el):
"""Get set of column attributes for a CSV file."""
col_attr_list = []
# Iterate over 'attribute' elements, one for each column
attr_list = list(dt_el.xpath('.//attributeList/attribute'))
for col_idx, attr_el in enumerate(attr_list):
col_name = first_str_orig(attr_el, './/attributeName/text()')
pandas_type = derive_pandas_type(attr_el)
date_fmt_str = None
c_date_fmt_str = None
if pandas_type == PandasType.DATETIME:
date_fmt_str = get_date_fmt_str(attr_el, col_name)
if date_fmt_str:
c_date_fmt_str = iso8601_to_c_format(date_fmt_str)
else:
pandas_type = PandasType.STRING
col_attr_list.append(
dict(
col_idx=col_idx,
col_name=col_name,
pandas_type=pandas_type,
date_fmt_str=date_fmt_str,
c_date_fmt_str=c_date_fmt_str,
missing_code_list=multiple_str(attr_el, './/missingValueCode/code/text()'),
)
)
return col_attr_list
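# Each entry of the returned list is a plain dict; the values below are illustrative only:
#   {'col_idx': 0, 'col_name': 'SAMPLE_DATE', 'pandas_type': PandasType.DATETIME,
#    'date_fmt_str': 'YYYY-MM-DD', 'c_date_fmt_str': '%Y-%m-%d', 'missing_code_list': ['NA']}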
def derive_pandas_type(attr_el):
"""Heuristics to find a Pandas / NumPy type, to use when processing a CSV file that has EML based type declarations.
Pandas supports a basic set of types, while EML supports much more complex type
declarations and descriptions. The columns for which we are able to derive a dtype
are supported with additional functionality in other areas of of the app.
Pandas dtype Python type NumPy type Usage
object str or mixed string_, unicode_, mixed types Text or mixed numeric and non-numeric values
int64 int int_, int8, int16, int32, int64, uint8, uint16, uint32, uint64 Integer numbers
float64 float float_, float16, float32, float64 Floating point numbers
bool bool bool_ True/False values
datetime64 datetime datetime64[ns] Date and time values
timedelta[ns] NA NA Differences between two datetimes
category NA NA Finite list of text values
"""
if has_el(attr_el, 'enumeratedDomain'):
return PandasType.CATEGORY
if has_el(attr_el, 'dateTime'):
return PandasType.DATETIME
if has_el(attr_el, 'numericDomain'):
number_type = first_str(attr_el, './/numberType/text()')
if number_type in ('real', 'natural'):
return PandasType.FLOAT
if number_type in ('integer', 'whole'):
return PandasType.INT
return PandasType.STRING
# String is our fallback type.
return PandasType.STRING
# if has_el(el, 'nonNumericDomain'):
# return 'type_'
# if has_el(attr_el, 'textDomain'):
def get_date_fmt_str(attr_el, col_name):
date_fmt_str = first_str(attr_el, './/measurementScale/dateTime/formatString/text()')
if date_fmt_str:
return date_fmt_str
if col_name.upper() == 'YEAR':
return 'YYYY'
def is_numeric(pandas_type):
return pandas_type in (PandasType.FLOAT, PandasType.INT)
def has_el(el, el_name):
"""Return True if an element with a given name exists in the branch rooted at el"""
return True if el.xpath(f'.//{el_name}') else False
def first(el, xpath):
"""Return the first match to the xpath if there was a match, else None. Can this be
done directly in xpath 1.0?
"""
# log.debug(f'first() xpath={xpath} ...')
res_el = el.xpath(f'({xpath})[1]')
try:
el = res_el[0]
except IndexError:
el = None
# log.debug(f'first() -> {el}')
return el
def first_str(el, text_xpath, default_val=None):
"""Apply xpath and, if there is a match, assume that the match is a text node, and
convert it to str.
{text_xpath} is an xpath that returns a text node. E.g., `.//text()`.
"""
res_el = first(el, text_xpath)
if res_el is None:
return default_val
return str(res_el).strip()
# try:
# except (ValueError, TypeError, AttributeError):
# return default_val
def first_str_orig(el, text_xpath, default_val=None):
try:
return str(first(el, text_xpath)).strip()
except (ValueError, TypeError, AttributeError):
return default_val
def first_int(el, text_xpath, default_int=None):
try:
# int() includes an implicit strip().
return int(first(el, text_xpath))
except (ValueError, TypeError, AttributeError):
return default_int
def multiple_str(el, text_xpath):
el = el.xpath(text_xpath)
return [str(x) for x in el] if el else []
def has(el, xpath):
return first(el, xpath) is not None
def get_data_table_list(root_el):
"""Return list of dataTable elements in EML doc"""
if not root_el:
return []
return root_el.xpath('.//dataset/dataTable')
def get_data_table_by_data_url(el, data_url):
for dt_el in el.xpath('.//dataset/dataTable'):
url = first_str(dt_el, './/physical/distribution/online/url/text()')
url = url[url.find('/PACKAGE/') :]
if url == data_url.as_posix():
return dt_el
raise dex.exc.EMLError(f'Missing DataTable in EML. data_url="{data_url}"')
def get_data_table_by_package_id(el, pkg_path):
for dt_el in el.xpath('.//dataset/dataTable'):
url = first_str(dt_el, './/physical/distribution/online/url/text()')
# log.debug(f'{url}')
# log.debug(f'{pkg_path}')
if url and url.lower().endswith(pkg_path):
return dt_el
raise dex.exc.EMLError(f'Missing DataTable in EML. pkg_path="{pkg_path}"')
|
# Generated by Django 2.2.4 on 2019-09-02 18:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("bikesharing", "0011_auto_20190911_1936"),
]
operations = [
migrations.AlterModelOptions(
name="location", options={"get_latest_by": "reported_at"},
),
migrations.RemoveField(model_name="bike", name="current_position",),
]
|
import os
import django_heroku
import dj_database_url
from meridien.settings.base import *
# Static files
STATIC_ROOT = 'static'
# Security
DEBUG = False
SECRET_KEY = os.getenv('MERIDIEN_SECRET_KEY')
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = dict()
DATABASES['default'] = dj_database_url.config(conn_max_age=600, ssl_require=True)
# CORS settings
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (
os.getenv('MERIDIEN_FRONT_END_DOMAIN'),
)
# Email settings
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = os.getenv('MERIDIEN_EMAIL_HOST')
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST_USER = os.getenv('MERIDIEN_EMAIL_USER')
EMAIL_HOST_PASSWORD = os.getenv('MERIDIEN_EMAIL_PASSWORD')
# Communication settings
ALLOWED_HOSTS += [
os.getenv('MERIDIEN_DOMAIN')
]
CSRF_COOKIE_SECURE = True
FRONT_END_DOMAIN = os.getenv('MERIDIEN_FRONT_END_DOMAIN')
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
SECURE_HSTS_SECONDS = 60
SECURE_REFERRER_POLICY = 'same-origin'
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
# Activate Django-Heroku.
django_heroku.settings(locals())
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import platform
import shutil
import time
import warnings
import torch
import mmcv
from mmcv.runner.base_runner import BaseRunner
from mmcv.runner.epoch_based_runner import EpochBasedRunner
from mmcv.runner.builder import RUNNERS
from mmcv.runner.checkpoint import save_checkpoint
from mmcv.runner.utils import get_host_info
import pandas as pd
@RUNNERS.register_module()
class KDBasedRunnerReadIter(BaseRunner):
"""KD-based Runner.
This runner trains models epoch by epoch. For each epoch, the runner feeds in the teacher model.
"""
def __init__(self,
model,
file_log_min_epochs="../../scratch/dso/openss/work_dirs/min_loss_epochs.csv",
batch_processor=None,
optimizer=None,
work_dir=None,
logger=None,
meta=None,
max_iters=None,
max_epochs=None
):
super(KDBasedRunnerReadIter, self).__init__(
model,
batch_processor,
optimizer,
work_dir,
logger,
meta,
max_iters,
max_epochs)
# self.t_model = t_model
self.log_min_epochs = None
self.file_log_min_epochs = file_log_min_epochs
self.tensor_epoch = torch.tensor([0]).cuda()
def run_iter(self, data_batch, train_mode, **kwargs):
if self.batch_processor is not None:
outputs = self.batch_processor(
self.model, data_batch, train_mode=train_mode, **kwargs)
elif train_mode:
outputs = self.model.train_step(data_batch, self.optimizer, self.log_min_epochs,
self.tensor_epoch, **kwargs)
else:
outputs = self.model.val_step(data_batch, self.optimizer, **kwargs)
if not isinstance(outputs, dict):
raise TypeError('"batch_processor()" or "model.train_step()"'
'and "model.val_step()" must return a dict')
if 'log_vars' in outputs:
self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
self.outputs = outputs
def train(self, data_loader, **kwargs):
self.model.train()
self.mode = 'train'
self.data_loader = data_loader
self._max_iters = self._max_epochs * len(self.data_loader)
self.call_hook('before_train_epoch')
time.sleep(2) # Prevent possible deadlock during epoch transition
for i, data_batch in enumerate(self.data_loader):
self._inner_iter = i
self.call_hook('before_train_iter')
self.run_iter(data_batch, train_mode=True, **kwargs)
self.call_hook('after_train_iter')
self._iter += 1
self.call_hook('after_train_epoch')
self._epoch += 1
self.tensor_epoch[0] += 1
@torch.no_grad()
def val(self, data_loader, **kwargs):
self.model.eval()
self.mode = 'val'
self.data_loader = data_loader
self.call_hook('before_val_epoch')
time.sleep(2) # Prevent possible deadlock during epoch transition
for i, data_batch in enumerate(self.data_loader):
self._inner_iter = i
self.call_hook('before_val_iter')
self.run_iter(data_batch, train_mode=False)
self.call_hook('after_val_iter')
self.call_hook('after_val_epoch')
def run(self, data_loaders, workflow, max_epochs=None, **kwargs):
"""Start running.
Args:
data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
and validation.
workflow (list[tuple]): A list of (phase, epochs) to specify the
running order and epochs. E.g., [('train', 2), ('val', 1)] means
running 2 epochs for training and 1 epoch for validation,
iteratively.
"""
assert isinstance(data_loaders, list)
assert mmcv.is_list_of(workflow, tuple)
assert len(data_loaders) == len(workflow)
if max_epochs is not None:
warnings.warn(
'setting max_epochs in run is deprecated, '
'please set max_epochs in runner_config', DeprecationWarning)
self._max_epochs = max_epochs
assert self._max_epochs is not None, (
'max_epochs must be specified during instantiation')
for i, flow in enumerate(workflow):
mode, epochs = flow
if mode == 'train':
self._max_iters = self._max_epochs * len(data_loaders[i])
break
work_dir = self.work_dir if self.work_dir is not None else 'NONE'
self.logger.info('Start running, host: %s, work_dir: %s',
get_host_info(), work_dir)
self.logger.info('Hooks will be executed in the following order:\n%s',
self.get_hook_info())
self.logger.info('workflow: %s, max: %d epochs', workflow,
self._max_epochs)
self.call_hook('before_run')
df = pd.read_csv(self.file_log_min_epochs, header=None)
s_array = df[[1, 2]].to_numpy().min(axis=1)
self.log_min_epochs = torch.from_numpy(s_array).cuda()
while self.epoch < self._max_epochs:
for i, flow in enumerate(workflow):
mode, epochs = flow
if isinstance(mode, str): # self.train()
if not hasattr(self, mode):
raise ValueError(
f'runner has no method named "{mode}" to run an '
'epoch')
epoch_runner = getattr(self, mode)
else:
raise TypeError(
'mode in workflow must be a str, but got {}'.format(
type(mode)))
for _ in range(epochs):
if mode == 'train' and self.epoch >= self._max_epochs:
break
epoch_runner(data_loaders[i], **kwargs)
time.sleep(1) # wait for some hooks like loggers to finish
self.call_hook('after_run')
def save_checkpoint(self,
out_dir,
filename_tmpl='epoch_{}.pth',
save_optimizer=True,
meta=None,
create_symlink=True):
"""Save the checkpoint.
Args:
out_dir (str): The directory that checkpoints are saved.
filename_tmpl (str, optional): The checkpoint filename template,
which contains a placeholder for the epoch number.
Defaults to 'epoch_{}.pth'.
save_optimizer (bool, optional): Whether to save the optimizer to
the checkpoint. Defaults to True.
meta (dict, optional): The meta information to be saved in the
checkpoint. Defaults to None.
create_symlink (bool, optional): Whether to create a symlink
"latest.pth" to point to the latest checkpoint.
Defaults to True.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(
f'meta should be a dict or None, but got {type(meta)}')
if self.meta is not None:
meta.update(self.meta)
# Note: meta.update(self.meta) should be done before
# meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise
# there will be problems with resumed checkpoints.
# More details in https://github.com/open-mmlab/mmcv/pull/1108
meta.update(epoch=self.epoch + 1, iter=self.iter)
filename = filename_tmpl.format(self.epoch + 1)
filepath = osp.join(out_dir, filename)
optimizer = self.optimizer if save_optimizer else None
save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
# in some environments, `os.symlink` is not supported, you may need to
# set `create_symlink` to False
if create_symlink:
dst_file = osp.join(out_dir, 'latest.pth')
if platform.system() != 'Windows':
mmcv.symlink(filename, dst_file)
else:
shutil.copy(filepath, dst_file)
|
# encoding: utf-8
from sqlalchemy.orm import relation
from sqlalchemy import types, Column, Table, ForeignKey, and_, UniqueConstraint
from ckan.model import (
core,
meta,
types as _types,
domain_object,
vocabulary,
extension as _extension,
)
import ckan # this import is needed
import ckan.model
import ckan.lib.dictization
import ckan.lib.maintain as maintain
__all__ = ['tag_table', 'package_tag_table', 'Tag', 'PackageTag',
'MAX_TAG_LENGTH', 'MIN_TAG_LENGTH']
MAX_TAG_LENGTH = 100
MIN_TAG_LENGTH = 2
tag_table = Table('tag', meta.metadata,
Column('id', types.UnicodeText, primary_key=True, default=_types.make_uuid),
Column('name', types.Unicode(MAX_TAG_LENGTH), nullable=False),
Column('vocabulary_id',
types.Unicode(vocabulary.VOCABULARY_NAME_MAX_LENGTH),
ForeignKey('vocabulary.id')),
UniqueConstraint('name', 'vocabulary_id')
)
package_tag_table = Table('package_tag', meta.metadata,
Column('id', types.UnicodeText, primary_key=True, default=_types.make_uuid),
Column('package_id', types.UnicodeText, ForeignKey('package.id')),
Column('tag_id', types.UnicodeText, ForeignKey('tag.id')),
Column('state', types.UnicodeText, default=core.State.ACTIVE),
)
class Tag(domain_object.DomainObject):
def __init__(self, name='', vocabulary_id=None):
self.name = name
self.vocabulary_id = vocabulary_id
# not stateful so same as purge
def delete(self):
self.purge()
@classmethod
def by_id(cls, tag_id, autoflush=True):
'''Return the tag with the given id, or None.
:param tag_id: the id of the tag to return
:type tag_id: string
:returns: the tag with the given id, or None if there is no tag with
that id
:rtype: ckan.model.tag.Tag
'''
query = meta.Session.query(Tag).filter(Tag.id==tag_id)
query = query.autoflush(autoflush)
tag = query.first()
return tag
@classmethod
def by_name(cls, name, vocab=None, autoflush=True):
'''Return the tag with the given name, or None.
By default only free tags (tags which do not belong to any vocabulary)
are returned.
If the optional argument ``vocab`` is given then only tags from that
vocabulary are returned, or ``None`` if there is no tag with that name
in that vocabulary.
:param name: the name of the tag to return
:type name: string
:param vocab: the vocabulary to look in (optional, default: None)
:type vocab: ckan.model.vocabulary.Vocabulary
:returns: the tag object with the given id or name, or None if there is
no tag with that id or name
:rtype: ckan.model.tag.Tag
'''
if vocab:
query = meta.Session.query(Tag).filter(Tag.name==name).filter(
Tag.vocabulary_id==vocab.id)
else:
query = meta.Session.query(Tag).filter(Tag.name==name).filter(
Tag.vocabulary_id==None)
query = query.autoflush(autoflush)
tag = query.first()
return tag
@classmethod
def get(cls, tag_id_or_name, vocab_id_or_name=None):
'''Return the tag with the given id or name, or None.
By default only free tags (tags which do not belong to any vocabulary)
are returned.
If the optional argument ``vocab_id_or_name`` is given then only tags
that belong to that vocabulary will be returned, and ``None`` will be
returned if there is no vocabulary with that vocabulary id or name or
if there is no tag with that tag id or name in that vocabulary.
:param tag_id_or_name: the id or name of the tag to return
:type tag_id_or_name: string
:param vocab_id_or_name: the id or name of the vocabulary to look for
the tag in
:type vocab_id_or_name: string
:returns: the tag object with the given id or name, or None if there is
no tag with that id or name
:rtype: ckan.model.tag.Tag
'''
# First try to get the tag by ID.
tag = Tag.by_id(tag_id_or_name)
if tag:
return tag
else:
# If that didn't work, try to get the tag by name and vocabulary.
if vocab_id_or_name:
vocab = vocabulary.Vocabulary.get(vocab_id_or_name)
if vocab is None:
# The user specified an invalid vocab.
raise ckan.logic.NotFound("could not find vocabulary '%s'"
% vocab_id_or_name)
else:
vocab = None
tag = Tag.by_name(tag_id_or_name, vocab=vocab)
return tag
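# Illustrative lookups with the classmethod above (tag and vocabulary names are made up):
#   Tag.get('economy')                              # free tag, by name or id
#   Tag.get('economy', vocab_id_or_name='topics')   # tag scoped to a vocabulary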
# Todo: Make sure tag names can't be changed to look like tag IDs?
@classmethod
@maintain.deprecated()
def search_by_name(cls, search_term, vocab_id_or_name=None):
'''DEPRECATED
Return all tags whose names contain a given string.
By default only free tags (tags which do not belong to any vocabulary)
are returned. If the optional argument ``vocab_id_or_name`` is given
then only tags from that vocabulary are returned.
:param search_term: the string to search for in the tag names
:type search_term: string
:param vocab_id_or_name: the id or name of the vocabulary to look in
(optional, default: None)
:type vocab_id_or_name: string
:returns: a list of tags that match the search term
:rtype: list of ckan.model.tag.Tag objects
'''
if vocab_id_or_name:
vocab = vocabulary.Vocabulary.get(vocab_id_or_name)
if vocab is None:
# The user specified an invalid vocab.
return None
query = meta.Session.query(Tag).filter(Tag.vocabulary_id==vocab.id)
else:
query = meta.Session.query(Tag)
search_term = search_term.strip().lower()
query = query.filter(Tag.name.contains(search_term))
query = query.distinct().join(Tag.package_tags)
return query
@classmethod
def all(cls, vocab_id_or_name=None):
'''Return all tags that are currently applied to any dataset.
By default only free tags (tags which do not belong to any vocabulary)
are returned. If the optional argument ``vocab_id_or_name`` is given
then only tags from that vocabulary are returned.
:param vocab_id_or_name: the id or name of the vocabulary to look in
(optional, default: None)
:type vocab_id_or_name: string
:returns: a list of all tags that are currently applied to any dataset
:rtype: list of ckan.model.tag.Tag objects
'''
if vocab_id_or_name:
vocab = vocabulary.Vocabulary.get(vocab_id_or_name)
if vocab is None:
# The user specified an invalid vocab.
raise ckan.logic.NotFound("could not find vocabulary '%s'"
% vocab_id_or_name)
query = meta.Session.query(Tag).filter(Tag.vocabulary_id==vocab.id)
else:
query = meta.Session.query(Tag).filter(Tag.vocabulary_id == None)
query = query.distinct().join(PackageTag)
query = query.filter_by(state='active')
return query
@property
def packages(self):
'''Return a list of all packages that have this tag, sorted by name.
:rtype: list of ckan.model.package.Package objects
'''
q = meta.Session.query(ckan.model.package.Package)
q = q.join(PackageTag)
q = q.filter_by(tag_id=self.id)
q = q.filter_by(state='active')
q = q.order_by(ckan.model.package.Package.name)
packages = q.all()
return packages
def __repr__(self):
return '<Tag %s>' % self.name
class PackageTag(core.StatefulObjectMixin,
domain_object.DomainObject):
def __init__(self, package=None, tag=None, state=None, **kwargs):
self.package = package
self.tag = tag
self.state = state
for k,v in kwargs.items():
setattr(self, k, v)
def __repr__(self):
s = u'<PackageTag package=%s tag=%s>' % (self.package.name, self.tag.name)
return s.encode('utf8')
@classmethod
@maintain.deprecated()
def by_name(cls, package_name, tag_name, vocab_id_or_name=None,
autoflush=True):
'''DEPRECATED (and broken - missing the join to Tag)
Return the PackageTag for the given package and tag names, or None.
By default only PackageTags for free tags (tags which do not belong to
any vocabulary) are returned. If the optional argument
``vocab_id_or_name`` is given then only PackageTags for tags from that
vocabulary are returned.
:param package_name: the name of the package to look for
:type package_name: string
:param tag_name: the name of the tag to look for
:type tag_name: string
:param vocab_id_or_name: the id or name of the vocabulary to look for
the tag in
:type vocab_id_or_name: string
:returns: the PackageTag for the given package and tag names, or None
if there is no PackageTag for those package and tag names
:rtype: ckan.model.tag.PackageTag
'''
if vocab_id_or_name:
vocab = vocabulary.Vocabulary.get(vocab_id_or_name)
if vocab is None:
# The user specified an invalid vocab.
return None
query = (meta.Session.query(PackageTag, Tag, ckan.model.Package)
.filter(Tag.vocabulary_id == vocab.id)
.filter(ckan.model.Package.name==package_name)
.filter(Tag.name==tag_name))
else:
query = (meta.Session.query(PackageTag)
.filter(ckan.model.Package.name==package_name)
.filter(Tag.name==tag_name))
query = query.autoflush(autoflush)
return query.one()[0]
def related_packages(self):
return [self.package]
meta.mapper(Tag, tag_table, properties={
'package_tags': relation(PackageTag, backref='tag',
cascade='all, delete, delete-orphan',
),
'vocabulary': relation(vocabulary.Vocabulary,
order_by=tag_table.c.name)
},
order_by=tag_table.c.name,
)
# NB meta.mapper(tag.PackageTag... is found in package.py, because if it was
# here we'd get circular references
|
# Copyright 2018 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
# Distributed MNIST on grid based on TensorFlow MNIST example
from __future__ import absolute_import
from __future__ import division
from __future__ import nested_scopes
from __future__ import print_function
def print_log(worker_num, arg):
print("{0}: {1}".format(worker_num, arg))
def map_fun(args, ctx):
from datetime import datetime
import math
import numpy
import tensorflow as tf
import time
worker_num = ctx.worker_num
job_name = ctx.job_name
task_index = ctx.task_index
# Delay PS nodes a bit, since workers seem to reserve GPUs more quickly/reliably (w/o conflict)
if job_name == "ps":
time.sleep((worker_num + 1) * 5)
# Parameters
IMAGE_PIXELS = 28
hidden_units = 128
batch_size = args.batch_size
# Get TF cluster and server instances
cluster, server = ctx.start_cluster_server(1, args.rdma)
def feed_dict(batch):
# Convert from [(images, labels)] to two numpy arrays of the proper type
images = []
labels = []
for item in batch:
images.append(item[0])
labels.append(item[1])
xs = numpy.array(images)
xs = xs.astype(numpy.float32)
xs = xs / 255.0
ys = numpy.array(labels)
ys = ys.astype(numpy.uint8)
return (xs, ys)
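# e.g. feed_dict([(img0, onehot0), (img1, onehot1)]) returns a float32 array of shape
# (2, 784) scaled to [0, 1] and a label array of shape (2, 10); shapes are illustrative,
# assuming flattened 28x28 images and one-hot labels as fed into x and y_ below.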
if job_name == "ps":
server.join()
elif job_name == "worker":
# Assigns ops to the local worker by default.
with tf.device(tf.train.replica_device_setter(
worker_device="/job:worker/task:%d" % task_index,
cluster=cluster)):
# Placeholders or QueueRunner/Readers for input data
with tf.name_scope('inputs'):
x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS * IMAGE_PIXELS], name="x")
y_ = tf.placeholder(tf.float32, [None, 10], name="y_")
x_img = tf.reshape(x, [-1, IMAGE_PIXELS, IMAGE_PIXELS, 1])
tf.summary.image("x_img", x_img)
with tf.name_scope('layer'):
# Variables of the hidden layer
with tf.name_scope('hidden_layer'):
hid_w = tf.Variable(tf.truncated_normal([IMAGE_PIXELS * IMAGE_PIXELS, hidden_units], stddev=1.0 / IMAGE_PIXELS), name="hid_w")
hid_b = tf.Variable(tf.zeros([hidden_units]), name="hid_b")
tf.summary.histogram("hidden_weights", hid_w)
hid_lin = tf.nn.xw_plus_b(x, hid_w, hid_b)
hid = tf.nn.relu(hid_lin)
# Variables of the softmax layer
with tf.name_scope('softmax_layer'):
sm_w = tf.Variable(tf.truncated_normal([hidden_units, 10], stddev=1.0 / math.sqrt(hidden_units)), name="sm_w")
sm_b = tf.Variable(tf.zeros([10]), name="sm_b")
tf.summary.histogram("softmax_weights", sm_w)
y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))
global_step = tf.train.get_or_create_global_step()
with tf.name_scope('loss'):
loss = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
tf.summary.scalar("loss", loss)
with tf.name_scope('train'):
train_op = tf.train.AdagradOptimizer(0.01).minimize(loss, global_step=global_step)
# Test trained model
label = tf.argmax(y_, 1, name="label")
prediction = tf.argmax(y, 1, name="prediction")
correct_prediction = tf.equal(prediction, label)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="accuracy")
tf.summary.scalar("acc", accuracy)
summary_op = tf.summary.merge_all()
logdir = ctx.absolute_path(args.model)
print("tensorflow model path: {0}".format(logdir))
hooks = [tf.train.StopAtStepHook(last_step=100000)]
if job_name == "worker" and task_index == 0:
summary_writer = tf.summary.FileWriter(logdir, graph=tf.get_default_graph())
# The MonitoredTrainingSession takes care of session initialization, restoring from
# a checkpoint, and closing when done or an error occurs
with tf.train.MonitoredTrainingSession(master=server.target,
is_chief=(task_index == 0),
checkpoint_dir=logdir,
hooks=hooks) as mon_sess:
step = 0
tf_feed = ctx.get_data_feed(args.mode == "train")
while not mon_sess.should_stop() and not tf_feed.should_stop() and step < args.steps:
# Run a training step asynchronously
# See `tf.train.SyncReplicasOptimizer` for additional details on how to
# perform *synchronous* training.
# using feed_dict
batch_xs, batch_ys = feed_dict(tf_feed.next_batch(batch_size))
feed = {x: batch_xs, y_: batch_ys}
if len(batch_xs) > 0:
if args.mode == "train":
_, summary, step = mon_sess.run([train_op, summary_op, global_step], feed_dict=feed)
# print accuracy and save model checkpoint to HDFS every 100 steps
if (step % 100 == 0):
print("{0} step: {1} accuracy: {2}".format(datetime.now().isoformat(), step, mon_sess.run(accuracy, {x: batch_xs, y_: batch_ys})))
if task_index == 0:
summary_writer.add_summary(summary, step)
else: # args.mode == "inference"
labels, preds, acc = mon_sess.run([label, prediction, accuracy], feed_dict=feed)
results = ["{0} Label: {1}, Prediction: {2}".format(datetime.now().isoformat(), l, p) for l, p in zip(labels, preds)]
tf_feed.batch_results(results)
print("results: {0}, acc: {1}".format(results, acc))
if mon_sess.should_stop() or step >= args.steps:
tf_feed.terminate()
# Ask for all the services to stop.
print("{0} stopping MonitoredTrainingSession".format(datetime.now().isoformat()))
if job_name == "worker" and task_index == 0:
summary_writer.close()
|
from fastapi import APIRouter, Header, Query, Depends
from application.controllers.courses_controller import *
from application.controllers.users_controller import *
from application.controllers.payments_controller import *
from application.services.auth import auth_service
from typing import Optional
router = APIRouter()
#Courses
@router.get('/courses/{course_id}', response_model = dict, status_code = 200)
async def get_courses_metrics_by_id(course_id: str,
apikey: str = Header(None)
):
auth_service.check_api_key(apikey)
return CoursesController.get_metrics_by_id(course_id)
@router.get('/courses/', status_code = 200)
async def get_all_courses_metrics(apikey: str = Header(None),
category: Optional[int] = None,
subscription: Optional[str] = None
):
auth_service.check_api_key(apikey)
return CoursesController.get_metrics(category, subscription)
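# Illustrative requests against the course endpoints (host and key are placeholders;
# the API key travels in the "apikey" header):
#   curl -H "apikey: $API_KEY" http://localhost:8000/courses/<course_id>
#   curl -H "apikey: $API_KEY" "http://localhost:8000/courses/?category=1&subscription=premium"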
#Users
@router.get('/users/{user_id}', response_model = dict, status_code = 200)
async def get_user_metrics_by_id(user_id: str,
apikey: str = Header(None)
):
auth_service.check_api_key(apikey)
return UsersController.get_metrics_by_id(user_id)
@router.get('/users/', response_model = dict, status_code = 200)
async def get_users_metrics(apikey: str = Header(None)):
auth_service.check_api_key(apikey)
return UsersController.get_metrics()
#Payments
@router.get('/payments/', status_code = 200)
async def get_payments_metrics(apikey: str = Header(None)):
auth_service.check_api_key(apikey)
return PaymentsController.get_metrics()
|
from __future__ import absolute_import, division, print_function, with_statement
import os
import sys
import traceback
from tornado.escape import utf8, native_str, to_unicode
from tornado.template import Template, DictLoader, ParseError, Loader
from tornado.test.util import unittest
from tornado.util import u, ObjectDict, unicode_type
class TemplateTest(unittest.TestCase):
def test_simple(self):
template = Template("Hello {{ name }}!")
self.assertEqual(template.generate(name="Ben"),
b"Hello Ben!")
def test_bytes(self):
template = Template("Hello {{ name }}!")
self.assertEqual(template.generate(name=utf8("Ben")),
b"Hello Ben!")
def test_expressions(self):
template = Template("2 + 2 = {{ 2 + 2 }}")
self.assertEqual(template.generate(), b"2 + 2 = 4")
def test_comment(self):
template = Template("Hello{# TODO i18n #} {{ name }}!")
self.assertEqual(template.generate(name=utf8("Ben")),
b"Hello Ben!")
def test_include(self):
loader = DictLoader({
"index.html": '{% include "header.html" %}\nbody text',
"header.html": "header text",
})
self.assertEqual(loader.load("index.html").generate(),
b"header text\nbody text")
def test_extends(self):
loader = DictLoader({
"base.html": """\
<title>{% block title %}default title{% end %}</title>
<body>{% block body %}default body{% end %}</body>
""",
"page.html": """\
{% extends "base.html" %}
{% block title %}page title{% end %}
{% block body %}page body{% end %}
""",
})
self.assertEqual(loader.load("page.html").generate(),
b"<title>page title</title>\n<body>page body</body>\n")
def test_relative_load(self):
loader = DictLoader({
"a/1.html": "{% include '2.html' %}",
"a/2.html": "{% include '../b/3.html' %}",
"b/3.html": "ok",
})
self.assertEqual(loader.load("a/1.html").generate(),
b"ok")
def test_escaping(self):
self.assertRaises(ParseError, lambda: Template("{{"))
self.assertRaises(ParseError, lambda: Template("{%"))
self.assertEqual(Template("{{!").generate(), b"{{")
self.assertEqual(Template("{%!").generate(), b"{%")
self.assertEqual(Template("{{ 'expr' }} {{!jquery expr}}").generate(),
b"expr {{jquery expr}}")
def test_unicode_template(self):
template = Template(utf8(u("\u00e9")))
self.assertEqual(template.generate(), utf8(u("\u00e9")))
def test_unicode_literal_expression(self):
# Unicode literals should be usable in templates. Note that this
# test simulates unicode characters appearing directly in the
# template file (with utf8 encoding), i.e. \u escapes would not
# be used in the template file itself.
if str is unicode_type:
# python 3 needs a different version of this test since
# 2to3 doesn't run on template internals
template = Template(utf8(u('{{ "\u00e9" }}')))
else:
template = Template(utf8(u('{{ u"\u00e9" }}')))
self.assertEqual(template.generate(), utf8(u("\u00e9")))
def test_custom_namespace(self):
loader = DictLoader({"test.html": "{{ inc(5) }}"}, namespace={"inc": lambda x: x + 1})
self.assertEqual(loader.load("test.html").generate(), b"6")
def test_apply(self):
def upper(s):
return s.upper()
template = Template(utf8("{% apply upper %}foo{% end %}"))
self.assertEqual(template.generate(upper=upper), b"FOO")
def test_unicode_apply(self):
def upper(s):
return to_unicode(s).upper()
template = Template(utf8(u("{% apply upper %}foo \u00e9{% end %}")))
self.assertEqual(template.generate(upper=upper), utf8(u("FOO \u00c9")))
def test_bytes_apply(self):
def upper(s):
return utf8(to_unicode(s).upper())
template = Template(utf8(u("{% apply upper %}foo \u00e9{% end %}")))
self.assertEqual(template.generate(upper=upper), utf8(u("FOO \u00c9")))
def test_if(self):
template = Template(utf8("{% if x > 4 %}yes{% else %}no{% end %}"))
self.assertEqual(template.generate(x=5), b"yes")
self.assertEqual(template.generate(x=3), b"no")
def test_if_empty_body(self):
template = Template(utf8("{% if True %}{% else %}{% end %}"))
self.assertEqual(template.generate(), b"")
def test_try(self):
template = Template(utf8("""{% try %}
try{% set y = 1/x %}
{% except %}-except
{% else %}-else
{% finally %}-finally
{% end %}"""))
self.assertEqual(template.generate(x=1), b"\ntry\n-else\n-finally\n")
self.assertEqual(template.generate(x=0), b"\ntry-except\n-finally\n")
def test_comment_directive(self):
template = Template(utf8("{% comment blah blah %}foo"))
self.assertEqual(template.generate(), b"foo")
def test_break_continue(self):
template = Template(utf8("""\
{% for i in range(10) %}
{% if i == 2 %}
{% continue %}
{% end %}
{{ i }}
{% if i == 6 %}
{% break %}
{% end %}
{% end %}"""))
result = template.generate()
# remove extraneous whitespace
result = b''.join(result.split())
self.assertEqual(result, b"013456")
def test_break_outside_loop(self):
try:
Template(utf8("{% break %}"))
raise Exception("Did not get expected exception")
except ParseError:
pass
def test_break_in_apply(self):
# This test verifies current behavior, although of course it would
# be nice if apply didn't cause seemingly unrelated breakage
try:
Template(utf8("{% for i in [] %}{% apply foo %}{% break %}{% end %}{% end %}"))
raise Exception("Did not get expected exception")
except ParseError:
pass
@unittest.skipIf(sys.version_info >= division.getMandatoryRelease(),
'no testable future imports')
def test_no_inherit_future(self):
# This file has from __future__ import division...
self.assertEqual(1 / 2, 0.5)
# ...but the template doesn't
template = Template('{{ 1 / 2 }}')
self.assertEqual(template.generate(), '0')
def test_non_ascii_name(self):
loader = DictLoader({u("t\u00e9st.html"): "hello"})
self.assertEqual(loader.load(u("t\u00e9st.html")).generate(), b"hello")
class StackTraceTest(unittest.TestCase):
def test_error_line_number_expression(self):
loader = DictLoader({"test.html": """one
two{{1/0}}
three
"""})
try:
loader.load("test.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
self.assertTrue("# test.html:2" in traceback.format_exc())
def test_error_line_number_directive(self):
loader = DictLoader({"test.html": """one
two{%if 1/0%}
three{%end%}
"""})
try:
loader.load("test.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
self.assertTrue("# test.html:2" in traceback.format_exc())
def test_error_line_number_module(self):
loader = DictLoader({
"base.html": "{% module Template('sub.html') %}",
"sub.html": "{{1/0}}",
}, namespace={"_tt_modules": ObjectDict({"Template": lambda path, **kwargs: loader.load(path).generate(**kwargs)})})
try:
loader.load("base.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
exc_stack = traceback.format_exc()
self.assertTrue('# base.html:1' in exc_stack)
self.assertTrue('# sub.html:1' in exc_stack)
def test_error_line_number_include(self):
loader = DictLoader({
"base.html": "{% include 'sub.html' %}",
"sub.html": "{{1/0}}",
})
try:
loader.load("base.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
self.assertTrue("# sub.html:1 (via base.html:1)" in
traceback.format_exc())
def test_error_line_number_extends_base_error(self):
loader = DictLoader({
"base.html": "{{1/0}}",
"sub.html": "{% extends 'base.html' %}",
})
try:
loader.load("sub.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
exc_stack = traceback.format_exc()
self.assertTrue("# base.html:1" in exc_stack)
def test_error_line_number_extends_sub_error(self):
loader = DictLoader({
"base.html": "{% block 'block' %}{% end %}",
"sub.html": """
{% extends 'base.html' %}
{% block 'block' %}
{{1/0}}
{% end %}
"""})
try:
loader.load("sub.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
self.assertTrue("# sub.html:4 (via base.html:1)" in
traceback.format_exc())
def test_multi_includes(self):
loader = DictLoader({
"a.html": "{% include 'b.html' %}",
"b.html": "{% include 'c.html' %}",
"c.html": "{{1/0}}",
})
try:
loader.load("a.html").generate()
self.fail("did not get expected exception")
except ZeroDivisionError:
self.assertTrue("# c.html:1 (via b.html:1, a.html:1)" in
traceback.format_exc())
class ParseErrorDetailTest(unittest.TestCase):
def test_details(self):
loader = DictLoader({
"foo.html": "\n\n{{",
})
with self.assertRaises(ParseError) as cm:
loader.load("foo.html")
self.assertEqual("Missing end expression }} at foo.html:3",
str(cm.exception))
self.assertEqual("foo.html", cm.exception.filename)
self.assertEqual(3, cm.exception.lineno)
class AutoEscapeTest(unittest.TestCase):
def setUp(self):
self.templates = {
"escaped.html": "{% autoescape xhtml_escape %}{{ name }}",
"unescaped.html": "{% autoescape None %}{{ name }}",
"default.html": "{{ name }}",
"include.html": """\
escaped: {% include 'escaped.html' %}
unescaped: {% include 'unescaped.html' %}
default: {% include 'default.html' %}
""",
"escaped_block.html": """\
{% autoescape xhtml_escape %}\
{% block name %}base: {{ name }}{% end %}""",
"unescaped_block.html": """\
{% autoescape None %}\
{% block name %}base: {{ name }}{% end %}""",
# Extend a base template with different autoescape policy,
# with and without overriding the base's blocks
"escaped_extends_unescaped.html": """\
{% autoescape xhtml_escape %}\
{% extends "unescaped_block.html" %}""",
"escaped_overrides_unescaped.html": """\
{% autoescape xhtml_escape %}\
{% extends "unescaped_block.html" %}\
{% block name %}extended: {{ name }}{% end %}""",
"unescaped_extends_escaped.html": """\
{% autoescape None %}\
{% extends "escaped_block.html" %}""",
"unescaped_overrides_escaped.html": """\
{% autoescape None %}\
{% extends "escaped_block.html" %}\
{% block name %}extended: {{ name }}{% end %}""",
"raw_expression.html": """\
{% autoescape xhtml_escape %}\
expr: {{ name }}
raw: {% raw name %}""",
}
def test_default_off(self):
loader = DictLoader(self.templates, autoescape=None)
name = "Bobby <table>s"
self.assertEqual(loader.load("escaped.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("unescaped.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("default.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("include.html").generate(name=name),
b"escaped: Bobby <table>s\n"
b"unescaped: Bobby <table>s\n"
b"default: Bobby <table>s\n")
def test_default_on(self):
loader = DictLoader(self.templates, autoescape="xhtml_escape")
name = "Bobby <table>s"
self.assertEqual(loader.load("escaped.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("unescaped.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("default.html").generate(name=name),
b"Bobby <table>s")
self.assertEqual(loader.load("include.html").generate(name=name),
b"escaped: Bobby <table>s\n"
b"unescaped: Bobby <table>s\n"
b"default: Bobby <table>s\n")
def test_unextended_block(self):
loader = DictLoader(self.templates)
name = "<script>"
self.assertEqual(loader.load("escaped_block.html").generate(name=name),
b"base: <script>")
self.assertEqual(loader.load("unescaped_block.html").generate(name=name),
b"base: <script>")
def test_extended_block(self):
loader = DictLoader(self.templates)
def render(name):
return loader.load(name).generate(name="<script>")
self.assertEqual(render("escaped_extends_unescaped.html"),
b"base: <script>")
self.assertEqual(render("escaped_overrides_unescaped.html"),
b"extended: <script>")
self.assertEqual(render("unescaped_extends_escaped.html"),
b"base: <script>")
self.assertEqual(render("unescaped_overrides_escaped.html"),
b"extended: <script>")
def test_raw_expression(self):
loader = DictLoader(self.templates)
def render(name):
return loader.load(name).generate(name='<>&"')
self.assertEqual(render("raw_expression.html"),
b"expr: <>&"\n"
b"raw: <>&\"")
def test_custom_escape(self):
loader = DictLoader({"foo.py":
"{% autoescape py_escape %}s = {{ name }}\n"})
def py_escape(s):
self.assertEqual(type(s), bytes)
return repr(native_str(s))
def render(template, name):
return loader.load(template).generate(py_escape=py_escape,
name=name)
self.assertEqual(render("foo.py", "<html>"),
b"s = '<html>'\n")
self.assertEqual(render("foo.py", "';sys.exit()"),
b"""s = "';sys.exit()"\n""")
self.assertEqual(render("foo.py", ["not a string"]),
b"""s = "['not a string']"\n""")
def test_manual_minimize_whitespace(self):
# Whitespace including newlines is allowed within template tags
# and directives, and this is one way to avoid long lines while
# keeping extra whitespace out of the rendered output.
loader = DictLoader({'foo.txt': """\
{% for i in items
%}{% if i > 0 %}, {% end %}{#
#}{{i
}}{% end
%}""",
})
self.assertEqual(loader.load("foo.txt").generate(items=range(5)),
b"0, 1, 2, 3, 4")
def test_whitespace_by_filename(self):
# Default whitespace handling depends on the template filename.
loader = DictLoader({
"foo.html": " \n\t\n asdf\t ",
"bar.js": " \n\n\n\t qwer ",
"baz.txt": "\t zxcv\n\n",
"include.html": " {% include baz.txt %} \n ",
"include.txt": "\t\t{% include foo.html %} ",
})
# HTML and JS files have whitespace compressed by default.
self.assertEqual(loader.load("foo.html").generate(),
b"\nasdf ")
self.assertEqual(loader.load("bar.js").generate(),
b"\nqwer ")
# TXT files do not.
self.assertEqual(loader.load("baz.txt").generate(),
b"\t zxcv\n\n")
# Each file maintains its own status even when included in
# a file of the other type.
self.assertEqual(loader.load("include.html").generate(),
b" \t zxcv\n\n\n")
self.assertEqual(loader.load("include.txt").generate(),
b"\t\t\nasdf ")
def test_whitespace_by_loader(self):
templates = {
"foo.html": "\t\tfoo\n\n",
"bar.txt": "\t\tbar\n\n",
}
loader = DictLoader(templates, whitespace='all')
self.assertEqual(loader.load("foo.html").generate(), b"\t\tfoo\n\n")
self.assertEqual(loader.load("bar.txt").generate(), b"\t\tbar\n\n")
loader = DictLoader(templates, whitespace='single')
self.assertEqual(loader.load("foo.html").generate(), b" foo\n")
self.assertEqual(loader.load("bar.txt").generate(), b" bar\n")
loader = DictLoader(templates, whitespace='oneline')
self.assertEqual(loader.load("foo.html").generate(), b" foo ")
self.assertEqual(loader.load("bar.txt").generate(), b" bar ")
def test_whitespace_directive(self):
loader = DictLoader({
"foo.html": """\
{% whitespace oneline %}
{% for i in range(3) %}
{{ i }}
{% end %}
{% whitespace all %}
pre\tformatted
"""})
self.assertEqual(loader.load("foo.html").generate(),
b" 0 1 2 \n pre\tformatted\n")
class TemplateLoaderTest(unittest.TestCase):
def setUp(self):
self.loader = Loader(os.path.join(os.path.dirname(__file__), "templates"))
def test_utf8_in_file(self):
tmpl = self.loader.load("utf8.html")
result = tmpl.generate()
self.assertEqual(to_unicode(result).strip(), u("H\u00e9llo"))
|
"""
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
WSGI config for Toaster project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "Toaster.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "toastermain.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
import os
def limpar_tela():
os.system('cls' if os.name == 'nt' else 'clear')
|
import pygtrie
import json
import pickle
category = json.load( open( "dumps/category.json", "r" ) )
trie = pygtrie.CharTrie()
catMap = dict()
idx = 0
for data in category:
description = data["description"]
for word in description.split():
word = word.lower()
trie[word] = True
if word in catMap:
catMap[word].append(idx)
else:
catMap[word] = []
catMap[word].append(idx)
idx = idx + 1
pickle.dump( trie, open( "dumps/trie.p", "wb" ))
pickle.dump( catMap, open( "dumps/catMap.p","wb"))
# to load back
# u = pickle.load( open( "trie.p", "rb" ) ) to load back
# u = pickle.load( open( "dumps/catMap.json", "r" ) )
|
import logging
import numpy as np
import ray.ray_constants as ray_constants
logger = logging.getLogger(__name__)
class RayParams:
"""A class used to store the parameters used by Ray.
Attributes:
redis_address (str): The address of the Redis server to connect to. If
this address is not provided, then this command will start Redis, a
raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits.
redis_port (int): The port that the primary Redis shard should listen
to. If None, then a random port will be chosen.
redis_shard_ports: A list of the ports to use for the non-primary Redis
shards.
num_cpus (int): Number of CPUs to configure the raylet with.
num_gpus (int): Number of GPUs to configure the raylet with.
resources: A dictionary mapping the name of a resource to the quantity
of that resource available.
memory: Total available memory for workers requesting memory.
object_store_memory: The amount of memory (in bytes) to start the
object store with.
redis_max_memory: The max amount of memory (in bytes) to allow redis
to use, or None for no limit. Once the limit is exceeded, redis
will start LRU eviction of entries. This only applies to the
sharded redis tables (task and object tables).
object_manager_port (int): The port to use for the object manager.
node_manager_port: The port to use for the node manager.
node_ip_address (str): The IP address of the node that we are on.
raylet_ip_address (str): The IP address of the raylet that this node
connects to.
object_id_seed (int): Used to seed the deterministic generation of
object IDs. The same value can be used across multiple runs of the
same job in order to generate the object IDs in a consistent
manner. However, the same ID should not be used for different jobs.
redirect_worker_output: True if the stdout and stderr of worker
processes should be redirected to files.
redirect_output (bool): True if stdout and stderr for non-worker
processes should be redirected to files and false otherwise.
num_redis_shards: The number of Redis shards to start in addition to
the primary Redis shard.
redis_max_clients: If provided, attempt to configure Redis with this
maxclients number.
redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
worker_path (str): The path of the source code that will be run by the
worker.
huge_pages: Boolean flag indicating whether to start the Object
Store with hugetlbfs support. Requires plasma_directory.
include_webui: Boolean flag indicating whether to start the web
UI, which displays the status of the Ray cluster. If this value is
None, then the UI will be started if the relevant dependencies are
present.
webui_host: The host to bind the web UI server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
logging_level: Logging level, default will be logging.INFO.
logging_format: Logging format, default contains a timestamp,
filename, line number, and message. See ray_constants.py.
plasma_store_socket_name (str): If provided, it will specify the socket
name used by the plasma store.
raylet_socket_name (str): If provided, it will specify the socket path
used by the raylet process.
temp_dir (str): If provided, it will specify the root temporary
directory for the Ray process.
include_log_monitor (bool): If True, then start a log monitor to
monitor the log files for all processes on this node and push their
contents to Redis.
autoscaling_config: path to autoscaling config file.
include_java (bool): If True, the raylet backend can also support
Java worker.
java_worker_options (list): The command options for Java worker.
load_code_from_local: Whether load code from local file or from GCS.
_internal_config (str): JSON configuration for overriding
RayConfig defaults. For testing purposes ONLY.
"""
def __init__(self,
redis_address=None,
num_cpus=None,
num_gpus=None,
resources=None,
memory=None,
object_store_memory=None,
redis_max_memory=None,
redis_port=None,
redis_shard_ports=None,
object_manager_port=None,
node_manager_port=None,
node_ip_address=None,
raylet_ip_address=None,
object_id_seed=None,
driver_mode=None,
redirect_worker_output=None,
redirect_output=None,
num_redis_shards=None,
redis_max_clients=None,
redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
plasma_directory=None,
worker_path=None,
huge_pages=False,
include_webui=None,
webui_host="localhost",
logging_level=logging.INFO,
logging_format=ray_constants.LOGGER_FORMAT,
plasma_store_socket_name=None,
raylet_socket_name=None,
temp_dir=None,
include_log_monitor=None,
autoscaling_config=None,
include_java=False,
java_worker_options=None,
load_code_from_local=False,
_internal_config=None):
self.object_id_seed = object_id_seed
self.redis_address = redis_address
self.num_cpus = num_cpus
self.num_gpus = num_gpus
self.memory = memory
self.object_store_memory = object_store_memory
self.resources = resources
self.redis_max_memory = redis_max_memory
self.redis_port = redis_port
self.redis_shard_ports = redis_shard_ports
self.object_manager_port = object_manager_port
self.node_manager_port = node_manager_port
self.node_ip_address = node_ip_address
self.raylet_ip_address = raylet_ip_address
self.driver_mode = driver_mode
self.redirect_worker_output = redirect_worker_output
self.redirect_output = redirect_output
self.num_redis_shards = num_redis_shards
self.redis_max_clients = redis_max_clients
self.redis_password = redis_password
self.plasma_directory = plasma_directory
self.worker_path = worker_path
self.huge_pages = huge_pages
self.include_webui = include_webui
self.webui_host = webui_host
self.plasma_store_socket_name = plasma_store_socket_name
self.raylet_socket_name = raylet_socket_name
self.temp_dir = temp_dir
self.include_log_monitor = include_log_monitor
self.autoscaling_config = autoscaling_config
self.include_java = include_java
self.java_worker_options = java_worker_options
self.load_code_from_local = load_code_from_local
self._internal_config = _internal_config
self._check_usage()
def update(self, **kwargs):
"""Update the settings according to the keyword arguments.
Args:
kwargs: The keyword arguments to set corresponding fields.
"""
for arg in kwargs:
if hasattr(self, arg):
setattr(self, arg, kwargs[arg])
else:
raise ValueError("Invalid RayParams parameter in"
" update: %s" % arg)
self._check_usage()
def update_if_absent(self, **kwargs):
"""Update the settings when the target fields are None.
Args:
kwargs: The keyword arguments to set corresponding fields.
"""
for arg in kwargs:
if hasattr(self, arg):
if getattr(self, arg) is None:
setattr(self, arg, kwargs[arg])
else:
raise ValueError("Invalid RayParams parameter in"
" update_if_absent: %s" % arg)
self._check_usage()
def _check_usage(self):
if self.resources is not None:
assert "CPU" not in self.resources, (
"'CPU' should not be included in the resource dictionary. Use "
"num_cpus instead.")
assert "GPU" not in self.resources, (
"'GPU' should not be included in the resource dictionary. Use "
"num_gpus instead.")
if self.redirect_worker_output is not None:
raise DeprecationWarning(
"The redirect_worker_output argument is deprecated. To "
"control logging to the driver, use the 'log_to_driver' "
"argument to 'ray.init()'")
if self.redirect_output is not None:
raise DeprecationWarning(
"The redirect_output argument is deprecated.")
# Parse the numpy version.
numpy_version = np.__version__.split(".")
numpy_major, numpy_minor = int(numpy_version[0]), int(numpy_version[1])
if numpy_major <= 1 and numpy_minor < 16:
logger.warning("Using ray with numpy < 1.16.0 will result in slow "
"serialization. Upgrade numpy if using with ray.")
|
"""
Runner script for visualisation.
This can plot a node's neighbourhood with edge widths proportional to the attention coefficients,
entropy histograms for the distribution of attention weights, and histograms of the normalised weights.
"""
import argparse
from torch_geometric.data import DataLoader
from torch_geometric.datasets import Planetoid, PPI, GNNBenchmarkDataset
import data_utils
from run_config import data_config
from visualisation.entropy_histograms import draw_entropy_histogram
from visualisation.neighbourhood_attention_weights import draw_neighbour_attention_distribution
from visualisation.weight_histograms import draw_weights_histogram
def get_test_data(dataset_name):
# Quick and easy function to get the data we require for a test run to get the attention weights.
planetoid_datasets = ['Cora', 'Citeseer', 'Pubmed']
if dataset_name in planetoid_datasets:
return DataLoader(Planetoid(root='/tmp/' + dataset_name, name=dataset_name))
elif dataset_name == "PPI":
return DataLoader(PPI(root='/tmp/PPI', split='test'))
elif dataset_name == "PATTERN":
return DataLoader(GNNBenchmarkDataset(root='/tmp/Pattern', name="PATTERN", split='test'))
else:
raise ValueError(f"Dataset name not valid. Expected one of {planetoid_datasets}/PPI/PATTERN")
def main(dataset, vis_type, checkpoint_path=None, show=True, save=False):
    # By default, load the 100-epoch checkpoint for the chosen dataset, which should give a good level of accuracy / F1 score.
default_file_name_ending = "-100epochs.ckpt"
if dataset not in data_config.keys():
print(f"Dataset not valid. Must be one of {data_config.keys()}. {dataset} given.")
else:
config = data_config[dataset]
config['dataset'] = dataset # Necessary for some of the GATModel files
config['test_type'] = 'Inductive' if dataset == 'PPI' else "Transductive"
# Load the model, get the test data and prepare a test batch and then make a call to the forwards function.
gat_model = data_utils.load(config, default_file_name_ending, checkpoint_path=checkpoint_path)
test_data = get_test_data(config['dataset'])
batch = next(iter(test_data))
outputs, edge_index, attention_list = gat_model.forward_and_return_attention(batch,
return_attention_weights=True)
# Detach tensors so that we can use them
attention_list = [att.detach().cpu() for att in attention_list]
if vis_type == "Entropy":
draw_entropy_histogram(edge_index=edge_index, attention_weights=attention_list, num_nodes=batch.x.size()[0],
dataset_name=config['dataset'], save=save)
elif vis_type == "Neighbourhood":
draw_neighbour_attention_distribution(graph_labels=batch.y, edge_index=edge_index,
attention_weights=attention_list, dataset_name=config['dataset'],
layer_num=0, head_num=0, show=show, save=save)
elif vis_type == "Weight":
if config['dataset'] == "PPI":
epochs_recorded = [1, 5, 20, 50, 100]
for epoch_number in epochs_recorded:
                    # Load the checkpoint saved at each recorded epoch in succession. Once loaded, run a
                    # forward pass on a batch of the test data and plot the weight histogram for that epoch.
file_ending = "-" + str(epoch_number) + "epochs.ckpt"
gat_model = data_utils.load(config, file_ending)
outputs, edge_index, attention_list = gat_model.forward_and_return_attention(batch,
return_attention_weights=True)
draw_weights_histogram(edge_index=edge_index, attention_weights=attention_list,
num_nodes=batch.x.size()[0], epoch_number=epoch_number,
dataset_name=config['dataset'], save=save)
else:
file_ending = "-100epochs.ckpt"
gat_model = data_utils.load(config, file_ending)
outputs, edge_index, attention_list = gat_model.forward_and_return_attention(batch,
return_attention_weights=True)
draw_weights_histogram(edge_index=edge_index, attention_weights=attention_list,
num_nodes=batch.x.size()[0], epoch_number=100,
dataset_name=config['dataset'], save=True)
else:
raise Exception(
"Unknown visualisation type. Please use one of 'Entropy', 'Weight (only for PPI)' or 'Neighbourhood'")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Read in dataset and any other flags from command line')
parser.add_argument('--dataset', default='Cora')
    parser.add_argument('--vis_type', default='Entropy')  # Entropy, Neighbourhood, or Weight.
parser.add_argument('--checkpoint_path')
args = parser.parse_args()
dataset = args.dataset
main(dataset, args.vis_type, args.checkpoint_path)
|
import typing as t
import os
import requests
import pandas as pd
def assert_pvwatts_ready():
assert os.environ.get('PVWATTS_API_KEY') is not None, 'Missing PVWATTS_API_KEY envvar! Set it as envvar or into ' \
'os.environ["PVWATTS_API_KEY"]!'
def v6_1_kw_solar_ac_production(
lat: float,
lon: float,
losses: float = 10,
system_capacity_kw: float = 1,
country_code: str = 'US',
tilt: t.Optional[float] = None,
azimuth: float = 180,
module_type: int = 0,
pvwatts_params: t.Optional[dict] = None,
):
"""
Makes a call to PVWatts v6
Most params defined as per https://developer.nrel.gov/docs/solar/pvwatts/v6/
:param lat:
:param lon:
:param losses: System losses as a percent (default 10, or 10% losses)
:param system_capacity_kw: Defaults to 1kW for easy scaling
:param country_code:
:param tilt: 0 is facing up (horizontal), 90 is facing the horizon (vertical)
:param azimuth: 0 is North-facing, 180 is south-facing
:param module_type: 0 = Standard, 1 = Premium, 2 = Thin film
:param pvwatts_params: Key-values of any other pvwatts params to use/override
Other params to consider using:
- dc_ac_ratio: DC to AC ratio (default 1.2)
- inv_eff: Inverter efficiency at rated power (default 96)
- gcr: Ground coverage ratio, a measure of solar panel overlap (default 0.4)
:return:
"""
if tilt is not None:
assert 0 <= tilt <= 90, 'invalid tilt'
else:
        tilt = abs(lat)  # Rule of thumb: angle panels at (the absolute value of) latitude
assert 0 <= azimuth < 360, 'invalid azimuth'
params = {
'lat': lat,
'lon': lon,
'losses': losses,
'system_capacity': system_capacity_kw,
'country_code': country_code,
'tilt': tilt,
'azimuth': azimuth,
'dataset': 'nsrdb' if country_code == 'US' else 'intl',
'radius': 0, # Pass in radius=0 to use the closest station regardless of the distance.
'timeframe': 'hourly',
'module_type': module_type,
'array_type': 0, # Fixed - Open Rack
'api_key': os.environ["PVWATTS_API_KEY"],
**(pvwatts_params or {}),
}
resp = requests.get(
url='https://developer.nrel.gov/api/pvwatts/v6.json',
params=params,
)
if not resp.ok:
raise RuntimeError(f'PVWatts API call failed with {resp.status_code}: {resp.text}')
return resp
def resp_to_df(json):
return pd.DataFrame(
{
            'ac': json['outputs']['ac'],  # AC system output, Wac
'dc': json['outputs']['dc'], # DC array output, Wdc
'temp_amb': json['outputs']['tamb'], # Ambient temperature, deg C
'temp_cell': json['outputs']['tcell'], # Module temperature, deg C
'irr_diffuse': json['outputs']['df'], # Diffuse irradiance (W/m2)
'irr_poa': json['outputs']['poa'], # Plane-of-array irradiance (W/m2)
'irr_normal': json['outputs']['dn'], # Beam normal irradiance (W/m2)
},
index=pd.date_range(
start='2021-01-01',
end='2022-01-01', # Normally end date is inclusive, but see 'closed' kwarg
closed='left', # start date inclusive, end date exclusive
# tz=json['station_info']['tz'], # TODO: This is a constant offset like -08, param needs a str
freq='H',
)
)
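# Minimal usage sketch (assumes a valid PVWATTS_API_KEY is set in the environment;
# the coordinates and system size below are illustrative only):
if __name__ == '__main__':
    assert_pvwatts_ready()
    resp = v6_1_kw_solar_ac_production(lat=37.77, lon=-122.42, system_capacity_kw=5)
    df = resp_to_df(resp.json())
    # The hourly 'ac' values are AC output in watt-hours; summing and dividing by 1000
    # should give a rough annual energy estimate in kWh.
    print('Annual AC energy (kWh):', df['ac'].sum() / 1000)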
|
# -*- coding: utf-8 -*-
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from PIL import Image
VERSION = "1.1.4"
def pull_screenshot():
os.system('adb shell screencap -p /sdcard/autojump.png')
os.system('adb pull /sdcard/autojump.png .')
def jump(distance):
press_time = distance * 1.35
press_time = int(press_time)
cmd = 'adb shell input swipe 320 410 320 410 ' + str(press_time)
print(cmd)
os.system(cmd)
fig = plt.figure()
pull_screenshot()
img = np.array(Image.open('autojump.png'))
im = plt.imshow(img, animated=True)
update = True
click_count = 0
cor = []
def update_data():
return np.array(Image.open('autojump.png'))
def updatefig(*args):
global update
if update:
time.sleep(1.5)
pull_screenshot()
im.set_array(update_data())
update = False
return im,
def on_click(event):
global update
global ix, iy
global click_count
global cor
ix, iy = event.xdata, event.ydata
coords = [(ix, iy)]
print('now = ', coords)
cor.append(coords)
click_count += 1
if click_count > 1:
click_count = 0
cor1 = cor.pop()
cor2 = cor.pop()
distance = (cor1[0][0] - cor2[0][0])**2 + (cor1[0][1] - cor2[0][1])**2
distance = distance ** 0.5
print('distance = ', distance)
jump(distance)
update = True
fig.canvas.mpl_connect('button_press_event', on_click)
ani = animation.FuncAnimation(fig, updatefig, interval=50, blit=True)
plt.show()
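# Usage note (a sketch of how this script is driven, assuming adb is on PATH and a device
# running the WeChat "Jump Jump" mini-game is connected): run the script, then in the
# matplotlib window click the player piece followed by the target platform; the Euclidean
# distance between the two clicks is converted to a swipe duration by jump().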
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import codecs
import json
import logging
import mimetypes
import os
import re
import time
import zipfile
from collections import OrderedDict
import html5lib
from cheroot import wsgi
from django.conf import settings
from django.contrib.staticfiles.finders import find
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.cache import cache
from django.core.handlers.wsgi import WSGIRequest
from django.http import HttpResponse
from django.http import HttpResponseNotAllowed
from django.http import HttpResponseNotFound
from django.http import HttpResponseNotModified
from django.http import HttpResponsePermanentRedirect
from django.http.response import FileResponse
from django.http.response import StreamingHttpResponse
from django.template import Context
from django.template.engine import Engine
from django.utils.cache import patch_response_headers
from django.utils.encoding import force_str
from django.utils.http import http_date
from django.utils.safestring import mark_safe
from kolibri.core.content.errors import InvalidStorageFilenameError
from kolibri.core.content.utils.paths import get_content_storage_file_path
from kolibri.core.content.utils.paths import get_hashi_base_path
from kolibri.core.content.utils.paths import get_hashi_filename
from kolibri.core.content.utils.paths import get_hashi_path
from kolibri.core.content.utils.paths import get_zip_content_base_path
logger = logging.getLogger(__name__)
def add_security_headers(request, response):
response["Access-Control-Allow-Origin"] = "*"
response["Access-Control-Allow-Methods"] = "GET, OPTIONS"
requested_headers = request.META.get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS", "")
if requested_headers:
response["Access-Control-Allow-Headers"] = requested_headers
# restrict CSP to only allow resources to be loaded from self, to prevent info leakage
# (e.g. via passing user info out as GET parameters to an attacker's server), or inadvertent data usage
response[
"Content-Security-Policy"
] = "default-src 'self' 'unsafe-inline' 'unsafe-eval' data: blob:"
return response
def django_response_to_wsgi(response, environ, start_response):
status = "%d %s" % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append((str("Set-Cookie"), str(c.output(header=""))))
start_response(force_str(status), response_headers)
if getattr(response, "file_to_stream", None) is not None and environ.get(
"wsgi.file_wrapper"
):
response = environ["wsgi.file_wrapper"](response.file_to_stream)
return response
hashi_template = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=Edge,chrome=1">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="google" content="notranslate">
</head>
<body style="margin: 0; padding: 0;">
<script type="text/javascript">{}</script>
</body>
</html>
"""
allowed_methods = set(["GET", "OPTIONS"])
def _hashi_response_from_request(request):
if request.method not in allowed_methods:
return HttpResponseNotAllowed(allowed_methods)
if request.path_info.lstrip("/") != get_hashi_filename():
return HttpResponsePermanentRedirect(get_hashi_path())
if request.method == "OPTIONS":
return HttpResponse()
    # if the client has a cached version, use it; we can safely assume nothing has changed,
    # as we provide a unique path per compiled hashi JS file.
if request.META.get("HTTP_IF_MODIFIED_SINCE"):
return HttpResponseNotModified()
CACHE_KEY = "HASHI_VIEW_RESPONSE_{}".format(get_hashi_filename())
cached_response = cache.get(CACHE_KEY)
if cached_response is not None:
return cached_response
# Removes Byte Order Mark
charset = "utf-8-sig"
basename = "content/{filename}".format(filename=get_hashi_filename())
filename = None
# First try finding the file using the storage class.
    # This is skipped in DEVELOPER_MODE, as files might be outdated
    # or may not even be on disk.
if not getattr(settings, "DEVELOPER_MODE", False):
filename = staticfiles_storage.path(basename)
else:
filename = find(basename)
with codecs.open(filename, "r", charset) as fd:
content = fd.read()
content_type = "text/html"
response = HttpResponse(
mark_safe(hashi_template.format(content)), content_type=content_type
)
response["Content-Length"] = len(response.content)
response["Last-Modified"] = http_date(time.time())
patch_response_headers(response, cache_timeout=YEAR_IN_SECONDS)
cache.set(CACHE_KEY, response, YEAR_IN_SECONDS)
return response
def get_hashi_view_response(environ):
request = WSGIRequest(environ)
response = _hashi_response_from_request(request)
add_security_headers(request, response)
return response
def hashi_view(environ, start_response):
response = get_hashi_view_response(environ)
return django_response_to_wsgi(response, environ, start_response)
def load_json_from_zipfile(zf, filepath):
with zf.open(filepath, "r") as f:
return json.load(f)
def recursive_h5p_dependencies(zf, data, prefix=""):
jsfiles = OrderedDict()
cssfiles = OrderedDict()
# load the dependencies, recursively, to extract their JS and CSS paths to include
for dep in data.get("preloadedDependencies", []):
packagepath = "{machineName}-{majorVersion}.{minorVersion}/".format(**dep)
librarypath = packagepath + "library.json"
content = load_json_from_zipfile(zf, librarypath)
newjs, newcss = recursive_h5p_dependencies(zf, content, packagepath)
cssfiles.update(newcss)
jsfiles.update(newjs)
# load the JS required for the current package
for js in data.get("preloadedJs", []):
path = prefix + js["path"]
jsfiles[path] = True
# load the CSS required for the current package
for css in data.get("preloadedCss", []):
path = prefix + css["path"]
cssfiles[path] = True
return jsfiles, cssfiles
INITIALIZE_HASHI_FROM_IFRAME = "if (window.parent && window.parent.hashi) {try {window.parent.hashi.initializeIframe(window);} catch (e) {}}"
def parse_html(content):
try:
document = html5lib.parse(content, namespaceHTMLElements=False)
if not document:
# Could not parse
return content
# Because html5lib parses like a browser, it will
# always create head and body tags if they are missing.
head = document.find("head")
# Use the makeelement method of the head tag here to ensure that we use the same
# Element class for both. Depending on the system and python version we are on,
# we may be using the C implementation or the pure python and a mismatch will cause an error.
script_tag = head.makeelement("script", {"type": "text/javascript"})
script_tag.text = INITIALIZE_HASHI_FROM_IFRAME
head.insert(0, script_tag)
# Currently, html5lib strips the doctype, but it's important for correct rendering, so check the original
# content for the doctype and, if found, prepend it to the content serialized by html5lib
doctype = None
try:
# Now parse the content as a dom tree instead, so that we capture
# any doctype node as a dom node that we can read.
tree_builder_dom = html5lib.treebuilders.getTreeBuilder("dom")
parser_dom = html5lib.HTMLParser(
tree_builder_dom, namespaceHTMLElements=False
)
tree = parser_dom.parse(content)
# By HTML Spec if doctype is included, it must be the first thing
# in the document, so it has to be the first child node of the document
doctype_node = tree.childNodes[0]
# Check that this node is in fact a doctype node
if doctype_node.nodeType == doctype_node.DOCUMENT_TYPE_NODE:
# render to a string by calling the toxml method
# toxml uses single quotes by default, replace with ""
doctype = doctype_node.toxml().replace("'", '"')
except Exception as e:
logger.warn("Error in HTML5 parsing to determine doctype {}".format(e))
html = html5lib.serialize(
document,
quote_attr_values="always",
omit_optional_tags=False,
minimize_boolean_attributes=False,
use_trailing_solidus=True,
space_before_trailing_solidus=False,
)
if doctype:
html = doctype + html
return html
except html5lib.html5parser.ParseError:
return content
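# Illustrative example (not part of the Kolibri source): given
#   '<!DOCTYPE html><html><head></head><body>hi</body></html>'
# parse_html returns the same document with a <script> tag containing
# INITIALIZE_HASHI_FROM_IFRAME inserted as the first child of <head>, and with the
# doctype preserved at the front of the serialized output.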
h5p_engine = Engine(
dirs=[os.path.join(os.path.dirname(__file__), "./templates/content")]
)
h5p_template = h5p_engine.get_template("h5p.html")
def get_h5p(zf, embedded_filepath):
file_size = 0
if not embedded_filepath:
# Get the h5p bootloader, and then run it through our hashi templating code.
# return the H5P bootloader code
try:
h5pdata = load_json_from_zipfile(zf, "h5p.json")
contentdata = load_json_from_zipfile(zf, "content/content.json")
except KeyError:
return HttpResponseNotFound("No valid h5p file was found at this location")
jsfiles, cssfiles = recursive_h5p_dependencies(zf, h5pdata)
jsfiles = jsfiles.keys()
cssfiles = cssfiles.keys()
path_includes_version = (
"true"
if "-" in [name for name in zf.namelist() if "/" in name][0]
else "false"
)
main_library_data = [
lib
for lib in h5pdata["preloadedDependencies"]
if lib["machineName"] == h5pdata["mainLibrary"]
][0]
bootstrap_content = h5p_template.render(
Context(
{
"jsfiles": jsfiles,
"cssfiles": cssfiles,
"content": json.dumps(
json.dumps(
contentdata, separators=(",", ":"), ensure_ascii=False
)
),
"library": "{machineName} {majorVersion}.{minorVersion}".format(
**main_library_data
),
"path_includes_version": path_includes_version,
}
),
)
content = parse_html(bootstrap_content)
content_type = "text/html"
response = HttpResponse(content, content_type=content_type)
file_size = len(response.content)
elif embedded_filepath.startswith("dist/"):
# return static H5P dist resources
# First try finding the file using the storage class.
        # This is skipped in DEVELOPER_MODE, as files might be outdated
basename = "assets/h5p-standalone-" + embedded_filepath
if not getattr(settings, "DEVELOPER_MODE", False):
path = staticfiles_storage.path(basename)
else:
path = find(basename)
if path is None:
return HttpResponseNotFound("{} not found".format(embedded_filepath))
# try to guess the MIME type of the embedded file being referenced
content_type = (
mimetypes.guess_type(embedded_filepath)[0] or "application/octet-stream"
)
response = FileResponse(open(path, "rb"), content_type=content_type)
file_size = os.stat(path).st_size
if file_size:
response["Content-Length"] = file_size
return response
def get_embedded_file(zipped_path, zipped_filename, embedded_filepath):
with zipfile.ZipFile(zipped_path) as zf:
# handle H5P files
if zipped_path.endswith("h5p") and (
not embedded_filepath or embedded_filepath.startswith("dist/")
):
return get_h5p(zf, embedded_filepath)
# if no path, or a directory, is being referenced, look for an index.html file
if not embedded_filepath or embedded_filepath.endswith("/"):
embedded_filepath += "index.html"
# get the details about the embedded file, and ensure it exists
try:
info = zf.getinfo(embedded_filepath)
except KeyError:
return HttpResponseNotFound(
'"{}" does not exist inside "{}"'.format(
embedded_filepath, zipped_filename
)
)
# file size
file_size = 0
# try to guess the MIME type of the embedded file being referenced
content_type = (
mimetypes.guess_type(embedded_filepath)[0] or "application/octet-stream"
)
if zipped_filename.endswith("zip") and (
embedded_filepath.endswith("htm") or embedded_filepath.endswith("html")
):
content = zf.open(info).read()
html = parse_html(content)
response = HttpResponse(html, content_type=content_type)
file_size = len(response.content)
else:
# generate a streaming response object, pulling data from within the zip file
response = FileResponse(zf.open(info), content_type=content_type)
file_size = info.file_size
# set the content-length header to the size of the embedded file
if file_size:
response["Content-Length"] = file_size
return response
path_regex = re.compile("/(?P<zipped_filename>[^/]+)/(?P<embedded_filepath>.*)")
def get_zipped_file_path(zipped_filename):
# calculate the local file path to the zip file
zipped_path = get_content_storage_file_path(zipped_filename)
# if the zipfile does not exist on disk, return a 404
if not os.path.exists(zipped_path):
raise InvalidStorageFilenameError()
return zipped_path
YEAR_IN_SECONDS = 60 * 60 * 24 * 365
def _zip_content_from_request(request):
if request.method not in allowed_methods:
return HttpResponseNotAllowed(allowed_methods)
match = path_regex.match(request.path_info)
if match is None:
return HttpResponseNotFound("Path not found")
if request.method == "OPTIONS":
return HttpResponse()
zipped_filename, embedded_filepath = match.groups()
try:
zipped_path = get_zipped_file_path(zipped_filename)
except InvalidStorageFilenameError:
return HttpResponseNotFound(
'"%(filename)s" is not a valid zip file' % {"filename": zipped_filename}
)
# Sometimes due to URL concatenation, we get URLs with double-slashes in them, like //path/to/file.html.
# the zipped_filename and embedded_filepath are defined by the regex capturing groups in the URL defined
# in urls.py in the same folder as this file:
# r"^zipcontent/(?P<zipped_filename>[^/]+)/(?P<embedded_filepath>.*)"
# If the embedded_filepath contains a leading slash because of an input URL like:
# /zipcontent/filename.zip//file.html
# then the embedded_filepath will have a value of "/file.html"
# we detect this leading slash in embedded_filepath and remove it.
if embedded_filepath.startswith("/"):
embedded_filepath = embedded_filepath[1:]
# Any double-slashes later in the URL will be present as double-slashes, such as:
# /zipcontent/filename.zip/path//file.html
# giving an embedded_filepath value of "path//file.html"
# Normalize the path by converting double-slashes occurring later in the path to a single slash.
# This would change our example embedded_filepath to "path/file.html" which will resolve properly.
embedded_filepath = embedded_filepath.replace("//", "/")
# if client has a cached version, use that (we can safely assume nothing has changed, due to MD5)
if request.META.get("HTTP_IF_MODIFIED_SINCE"):
return HttpResponseNotModified()
CACHE_KEY = "ZIPCONTENT_VIEW_RESPONSE_{}/{}".format(
zipped_filename, embedded_filepath
)
cached_response = cache.get(CACHE_KEY)
if cached_response is not None:
return cached_response
response = get_embedded_file(zipped_path, zipped_filename, embedded_filepath)
# ensure the browser knows not to try byte-range requests, as we don't support them here
response["Accept-Ranges"] = "none"
response["Last-Modified"] = http_date(time.time())
patch_response_headers(response, cache_timeout=YEAR_IN_SECONDS)
if not isinstance(response, StreamingHttpResponse):
cache.set(CACHE_KEY, response, YEAR_IN_SECONDS)
return response
def generate_zip_content_response(environ):
request = WSGIRequest(environ)
response = _zip_content_from_request(request)
add_security_headers(request, response)
return response
def zip_content_view(environ, start_response):
"""
Handles GET requests and serves a static file from within the zip file.
"""
response = generate_zip_content_response(environ)
return django_response_to_wsgi(response, environ, start_response)
def get_application():
path_map = {
get_hashi_base_path(): hashi_view,
get_zip_content_base_path(): zip_content_view,
}
return wsgi.PathInfoDispatcher(path_map)
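# Serving sketch (hypothetical host/port; assumes Django settings are already configured):
#   from cheroot.wsgi import Server
#   server = Server(('127.0.0.1', 8080), get_application())
#   try:
#       server.start()
#   except KeyboardInterrupt:
#       server.stop()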
|
import numpy as np
import mne
from warnings import warn
from sarna.utils import group, _invert_selection, _transfer_selection_to_raw
# from numba import jit
def dB(x):
return 10 * np.log10(x)
# - [ ] add detrending (1 + x + 1/x) or FOOOF cooperation
def transform_spectrum(spectrum, dB=False, normalize=False, detrend=False):
"""Common spectrum transformations.
Parameters
----------
spectrum : numpy array
channels x frequencies
"""
if dB:
spectrum = 10 * np.log10(spectrum)
if normalize:
if dB:
# move whole spectrum up, so that normalization
# does not return weird results
min_val = spectrum.min() - 0.01
spectrum -= min_val
spectrum /= spectrum.sum(axis=1)[:, np.newaxis]
return spectrum
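# Usage sketch (illustrative array shapes): convert a channels x frequencies spectrum
# to dB and normalize each channel so it sums to 1.
#   spectrum = np.random.rand(32, 100) + 1e-12
#   spectrum = transform_spectrum(spectrum, dB=True, normalize=True)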
# - [ ] consider moving to utils
# - [x] warn if sfreq not given and some values are float
# - [x] treat floats as time and int as samples
# - [ ] maybe a smarter API or a class...
def window_steps(window_length, window_step, signal_len, sfreq=None):
is_float = [isinstance(x, float)
for x in [window_length, window_step, signal_len]]
any_float = any(is_float)
if any_float and sfreq is None:
raise TypeError('Some variables are float but sfreq was not given.')
if any_float and sfreq is not None:
if is_float[0]:
window_length = int(np.round(window_length * sfreq))
if is_float[1]:
window_step = int(np.round(window_step * sfreq))
if is_float[2]:
signal_len = int(np.round(signal_len * sfreq))
num_steps = int(np.floor((signal_len - window_length) / window_step)) + 1
for w in range(num_steps):
yield slice(w * window_step, window_length + w * window_step)
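# Usage sketch (made-up values): 1-second windows advanced in 0.5-second steps over a
# 10-second signal sampled at 250 Hz; floats are interpreted as seconds because sfreq is given.
#   for slc in window_steps(1., 0.5, 10., sfreq=250):
#       segment = data[:, slc]  # `data` here stands for any channels x samples array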
def plot_topo_and_psd(inst, mean_psd, freqs, channels):
from matplotlib import gridspec
import matplotlib.pyplot as plt
from mne.viz.topomap import plot_psds_topomap
fig = plt.figure(figsize=(8, 3))
gs = gridspec.GridSpec(1, 2, width_ratios=[1, 2])
ax = [plt.subplot(g) for g in gs]
plot_psds_topomap(psds=mean_psd, freqs=freqs, pos=inst.info,
dB=False, axes=[ax[0]], bands=[(4., 8., 'Theta')],
normalize=False, cmap='inferno', show=False)
# highlight channels
circles = ax[0].findobj(plt.Circle)
for ch in channels:
circles[ch].set_color('r')
circles[ch].set_radius(0.025)
plot_freq = (freqs > 1.) & (freqs < 15.)
ax[1].plot(freqs[plot_freq], mean_psd[channels, :][:, plot_freq].T)
chan_avg = mean_psd[channels, :].mean(axis=0)
ax[1].plot(freqs[plot_freq], chan_avg[plot_freq], color='k', lw=2)
return fig
def _correct_overlap(periods):
'''
Parameters
----------
periods : np.ndarray
Numpy array of (n_periods, 3) shape. The columns are: epoch index,
within-epoch sample index of period start, within-epoch sample index of
period end.
Returns
-------
periods : np.ndarray
Corrected numpy array of (n_periods, 3) shape. The columns are: epoch
index, within-epoch sample index of period start, within-epoch sample
index of period end.
'''
n_rows = periods.shape[0]
current_period = periods[0, :].copy()
correct = list()
for idx in range(1, n_rows):
overlap = ((periods[idx, 0] == current_period[0])
and (periods[idx, 1] <= current_period[2]))
if overlap:
current_period[-1] = periods[idx, -1]
else:
correct.append(current_period)
current_period = periods[idx, :].copy()
correct.append(current_period)
periods = np.stack(correct, axis=0)
return periods
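# Worked example (a sketch): the first two periods below overlap within epoch 0 and are
# merged; the period in epoch 1 is left untouched.
#   _correct_overlap(np.array([[0, 10, 50], [0, 40, 90], [1, 5, 20]]))
#   -> array([[ 0, 10, 90],
#             [ 1,  5, 20]])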
def _find_high_amplitude_periods(epochs, threshold=2.5, min_period=0.1,
extend=None):
'''
Find segments of high amplitude in filtered, hilbert-transformed signal.
Parameters
----------
epochs : mne.Epochs
Epoched data. Must be filtered and hilbert-transformed.
threshold : float, str
Threshold defining high amplitude periods to select: if float, it
        is interpreted as a z value threshold; if str, as the percentage of
        the signal with the highest amplitude to select, in the form
        ``'xx%'`` (for example, ``'25%'`` selects the 25% of the signal with
        the highest amplitude). Defaults to ``2.5``.
min_period : float
Minimum length of high amplitude period in seconds.
Defaults to ``0.1``.
extend : float | None
Extend each period by this many seconds on both sides (before and
after). Defaults to ``None`` which does not extend the periods.
Returns
-------
periods : np.ndarray
Numpy array of (n_periods, 3) shape. The columns are: epoch index,
within-epoch sample index of period start, within-epoch sample index of
period end.
'''
from scipy.stats import zscore
# amplitude periods
n_epochs, n_channels, n_samples = epochs._data.shape
comp_data = epochs._data.transpose([1, 0, 2]).reshape((n_channels, -1))
# find segments with elevated amplitude
envelope = np.nanmean(comp_data, axis=0)
if isinstance(threshold, str) and '%' in threshold:
perc = 100 - float(threshold.replace('%', ''))
threshold = np.nanpercentile(envelope, perc)
else:
envelope = zscore(envelope, nan_policy='omit')
grp = group(envelope > threshold)
if len(grp) == 0:
raise ValueError('No high amplitude periods were found.')
# check if there are some segments that start at one epoch
# and end in another
# -> if so, they could be split, but we will ignore them for now
epoch_idx = np.floor(grp / n_samples)
epoch_diff = np.diff(epoch_idx, axis=0)
epochs_joint = epoch_diff > 0
if epochs_joint.any():
msg = ('{:d} high-amplitude segments will be ignored because'
' the developer was lazy.')
warn(msg.format(epochs_joint.sum()))
epoch_diff = epoch_diff[~epochs_joint]
segment_len = np.diff(grp, axis=1)
    good_length = segment_len[:, 0] * (1 / epochs.info['sfreq']) > min_period
grp = grp[good_length, :]
epoch_idx = np.floor(grp[:, [0]] / n_samples).astype('int')
grp -= epoch_idx * n_samples
if extend is not None:
        extend_samples = int(np.round(extend * epochs.info['sfreq']))
extend_samples = np.array([-extend_samples, extend_samples])
grp += extend_samples[np.newaxis, :]
# check for limits
msk1 = grp[:, 0] < 0
msk2 = grp[:, 1] >= n_samples
grp[msk1, 0] = 0
grp[msk2, 1] = n_samples - 1
periods = np.append(epoch_idx, grp, axis=1)
periods = periods if extend is None else _correct_overlap(periods)
return periods
def create_amplitude_annotations(raw, freq=None, events=None, event_id=None,
picks=None, tmin=-0.2, tmax=0.5,
threshold=2., min_period=0.1,
extend=None):
'''
Parameters
----------
raw : mne.Raw
Raw file to use.
events: numpy array | None
Mne events array of shape (n_events, 3). If None (default) `tmin` and
`tmax` are not calculated with respect to events but the whole time
range of the `raw` file.
event_id: list | numpy array
Event types (IDs) to use in defining segments for which psd is
computed. If None (default) and events were passed all event types are
used.
freq : list | numpy array
Frequency limits defining a range for which low amplitude periods will
be calculated.
picks : list
List of channels for which low amplitude periods will be calculated.
tmin : float
Start time before event.
tmax : float
End time after event.
threshold : float, str
Threshold defining high amplitude periods to select: if float, it
        is interpreted as a z value threshold; if str, as the percentage of
        the signal with the highest amplitude to select, in the form
        ``'xx%'`` (for example, ``'25%'`` selects the 25% of the signal with
        the highest amplitude). Defaults to ``2.``.
min_period : float
Minimum length of high amplitude period in seconds.
Defaults to ``0.1``.
extend : float | None
Extend each period by this many seconds on both sides (before and
after). Defaults to ``None`` which does not extend the periods.
Returns
-------
raw_annot : mne.Raw
Raw files with annotations.
'''
if freq is None:
raise TypeError('Frequencies have to be defined')
if events is None:
raise TypeError('Events have to be defined')
if event_id is None:
event_id = np.unique(events[:, 2]).tolist()
filt_raw = raw.copy().filter(freq[0], freq[1])
filt_raw_nan = filt_raw.copy()
filt_raw_nan._data = raw.get_data(reject_by_annotation='NaN')
epochs_nan = mne.Epochs(filt_raw_nan, events=events, event_id=event_id,
tmin=tmin, tmax=tmax, baseline=None, preload=True,
reject_by_annotation=False)
epochs = mne.Epochs(filt_raw, events=events, event_id=event_id, tmin=tmin,
tmax=tmax, baseline=None, preload=True,
reject_by_annotation=False)
del filt_raw_nan
filt_hilb_data = np.abs(epochs.copy().pick(picks).apply_hilbert()._data)
filt_hilb_data[np.isnan(epochs_nan._data)] = np.nan
epochs_nan._data = filt_hilb_data
hi_amp_epochs = _find_high_amplitude_periods(epochs_nan,
threshold=threshold,
min_period=min_period,
extend=extend)
hi_amp_raw = _transfer_selection_to_raw(epochs, raw,
selection=hi_amp_epochs)
amp_inv_samples = _invert_selection(raw, selection=hi_amp_raw)
sfreq = raw.info['sfreq']
amp_inv_annot_sec = amp_inv_samples / sfreq
n_segments = amp_inv_samples.shape[0]
amp_annot = mne.Annotations(amp_inv_annot_sec[:, 0],
amp_inv_annot_sec[:, 1],
['BAD_lowamp'] * n_segments)
return amp_annot
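# Minimal usage sketch (file name, channel picks and frequency band are hypothetical):
#   raw = mne.io.read_raw_fif('subject01_raw.fif', preload=True)
#   events = mne.find_events(raw)
#   amp_annot = create_amplitude_annotations(raw, freq=[8, 12], events=events,
#                                            picks=['O1', 'O2'], threshold='25%')
#   raw.set_annotations(raw.annotations + amp_annot)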
def grand_average_psd(psd_list):
'''Perform grand average on a list of PSD objects.
Parameters
----------
psd_list : list
List of ``borsar.freq.PSD`` objects.
Returns
-------
grand_psd : borsar.freq.PSD
Grand averaged spectrum.
'''
assert isinstance(psd_list, list)
# make sure that all psds have the same number and order of channels
# and the same frequencies
freq1 = psd_list[0].freqs
ch_names1 = psd_list[0].ch_names
n_channels = len(ch_names1)
for psd in psd_list:
assert len(psd.freqs) == len(freq1)
assert (psd.freqs == freq1).all()
assert len(psd.ch_names) == n_channels
assert all([ch_names1[idx] == psd.ch_names[idx]
for idx in range(n_channels)])
all_psds = list()
for this_psd in psd_list:
        # make sure the epochs are averaged
this_psd = this_psd.copy().average()
all_psds.append(this_psd.data)
    # stack the spectra into an array (subjects x channels x frequencies)
all_psds = np.stack(all_psds, axis=0)
    # average over the subjects dimension, leaving channels x frequencies
all_psds = all_psds.mean(axis=0)
    # copy one of the PSDs from the list
grand_psd = this_psd.copy()
    # and fill it with the subject-averaged values
grand_psd._data = all_psds
return grand_psd
|
#!/usr/bin/env python3
# test_get_exclusions.py
""" Test the glob/wildcard functions in xlattice/util.py """
import time
import unittest
from xlutil import get_exclusions, make_ex_re
from rnglib import SimpleRNG
class TestGetExclusions(unittest.TestCase):
""" Test the glob/wildcard functions in xlattice/util.py """
def setUp(self):
self.rng = SimpleRNG(time.time()) # XXX NOT USED
def tearDown(self):
pass
def do_test_expected_exclusions(self, exre):
""" Should always match. """
self.assertIsNotNone(exre.match('merkle.pyc'))
self.assertIsNotNone(exre.match('.svn'))
self.assertIsNotNone(exre.match('.foo.swp')) # vi backup file
self.assertIsNotNone(exre.match('junkEverywhere')) # note 'junk'
self.assertIsNotNone(exre.match('.merkle'))
def do_test_expected_matches(self, match_re, names):
""" Should always match. """
for name in names:
self.assertIsNotNone(match_re.match(name))
def do_test_expected_match_failures(self, match_re, names):
""" Should never match. """
for name in names:
m__ = match_re.match(name)
if m__:
print("WE HAVE A MATCH ON '%s'" % name)
# self.assertEquals( None, where )
def do_test_get_exclusions(self, proj_dir):
"""
This test assumes that there is a local .gitignore containing
at least '.merkle', '.svn*' '*.swp', and 'junk*'
"""
# convert .gitignore's contents to a list of parenthesized
# regular expressions
globs = get_exclusions(proj_dir, '.gitignore')
        self.assertTrue(len(globs) > 0)
exre = make_ex_re(globs)
        self.assertIsNotNone(exre)
self.do_test_expected_exclusions(exre)
def test_get_exclusions(self):
""" Exercise get_exclusions and related functions. """
self.do_test_get_exclusions('.')
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
"""best one.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1be2MmgS_huYhmgc0tKhXGWBddmri8ClC
"""
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.utils.np_utils import to_categorical
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
from keras import layers
import tensorflow as tf
from keras.callbacks import TensorBoard
import numpy as np
import cv2
import glob
import os
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from itertools import chain
import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import datetime, os
import numpy as np
import cv2
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D
from keras.optimizers import Adam,Adamax,Nadam,Adagrad,SGD,RMSprop,Ftrl,Adadelta
from keras.layers import MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
import pandas as pd
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import keras
from sklearn.model_selection import train_test_split
from keras.layers import Conv2D, MaxPool2D, AveragePooling2D, Input, BatchNormalization, MaxPooling2D, Activation, Flatten, Dense, Dropout,Convolution2D,GlobalAveragePooling2D
from keras.models import Model
from sklearn.metrics import classification_report
from imblearn.over_sampling import RandomOverSampler
from keras.preprocessing import image
import scipy
import os
import cv2
from google.colab import drive
import os
drive.mount('/content/gdrive/')
x_train=[]
for i in range (1,491):
strj ="/content/gdrive/MyDrive/GradProject/Train/ ("+str(i)+").JPG"
path = strj
image = plt.imread(path)
image = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
image = tf.image.resize(np.expand_dims(image,axis=-1),[400,400])
image = np.squeeze(image,axis=-1)
x_train.append(image)
x_test=[]
for i in range (1,365):
strj ="/content/gdrive/MyDrive/GradProject/Test/ ("+str(i)+").jpg"
path = strj
image = plt.imread(path)
image = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
image = tf.image.resize(np.expand_dims(image,axis=-1),[400,400])
image = np.squeeze(image,axis=-1)
x_test.append(image)
#plt.imshow(x_test[363],cmap="gray")
x_train = np.array(x_train,dtype=np.float32)
x_test = np.array(x_test,dtype=np.float32)
#print(x_test.shape)
import pandas as pd
import numpy as np
import pandas_datareader
#path = "/content/Train.csv"
#df_bonus = pd.read_csv(path).vaules
#df_bonus.head()
y_train = pd.read_csv("/content/gdrive/MyDrive/GradProject/Train2.csv").values
y_train = np.array(y_train,dtype=np.float32)
y_test = pd.read_csv("/content/gdrive/MyDrive/GradProject/Test.csv").values
y_test = np.array(y_test,dtype=np.float32)
num_classes =y_train[1].shape
x_train[1].shape
print(type(y_train))
print(y_train.shape)
print(num_classes)
# Reshape the images to (N, 400, 400, 1) for the CNN input
x_train = x_train.reshape((x_train.shape[0], 400, 400, 1)).astype('float32')
x_test = x_test.reshape((x_test.shape[0], 400, 400, 1)).astype('float32')
# Scale pixel values to the [0, 1] range
x_train = x_train / 255
x_test = x_test / 255
# Normalize the target values to [0, 1]
y_train = y_train/y_train.max()
y_test = y_test/y_test.max()
num_classes = y_train.shape[1]
print(x_train.shape); print(num_classes) ;print(y_train.shape) ;print(type(y_train))
#$1 opt=adam loss=Adam tmm 3 expression
# $2
def cnn_model():
# create model
model = Sequential()
model.add(Conv2D(32, (5, 5), input_shape=(400,400,1), activation='relu'))
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(5, 5), activation='relu'))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='relu'))
# Compile model
model.compile(loss='mse', optimizer=Adam(learning_rate=0.001), metrics=['accuracy'])
return model
model = cnn_model()
# os.listdir(checkpoint_dir)  # NOTE: checkpoint_dir is never defined in this notebook, so this call is disabled
model.fit(x_train,y_train,validation_data=(x_test,y_test),epochs=300,batch_size=32)
scores = model.evaluate(x_train, y_train, verbose=0)
print("CNN Error: %.2f%%" % (100-scores[1]*100))
model.save_weights('/content/gdrive/MyDrive/CNN-Model-weights/cnn8/weights')
# Save the weights
#model.save_weights('/content/gdrive/MyDrive/CNN-Model-weights/weights')
# Create a new model instance
model = cnn_model()
# Restore the weights
model.load_weights('/content/gdrive/MyDrive/CNN-Model-weights/cnn9/weights')
# Evaluate the model
#loss, acc = model.evaluate(x_test, y_test, verbose=2)
#print("Restored model, accuracy: {:5.2f}%".format(100 * acc))
model.save_weights('/content/gdrive/MyDrive/CNN-Model-weights/cnn6/weights')
x_t=[]
path ="/content/ (158).jpg"
image = plt.imread(path)
image = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
plt.imshow(image,cmap="gray")
image = tf.image.resize(np.expand_dims(image,axis=-1),[400,400])
image = np.squeeze(image,axis=-1)
x_t.append(image)
x_t=np.array(x_t,dtype=np.float32)
x_t = x_t.reshape((x_t.shape[0], 400, 400, 1)).astype('float32')
v=model.predict(x_t)
#model.predict(x_train)
print(v[0])
a_file = open("test.txt", "w")
for row in v:
np.savetxt(a_file, row)
a_file.close()
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Capnproto(AutotoolsPackage):
"""Cap'n Proto is an insanely fast data interchange
format and capability-based RPC system.
"""
homepage = "https://capnproto.org/"
url = "https://capnproto.org/capnproto-c++-0.7.0.tar.gz"
git = "https://github.com/capnproto/capnproto"
version('0.8.0', sha256='d1f40e47574c65700f0ec98bf66729378efabe3c72bc0cda795037498541c10d')
version('0.7.0', sha256='c9a4c0bd88123064d483ab46ecee777f14d933359e23bff6fb4f4dbd28b4cd41')
def configure_args(self):
return ['--without-openssl']
|
#
# This file is part of pyasn1-alt-modules software.
#
# Created by Russ Housley with assistance from asn1ate v.0.6.0.
# Modified by Russ Housley to update the S/MIME Capabilities map.
# Modified by Russ Housley to include the opentypemap manager.
#
# Copyright (c) 2019-2022, Vigil Security, LLC
# License: http://vigilsec.com/pyasn1-alt-modules-license.txt
#
# Use of the RSA-KEM Key Transport Algorithm in the CMS
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc5990.txt
#
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import univ
from pyasn1_alt_modules import rfc5280
from pyasn1_alt_modules import rfc5751
from pyasn1_alt_modules import opentypemap
algorithmIdentifierMap = opentypemap.get('algorithmIdentifierMap')
smimeCapabilityMap = opentypemap.get('smimeCapabilityMap')
MAX = float('inf')
# Imports from RFC 5280
AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
# Useful types and definitions
class NullParms(univ.Null):
pass
# Object identifier arcs
is18033_2 = univ.ObjectIdentifier((1, 0, 18033, 2))
nistAlgorithm = univ.ObjectIdentifier((2, 16, 840, 1, 101, 3, 4))
pkcs_1 = univ.ObjectIdentifier((1, 2, 840, 113549, 1, 1))
x9_44 = univ.ObjectIdentifier((1, 3, 133, 16, 840, 9, 44))
x9_44_components = x9_44 + (1,)
# Types for algorithm identifiers
class Camellia_KeyWrappingScheme(AlgorithmIdentifier):
pass
class DataEncapsulationMechanism(AlgorithmIdentifier):
pass
class KDF2_HashFunction(AlgorithmIdentifier):
pass
class KDF3_HashFunction(AlgorithmIdentifier):
pass
class KeyDerivationFunction(AlgorithmIdentifier):
pass
class KeyEncapsulationMechanism(AlgorithmIdentifier):
pass
class X9_SymmetricKeyWrappingScheme(AlgorithmIdentifier):
pass
# RSA-KEM Key Transport Algorithm
id_rsa_kem = univ.ObjectIdentifier((1, 2, 840, 113549, 1, 9, 16, 3, 14))
class GenericHybridParameters(univ.Sequence):
pass
GenericHybridParameters.componentType = namedtype.NamedTypes(
namedtype.NamedType('kem', KeyEncapsulationMechanism()),
namedtype.NamedType('dem', DataEncapsulationMechanism())
)
rsa_kem = AlgorithmIdentifier()
rsa_kem['algorithm'] = id_rsa_kem
rsa_kem['parameters'] = GenericHybridParameters()
# KEM-RSA Key Encapsulation Mechanism
id_kem_rsa = is18033_2 + (2, 4)
class KeyLength(univ.Integer):
pass
KeyLength.subtypeSpec = constraint.ValueRangeConstraint(1, MAX)
class RsaKemParameters(univ.Sequence):
pass
RsaKemParameters.componentType = namedtype.NamedTypes(
namedtype.NamedType('keyDerivationFunction', KeyDerivationFunction()),
namedtype.NamedType('keyLength', KeyLength())
)
kem_rsa = AlgorithmIdentifier()
kem_rsa['algorithm'] = id_kem_rsa
kem_rsa['parameters'] = RsaKemParameters()
# Key Derivation Functions
id_kdf_kdf2 = x9_44_components + (1,)
id_kdf_kdf3 = x9_44_components + (2,)
kdf2 = AlgorithmIdentifier()
kdf2['algorithm'] = id_kdf_kdf2
kdf2['parameters'] = KDF2_HashFunction()
kdf3 = AlgorithmIdentifier()
kdf3['algorithm'] = id_kdf_kdf3
kdf3['parameters'] = KDF3_HashFunction()
# Hash Functions
id_sha1 = univ.ObjectIdentifier((1, 3, 14, 3, 2, 26))
id_sha224 = univ.ObjectIdentifier((2, 16, 840, 1, 101, 3, 4, 2, 4))
id_sha256 = univ.ObjectIdentifier((2, 16, 840, 1, 101, 3, 4, 2, 1))
id_sha384 = univ.ObjectIdentifier((2, 16, 840, 1, 101, 3, 4, 2, 2))
id_sha512 = univ.ObjectIdentifier((2, 16, 840, 1, 101, 3, 4, 2, 3))
sha1 = AlgorithmIdentifier()
sha1['algorithm'] = id_sha1
sha1['parameters'] = univ.Null("")
sha224 = AlgorithmIdentifier()
sha224['algorithm'] = id_sha224
sha224['parameters'] = univ.Null("")
sha256 = AlgorithmIdentifier()
sha256['algorithm'] = id_sha256
sha256['parameters'] = univ.Null("")
sha384 = AlgorithmIdentifier()
sha384['algorithm'] = id_sha384
sha384['parameters'] = univ.Null("")
sha512 = AlgorithmIdentifier()
sha512['algorithm'] = id_sha512
sha512['parameters'] = univ.Null("")
# Symmetric Key-Wrapping Schemes
id_aes128_Wrap = nistAlgorithm + (1, 5)
id_aes192_Wrap = nistAlgorithm + (1, 25)
id_aes256_Wrap = nistAlgorithm + (1, 45)
id_alg_CMS3DESwrap = univ.ObjectIdentifier((1, 2, 840, 113549, 1, 9, 16, 3, 6))
id_camellia128_Wrap = univ.ObjectIdentifier((1, 2, 392, 200011, 61, 1, 1, 3, 2))
id_camellia192_Wrap = univ.ObjectIdentifier((1, 2, 392, 200011, 61, 1, 1, 3, 3))
id_camellia256_Wrap = univ.ObjectIdentifier((1, 2, 392, 200011, 61, 1, 1, 3, 4))
aes128_Wrap = AlgorithmIdentifier()
aes128_Wrap['algorithm'] = id_aes128_Wrap
# aes128_Wrap['parameters'] are absent
aes192_Wrap = AlgorithmIdentifier()
aes192_Wrap['algorithm'] = id_aes192_Wrap
# aes192_Wrap['parameters'] are absent
aes256_Wrap = AlgorithmIdentifier()
aes256_Wrap['algorithm'] = id_aes256_Wrap
# aes256_Wrap['parameters'] are absent
tdes_Wrap = AlgorithmIdentifier()
tdes_Wrap['algorithm'] = id_alg_CMS3DESwrap
tdes_Wrap['parameters'] = univ.Null("")
camellia128_Wrap = AlgorithmIdentifier()
camellia128_Wrap['algorithm'] = id_camellia128_Wrap
# camellia128_Wrap['parameters'] are absent
camellia192_Wrap = AlgorithmIdentifier()
camellia192_Wrap['algorithm'] = id_camellia192_Wrap
# camellia192_Wrap['parameters'] are absent
camellia256_Wrap = AlgorithmIdentifier()
camellia256_Wrap['algorithm'] = id_camellia256_Wrap
# camellia256_Wrap['parameters'] are absent
# Update the Algorithm Identifier map and the S/MIME Capabilities map.
# Note that the ones that must not have parameters are not added to the maps.
_algorithmIdentifierMapUpdate = {
id_rsa_kem: GenericHybridParameters(),
id_kem_rsa: RsaKemParameters(),
id_kdf_kdf2: KDF2_HashFunction(),
id_kdf_kdf3: KDF3_HashFunction(),
id_sha1: univ.Null(),
id_sha224: univ.Null(),
id_sha256: univ.Null(),
id_sha384: univ.Null(),
id_sha512: univ.Null(),
id_alg_CMS3DESwrap: univ.Null(),
}
algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
smimeCapabilityMap.update(_algorithmIdentifierMapUpdate)
|
##############################################################################
#
# Copyright (c) 2015-2018 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2015-2018 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
import esys.escriptcore.utestselect as unittest
from esys.escriptcore.testing import *
from esys.dudley import Rectangle, Brick
from test_splitworld import Test_SplitWorld, sw_testing
NE=4 # number elements, must be even
class Test_SplitOnDudley(Test_SplitWorld):
def setUp(self):
self.domainpars=[Rectangle, NE, NE]
def tearDown(self):
del self.domainpars
class Test_dudley_sw_2D(sw_testing):
def setUp(self):
self.domain_ctr=Rectangle
self.domain_vec=(6,6)
self.domain_dict={}
def tearDown(self):
del self.domain_ctr
del self.domain_vec
class Test_dudley_sw_3D(sw_testing):
def setUp(self):
self.domain_ctr=Brick
self.domain_vec=(6,6,6)
self.domain_dict={}
def tearDown(self):
del self.domain_ctr
del self.domain_vec
if __name__ == '__main__':
run_tests(__name__, exit_on_failure=True)
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import copy
from collections import ChainMap, OrderedDict, defaultdict
from typing import Any, Dict, List
from django.db.models import Max, Subquery
from django.utils.translation import ugettext as _
from rest_framework import exceptions, serializers
from apps.backend.plugin.tools import fetch_latest_config_templates
from apps.backend.subscription import errors
from apps.node_man import constants, models
class ConfigTemplateSerializer(serializers.Serializer):
name = serializers.CharField(required=True, label="配置文件名")
version = serializers.CharField(required=True, label="配置文件版本号")
is_main = serializers.BooleanField(default=False, label="是否主配置")
class PluginStepConfigSerializer(serializers.Serializer):
plugin_name = serializers.CharField(required=True, label="插件名称")
plugin_version = serializers.CharField(required=True, label="插件版本")
config_templates = ConfigTemplateSerializer(default=[], many=True, label="配置模板列表", allow_empty=True)
job_type = serializers.ChoiceField(required=False, allow_null=True, allow_blank=True, choices=constants.JOB_TUPLE)
class PolicyConfigTemplateSerializer(ConfigTemplateSerializer):
id = serializers.IntegerField(required=True, label="配置模板id")
class PolicyStepConfigSerializer(serializers.Serializer):
class PolicyPackageSerializer(serializers.Serializer):
        project = serializers.CharField(required=True, label="插件名称")  # TODO: remove once policy parameter passing is refactored
id = serializers.IntegerField(required=True, label="package id")
version = serializers.CharField(required=True, label="插件版本")
os = serializers.CharField(required=True, label="操作系统")
cpu_arch = serializers.CharField(required=True, label="cpu架构")
config_templates = serializers.ListField(
required=False, label="配置模板列表", child=PolicyConfigTemplateSerializer(), default=[]
)
job_type = serializers.ChoiceField(required=False, allow_null=True, allow_blank=True, choices=constants.JOB_TUPLE)
plugin_name = serializers.CharField(required=False, label="插件名称", default="")
details = serializers.ListField(required=True, min_length=1, child=PolicyPackageSerializer())
def validate(self, attrs):
        # TODO: later, drop `project` from the policy config and expose it at the top level of the dict as `plugin_name`
project_set = set([detail["project"] for detail in attrs["details"]])
if len(project_set) != 1:
raise serializers.ValidationError(f"package's project should be the same, but not {project_set}")
if not attrs["plugin_name"]:
attrs["plugin_name"] = attrs["details"][0]["project"]
return attrs
class PluginStepParamsSerializer(serializers.Serializer):
port_range = serializers.CharField(label="端口范围", required=False, allow_null=True, allow_blank=True)
context = serializers.DictField(default={}, label="配置文件渲染上下文")
keep_config = serializers.BooleanField(label="是否保留原有配置文件", allow_null=True, required=False, default=False)
no_restart = serializers.BooleanField(label="仅更新文件,不重启进程", allow_null=True, required=False, default=False)
class PolicyStepParamsSerializer(serializers.Serializer):
class PolicyStepParamsInfo(PluginStepParamsSerializer):
os = serializers.CharField(required=True, label="操作系统")
cpu_arch = serializers.CharField(required=True, label="cpu架构")
details = serializers.ListField(child=PolicyStepParamsInfo())
class PolicyStepAdapter:
def __init__(self, subscription_step: models.SubscriptionStep):
self.subscription_step = subscription_step
self.subscription = subscription_step.subscription
self.plugin_name: str = self.config["plugin_name"]
self.selected_pkg_infos: List[Dict] = self.config["details"]
self.except_os_key_pkg_map: Dict[str, Dict] = {
self.get_os_key(pkg["os"], pkg["cpu_arch"]): pkg for pkg in self.selected_pkg_infos
}
@property
def config(self) -> OrderedDict:
if hasattr(self, "_config") and self._config:
return self._config
policy_config = self.format2policy_config(self.subscription_step.config)
        # For config templates with the same name, keep only the latest version
for selected_pkg_info in policy_config["details"]:
selected_pkg_info["config_templates"] = fetch_latest_config_templates(selected_pkg_info["config_templates"])
setattr(self, "_config", self.validated_data(data=policy_config, serializer=PolicyStepConfigSerializer))
return self._config
@property
def params(self) -> OrderedDict:
if hasattr(self, "_params") and self._params:
return self._params
policy_params = self.format2policy_params(self.subscription_step.params)
setattr(self, "_params", self.validated_data(data=policy_params, serializer=PolicyStepParamsSerializer))
return self._params
@property
def plugin_desc(self) -> models.GsePluginDesc:
if hasattr(self, "_plugin_desc") and self._plugin_desc:
return self._plugin_desc
try:
plugin_desc = models.GsePluginDesc.objects.get(name=self.plugin_name)
except models.GsePluginDesc.DoesNotExist:
raise errors.PluginValidationError(msg="插件 [{name}] 信息不存在".format(name=self.plugin_name))
setattr(self, "_plugin_desc", plugin_desc)
return self._plugin_desc
@property
def config_inst_gby_os_key(self) -> Dict[str, List[models.PluginConfigInstance]]:
if hasattr(self, "_config_inst_gby_os_key"):
return self._config_inst_gby_os_key
config_inst_gby_os_key = {}
for os_key, config_templates in self.config_tmpl_obj_gby_os_key.items():
config_inst_gby_os_key[os_key] = [
config_template.create_instance(
data=self.get_matching_step_params(os_key=os_key).get("context", {}),
source_app_code=self.subscription.from_system,
)
for config_template in config_templates
]
setattr(self, "_config_inst_gby_os_key", config_inst_gby_os_key)
return self._config_inst_gby_os_key
@property
def config_tmpl_obj_gby_os_key(self) -> Dict[str, List[models.PluginConfigTemplate]]:
if hasattr(self, "_config_tmpl_obj_gby_os_key"):
return self._config_tmpl_obj_gby_os_key
all_config_tmpl_ids = []
os_key_gby_config_tmpl_id = defaultdict(list)
for pkg in self.selected_pkg_infos:
config_tmpl_ids = [config_tmpl["id"] for config_tmpl in pkg["config_templates"]]
for config_tmpl_id in config_tmpl_ids:
os_key_gby_config_tmpl_id[config_tmpl_id].append(self.get_os_key(pkg["os"], pkg["cpu_arch"]))
all_config_tmpl_ids.extend(config_tmpl_ids)
all_config_tmpl_ids = list(set(all_config_tmpl_ids))
config_tmpl_obj_gby_os_key: Dict[str, List[models.PluginConfigTemplate]] = defaultdict(list)
for config_template in models.PluginConfigTemplate.objects.filter(id__in=all_config_tmpl_ids):
for os_key in os_key_gby_config_tmpl_id[config_template.id]:
config_tmpl_obj_gby_os_key[os_key].append(config_template)
setattr(self, "_config_tmpl_obj_gby_os_key", config_tmpl_obj_gby_os_key)
return self._config_tmpl_obj_gby_os_key
@property
def os_key_pkg_map(self) -> Dict[str, models.Packages]:
if hasattr(self, "_os_key_pkg_map"):
return self._os_key_pkg_map
packages = models.Packages.objects.filter(id__in=[pkg["id"] for pkg in self.selected_pkg_infos])
os_cpu_pkg_map = {self.get_os_key(package.os, package.cpu_arch): package for package in packages}
if not os_cpu_pkg_map:
raise errors.PluginValidationError(
msg="插件 [{name}-{versions}] 不存在".format(
name=self.plugin_name, versions=set([pkg["version"] for pkg in self.selected_pkg_infos])
)
)
setattr(self, "_os_key_pkg_map", os_cpu_pkg_map)
return self._os_key_pkg_map
@property
def os_key_params_map(self) -> Dict[str, Dict[str, Any]]:
if hasattr(self, "_os_key_params_map"):
return self._os_key_params_map
policy_params_list = self.params["details"]
os_cpu_params_map = {
self.get_os_key(policy_params["os"], policy_params["cpu_arch"]): policy_params
for policy_params in policy_params_list
}
setattr(self, "_os_key_params_map", os_cpu_params_map)
return self._os_key_params_map
def format2policy_config(self, original_config: Dict) -> Dict:
try:
self.validated_data(data=original_config, serializer=PolicyStepConfigSerializer)
except exceptions.ValidationError:
pass
else:
return original_config
validated_config = self.validated_data(data=original_config, serializer=PluginStepConfigSerializer)
plugin_name = validated_config["plugin_name"]
plugin_version = validated_config["plugin_version"]
if plugin_version != "latest":
packages = models.Packages.objects.filter(
project=plugin_name,
version=plugin_version,
)
else:
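            # For "latest", pick the newest package (largest id) for each (os, cpu_arch) pair.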
newest = (
models.Packages.objects.filter(project=plugin_name).values("os", "cpu_arch").annotate(max_id=Max("id"))
)
packages = models.Packages.objects.filter(id__in=Subquery(newest.values("max_id")))
if not packages:
raise errors.PluginValidationError(
msg=_("插件包 [{name}-{version}] 不存在").format(name=plugin_name, version=plugin_version)
)
config_templates = []
for template in validated_config["config_templates"]:
is_main_template = template["is_main"]
if template["version"] != "latest":
config_template = (
models.PluginConfigTemplate.objects.filter(
plugin_version__in=[plugin_version, "*"],
name=template["name"],
plugin_name=plugin_name,
is_main=is_main_template,
)
.order_by("id")
.last()
)
else:
config_template = (
models.PluginConfigTemplate.objects.filter(
plugin_version__in=[packages[0].version, "*"],
name=template["name"],
plugin_name=plugin_name,
is_main=is_main_template,
)
.order_by("id")
.last()
)
if not config_template:
if is_main_template:
                    # The existence of main config templates is not validated, to stay compatible with legacy plugins that have none
continue
raise errors.PluginValidationError(
msg=_("配置模板 [{name}-{version}] 不存在").format(name=template["name"], version=template["version"])
)
config_templates.append(
{
"id": config_template.id,
"version": config_template.version,
"name": config_template.name,
"is_main": config_template.is_main,
}
)
policy_packages = []
for package in packages:
policy_packages.append(
{
"id": package.id,
"project": package.project,
"version": package.version,
"cpu_arch": package.cpu_arch,
"os": package.os,
"config_templates": config_templates,
}
)
policy_step_config = {**copy.deepcopy(validated_config), "details": policy_packages}
        # Carry over a few required keys from original_config
return self.fill_additional_keys(
target=policy_step_config, origin=original_config, additional_keys=["job_type"]
)
def format2policy_params(self, original_params: Dict) -> Dict:
try:
            # First, try to validate the params as policy-style params
self.validated_data(data=original_params, serializer=PolicyStepParamsSerializer)
except exceptions.ValidationError:
validated_params = self.validated_data(data=original_params, serializer=PluginStepParamsSerializer)
policy_params_list = [
{**copy.deepcopy(validated_params), "os": package["os"], "cpu_arch": package["cpu_arch"]}
for package in self.config["details"]
]
return {**copy.deepcopy(validated_params), "details": policy_params_list}
else:
            # Adapt the operating-system field of each params entry
for params in original_params["details"]:
params["os"] = params.get("os") or params["os_type"]
if self.subscription_step.id:
self.subscription_step.save()
return original_params
@staticmethod
def validated_data(data, serializer) -> OrderedDict:
data_serializer = serializer(data=data)
data_serializer.is_valid(raise_exception=True)
return data_serializer.validated_data
@staticmethod
def fill_additional_keys(target: Dict, origin: Dict, additional_keys: List[str]) -> Dict:
additional_map = {
additional_key: origin[additional_key] for additional_key in additional_keys if additional_key in origin
}
        # When dicts share a key, the dict appearing earlier in the ChainMap overrides the later ones
return dict(ChainMap(target, additional_map))
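    # Illustrative example (values assumed, not from the original code):
    #   fill_additional_keys(target={"a": 1}, origin={"a": 2, "job_type": "MAIN_INSTALL_PLUGIN"}, additional_keys=["job_type"])
    #   returns {"a": 1, "job_type": "MAIN_INSTALL_PLUGIN"}, since target wins on shared keys.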
@staticmethod
def get_os_key(os_type: str, cpu_arch: str) -> str:
        # Default to linux-x86_64 to handle hosts synced from CMDB without OS or CPU architecture info
os_type = os_type or constants.OsType.LINUX
cpu_arch = cpu_arch or constants.CpuType.x86_64
return f"{os_type.lower()}-{cpu_arch}"
def get_matching_package_dict(self, os_type: str = None, cpu_arch: str = None, os_key: str = None) -> Dict:
os_key = os_key or self.get_os_key(os_type, cpu_arch)
package_dict = self.except_os_key_pkg_map.get(os_key)
if not package_dict:
raise errors.PluginValidationError(
_("订阅安装插件[{plugin_name}] 未选择支持 [{os_key}] 的插件包").format(plugin_name=self.plugin_name, os_key=os_key)
)
return package_dict
def get_matching_step_params(self, os_type: str = None, cpu_arch: str = None, os_key: str = None) -> Dict[str, Any]:
        # TODO: For ordinary subscriptions a single context is rendered into multiple packages, so a Windows host
        #  may end up using a Linux package; parameter lookup should also be made compatible with this case.
if os_key:
return self.os_key_params_map.get(os_key)
return self.os_key_params_map.get(self.get_os_key(os_type, cpu_arch), {})
def get_matching_package_obj(self, os_type: str, cpu_arch: str) -> models.Packages:
try:
return self.os_key_pkg_map[self.get_os_key(os_type, cpu_arch)]
except KeyError:
msg = _("插件 [{name}] 不支持 系统:{os_type}-架构:{cpu_arch}-版本:{plugin_version}").format(
name=self.plugin_name,
os_type=os_type,
cpu_arch=cpu_arch,
plugin_version=self.get_matching_package_dict(os_type, cpu_arch)["version"],
)
raise errors.PackageNotExists(msg)
def get_matching_config_tmpl_objs(self, os_type: str, cpu_arch: str) -> List[models.PluginConfigTemplate]:
return self.config_tmpl_obj_gby_os_key.get(self.get_os_key(os_type, cpu_arch), [])
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from cms.sitemaps import CMSSitemap
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.static import serve
admin.autodiscover()
urlpatterns = [
url(r'^sitemap\.xml$', sitemap,
{'sitemaps': {'cmspages': CMSSitemap}}),
]
urlpatterns += (
url(r'^admin/', include(admin.site.urls)), # NOQA
url(r'^', include('cms.urls')),
)
# This is only needed when using the runserver command (development only).
if settings.DEBUG:
urlpatterns = [
url(r'^media/(?P<path>.*)$', serve,
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
] + staticfiles_urlpatterns() + urlpatterns
|
#!/usr/bin/env python
# Globally average the variance of CMIP5/AMIP mean-diurnal-cycle precipitation from a given month and range of years.
# Square the standard deviations before averaging over area, and then take the square root. This is the correct order of
# operations to get the fraction of total variance, per Covey and Gehne
# 2016 (https://e-reports-ext.llnl.gov/pdf/823104.pdf).
# This has the PMP Parser "wrapped" around it, so it's executed with both input and output parameters specified in the
# Unix command line.
# Charles Doutriaux September 2017
# Curt Covey (from ./old_meandiurnalcycWrappedInOut.py) July 2017
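# A minimal sketch of that order of operations (illustrative only; the use of NumPy here is an
# assumption, not part of this script): with a 2-D field `std` of per-gridpoint standard deviations
# and matching area weights `w`,
#     rms = numpy.sqrt(numpy.average(std ** 2, weights=w))
# i.e. square first, average over area, then take the square root.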
from __future__ import print_function
import collections
import glob
import json
import multiprocessing as mp
import os
import cdms2
import cdp
import cdutil
import genutil
import pkg_resources
import pcmdi_metrics
from pcmdi_metrics.diurnal.common import (
INPUT,
P,
monthname_d,
populateStringConstructor,
)
def main():
def compute(param):
template = populateStringConstructor(args.filename_template, args)
template.variable = param.varname
template.month = param.monthname
fnameRoot = param.fileName
reverted = template.reverse(os.path.basename(fnameRoot))
model = reverted["model"]
print("Specifying latitude / longitude domain of interest ...")
datanameID = "diurnalmean" # Short ID name of output data
latrange = (param.args.lat1, param.args.lat2)
lonrange = (param.args.lon1, param.args.lon2)
region = cdutil.region.domain(latitude=latrange, longitude=lonrange)
if param.args.region_name == "":
region_name = "{:g}_{:g}&{:g}_{:g}".format(*(latrange + lonrange))
else:
region_name = param.args.region_name
print("Reading %s ..." % fnameRoot)
try:
f = cdms2.open(fnameRoot)
x = f(datanameID, region)
units = x.units
print(" Shape =", x.shape)
print("Finding standard deviation over first dimension (time of day) ...")
x = genutil.statistics.std(x)
print(" Shape =", x.shape)
print("Finding r.m.s. average over 2nd-3rd dimensions (area) ...")
x = x * x
x = cdutil.averager(x, axis="xy")
x = cdms2.MV2.sqrt(x)
print(
"For %8s in %s, average variance of hourly values = (%5.2f %s)^2"
% (model, monthname, x, units)
)
f.close()
except Exception as err:
print("Failed model %s with error" % (err))
x = 1.0e20
return model, region, {region_name: float(x)}
P.add_argument(
"-j",
"--outnamejson",
type=str,
dest="outnamejson",
default="pr_%(month)_%(firstyear)-%(lastyear)_std_of_meandiurnalcyc.json",
help="Output name for jsons",
)
P.add_argument("--lat1", type=float, default=-50.0, help="First latitude")
P.add_argument("--lat2", type=float, default=50.0, help="Last latitude")
P.add_argument("--lon1", type=float, default=0.0, help="First longitude")
P.add_argument("--lon2", type=float, default=360.0, help="Last longitude")
P.add_argument(
"--region_name",
type=str,
default="TRMM",
help="name for the region of interest",
)
P.add_argument(
"-t",
"--filename_template",
default="pr_%(model)_%(month)_%(firstyear)-%(lastyear)_diurnal_avg.nc",
)
P.add_argument("--model", default="*")
P.add_argument(
"--cmec",
dest="cmec",
action="store_true",
default=False,
help="Use to save metrics in CMEC JSON format",
)
P.add_argument(
"--no_cmec",
dest="cmec",
action="store_false",
default=False,
help="Use to disable saving metrics in CMEC JSON format",
)
args = P.get_parameter()
month = args.month
monthname = monthname_d[month]
startyear = args.firstyear # noqa: F841
finalyear = args.lastyear # noqa: F841
cmec = args.cmec
template = populateStringConstructor(args.filename_template, args)
template.month = monthname
print("TEMPLATE NAME:", template())
print("Specifying latitude / longitude domain of interest ...")
# TRMM (observed) domain:
latrange = (args.lat1, args.lat2)
lonrange = (args.lon1, args.lon2)
region = cdutil.region.domain(latitude=latrange, longitude=lonrange)
# Amazon basin:
# latrange = (-15.0, -5.0)
# lonrange = (285.0, 295.0)
print("Preparing to write output to JSON file ...")
if not os.path.exists(args.results_dir):
os.makedirs(args.results_dir)
jsonFile = populateStringConstructor(args.outnamejson, args)
jsonFile.month = monthname
jsonname = os.path.join(os.path.abspath(args.results_dir), jsonFile())
if not os.path.exists(jsonname) or args.append is False:
print("Initializing dictionary of statistical results ...")
stats_dic = {}
metrics_dictionary = collections.OrderedDict()
else:
with open(jsonname) as f:
metrics_dictionary = json.load(f)
print("LOADE WITH KEYS:", list(metrics_dictionary.keys()))
stats_dic = metrics_dictionary["RESULTS"]
OUT = pcmdi_metrics.io.base.Base(os.path.abspath(args.results_dir), jsonFile())
try:
egg_pth = pkg_resources.resource_filename(
pkg_resources.Requirement.parse("pcmdi_metrics"), "share/pmp"
)
except Exception:
        # Python 2 seems to fail when run from the home directory of the source?
egg_pth = os.path.join(os.getcwd(), "share", "pmp")
disclaimer = open(os.path.join(egg_pth, "disclaimer.txt")).read()
metrics_dictionary["DISCLAIMER"] = disclaimer
metrics_dictionary["REFERENCE"] = (
"The statistics in this file are based on Trenberth, Zhang & Gehne, "
"J Hydromet. 2017"
)
files = glob.glob(os.path.join(args.modpath, template()))
print(files)
params = [INPUT(args, name, template) for name in files]
print("PARAMS:", params)
results = cdp.cdp_run.multiprocess(compute, params, num_workers=args.num_workers)
for r in results:
m, region, res = r
        if m not in stats_dic:
stats_dic[m] = res
else:
stats_dic[m].update(res)
print("Writing output to JSON file ...")
metrics_dictionary["RESULTS"] = stats_dic
print("KEYS AT END:", list(metrics_dictionary.keys()))
rgmsk = metrics_dictionary.get("RegionalMasking", {})
print("REG MASK:", rgmsk)
nm = list(res.keys())[0]
region.id = nm
rgmsk[nm] = {"id": nm, "domain": region}
metrics_dictionary["RegionalMasking"] = rgmsk
OUT.write(
metrics_dictionary,
json_structure=["model", "domain"],
indent=4,
separators=(",", ": "),
)
if cmec:
print("Writing cmec file")
OUT.write_cmec(indent=4, separators=(",", ": "))
print("done")
# Good practice to place contents of script under this check
if __name__ == "__main__":
# Related to script being installed as executable
mp.freeze_support()
main()
|
import copy
import itertools
import multiprocessing
import pickle
import threading
import time
from unittest.mock import Mock # NOQA
from unittest.mock import patch
import uuid
import _pytest.capture
import joblib
import pandas as pd
import pytest
import optuna
from optuna.testing.storage import StorageSupplier
from optuna import type_checking
if type_checking.TYPE_CHECKING:
from typing import Any # NOQA
from typing import Callable # NOQA
from typing import Dict # NOQA
from typing import Optional # NOQA
from typing import Tuple # NOQA
from _pytest.recwarn import WarningsRecorder # NOQA
CallbackFuncType = Callable[[optuna.study.Study, optuna.trial.FrozenTrial], None]
# TODO(ytsmiling) Add tests for multi-worker settings.
STORAGE_MODES = [
"inmemory",
"sqlite",
"redis",
]
def func(trial, x_max=1.0):
# type: (optuna.trial.Trial, float) -> float
x = trial.suggest_uniform("x", -x_max, x_max)
y = trial.suggest_loguniform("y", 20, 30)
z = trial.suggest_categorical("z", (-1.0, 1.0))
assert isinstance(z, float)
return (x - 2) ** 2 + (y - 25) ** 2 + z
class Func(object):
def __init__(self, sleep_sec=None):
# type: (Optional[float]) -> None
self.n_calls = 0
self.sleep_sec = sleep_sec
self.lock = threading.Lock()
self.x_max = 10.0
def __call__(self, trial):
# type: (optuna.trial.Trial) -> float
with self.lock:
self.n_calls += 1
x_max = self.x_max
self.x_max *= 0.9
# Sleep for testing parallelism
if self.sleep_sec is not None:
time.sleep(self.sleep_sec)
value = func(trial, x_max)
check_params(trial.params)
return value
def check_params(params):
# type: (Dict[str, Any]) -> None
assert sorted(params.keys()) == ["x", "y", "z"]
def check_value(value):
# type: (Optional[float]) -> None
assert isinstance(value, float)
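    # Upper bound follows from func: |x| <= x_max <= 10 gives (x - 2)^2 <= 12^2, y in [20, 30]
    # gives (y - 25)^2 <= 5^2, and z in {-1.0, 1.0} adds at most 1; the minimum is 0 + 0 - 1 = -1.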
assert -1.0 <= value <= 12.0 ** 2 + 5.0 ** 2 + 1.0
def check_frozen_trial(frozen_trial):
# type: (optuna.trial.FrozenTrial) -> None
if frozen_trial.state == optuna.trial.TrialState.COMPLETE:
check_params(frozen_trial.params)
check_value(frozen_trial.value)
def check_study(study):
# type: (optuna.Study) -> None
for trial in study.trials:
check_frozen_trial(trial)
complete_trials = [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE]
if len(complete_trials) == 0:
with pytest.raises(ValueError):
study.best_params
with pytest.raises(ValueError):
study.best_value
with pytest.raises(ValueError):
study.best_trial
else:
check_params(study.best_params)
check_value(study.best_value)
check_frozen_trial(study.best_trial)
def test_optimize_trivial_in_memory_new():
# type: () -> None
study = optuna.create_study()
study.optimize(func, n_trials=10)
check_study(study)
def test_optimize_trivial_in_memory_resume():
# type: () -> None
study = optuna.create_study()
study.optimize(func, n_trials=10)
study.optimize(func, n_trials=10)
check_study(study)
def test_optimize_trivial_rdb_resume_study():
# type: () -> None
study = optuna.create_study("sqlite:///:memory:")
study.optimize(func, n_trials=10)
check_study(study)
def test_optimize_with_direction():
# type: () -> None
study = optuna.create_study(direction="minimize")
study.optimize(func, n_trials=10)
assert study.direction == optuna.study.StudyDirection.MINIMIZE
check_study(study)
study = optuna.create_study(direction="maximize")
study.optimize(func, n_trials=10)
assert study.direction == optuna.study.StudyDirection.MAXIMIZE
check_study(study)
with pytest.raises(ValueError):
optuna.create_study(direction="test")
@pytest.mark.parametrize(
"n_trials, n_jobs, storage_mode",
itertools.product(
        (0, 1, 20),  # n_trials
        (1, 2, -1),  # n_jobs
        STORAGE_MODES,  # storage_mode
),
)
def test_optimize_parallel(n_trials, n_jobs, storage_mode):
# type: (int, int, str)-> None
f = Func()
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.optimize(f, n_trials=n_trials, n_jobs=n_jobs)
assert f.n_calls == len(study.trials) == n_trials
check_study(study)
@pytest.mark.parametrize(
"n_trials, n_jobs, storage_mode",
itertools.product(
        (0, 1, 20, None),  # n_trials
        (1, 2, -1),  # n_jobs
        STORAGE_MODES,  # storage_mode
),
)
def test_optimize_parallel_timeout(n_trials, n_jobs, storage_mode):
# type: (int, int, str) -> None
sleep_sec = 0.1
timeout_sec = 1.0
f = Func(sleep_sec=sleep_sec)
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.optimize(f, n_trials=n_trials, n_jobs=n_jobs, timeout=timeout_sec)
assert f.n_calls == len(study.trials)
if n_trials is not None:
assert f.n_calls <= n_trials
# A thread can process at most (timeout_sec / sleep_sec + 1) trials.
n_jobs_actual = n_jobs if n_jobs != -1 else multiprocessing.cpu_count()
max_calls = (timeout_sec / sleep_sec + 1) * n_jobs_actual
assert f.n_calls <= max_calls
check_study(study)
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_optimize_with_catch(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
def func_value_error(_):
# type: (optuna.trial.Trial) -> float
raise ValueError
# Test default exceptions.
with pytest.raises(ValueError):
study.optimize(func_value_error, n_trials=20)
assert len(study.trials) == 1
assert all(trial.state == optuna.trial.TrialState.FAIL for trial in study.trials)
# Test acceptable exception.
study.optimize(func_value_error, n_trials=20, catch=(ValueError,))
assert len(study.trials) == 21
assert all(trial.state == optuna.trial.TrialState.FAIL for trial in study.trials)
# Test trial with unacceptable exception.
with pytest.raises(ValueError):
study.optimize(func_value_error, n_trials=20, catch=(ArithmeticError,))
assert len(study.trials) == 22
assert all(trial.state == optuna.trial.TrialState.FAIL for trial in study.trials)
@pytest.mark.parametrize("catch", [[], [Exception], None, 1])
def test_optimize_with_catch_invalid_type(catch):
# type: (Any) -> None
study = optuna.create_study()
def func_value_error(_):
# type: (optuna.trial.Trial) -> float
raise ValueError
with pytest.raises(TypeError):
study.optimize(func_value_error, n_trials=20, catch=catch)
def test_optimize_parallel_storage_warning(recwarn):
# type: (WarningsRecorder) -> None
study = optuna.create_study()
# Default joblib backend is threading and no warnings will be captured.
study.optimize(lambda t: t.suggest_uniform("x", 0, 1), n_trials=20, n_jobs=2)
assert len(recwarn) == 0
with pytest.warns(UserWarning):
with joblib.parallel_backend("loky"):
study.optimize(lambda t: t.suggest_uniform("x", 0, 1), n_trials=20, n_jobs=2)
@pytest.mark.parametrize(
"n_jobs, storage_mode", itertools.product((2, -1), STORAGE_MODES,), # n_jobs # storage_mode
)
def test_optimize_with_reseeding(n_jobs, storage_mode):
# type: (int, str)-> None
f = Func()
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
sampler = study.sampler
with patch.object(sampler, "reseed_rng", wraps=sampler.reseed_rng) as mock_object:
study.optimize(f, n_trials=1, n_jobs=2)
assert mock_object.call_count == 1
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_study_set_and_get_user_attrs(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.set_user_attr("dataset", "MNIST")
assert study.user_attrs["dataset"] == "MNIST"
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_study_set_and_get_system_attrs(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.set_system_attr("system_message", "test")
assert study.system_attrs["system_message"] == "test"
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_trial_set_and_get_user_attrs(storage_mode):
# type: (str) -> None
def f(trial):
# type: (optuna.trial.Trial) -> float
trial.set_user_attr("train_accuracy", 1)
assert trial.user_attrs["train_accuracy"] == 1
return 0.0
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.optimize(f, n_trials=1)
frozen_trial = study.trials[0]
assert frozen_trial.user_attrs["train_accuracy"] == 1
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_trial_set_and_get_system_attrs(storage_mode):
# type: (str) -> None
def f(trial):
# type: (optuna.trial.Trial) -> float
trial.set_system_attr("system_message", "test")
assert trial.system_attrs["system_message"] == "test"
return 0.0
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.optimize(f, n_trials=1)
frozen_trial = study.trials[0]
assert frozen_trial.system_attrs["system_message"] == "test"
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_get_all_study_summaries(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.optimize(Func(), n_trials=5)
summaries = optuna.get_all_study_summaries(study._storage)
summary = [s for s in summaries if s._study_id == study._study_id][0]
assert summary.study_name == study.study_name
assert summary.n_trials == 5
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_get_all_study_summaries_with_no_trials(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
summaries = optuna.get_all_study_summaries(study._storage)
summary = [s for s in summaries if s._study_id == study._study_id][0]
assert summary.study_name == study.study_name
assert summary.n_trials == 0
assert summary.datetime_start is None
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_run_trial(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
# Test trial without exception.
study._run_trial(func, catch=(Exception,), gc_after_trial=True)
check_study(study)
# Test trial with acceptable exception.
def func_value_error(_):
# type: (optuna.trial.Trial) -> float
raise ValueError
trial = study._run_trial(func_value_error, catch=(ValueError,), gc_after_trial=True)
frozen_trial = study._storage.get_trial(trial._trial_id)
expected_message = "Trial 1 failed because of the following error: ValueError()"
assert frozen_trial.state == optuna.trial.TrialState.FAIL
assert frozen_trial.system_attrs["fail_reason"] == expected_message
# Test trial with unacceptable exception.
with pytest.raises(ValueError):
study._run_trial(func_value_error, catch=(ArithmeticError,), gc_after_trial=True)
# Test trial with invalid objective value: None
def func_none(_):
# type: (optuna.trial.Trial) -> float
return None # type: ignore
trial = study._run_trial(func_none, catch=(Exception,), gc_after_trial=True)
frozen_trial = study._storage.get_trial(trial._trial_id)
expected_message = (
"Trial 3 failed, because the returned "
"value from the objective function cannot be cast to float. "
"Returned value is: None"
)
assert frozen_trial.state == optuna.trial.TrialState.FAIL
assert frozen_trial.system_attrs["fail_reason"] == expected_message
# Test trial with invalid objective value: nan
def func_nan(_):
# type: (optuna.trial.Trial) -> float
return float("nan")
trial = study._run_trial(func_nan, catch=(Exception,), gc_after_trial=True)
frozen_trial = study._storage.get_trial(trial._trial_id)
expected_message = "Trial 4 failed, because the objective function returned nan."
assert frozen_trial.state == optuna.trial.TrialState.FAIL
assert frozen_trial.system_attrs["fail_reason"] == expected_message
# TODO(Yanase): Remove this test function after removing `optuna.structs.TrialPruned`.
@pytest.mark.parametrize(
"trial_pruned_class",
[optuna.TrialPruned, optuna.exceptions.TrialPruned, optuna.structs.TrialPruned],
)
@pytest.mark.parametrize("report_value", [None, 1.2])
def test_run_trial_with_trial_pruned(trial_pruned_class, report_value):
# type: (Callable[[], optuna.exceptions.TrialPruned], Optional[float]) -> None
study = optuna.create_study()
def func_with_trial_pruned(trial):
# type: (optuna.trial.Trial) -> float
if report_value is not None:
trial.report(report_value, 1)
raise trial_pruned_class()
trial = study._run_trial(func_with_trial_pruned, catch=(), gc_after_trial=True)
frozen_trial = study._storage.get_trial(trial._trial_id)
assert frozen_trial.value == report_value
assert frozen_trial.state == optuna.trial.TrialState.PRUNED
def test_study_pickle():
# type: () -> None
study_1 = optuna.create_study()
study_1.optimize(func, n_trials=10)
check_study(study_1)
assert len(study_1.trials) == 10
dumped_bytes = pickle.dumps(study_1)
study_2 = pickle.loads(dumped_bytes)
check_study(study_2)
assert len(study_2.trials) == 10
study_2.optimize(func, n_trials=10)
check_study(study_2)
assert len(study_2.trials) == 20
def test_study_trials_dataframe_with_no_trials():
# type: () -> None
study_with_no_trials = optuna.create_study()
trials_df = study_with_no_trials.trials_dataframe()
assert trials_df.empty
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
@pytest.mark.parametrize(
"attrs",
[
(
"number",
"value",
"datetime_start",
"datetime_complete",
"params",
"user_attrs",
"system_attrs",
"state",
),
(
"number",
"value",
"datetime_start",
"datetime_complete",
"duration",
"params",
"user_attrs",
"system_attrs",
"state",
"intermediate_values",
"_trial_id",
"distributions",
),
],
)
@pytest.mark.parametrize("multi_index", [True, False])
def test_trials_dataframe(storage_mode, attrs, multi_index):
# type: (str, Tuple[str, ...], bool) -> None
def f(trial):
# type: (optuna.trial.Trial) -> float
x = trial.suggest_int("x", 1, 1)
y = trial.suggest_categorical("y", (2.5,))
assert isinstance(y, float)
trial.set_user_attr("train_loss", 3)
trial.set_system_attr("foo", "bar")
value = x + y # 3.5
        # Test reported intermediate values, although in practice this value is not an "intermediate" one.
trial.report(value, step=0)
return value
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.optimize(f, n_trials=3)
df = study.trials_dataframe(attrs=attrs, multi_index=multi_index)
# Change index to access rows via trial number.
if multi_index:
df.set_index(("number", ""), inplace=True, drop=False)
else:
df.set_index("number", inplace=True, drop=False)
assert len(df) == 3
# Number columns are as follows (total of 13):
# non-nested: 6 (number, value, state, datetime_start, datetime_complete, duration)
# params: 2
# distributions: 2
# user_attrs: 1
# system_attrs: 1
# intermediate_values: 1
expected_n_columns = len(attrs)
if "params" in attrs:
expected_n_columns += 1
if "distributions" in attrs:
expected_n_columns += 1
assert len(df.columns) == expected_n_columns
for i in range(3):
assert df.number[i] == i
assert df.state[i] == "COMPLETE"
assert df.value[i] == 3.5
assert isinstance(df.datetime_start[i], pd.Timestamp)
assert isinstance(df.datetime_complete[i], pd.Timestamp)
if multi_index:
if "distributions" in attrs:
assert ("distributions", "x") in df.columns
assert ("distributions", "y") in df.columns
if "_trial_id" in attrs:
assert ("trial_id", "") in df.columns # trial_id depends on other tests.
if "duration" in attrs:
assert ("duration", "") in df.columns
assert df.params.x[i] == 1
assert df.params.y[i] == 2.5
assert df.user_attrs.train_loss[i] == 3
assert df.system_attrs.foo[i] == "bar"
else:
if "distributions" in attrs:
assert "distributions_x" in df.columns
assert "distributions_y" in df.columns
if "_trial_id" in attrs:
assert "trial_id" in df.columns # trial_id depends on other tests.
if "duration" in attrs:
assert "duration" in df.columns
assert df.params_x[i] == 1
assert df.params_y[i] == 2.5
assert df.user_attrs_train_loss[i] == 3
assert df.system_attrs_foo[i] == "bar"
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_trials_dataframe_with_failure(storage_mode):
# type: (str) -> None
def f(trial):
# type: (optuna.trial.Trial) -> float
x = trial.suggest_int("x", 1, 1)
y = trial.suggest_categorical("y", (2.5,))
trial.set_user_attr("train_loss", 3)
raise ValueError()
return x + y # 3.5
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.optimize(f, n_trials=3, catch=(ValueError,))
df = study.trials_dataframe()
# Change index to access rows via trial number.
df.set_index("number", inplace=True, drop=False)
assert len(df) == 3
# non-nested: 6, params: 2, user_attrs: 1 system_attrs: 1
assert len(df.columns) == 10
for i in range(3):
assert df.number[i] == i
assert df.state[i] == "FAIL"
assert df.value[i] is None
assert isinstance(df.datetime_start[i], pd.Timestamp)
assert isinstance(df.datetime_complete[i], pd.Timestamp)
assert isinstance(df.duration[i], pd.Timedelta)
assert df.params_x[i] == 1
assert df.params_y[i] == 2.5
assert df.user_attrs_train_loss[i] == 3
assert "system_attrs_fail_reason" in df.columns
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_create_study(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
# Test creating a new study.
study = optuna.create_study(storage=storage, load_if_exists=False)
# Test `load_if_exists=True` with existing study.
optuna.create_study(study_name=study.study_name, storage=storage, load_if_exists=True)
with pytest.raises(optuna.exceptions.DuplicatedStudyError):
optuna.create_study(study_name=study.study_name, storage=storage, load_if_exists=False)
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_load_study(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
if storage is None:
# `InMemoryStorage` can not be used with `load_study` function.
return
study_name = str(uuid.uuid4())
with pytest.raises(KeyError):
# Test loading an unexisting study.
optuna.study.load_study(study_name=study_name, storage=storage)
# Create a new study.
created_study = optuna.study.create_study(study_name=study_name, storage=storage)
# Test loading an existing study.
loaded_study = optuna.study.load_study(study_name=study_name, storage=storage)
assert created_study._study_id == loaded_study._study_id
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_delete_study(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
# Get storage object because delete_study does not accept None.
storage = optuna.storages.get_storage(storage=storage)
assert storage is not None
# Test deleting a non-existing study.
with pytest.raises(KeyError):
optuna.delete_study("invalid-study-name", storage)
# Test deleting an existing study.
study = optuna.create_study(storage=storage, load_if_exists=False)
optuna.delete_study(study.study_name, storage)
# Test failed to delete the study which is already deleted.
with pytest.raises(KeyError):
optuna.delete_study(study.study_name, storage)
def test_nested_optimization():
# type: () -> None
def objective(trial):
# type: (optuna.trial.Trial) -> float
with pytest.raises(RuntimeError):
trial.study.optimize(lambda _: 0.0, n_trials=1)
return 1.0
study = optuna.create_study()
study.optimize(objective, n_trials=10, catch=())
def test_stop_in_objective() -> None:
def objective(trial: optuna.trial.Trial, threshold_number: int) -> float:
if trial.number >= threshold_number:
trial.study.stop()
return trial.number
# Test stopping the optimization: it should stop once the trial number reaches 4.
study = optuna.create_study()
study.optimize(lambda x: objective(x, 4), n_trials=10)
assert len(study.trials) == 5
# Test calling `optimize` again: it should stop once the trial number reaches 11.
study.optimize(lambda x: objective(x, 11), n_trials=10)
assert len(study.trials) == 12
def test_stop_in_callback() -> None:
def callback(study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:
if trial.number >= 4:
study.stop()
# Test stopping the optimization inside a callback.
study = optuna.create_study()
study.optimize(lambda _: 1.0, n_trials=10, callbacks=[callback])
assert len(study.trials) == 5
def test_stop_n_jobs() -> None:
def callback(study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:
if trial.number >= 4:
study.stop()
study = optuna.create_study()
study.optimize(lambda _: 1.0, n_trials=None, callbacks=[callback], n_jobs=2)
assert 5 <= len(study.trials) <= 6
def test_stop_outside_optimize() -> None:
# Test stopping outside the optimization: it should raise `RuntimeError`.
study = optuna.create_study()
with pytest.raises(RuntimeError):
study.stop()
# Test calling `optimize` after the `RuntimeError` is caught.
study.optimize(lambda _: 1.0, n_trials=1)
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_append_trial(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
assert len(study.trials) == 0
trial_id = study._append_trial(value=0.8)
assert study.trials[0]._trial_id == trial_id
assert len(study.trials) == 1
assert study.best_value == 0.8
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_enqueue_trial_properly_sets_param_values(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
assert len(study.trials) == 0
study.enqueue_trial(params={"x": -5, "y": 5})
study.enqueue_trial(params={"x": -1, "y": 0})
def objective(trial):
# type: (optuna.trial.Trial) -> float
x = trial.suggest_int("x", -10, 10)
y = trial.suggest_int("y", -10, 10)
return x ** 2 + y ** 2
study.optimize(objective, n_trials=2)
t0 = study.trials[0]
assert t0.params["x"] == -5
assert t0.params["y"] == 5
t1 = study.trials[1]
assert t1.params["x"] == -1
assert t1.params["y"] == 0
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_enqueue_trial_with_unfixed_parameters(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
assert len(study.trials) == 0
study.enqueue_trial(params={"x": -5})
def objective(trial):
# type: (optuna.trial.Trial) -> float
x = trial.suggest_int("x", -10, 10)
y = trial.suggest_int("y", -10, 10)
return x ** 2 + y ** 2
study.optimize(objective, n_trials=1)
t = study.trials[0]
assert t.params["x"] == -5
assert -10 <= t.params["y"] <= 10
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_enqueue_trial_with_out_of_range_parameters(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
assert len(study.trials) == 0
study.enqueue_trial(params={"x": 11})
def objective(trial):
# type: (optuna.trial.Trial) -> float
return trial.suggest_int("x", -10, 10)
with pytest.warns(UserWarning):
study.optimize(objective, n_trials=1)
t = study.trials[0]
assert -10 <= t.params["x"] <= 10
# Internal logic might differ when distribution contains a single element.
# Test it explicitly.
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
assert len(study.trials) == 0
study.enqueue_trial(params={"x": 11})
def objective(trial):
# type: (optuna.trial.Trial) -> float
return trial.suggest_int("x", 1, 1) # Single element.
with pytest.warns(UserWarning):
study.optimize(objective, n_trials=1)
t = study.trials[0]
assert t.params["x"] == 1
@patch("optuna.study.gc.collect")
def test_optimize_with_gc(collect_mock):
# type: (Mock) -> None
study = optuna.create_study()
study.optimize(func, n_trials=10, gc_after_trial=True)
check_study(study)
assert collect_mock.call_count == 10
@patch("optuna.study.gc.collect")
def test_optimize_without_gc(collect_mock):
# type: (Mock) -> None
study = optuna.create_study()
study.optimize(func, n_trials=10, gc_after_trial=False)
check_study(study)
assert collect_mock.call_count == 0
@pytest.mark.parametrize("n_jobs", [1, 4])
def test_callbacks(n_jobs):
# type: (int) -> None
lock = threading.Lock()
def with_lock(f):
# type: (CallbackFuncType) -> CallbackFuncType
def callback(study, trial):
# type: (optuna.study.Study, optuna.trial.FrozenTrial) -> None
with lock:
f(study, trial)
return callback
study = optuna.create_study()
def objective(trial):
# type: (optuna.trial.Trial) -> float
return trial.suggest_int("x", 1, 1)
# Empty callback list.
study.optimize(objective, callbacks=[], n_trials=10, n_jobs=n_jobs)
# A callback.
values = []
callbacks = [with_lock(lambda study, trial: values.append(trial.value))]
study.optimize(objective, callbacks=callbacks, n_trials=10, n_jobs=n_jobs)
assert values == [1] * 10
# Two callbacks.
values = []
params = []
callbacks = [
with_lock(lambda study, trial: values.append(trial.value)),
with_lock(lambda study, trial: params.append(trial.params)),
]
study.optimize(objective, callbacks=callbacks, n_trials=10, n_jobs=n_jobs)
assert values == [1] * 10
assert params == [{"x": 1}] * 10
# If a trial is failed with an exception and the exception is caught by the study,
# callbacks are invoked.
states = []
callbacks = [with_lock(lambda study, trial: states.append(trial.state))]
study.optimize(
lambda t: 1 / 0,
callbacks=callbacks,
n_trials=10,
n_jobs=n_jobs,
catch=(ZeroDivisionError,),
)
assert states == [optuna.trial.TrialState.FAIL] * 10
# If a trial is failed with an exception and the exception isn't caught by the study,
# callbacks aren't invoked.
states = []
callbacks = [with_lock(lambda study, trial: states.append(trial.state))]
with pytest.raises(ZeroDivisionError):
study.optimize(lambda t: 1 / 0, callbacks=callbacks, n_trials=10, n_jobs=n_jobs, catch=())
assert states == []
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_get_trials(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
storage = optuna.storages.get_storage(storage=storage)
study = optuna.create_study(storage=storage)
study.optimize(lambda t: t.suggest_int("x", 1, 5), n_trials=5)
with patch("copy.deepcopy", wraps=copy.deepcopy) as mock_object:
trials0 = study.get_trials(deepcopy=False)
assert mock_object.call_count == 0
assert len(trials0) == 5
trials1 = study.get_trials(deepcopy=True)
assert mock_object.call_count > 0
assert trials0 == trials1
# `study.trials` is equivalent to `study.get_trials(deepcopy=True)`.
old_count = mock_object.call_count
trials2 = study.trials
assert mock_object.call_count > old_count
assert trials0 == trials2
def test_study_summary_eq_ne():
# type: () -> None
storage = optuna.storages.RDBStorage("sqlite:///:memory:")
optuna.create_study(storage=storage)
study = optuna.create_study(storage=storage)
summaries = study._storage.get_all_study_summaries()
assert len(summaries) == 2
assert summaries[0] == copy.deepcopy(summaries[0])
assert not summaries[0] != copy.deepcopy(summaries[0])
assert not summaries[0] == summaries[1]
assert summaries[0] != summaries[1]
assert not summaries[0] == 1
assert summaries[0] != 1
def test_study_summary_lt_le():
# type: () -> None
storage = optuna.storages.RDBStorage("sqlite:///:memory:")
optuna.create_study(storage=storage)
study = optuna.create_study(storage=storage)
summaries = study._storage.get_all_study_summaries()
assert len(summaries) == 2
summary_0 = summaries[0]
summary_1 = summaries[1]
assert summary_0 < summary_1
assert not summary_1 < summary_0
with pytest.raises(TypeError):
summary_0 < 1
assert summary_0 <= summary_0
assert not summary_1 <= summary_0
with pytest.raises(TypeError):
summary_0 <= 1
# A list of StudySummaries is sortable.
summaries.reverse()
summaries.sort()
assert summaries[0] == summary_0
assert summaries[1] == summary_1
def test_log_completed_trial(capsys: _pytest.capture.CaptureFixture) -> None:
# We need to reconstruct our default handler to properly capture stderr.
optuna.logging._reset_library_root_logger()
optuna.logging.set_verbosity(optuna.logging.INFO)
study = optuna.create_study()
study.optimize(lambda _: 1.0, n_trials=1)
_, err = capsys.readouterr()
assert "Trial 0" in err
optuna.logging.set_verbosity(optuna.logging.WARNING)
study.optimize(lambda _: 1.0, n_trials=1)
_, err = capsys.readouterr()
assert "Trial 1" not in err
optuna.logging.set_verbosity(optuna.logging.DEBUG)
study.optimize(lambda _: 1.0, n_trials=1)
_, err = capsys.readouterr()
assert "Trial 2" in err
def test_log_completed_trial_skip_storage_access() -> None:
study = optuna.create_study()
# Create a trial to retrieve it as the `study.best_trial`.
study.optimize(lambda _: 0.0, n_trials=1)
trial = optuna.Trial(study, study._storage.create_new_trial(study._study_id))
storage = study._storage
with patch.object(storage, "get_best_trial", wraps=storage.get_best_trial) as mock_object:
study._log_completed_trial(trial, 1.0)
# Trial.best_trial and Trial.best_params access storage.
assert mock_object.call_count == 2
optuna.logging.set_verbosity(optuna.logging.WARNING)
with patch.object(storage, "get_best_trial", wraps=storage.get_best_trial) as mock_object:
study._log_completed_trial(trial, 1.0)
assert mock_object.call_count == 0
optuna.logging.set_verbosity(optuna.logging.DEBUG)
with patch.object(storage, "get_best_trial", wraps=storage.get_best_trial) as mock_object:
study._log_completed_trial(trial, 1.0)
assert mock_object.call_count == 2
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
"""
hghooks
"""
import logging
from os.path import join as pathjoin
log = logging.getLogger('hghooks')
def http_request(method="GET",
url=None,
body=None,
headers=None,
username=None,
password=None):
    import httplib
    auth_string = None
    if username and password:
        # str.join takes a single iterable; strip the trailing newline that base64 encoding appends.
        auth_string = (u':'.join((username, password))).encode('base64').strip()
if url is None:
raise Exception(url)
if url.startswith('https'):
h = httplib.HTTPSConnection(url)
elif url.startswith('http'):
h = httplib.HTTPConnection(url)
else:
raise Exception(url)
headers = headers or {}
if auth_string:
headers['authorization'] = 'Basic %s' % auth_string
log.debug(url)
log.debug(headers)
return h.request(method, url, body, headers)
def notify_jenkins(*args, **kwargs):
log.info('notify_jenkins: %r, %r' % (args, kwargs))
app = kwargs.get('app', 'example')
url = pathjoin("/job", app, "build")
resp = http_request("POST", url, **kwargs)
log.info(resp)
MRUBASENAME = 'updates.json'
import json
import datetime
import os
def repo_event_json(**kwargs):
    repo = kwargs['repo']
    output = {
        # isoformat() keeps the timestamp JSON-serializable.
        'date': datetime.datetime.now().isoformat(),
        'url': repo.url(),
        # 'title':
        'meta': kwargs
    }
    # log.debug(u'event: %s' % output)
    # default=str falls back to str() for values json cannot serialize (e.g. the repo object in kwargs).
    return json.dumps(output, default=str)
def read_most_recent_mru(repo=None, repo_path=None, *args, **kwargs):
if repo_path is None:
if repo is None:
raise Exception()
repo_path = repo.path
mru_path = pathjoin(repo_path, MRUBASENAME)
if os.path.exists(mru_path):
try:
data = json.load(open(mru_path))
return data
except Exception as e:
log.exception(e)
else:
return None
def log_most_recent_incoming_hghook(ui, repo, *args, **kwargs):
def _write_event(**kwargs):
output = repo_event_json(**kwargs)
ui.debug(output)
mru_path = pathjoin(repo.path, MRUBASENAME)
if os.path.exists(mru_path):
            data = read_most_recent_mru(repo=repo)
ui.debug(data)
ui.debug('writing event to %s' % mru_path)
with open(mru_path, 'w') as _mru_file:
_mru_file.write(output)
EVENT_API_URL = "http://localhost:6543/api/events"
def log_repo_events_hghook(ui, repo, *args, **kwargs):
def _write_event(**kwargs):
event = repo_event_json(**kwargs)
ui.debug(event)
resp = http_request("POST", EVENT_API_URL, data=event)
ui.debug(resp)
return branch_hghook(ui, repo, _write_event, **kwargs)
# HG Hooks
def branch_hghook(ui, repo, function=notify_jenkins, **kwargs):
node = kwargs.get('node')
none_branch = repo[None].branch()
# branch of first node in changegroup
changegroup_branch = repo[node].branch()
context = {
'ui': ui,
'repo': repo,
'branch': none_branch,
'changegroup_branch': changegroup_branch
}
context.update(**kwargs)
if none_branch == 'default':
log.debug("branch: %s" % none_branch)
return function(**context)
else:
pass
def debug_hghook(ui, repo, **kwargs):
node = kwargs.get('node')
none_branch = repo[None].branch()
none_branch
# branch of first node in changegroup
changegroup_branch = repo[node].branch()
changegroup_branch
from IPython import embed
embed()
import unittest
class Test_hghooks(unittest.TestCase):
def test_hghooks(self):
        from mercurial.ui import ui
        ui = ui()
repo = None
kwargs = {
'source': 'pull',
'node': 'test',
#
}
        debug_hghook(ui, repo, **kwargs)
def main():
import optparse
import logging
prs = optparse.OptionParser(usage="./%prog : args")
prs.add_option('-b', '--build', '--jenkins-build',
dest='jenkins_build',
action='store_true')
prs.add_option('-d', '--debug',
dest='debug',
action='store_true')
prs.add_option('-r', '--repo', '--repository-path',
dest='repo',
action='store')
prs.add_option('-v', '--verbose',
dest='verbose',
action='store_true',)
prs.add_option('-q', '--quiet',
dest='quiet',
action='store_true',)
prs.add_option('-t', '--test',
dest='run_tests',
action='store_true',)
(opts, args) = prs.parse_args()
if not opts.quiet:
logging.basicConfig()
if opts.verbose:
logging.getLogger().setLevel(logging.DEBUG)
if opts.run_tests:
import sys
sys.argv = [sys.argv[0]] + args
import unittest
exit(unittest.main())
if opts.jenkins_build:
notify_jenkins()
if opts.debug:
import mercurial as hg
hg
repo = opts.repo
repo
from IPython import embed
embed()
if __name__ == "__main__":
main()
|
"""
This file defines the common pytest fixtures used in the current directory.
"""
from contextlib import contextmanager
import json
import pytest
import subprocess
import ray
from ray.tests.cluster_utils import Cluster
@pytest.fixture
def shutdown_only():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
def generate_internal_config_map(**kwargs):
internal_config = json.dumps(kwargs)
ray_kwargs = {
"_internal_config": internal_config,
}
return ray_kwargs
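# Illustrative example (not from the original file):
#   generate_internal_config_map(num_heartbeats_timeout=20)
#   returns {"_internal_config": '{"num_heartbeats_timeout": 20}'}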
def get_default_fixture_internal_config():
internal_config = json.dumps({
"initial_reconstruction_timeout_milliseconds": 200,
"num_heartbeats_timeout": 10,
})
return internal_config
def get_default_fixture_ray_kwargs():
    internal_config = get_default_fixture_internal_config()
ray_kwargs = {
"num_cpus": 1,
"object_store_memory": 10**8,
"_internal_config": internal_config,
}
return ray_kwargs
@contextmanager
def _ray_start(**kwargs):
init_kwargs = get_default_fixture_ray_kwargs()
init_kwargs.update(kwargs)
# Start the Ray processes.
address_info = ray.init(**init_kwargs)
yield address_info
# The code after the yield will run as teardown code.
ray.shutdown()
# The following fixture will start ray with 0 cpu.
@pytest.fixture
def ray_start_no_cpu(request):
param = getattr(request, "param", {})
with _ray_start(num_cpus=0, **param) as res:
yield res
# The following fixture will start ray with 1 cpu.
@pytest.fixture
def ray_start_regular(request):
param = getattr(request, "param", {})
with _ray_start(**param) as res:
yield res
@pytest.fixture
def ray_start_2_cpus(request):
param = getattr(request, "param", {})
with _ray_start(num_cpus=2, **param) as res:
yield res
@pytest.fixture
def ray_start_10_cpus(request):
param = getattr(request, "param", {})
with _ray_start(num_cpus=10, **param) as res:
yield res
@contextmanager
def _ray_start_cluster(**kwargs):
init_kwargs = get_default_fixture_ray_kwargs()
num_nodes = 0
do_init = False
# num_nodes & do_init are not arguments for ray.init, so delete them.
if "num_nodes" in kwargs:
num_nodes = kwargs["num_nodes"]
del kwargs["num_nodes"]
if "do_init" in kwargs:
do_init = kwargs["do_init"]
del kwargs["do_init"]
elif num_nodes > 0:
do_init = True
init_kwargs.update(kwargs)
cluster = Cluster()
remote_nodes = []
for _ in range(num_nodes):
remote_nodes.append(cluster.add_node(**init_kwargs))
if do_init:
ray.init(redis_address=cluster.redis_address)
yield cluster
# The code after the yield will run as teardown code.
ray.shutdown()
cluster.shutdown()
# This fixture will start a cluster with empty nodes.
@pytest.fixture
def ray_start_cluster(request):
param = getattr(request, "param", {})
with _ray_start_cluster(**param) as res:
yield res
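# Illustrative usage (assumed, not from this file): tests customize these cluster fixtures via
# pytest's indirect parametrization, e.g.
#   @pytest.mark.parametrize("ray_start_cluster", [{"num_nodes": 3}], indirect=True)
# which makes that dict available as request.param inside the fixture above.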
@pytest.fixture
def ray_start_cluster_head(request):
param = getattr(request, "param", {})
with _ray_start_cluster(do_init=True, num_nodes=1, **param) as res:
yield res
@pytest.fixture
def ray_start_cluster_2_nodes(request):
param = getattr(request, "param", {})
with _ray_start_cluster(do_init=True, num_nodes=2, **param) as res:
yield res
@pytest.fixture
def ray_start_object_store_memory(request):
# Start the Ray processes.
store_size = request.param
    internal_config = get_default_fixture_internal_config()
init_kwargs = {
"num_cpus": 1,
"_internal_config": internal_config,
"object_store_memory": store_size,
}
ray.init(**init_kwargs)
yield store_size
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.fixture
def call_ray_start(request):
parameter = getattr(request, "param", "ray start --head --num-cpus=1")
command_args = parameter.split(" ")
out = ray.utils.decode(
subprocess.check_output(command_args, stderr=subprocess.STDOUT))
# Get the redis address from the output.
redis_substring_prefix = "redis_address=\""
redis_address_location = (
out.find(redis_substring_prefix) + len(redis_substring_prefix))
redis_address = out[redis_address_location:]
redis_address = redis_address.split("\"")[0]
yield redis_address
# Disconnect from the Ray cluster.
ray.shutdown()
# Kill the Ray cluster.
subprocess.check_output(["ray", "stop"])
@pytest.fixture()
def two_node_cluster():
internal_config = json.dumps({
"initial_reconstruction_timeout_milliseconds": 200,
"num_heartbeats_timeout": 10,
})
cluster = ray.tests.cluster_utils.Cluster(
head_node_args={"_internal_config": internal_config})
for _ in range(2):
remote_node = cluster.add_node(
num_cpus=1, _internal_config=internal_config)
ray.init(redis_address=cluster.redis_address)
yield cluster, remote_node
# The code after the yield will run as teardown code.
ray.shutdown()
cluster.shutdown()
|
# read and write json files
#pip3 install langdetect
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from langdetect import detect
from pyspark.sql.types import *
import json
spark = SparkSession.builder.appName("json").master("local[2]").getOrCreate()
sc = spark.sparkContext
#data = json.load(open("/home/user/workarea/projects/learn-pyspark/data/colors.json","r"))
#rdd = sc.parallelize(data)
#register a udf for language detection
def detect_tweet_lang(s):
return detect(s)
spark.udf.register("detect_tweet_lang",detect_tweet_lang)
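# Illustrative use of the registered UDF from Spark SQL (the view name is assumed, not part of this script):
#   tweets.createOrReplaceTempView("tweets_view")
#   spark.sql("SELECT detect_tweet_lang(text) AS lang FROM tweets_view")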
"""
data = spark.read.format("json").\
option("multiline","true").\
option("mode","FAILFAST").\
load("/home/user/workarea/projects/learn-pyspark/data/source/tweets.json")
#data.printSchema()
#print(data.schema)
#run the following code to generate schema file in json format
f=open("/home/user/workarea/projects/learn-pyspark/config/tweets.schema","w+")
f.write(data.schema.json())
f.close()
"""
# Load the custom schema from the file generated earlier and convert it to a StructType
tweet_schema_json = spark.read.text("/home/user/workarea/projects/learn-pyspark/config/tweets.schema").first()[0]
tweet_schema = StructType.fromJson(json.loads(tweet_schema_json))
tweets = spark.read.parquet("/home/user/workarea/projects/learn-pyspark/data/out/tweets-from-kafka/")
tweets1=tweets.withColumn('json',from_json(col('value'),tweet_schema))
t2 = tweets1.withColumn('text',substring_index(col('json.text'),':',-1)).select('text')
#t2.printSchema()
#t3 = t2.withColumn("lang",detect_tweet_lang(t2["text"].cast("string"))).select('text','lang')
t3 = t2.withColumn("lang",lit("en")).select('text','lang')
t3.printSchema()
#convert the rdd to json
rdd_t3 = t3.toJSON()
print(rdd_t3.take(5))
#.write.format("csv").mode("overwrite").save("/home/user/workarea/projects/learn-pyspark/data/out/tweets-text")
#tweets1.select(explode(col('value')).alias('flattened')).show(5)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['AccessKeyArgs', 'AccessKey']
@pulumi.input_type
class AccessKeyArgs:
def __init__(__self__, *,
user: pulumi.Input[str],
pgp_key: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a AccessKey resource.
:param pulumi.Input[str] user: IAM user to associate with this access key.
:param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.
:param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.
"""
pulumi.set(__self__, "user", user)
if pgp_key is not None:
pulumi.set(__self__, "pgp_key", pgp_key)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter
def user(self) -> pulumi.Input[str]:
"""
IAM user to associate with this access key.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: pulumi.Input[str]):
pulumi.set(self, "user", value)
@property
@pulumi.getter(name="pgpKey")
def pgp_key(self) -> Optional[pulumi.Input[str]]:
"""
Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.
"""
return pulumi.get(self, "pgp_key")
@pgp_key.setter
def pgp_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pgp_key", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class _AccessKeyState:
def __init__(__self__, *,
create_date: Optional[pulumi.Input[str]] = None,
encrypted_secret: Optional[pulumi.Input[str]] = None,
encrypted_ses_smtp_password_v4: Optional[pulumi.Input[str]] = None,
key_fingerprint: Optional[pulumi.Input[str]] = None,
pgp_key: Optional[pulumi.Input[str]] = None,
secret: Optional[pulumi.Input[str]] = None,
ses_smtp_password_v4: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering AccessKey resources.
:param pulumi.Input[str] create_date: Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.
:param pulumi.Input[str] key_fingerprint: Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.
:param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.
:param pulumi.Input[str] secret: Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.
:param pulumi.Input[str] ses_smtp_password_v4: Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).
:param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.
:param pulumi.Input[str] user: IAM user to associate with this access key.
"""
if create_date is not None:
pulumi.set(__self__, "create_date", create_date)
if encrypted_secret is not None:
pulumi.set(__self__, "encrypted_secret", encrypted_secret)
if encrypted_ses_smtp_password_v4 is not None:
pulumi.set(__self__, "encrypted_ses_smtp_password_v4", encrypted_ses_smtp_password_v4)
if key_fingerprint is not None:
pulumi.set(__self__, "key_fingerprint", key_fingerprint)
if pgp_key is not None:
pulumi.set(__self__, "pgp_key", pgp_key)
if secret is not None:
pulumi.set(__self__, "secret", secret)
if ses_smtp_password_v4 is not None:
pulumi.set(__self__, "ses_smtp_password_v4", ses_smtp_password_v4)
if status is not None:
pulumi.set(__self__, "status", status)
if user is not None:
pulumi.set(__self__, "user", user)
@property
@pulumi.getter(name="createDate")
def create_date(self) -> Optional[pulumi.Input[str]]:
"""
Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.
"""
return pulumi.get(self, "create_date")
@create_date.setter
def create_date(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "create_date", value)
@property
@pulumi.getter(name="encryptedSecret")
def encrypted_secret(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "encrypted_secret")
@encrypted_secret.setter
def encrypted_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "encrypted_secret", value)
@property
@pulumi.getter(name="encryptedSesSmtpPasswordV4")
def encrypted_ses_smtp_password_v4(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "encrypted_ses_smtp_password_v4")
@encrypted_ses_smtp_password_v4.setter
def encrypted_ses_smtp_password_v4(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "encrypted_ses_smtp_password_v4", value)
@property
@pulumi.getter(name="keyFingerprint")
def key_fingerprint(self) -> Optional[pulumi.Input[str]]:
"""
Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.
"""
return pulumi.get(self, "key_fingerprint")
@key_fingerprint.setter
def key_fingerprint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_fingerprint", value)
@property
@pulumi.getter(name="pgpKey")
def pgp_key(self) -> Optional[pulumi.Input[str]]:
"""
Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.
"""
return pulumi.get(self, "pgp_key")
@pgp_key.setter
def pgp_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pgp_key", value)
@property
@pulumi.getter
def secret(self) -> Optional[pulumi.Input[str]]:
"""
Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.
"""
return pulumi.get(self, "secret")
@secret.setter
def secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secret", value)
@property
@pulumi.getter(name="sesSmtpPasswordV4")
def ses_smtp_password_v4(self) -> Optional[pulumi.Input[str]]:
"""
Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).
"""
return pulumi.get(self, "ses_smtp_password_v4")
@ses_smtp_password_v4.setter
def ses_smtp_password_v4(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ses_smtp_password_v4", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[str]]:
"""
Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def user(self) -> Optional[pulumi.Input[str]]:
"""
IAM user to associate with this access key.
"""
return pulumi.get(self, "user")
@user.setter
def user(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "user", value)
class AccessKey(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
pgp_key: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides an IAM access key. This is a set of credentials that allow API requests to be made as an IAM user.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
lb_user = aws.iam.User("lbUser", path="/system/")
lb_access_key = aws.iam.AccessKey("lbAccessKey",
user=lb_user.name,
pgp_key="keybase:some_person_that_exists")
lb_ro = aws.iam.UserPolicy("lbRo",
user=lb_user.name,
policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"ec2:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
\"\"\")
pulumi.export("secret", lb_access_key.encrypted_secret)
```
```python
import pulumi
import pulumi_aws as aws
test_user = aws.iam.User("testUser", path="/test/")
test_access_key = aws.iam.AccessKey("testAccessKey", user=test_user.name)
pulumi.export("awsIamSmtpPasswordV4", test_access_key.ses_smtp_password_v4)
```
## Import
IAM Access Keys can be imported using the identifier, e.g.,
```sh
$ pulumi import aws:iam/accessKey:AccessKey example AKIA1234567890
```
Resource attributes such as `encrypted_secret`, `key_fingerprint`, `pgp_key`, `secret`, `ses_smtp_password_v4`, and `encrypted_ses_smtp_password_v4` are not available for imported resources as this information cannot be read from the IAM API.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.
:param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.
:param pulumi.Input[str] user: IAM user to associate with this access key.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AccessKeyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides an IAM access key. This is a set of credentials that allow API requests to be made as an IAM user.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
lb_user = aws.iam.User("lbUser", path="/system/")
lb_access_key = aws.iam.AccessKey("lbAccessKey",
user=lb_user.name,
pgp_key="keybase:some_person_that_exists")
lb_ro = aws.iam.UserPolicy("lbRo",
user=lb_user.name,
policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"ec2:Describe*"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
\"\"\")
pulumi.export("secret", lb_access_key.encrypted_secret)
```
```python
import pulumi
import pulumi_aws as aws
test_user = aws.iam.User("testUser", path="/test/")
test_access_key = aws.iam.AccessKey("testAccessKey", user=test_user.name)
pulumi.export("awsIamSmtpPasswordV4", test_access_key.ses_smtp_password_v4)
```
## Import
IAM Access Keys can be imported using the identifier, e.g.,
```sh
$ pulumi import aws:iam/accessKey:AccessKey example AKIA1234567890
```
Resource attributes such as `encrypted_secret`, `key_fingerprint`, `pgp_key`, `secret`, `ses_smtp_password_v4`, and `encrypted_ses_smtp_password_v4` are not available for imported resources as this information cannot be read from the IAM API.
:param str resource_name: The name of the resource.
:param AccessKeyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AccessKeyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
pgp_key: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AccessKeyArgs.__new__(AccessKeyArgs)
__props__.__dict__["pgp_key"] = pgp_key
__props__.__dict__["status"] = status
if user is None and not opts.urn:
raise TypeError("Missing required property 'user'")
__props__.__dict__["user"] = user
__props__.__dict__["create_date"] = None
__props__.__dict__["encrypted_secret"] = None
__props__.__dict__["encrypted_ses_smtp_password_v4"] = None
__props__.__dict__["key_fingerprint"] = None
__props__.__dict__["secret"] = None
__props__.__dict__["ses_smtp_password_v4"] = None
super(AccessKey, __self__).__init__(
'aws:iam/accessKey:AccessKey',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
create_date: Optional[pulumi.Input[str]] = None,
encrypted_secret: Optional[pulumi.Input[str]] = None,
encrypted_ses_smtp_password_v4: Optional[pulumi.Input[str]] = None,
key_fingerprint: Optional[pulumi.Input[str]] = None,
pgp_key: Optional[pulumi.Input[str]] = None,
secret: Optional[pulumi.Input[str]] = None,
ses_smtp_password_v4: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[str]] = None,
user: Optional[pulumi.Input[str]] = None) -> 'AccessKey':
"""
Get an existing AccessKey resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] create_date: Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.
:param pulumi.Input[str] key_fingerprint: Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.
:param pulumi.Input[str] pgp_key: Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.
:param pulumi.Input[str] secret: Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.
:param pulumi.Input[str] ses_smtp_password_v4: Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).
:param pulumi.Input[str] status: Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.
:param pulumi.Input[str] user: IAM user to associate with this access key.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _AccessKeyState.__new__(_AccessKeyState)
__props__.__dict__["create_date"] = create_date
__props__.__dict__["encrypted_secret"] = encrypted_secret
__props__.__dict__["encrypted_ses_smtp_password_v4"] = encrypted_ses_smtp_password_v4
__props__.__dict__["key_fingerprint"] = key_fingerprint
__props__.__dict__["pgp_key"] = pgp_key
__props__.__dict__["secret"] = secret
__props__.__dict__["ses_smtp_password_v4"] = ses_smtp_password_v4
__props__.__dict__["status"] = status
__props__.__dict__["user"] = user
return AccessKey(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createDate")
def create_date(self) -> pulumi.Output[str]:
"""
Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) that the access key was created.
"""
return pulumi.get(self, "create_date")
@property
@pulumi.getter(name="encryptedSecret")
def encrypted_secret(self) -> pulumi.Output[str]:
return pulumi.get(self, "encrypted_secret")
@property
@pulumi.getter(name="encryptedSesSmtpPasswordV4")
def encrypted_ses_smtp_password_v4(self) -> pulumi.Output[str]:
return pulumi.get(self, "encrypted_ses_smtp_password_v4")
@property
@pulumi.getter(name="keyFingerprint")
def key_fingerprint(self) -> pulumi.Output[str]:
"""
Fingerprint of the PGP key used to encrypt the secret. This attribute is not available for imported resources.
"""
return pulumi.get(self, "key_fingerprint")
@property
@pulumi.getter(name="pgpKey")
def pgp_key(self) -> pulumi.Output[Optional[str]]:
"""
Either a base-64 encoded PGP public key, or a keybase username in the form `keybase:some_person_that_exists`, for use in the `encrypted_secret` output attribute.
"""
return pulumi.get(self, "pgp_key")
@property
@pulumi.getter
def secret(self) -> pulumi.Output[str]:
"""
Secret access key. This attribute is not available for imported resources. Note that this will be written to the state file. If you use this, please protect your backend state file judiciously. Alternatively, you may supply a `pgp_key` instead, which will prevent the secret from being stored in plaintext, at the cost of preventing the use of the secret key in automation.
"""
return pulumi.get(self, "secret")
@property
@pulumi.getter(name="sesSmtpPasswordV4")
def ses_smtp_password_v4(self) -> pulumi.Output[str]:
"""
Secret access key converted into an SES SMTP password by applying [AWS's documented Sigv4 conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert). This attribute is not available for imported resources. As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region).
"""
return pulumi.get(self, "ses_smtp_password_v4")
@property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[str]]:
"""
Access key status to apply. Defaults to `Active`. Valid values are `Active` and `Inactive`.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def user(self) -> pulumi.Output[str]:
"""
IAM user to associate with this access key.
"""
return pulumi.get(self, "user")
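# --- Illustrative usage (not part of the generated SDK) ---------------------
# A minimal sketch, shown commented out so it does not run on import of this
# module: it belongs in a Pulumi program's __main__.py. It combines the two
# documented entry points above -- creating a key whose secret is only exposed
# as `encrypted_secret`, and adopting an existing key with the static `get()`.
# The resource names and the "AKIA1234567890" id are placeholders.
#
# import pulumi
# import pulumi_aws as aws
#
# user = aws.iam.User("exampleUser")
# key = aws.iam.AccessKey("exampleKey",
#                         user=user.name,
#                         pgp_key="keybase:some_person_that_exists")
# pulumi.export("encryptedSecret", key.encrypted_secret)
#
# # Look up an already-existing access key by its provider id:
# imported = aws.iam.AccessKey.get("importedKey", id="AKIA1234567890")
# pulumi.export("importedStatus", imported.status)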
|
#!/usr/bin/python3
#-*-coding:utf-8-*-
#$File: dataset.py
#$Date: Sat May 7 10:59:24 2016
#$Author: Like Ma <milkpku[at]gmail[dot]com>
import copy
import random
import numpy as np
OUT_TYPE = np.float32
class Dataset(object):
def __init__(self, path, _type):
if _type == 'train':
self.__file_object = open(path + '/train.fen', 'r')
if _type == 'validation':
self.__file_object = open(path + '/valid.fen', 'r')
self.__chesslayer = {}
def __init_clayer(self):
# King(帅)*1, Advisor(仕)*2, Bishop(象)*2, kNight(马)*2
# Rook(车)*2, Cannon(炮)*2, Pawn(兵)*5
# Upper: red Lower: black
self.__chesslayer = {'K':0, 'A':1, 'B':3, 'N':5, 'R':7, 'C':9, 'P':11,
'k':0, 'a':1, 'b':3, 'n':5, 'r':7, 'c':9, 'p':11}
def next_batch(self, batch_size):
'''
return [data, label] with batched size
'''
frdpos = np.zeros((batch_size, 9, 10, 16), dtype=OUT_TYPE)
emypos = np.zeros((batch_size, 9, 10, 16), dtype=OUT_TYPE)
frdmove = np.zeros((batch_size, 9, 10, 16), dtype=OUT_TYPE)
emymove = np.zeros((batch_size, 9, 10, 16), dtype=OUT_TYPE)
frdprot = np.zeros((batch_size, 9, 10, 16), dtype=OUT_TYPE)
emyprot = np.zeros((batch_size, 9, 10, 16), dtype=OUT_TYPE)
movelabel = np.zeros((batch_size, 9, 10, 16), dtype=OUT_TYPE)
i = 0
while(i < batch_size):
line = self.__file_object.readline()
if line != '':
if line.split('\t')[2] == 'WIN!':
continue
else:
self.__file_object.seek(0, 0)
line = self.__file_object.readline()
frdpos[i], emypos[i], frdmove[i], emymove[i], frdprot[i], emyprot[i], movelabel[i] = self.__fen2tensor(line)
i += 1
return [frdpos, emypos, frdmove], movelabel
# return [frdpos, frdmove, emypos, emyprot], movelabel
# return [frdpos, emypos, frdmove, emymove, frdprot, emyprot], movelabel
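    # Record layout, inferred from __fen2tensor below (an assumption, not a documented
    # format): each line of the .fen file is tab-separated as
    #   [0] board FEN plus side to move   [1] friendly moves        [2] move label ("a0-b2"-style) or "WIN!"
    #   [3] enemy moves                   [4] friendly protections  [5] enemy protections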
def __fen2tensor(self, fen):
frdpos = np.zeros((9, 10, 16), dtype=OUT_TYPE)
emypos = np.zeros((9, 10, 16), dtype=OUT_TYPE)
frdmove = np.zeros((9, 10, 16), dtype=OUT_TYPE)
emymove = np.zeros((9, 10, 16), dtype=OUT_TYPE)
frdprot = np.zeros((9, 10, 16), dtype=OUT_TYPE)
emyprot = np.zeros((9, 10, 16), dtype=OUT_TYPE)
movelabel = np.zeros((9, 10, 16), dtype=OUT_TYPE)
fenlist = fen.split('\t')
frdpos, emypos = self.__f2tpos(fenlist[0], frdpos, emypos)
frdmove = self.__f2tmove(fenlist[1], frdmove, frdpos)
emymove = self.__f2tmove(fenlist[3], emymove, emypos)
frdprot = self.__f2tmove(fenlist[4], frdprot, frdpos)
emyprot = self.__f2tmove(fenlist[5], emyprot, emypos)
label = fenlist[2].strip().split('-')
layer = np.argmax(frdpos[self.__loca2i(label[0][0])][self.__loca2i(label[0][1])])
movelabel[self.__loca2i(label[1][0])][self.__loca2i(label[1][1])][layer] = 1
if fenlist[0].split()[1] == 'b':
self.__switch_round(frdpos)
self.__switch_round(frdmove)
self.__switch_round(emypos)
self.__switch_round(movelabel)
        # randomly shuffle the symmetric piece layers (paired pieces and the five pawns)
self.__shuffle([frdpos, frdmove, movelabel], self.__shuffle_args())
self.__shuffle([emypos], self.__shuffle_args())
return frdpos, emypos, frdmove, emymove, frdprot, emyprot, movelabel
def __f2tpos(self, fen, frdpos, emypos):
self.__init_clayer()
poslist = fen.split()[0].split('/')
player = fen.split()[1]
for i in range(len(poslist)):
item = poslist[9 - i]
index = 0
for j in range(len(item)):
if item[j].isupper():
if player == 'w':
frdpos[index][i][self.__chesslayer[item[j]]] = 1
else:
emypos[index][i][self.__chesslayer[item[j]]] = 1
self.__chesslayer[item[j]] += 1
index += 1
elif item[j].islower():
if player == 'w':
emypos[index][i][self.__chesslayer[item[j]]] = 1
else:
frdpos[index][i][self.__chesslayer[item[j]]] = 1
self.__chesslayer[item[j]] += 1
index += 1
else:
index += int(item[j])
return frdpos, emypos
def __f2tmove(self, movelist, move, pos):
movelist = movelist.split()
for item in movelist:
src = item.split('-')[0]
des = item.split('-')[1]
layer = np.argmax(pos[self.__loca2i(src[0])][self.__loca2i(src[1])])
move[self.__loca2i(des[0])][self.__loca2i(des[1])][layer] = 1
return move
def __loca2i(self, loc):
if loc.isupper():
return ord(loc)-ord('A')
else:
return int(loc)
    def __switch_round(self, mat):
        # reverse the board along the rank axis in place; assigning into mat[:]
        # updates the caller's array instead of rebinding a local name
        mat[:] = mat[:, ::-1, :]
def __shuffle(self, mat, args):
index = [[1,2],[3,4],[5,6],[7,8],[9,10],[11,12,13,14,15]]
for item in mat:
for i in range(len(index)):
item[:,:,index[i]] = self.__switch_layer(item[:,:,index[i]], args[i])
def __switch_layer(self, mat, args):
mat_temp = copy.deepcopy(mat)
assert len(args) == mat.shape[2]
for k in range(len(args)):
mat[:,:,k] = mat_temp[:,:,args[k]]
return mat
def __shuffle_args(self):
args = []
for i in range(5):
a = [0,1]
random.shuffle(a)
args.append(a)
seq = [0,1,2,3,4]
random.shuffle(seq)
args.append(seq)
return args
def load_data(_type):
    '''
    return a Dataset object which yields minibatch data
    '''
    data = Dataset('/home/mlk/BetaElephant/data', _type)
    return data
def visualdata(data):
    print('------------------------')
    for i in range(data.shape[2]):
        print(i)
        for j in range(data.shape[1]):
            for k in range(data.shape[0]):
                print(int(data[k][9 - j][i]), end=' ')
            print()
        print()
    print('------------------------\n')
if __name__ == '__main__':
traindata = load_data('validation')
for i in range(10):
        # NOTE: this unpacking (and the protection checks below) assumes next_batch()
        # returns the full six-tensor variant shown in its commented-out return.
        [frdpos, emypos, frdmove, emymove, frdprot, emyprot], movelabel = traindata.next_batch(10)
if 0:
visualdata(frdpos[0])
visualdata(frdmove[0])
visualdata(emypos[0])
for i in range(10):
[frdpos, emypos, frdmove, emymove, frdprot, emyprot], movelabel = traindata.next_batch(100)
# from IPython import embed; embed()
        # assert every protected square is occupied by one of the side's own pieces
assert all((frdprot.sum(axis=3)*frdpos.sum(axis=3)==frdprot.sum(axis=3)).reshape(-1))
assert all((emyprot.sum(axis=3)*emypos.sum(axis=3)==emyprot.sum(axis=3)).reshape(-1))
# assert no empty moves
frdmove = frdmove.reshape(frdmove.shape[0], -1)
frdmove = frdmove.sum(axis=1)
assert all(frdmove!=0), print(i, np.argwhere(frdmove==0))
# assert no piece in the same layer
frdpos = frdpos.reshape(frdpos.shape[0]*90, -1)
frdpos = frdpos.sum(axis=1)
assert all(frdpos < 2), print(i, np.argwhere(frdpos>1))
emypos = emypos.reshape(emypos.shape[0]*90, -1)
emypos = emypos.sum(axis=1)
assert all(emypos < 2), print(i, np.argwhere(emypos>1))
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.24
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1SecretList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1Secret]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1SecretList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1SecretList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1SecretList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1SecretList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1SecretList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1SecretList. # noqa: E501
Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret # noqa: E501
:return: The items of this V1SecretList. # noqa: E501
:rtype: list[V1Secret]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1SecretList.
Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret # noqa: E501
:param items: The items of this V1SecretList. # noqa: E501
:type: list[V1Secret]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1SecretList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1SecretList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1SecretList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1SecretList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1SecretList. # noqa: E501
:return: The metadata of this V1SecretList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1SecretList.
:param metadata: The metadata of this V1SecretList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1SecretList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1SecretList):
return True
return self.to_dict() != other.to_dict()
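# --- Illustrative usage (not part of the generated client) ------------------
# A minimal sketch, assuming the surrounding `kubernetes` package is importable:
# build an (empty) V1SecretList and round-trip it through to_dict().
if __name__ == "__main__":
    demo = V1SecretList(api_version="v1", kind="SecretList", items=[])
    # prints {'api_version': 'v1', 'items': [], 'kind': 'SecretList', 'metadata': None}
    print(demo.to_dict())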
|
#
# Collective Knowledge (individual environment - setup)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net
#
import os
##############################################################################
# customize directories to automatically find and register software
def dirs(i):
return {'return':0}
##############################################################################
# limit directories
def limit(i):
hosd=i.get('host_os_dict',{})
tosd=i.get('target_os_dict',{})
phosd=hosd.get('ck_name','')
dr=i.get('list',[])
drx=[]
for q in dr:
if q.find('.{')<0 and q.find('X11')<0 and q.find('/lib/')<0:
drx.append(q)
return {'return':0, 'list':drx}
##############################################################################
# parse software version
def parse_version(i):
lst=i['output']
ver=''
for q in lst:
q=q.strip()
if q!='':
j=q.find(' V')
if j>=0:
ver=q[j+2:]
j=ver.find(' ')
if j>=0:
ver=ver[:j]
break
return {'return':0, 'version':ver}
##############################################################################
# setup environment
def setup(i):
"""
Input: {
cfg - meta of this soft entry
self_cfg - meta of module soft
ck_kernel - import CK kernel module (to reuse functions)
host_os_uoa - host OS UOA
host_os_uid - host OS UID
host_os_dict - host OS meta
target_os_uoa - target OS UOA
target_os_uid - target OS UID
target_os_dict - target OS meta
target_device_id - target device ID (if via ADB)
tags - list of tags used to search this entry
env - updated environment vars from meta
customize - updated customize vars from meta
deps - resolved dependencies for this soft
interactive - if 'yes', can ask questions, otherwise quiet
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
bat - prepared string for bat file
}
"""
import os
# Get variables
ck=i['ck_kernel']
s=''
ver=i.get('version','')
sver=i.get('version_split',[])
cus=i['customize']
env=i['env']
host_d=i.get('host_os_dict',{})
    target_d=i.get('target_os_dict',{})
tbits=target_d.get('bits','')
winh=host_d.get('windows_base','')
deps=i['deps']
fp=cus.get('full_path','')
ep=cus.get('env_prefix','')
p1=''
p2=''
if ep!='' and fp!='':
p1=os.path.dirname(fp)
p2=os.path.dirname(p1)
env[ep]=p2
env[ep+'_BIN']=p1
env['CUDA_PATH']=p2
env['CUDA_HOME']=p2
env['CUDA_INSTALL_DIR']=p2
opt1='--relaxed-constexpr'
if len(sver)>1 and ((sver[0]==7 and sver[1]>=5) or sver[0]>7):
opt1='--expt-relaxed-constexpr'
env['CK_OPT_RELAXED_CONSTEXPR']=opt1
opt2=' '
if len(sver)>0 and sver[0]>6:
opt2='--std=c++11'
env['CK_COMPILER_FLAG_CPP11']=opt2
if p1!='':
############################################################
if winh=='yes':
            s+='\nset PATH='+p1+';'+p2+'\\libnvvp;'+p2+'\\nvvm\\bin;'+p2+'\\open64\\bin;%PATH%\n\n'
env['CK_COMPILER_FLAGS_OBLIGATORY']='-DWINDOWS'
ext='x64'
if tbits=='32':
ext='Win32'
env[ep+'_LIB']=p2+'\\lib\\'+ext
env[ep+'_INCLUDE']=p2+'\\include'
rx=ck.access({'action':'convert_to_cygwin_path',
'module_uoa':'os',
'path':p2})
if rx['return']>0: return rx
env[ep+'_WIN']=rx['path']
else:
s+='\nexport PATH='+p1+':$PATH\n\n'
# Update -ccbin -> dep on GCC
gcc=deps['gcc']['dict']['customize']
y=gcc.get('full_path','')
if y!='':
x=env['CK_NVCC']
if '-ccbin' not in x:
env['CK_NVCC']=x+' -ccbin '+y
x=env['CK_NVCXX']
if '-ccbin' not in x:
y=y.replace('gcc','g++')
env['CK_NVCXX']=x+' -ccbin '+y
lp=''
if os.path.isdir(p2+'/lib64'):
lp=p2+'/lib64'
elif os.path.isdir(p2+'/lib'):
lp=p2+'/lib'
if lp!='':
env[ep+'_LIB']=lp
r = ck.access({'action': 'lib_path_export_script', 'module_uoa': 'os', 'host_os_dict': host_d,
'lib_path': lp})
if r['return']>0: return r
s += r['script']
env[ep+'_INCLUDE']=p2+'/include'
return {'return':0, 'bat':s}
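##############################################################################
# Illustrative only: a minimal sketch of exercising setup() outside of CK, with a
# stub standing in for the CK kernel and placeholder paths, env names, and versions.
# In real use the CK framework supplies all of these values.
if __name__ == '__main__':
    class _StubCK:
        def access(self, inp):
            # pretend every kernel action used by setup() succeeds
            return {'return': 0, 'script': '', 'path': inp.get('path', '')}
    demo = setup({
        'ck_kernel': _StubCK(),
        'version': '10.1',
        'version_split': [10, 1],
        'customize': {'full_path': '/usr/local/cuda/bin/nvcc',   # placeholder path
                      'env_prefix': 'CK_ENV_COMPILER_CUDA'},     # placeholder prefix
        'env': {'CK_NVCC': 'nvcc', 'CK_NVCXX': 'nvcc -x cu'},
        'host_os_dict': {'ck_name': 'linux', 'windows_base': ''},
        'target_os_dict': {'bits': '64'},
        'deps': {'gcc': {'dict': {'customize': {'full_path': '/usr/bin/gcc'}}}},
    })
    print(demo['return'], demo['bat'])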
|
import os.path
import typing
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import flowfilter
from mitmproxy import io
class FileStreamer:
    def __init__(self):
        self.stream = None
        self.active_flows = set()  # type: typing.Set[flow.Flow]
def start_stream_to_path(self, path, mode, flt):
path = os.path.expanduser(path)
try:
f = open(path, mode)
except IOError as v:
return str(v)
self.stream = io.FilteredFlowWriter(f, flt)
self.active_flows = set()
def configure(self, options, updated):
# We're already streaming - stop the previous stream and restart
if self.stream:
self.done()
if options.outfile:
flt = None
if options.get("filtstr"):
flt = flowfilter.parse(options.filtstr)
if not flt:
raise exceptions.OptionsError(
"Invalid filter specification: %s" % options.filtstr
)
path, mode = options.outfile
if mode not in ("wb", "ab"):
raise exceptions.OptionsError("Invalid mode.")
err = self.start_stream_to_path(path, mode, flt)
if err:
raise exceptions.OptionsError(err)
def tcp_start(self, flow):
if self.stream:
self.active_flows.add(flow)
def tcp_end(self, flow):
if self.stream:
self.stream.add(flow)
self.active_flows.discard(flow)
def response(self, flow):
if self.stream:
self.stream.add(flow)
self.active_flows.discard(flow)
def request(self, flow):
if self.stream:
self.active_flows.add(flow)
def done(self):
if self.stream:
for flow in self.active_flows:
self.stream.add(flow)
self.active_flows = set([])
self.stream.fo.close()
self.stream = None
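# --- Illustrative only -------------------------------------------------------
# A minimal sketch of wiring this class up as a script-style mitmproxy addon,
# assuming a mitmproxy version that still exposes the `outfile`/`filtstr`
# options read in configure(); load it with e.g. `mitmdump -s <this file>`.
addons = [FileStreamer()]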
|
EXPECTED_DAILY_RESOURCE_GENERATE_SAMPLES_QUERIES = """delimiter //
DROP PROCEDURE IF EXISTS get_daily_samples;
CREATE PROCEDURE get_daily_samples (
IN id INT,
OUT entry0 FLOAT,
OUT entry1 FLOAT,
OUT entry2 FLOAT,
OUT entry3 FLOAT,
OUT entry4 FLOAT,
OUT entry5 FLOAT,
OUT entry6 FLOAT,
OUT entry7 FLOAT,
OUT entry8 FLOAT,
OUT entry9 FLOAT,
OUT entry10 FLOAT,
OUT entry11 FLOAT,
OUT entry12 FLOAT,
OUT entry13 FLOAT,
OUT entry14 FLOAT,
OUT entry15 FLOAT,
OUT entry16 FLOAT,
OUT entry17 FLOAT,
OUT entry18 FLOAT,
OUT entry19 FLOAT,
OUT entry20 FLOAT,
OUT entry21 FLOAT,
OUT entry22 FLOAT,
OUT entry23 FLOAT,
OUT start_hour FLOAT
)
BEGIN
select HOUR(now()) INTO start_hour;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 24 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 23 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry0 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 24 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 23 HOUR) AND Resourceid = id limit 1;
else SET entry0 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 23 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 22 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry1 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 23 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 22 HOUR) AND Resourceid = id limit 1;
else SET entry1 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 22 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 21 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry2 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 22 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 21 HOUR) AND Resourceid = id limit 1;
else SET entry2 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 21 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 20 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry3 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 21 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 20 HOUR) AND Resourceid = id limit 1;
else SET entry3 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 20 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 19 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry4 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 20 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 19 HOUR) AND Resourceid = id limit 1;
else SET entry4 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 19 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 18 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry5 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 19 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 18 HOUR) AND Resourceid = id limit 1;
else SET entry5 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 18 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 17 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry6 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 18 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 17 HOUR) AND Resourceid = id limit 1;
else SET entry6 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 17 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 16 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry7 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 17 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 16 HOUR) AND Resourceid = id limit 1;
else SET entry7 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 16 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 15 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry8 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 16 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 15 HOUR) AND Resourceid = id limit 1;
else SET entry8 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 15 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 14 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry9 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 15 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 14 HOUR) AND Resourceid = id limit 1;
else SET entry9 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 14 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 13 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry10 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 14 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 13 HOUR) AND Resourceid = id limit 1;
else SET entry10 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 13 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 12 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry11 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 13 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 12 HOUR) AND Resourceid = id limit 1;
else SET entry11 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 12 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 11 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry12 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 12 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 11 HOUR) AND Resourceid = id limit 1;
else SET entry12 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 11 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 10 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry13 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 11 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 10 HOUR) AND Resourceid = id limit 1;
else SET entry13 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 10 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 9 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry14 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 10 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 9 HOUR) AND Resourceid = id limit 1;
else SET entry14 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 9 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 8 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry15 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 9 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 8 HOUR) AND Resourceid = id limit 1;
else SET entry15 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 8 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 7 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry16 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 8 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 7 HOUR) AND Resourceid = id limit 1;
else SET entry16 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 7 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 6 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry17 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 7 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 6 HOUR) AND Resourceid = id limit 1;
else SET entry17 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 6 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 5 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry18 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 6 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 5 HOUR) AND Resourceid = id limit 1;
else SET entry18 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 5 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 4 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry19 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 5 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 4 HOUR) AND Resourceid = id limit 1;
else SET entry19 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 4 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 3 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry20 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 4 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 3 HOUR) AND Resourceid = id limit 1;
else SET entry20 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 3 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 2 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry21 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 3 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 2 HOUR) AND Resourceid = id limit 1;
else SET entry21 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 2 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 1 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry22 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 2 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 1 HOUR) AND Resourceid = id limit 1;
else SET entry22 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 1 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 0 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry23 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 1 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 0 HOUR) AND Resourceid = id limit 1;
else SET entry23 := 0;
end if;
END//
delimiter ;
"""
EXPECTED_WEEKLY_RESOURCE_GENERATE_SAMPLES_QUERIES = """delimiter //
DROP PROCEDURE IF EXISTS get_weekly_samples;
CREATE PROCEDURE get_weekly_samples (
IN id INT,
OUT entry0 FLOAT,
OUT entry1 FLOAT,
OUT entry2 FLOAT,
OUT entry3 FLOAT,
OUT entry4 FLOAT,
OUT entry5 FLOAT,
OUT entry6 FLOAT,
OUT entry7 FLOAT,
OUT entry8 FLOAT,
OUT entry9 FLOAT,
OUT entry10 FLOAT,
OUT entry11 FLOAT,
OUT entry12 FLOAT,
OUT entry13 FLOAT,
OUT entry14 FLOAT,
OUT entry15 FLOAT,
OUT entry16 FLOAT,
OUT entry17 FLOAT,
OUT entry18 FLOAT,
OUT entry19 FLOAT,
OUT entry20 FLOAT,
OUT entry21 FLOAT,
OUT entry22 FLOAT,
OUT entry23 FLOAT,
OUT entry24 FLOAT,
OUT entry25 FLOAT,
OUT entry26 FLOAT,
OUT entry27 FLOAT,
OUT start_hour FLOAT
)
BEGIN
select HOUR(now()) INTO start_hour;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 168 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 162 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry0 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 168 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 162 HOUR) AND Resourceid = id limit 1;
else SET entry0 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 162 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 156 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry1 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 162 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 156 HOUR) AND Resourceid = id limit 1;
else SET entry1 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 156 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 150 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry2 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 156 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 150 HOUR) AND Resourceid = id limit 1;
else SET entry2 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 150 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 144 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry3 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 150 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 144 HOUR) AND Resourceid = id limit 1;
else SET entry3 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 144 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 138 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry4 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 144 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 138 HOUR) AND Resourceid = id limit 1;
else SET entry4 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 138 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 132 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry5 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 138 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 132 HOUR) AND Resourceid = id limit 1;
else SET entry5 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 132 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 126 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry6 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 132 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 126 HOUR) AND Resourceid = id limit 1;
else SET entry6 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 126 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 120 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry7 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 126 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 120 HOUR) AND Resourceid = id limit 1;
else SET entry7 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 120 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 114 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry8 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 120 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 114 HOUR) AND Resourceid = id limit 1;
else SET entry8 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 114 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 108 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry9 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 114 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 108 HOUR) AND Resourceid = id limit 1;
else SET entry9 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 108 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 102 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry10 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 108 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 102 HOUR) AND Resourceid = id limit 1;
else SET entry10 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 102 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 96 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry11 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 102 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 96 HOUR) AND Resourceid = id limit 1;
else SET entry11 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 96 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 90 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry12 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 96 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 90 HOUR) AND Resourceid = id limit 1;
else SET entry12 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 90 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 84 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry13 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 90 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 84 HOUR) AND Resourceid = id limit 1;
else SET entry13 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 84 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 78 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry14 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 84 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 78 HOUR) AND Resourceid = id limit 1;
else SET entry14 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 78 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 72 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry15 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 78 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 72 HOUR) AND Resourceid = id limit 1;
else SET entry15 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 72 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 66 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry16 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 72 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 66 HOUR) AND Resourceid = id limit 1;
else SET entry16 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 66 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 60 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry17 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 66 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 60 HOUR) AND Resourceid = id limit 1;
else SET entry17 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 60 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 54 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry18 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 60 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 54 HOUR) AND Resourceid = id limit 1;
else SET entry18 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 54 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 48 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry19 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 54 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 48 HOUR) AND Resourceid = id limit 1;
else SET entry19 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 48 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 42 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry20 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 48 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 42 HOUR) AND Resourceid = id limit 1;
else SET entry20 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 42 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 36 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry21 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 42 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 36 HOUR) AND Resourceid = id limit 1;
else SET entry21 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 36 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 30 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry22 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 36 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 30 HOUR) AND Resourceid = id limit 1;
else SET entry22 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 30 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 24 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry23 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 30 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 24 HOUR) AND Resourceid = id limit 1;
else SET entry23 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 24 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 18 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry24 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 24 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 18 HOUR) AND Resourceid = id limit 1;
else SET entry24 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 18 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 12 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry25 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 18 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 12 HOUR) AND Resourceid = id limit 1;
else SET entry25 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 12 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 6 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry26 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 12 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 6 HOUR) AND Resourceid = id limit 1;
else SET entry26 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 6 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 0 HOUR) AND Resourceid = id)
then SELECT ResponseTime INTO entry27 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 6 HOUR) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 0 HOUR) AND Resourceid = id limit 1;
else SET entry27 := 0;
end if;
END//
delimiter ;
"""
EXPECTED_MONTHLY_RESOURCE_GENERATE_SAMPLES_QUERIES = """delimiter //
DROP PROCEDURE IF EXISTS get_monthly_samples;
CREATE PROCEDURE get_monthly_samples (
IN id INT,
OUT entry0 FLOAT,
OUT entry1 FLOAT,
OUT entry2 FLOAT,
OUT entry3 FLOAT,
OUT entry4 FLOAT,
OUT entry5 FLOAT,
OUT entry6 FLOAT,
OUT entry7 FLOAT,
OUT entry8 FLOAT,
OUT entry9 FLOAT,
OUT entry10 FLOAT,
OUT entry11 FLOAT,
OUT entry12 FLOAT,
OUT entry13 FLOAT,
OUT entry14 FLOAT,
OUT entry15 FLOAT,
OUT entry16 FLOAT,
OUT entry17 FLOAT,
OUT entry18 FLOAT,
OUT entry19 FLOAT,
OUT entry20 FLOAT,
OUT entry21 FLOAT,
OUT entry22 FLOAT,
OUT entry23 FLOAT,
OUT entry24 FLOAT,
OUT entry25 FLOAT,
OUT entry26 FLOAT,
OUT entry27 FLOAT,
OUT entry28 FLOAT,
OUT entry29 FLOAT,
OUT entry30 FLOAT,
OUT start_hour FLOAT
)
BEGIN
select DAY(now()) INTO start_hour;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 31 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 30 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry0 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 31 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 30 DAY) AND Resourceid = id limit 1;
else SET entry0 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 30 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 29 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry1 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 30 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 29 DAY) AND Resourceid = id limit 1;
else SET entry1 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 29 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 28 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry2 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 29 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 28 DAY) AND Resourceid = id limit 1;
else SET entry2 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 28 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 27 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry3 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 28 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 27 DAY) AND Resourceid = id limit 1;
else SET entry3 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 27 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 26 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry4 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 27 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 26 DAY) AND Resourceid = id limit 1;
else SET entry4 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 26 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 25 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry5 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 26 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 25 DAY) AND Resourceid = id limit 1;
else SET entry5 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 25 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 24 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry6 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 25 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 24 DAY) AND Resourceid = id limit 1;
else SET entry6 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 24 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 23 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry7 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 24 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 23 DAY) AND Resourceid = id limit 1;
else SET entry7 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 23 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 22 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry8 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 23 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 22 DAY) AND Resourceid = id limit 1;
else SET entry8 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 22 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 21 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry9 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 22 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 21 DAY) AND Resourceid = id limit 1;
else SET entry9 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 21 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 20 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry10 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 21 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 20 DAY) AND Resourceid = id limit 1;
else SET entry10 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 20 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 19 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry11 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 20 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 19 DAY) AND Resourceid = id limit 1;
else SET entry11 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 19 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 18 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry12 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 19 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 18 DAY) AND Resourceid = id limit 1;
else SET entry12 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 18 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 17 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry13 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 18 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 17 DAY) AND Resourceid = id limit 1;
else SET entry13 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 17 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 16 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry14 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 17 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 16 DAY) AND Resourceid = id limit 1;
else SET entry14 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 16 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 15 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry15 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 16 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 15 DAY) AND Resourceid = id limit 1;
else SET entry15 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 15 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 14 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry16 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 15 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 14 DAY) AND Resourceid = id limit 1;
else SET entry16 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 14 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 13 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry17 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 14 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 13 DAY) AND Resourceid = id limit 1;
else SET entry17 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 13 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 12 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry18 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 13 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 12 DAY) AND Resourceid = id limit 1;
else SET entry18 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 12 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 11 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry19 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 12 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 11 DAY) AND Resourceid = id limit 1;
else SET entry19 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 11 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 10 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry20 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 11 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 10 DAY) AND Resourceid = id limit 1;
else SET entry20 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 10 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 9 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry21 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 10 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 9 DAY) AND Resourceid = id limit 1;
else SET entry21 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 9 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 8 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry22 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 9 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 8 DAY) AND Resourceid = id limit 1;
else SET entry22 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 8 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 7 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry23 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 8 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 7 DAY) AND Resourceid = id limit 1;
else SET entry23 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 7 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 6 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry24 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 7 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 6 DAY) AND Resourceid = id limit 1;
else SET entry24 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 6 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 5 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry25 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 6 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 5 DAY) AND Resourceid = id limit 1;
else SET entry25 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 5 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 4 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry26 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 5 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 4 DAY) AND Resourceid = id limit 1;
else SET entry26 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 4 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 3 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry27 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 4 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 3 DAY) AND Resourceid = id limit 1;
else SET entry27 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 3 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 2 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry28 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 3 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 2 DAY) AND Resourceid = id limit 1;
else SET entry28 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 2 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 1 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry29 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 2 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 1 DAY) AND Resourceid = id limit 1;
else SET entry29 := 0;
end if;
if EXISTS(SELECT ResponseTime FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 1 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 0 DAY) AND Resourceid = id)
then SELECT ResponseTime INTO entry30 FROM PING WHERE TIMESTAMP >= DATE_SUB(NOW(), INTERVAL 1 DAY) AND TIMESTAMP <= DATE_SUB(NOW(), INTERVAL 0 DAY) AND Resourceid = id limit 1;
else SET entry30 := 0;
end if;
END//
delimiter ;
"""
|
import numpy as np
import pandas as pd
# Test data download requirements:
import requests
import os
URL_DATA = "https://raw.githubusercontent.com/open2c/cooltools/pileup-update/datasets/external_test_files.tsv"
def assign_supports(features, supports, labels=False, suffix=""):
"""
Assign support regions to a table of genomic intervals.
Parameters
----------
features : DataFrame
Dataframe with columns `chrom`, `start`, `end`
or `chrom1`, `start1`, `end1`, `chrom2`, `start2`, `end2`
supports : array-like
Support areas
"""
features = features.copy()
supp_col = pd.Series(index=features.index, data=np.nan)
c = "chrom" + suffix
s = "start" + suffix
e = "end" + suffix
for col in (c, s, e):
if col not in features.columns:
raise ValueError(
'Column "{}" not found in features data frame.'.format(col)
)
for i, region in enumerate(supports):
# single-region support
if len(region) in [3, 4]:
sel = (features[c] == region[0]) & (features[e] > region[1])
if region[2] is not None:
sel &= features[s] < region[2]
# paired-region support
elif len(region) == 2:
region1, region2 = region
sel1 = (features[c] == region1[0]) & (features[e] > region1[1])
if region1[2] is not None:
sel1 &= features[s] < region1[2]
sel2 = (features[c] == region2[0]) & (features[e] > region2[1])
if region2[2] is not None:
sel2 &= features[s] < region2[2]
sel = sel1 | sel2
supp_col.loc[sel] = i
if labels:
supp_col = supp_col.map(lambda i: supports[int(i)], na_action="ignore")
return supp_col
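# Hedged usage sketch (not part of the original module): a minimal call to
# assign_supports() on a toy feature table. The column names follow the docstring
# above; the regions themselves are made up for illustration.
def _demo_assign_supports():
    demo_features = pd.DataFrame(
        {
            "chrom": ["chr1", "chr1", "chr2"],
            "start": [100, 5_000, 200],
            "end": [500, 6_000, 800],
        }
    )
    # Single-region supports are (chrom, start, end) tuples; end=None means open-ended.
    demo_supports = [("chr1", 0, 1_000), ("chr2", 0, None)]
    # With labels=True the returned Series holds the matching support tuples
    # instead of their integer indices; unmatched rows stay NaN.
    return assign_supports(demo_features, demo_supports, labels=True)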
def assign_regions_to_bins(bin_ids, regions_span):
regions_binsorted = (
regions_span[(regions_span["bin_start"] >= 0) & (regions_span["bin_end"] >= 0)]
.sort_values(["bin_start", "bin_end"])
.reset_index()
)
bin_reg_idx_lo = regions_span["bin_start"].searchsorted(bin_ids, "right") - 1
bin_reg_idx_hi = regions_span["bin_end"].searchsorted(bin_ids, "right")
mask_assigned = (bin_reg_idx_lo == bin_reg_idx_hi) & (bin_reg_idx_lo >= 0)
region_ids = pd.array([pd.NA] * len(bin_ids))
region_ids[mask_assigned] = regions_span["name"][bin_reg_idx_lo[mask_assigned]]
return region_ids
def download_data(name="all", cache=True, data_dir=None):
"""
Parameters
----------
name : str
Name of the dataset to download, or "all" (the default) to fetch every available dataset.
cache : boolean, optional
If True, try to load from the local cache first, and save to the cache
if a download is required.
data_dir : string, optional
The directory where to cache data; default is defined by :func:`get_data_dir`.
Returns
-------
Path to the last downloaded file.
"""
def download_file(url, local_filename=None):
if local_filename is None:
local_filename = url.split('/')[-1]
print('downloading:', url, 'as', local_filename)
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
return local_filename
available_datasets = _get_datasets_info()
available_keys = [entry[0] for entry in available_datasets]
if name in available_keys:
keys = [name]
elif name=="all":
keys = available_keys
else:
raise Exception(
"""Dataset {key} is not available.
Available datasets: {datasets}
Use print_available_datasets() to see the details.
""".format(key=name, datasets=','.join(available_keys)))
data_dir = get_data_dir(data_dir)
assert len(keys)>0 # Checking that test data file is parsed successfully
downloaded = '' # Empty string that will be returned if the request was empty
for key, url, local_filename in available_datasets:
if key not in keys:
continue
file_path = os.path.join(data_dir, local_filename)
if cache and os.path.exists(file_path):
downloaded = file_path
continue
elif cache:
print("Test dataset {} (file {}) is not in the cache directory {}".format(key, local_filename, data_dir))
downloaded = download_file(url, file_path)
return downloaded
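# Hedged usage sketch (not part of the original module): fetching a single cached
# dataset. The key "HFF_MicroC" is a placeholder -- call print_available_datasets()
# to see the keys actually listed in URL_DATA.
def _demo_download_data():
    # Returns the local path of the (possibly already cached) file.
    return download_data(name="HFF_MicroC", cache=True)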
def get_data_dir(data_dir=None):
"""
Returns a path to cache directory for example datasets.
This directory is then used by :func:`download_data`.
By default, this function uses the location of cooltools as the home folder
and stores the files under ./datasets/ path there.
Parameters
----------
data_dir : str, optional
Location of the home for test data storage
Returns
-------
String with path to the folder with test data
Notes
-------
Designed following seaborn download test data examples:
https://github.com/mwaskom/seaborn/blob/4dd57d63e23e4628d8b7f41cdefdf64f7fefcf74/seaborn/utils.py#L427
"""
if data_dir is None:
data_dir = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', '..', 'datasets/'))
data_dir = os.path.expanduser(data_dir)
if not os.path.exists(data_dir):
os.makedirs(data_dir)
return os.path.join(data_dir, '')
def _get_datasets_info():
"""
Reports all available datasets in URL_DATA
Requires an internet connection.
Returns
-------
A list of datasets: [key, url, local_filename]
"""
url = URL_DATA
datasets_metadata = requests.get(url, stream=True).iter_lines()
datasets = []
for line_metadata in datasets_metadata:
line_metadata = line_metadata.decode("utf-8")
# Skip blank lines and comment lines; split each record only once.
if not line_metadata or line_metadata.startswith("#"):
continue
key, local_filename, file_url = line_metadata.split()[:3]
datasets.append([key, file_url, local_filename])
return datasets
def print_available_datasets():
"""
Prints all available test datasets in URL_DATA
Requires an internet connection.
"""
url = URL_DATA
datasets_metadata = requests.get(url, stream=True).iter_lines()
print("Available datasets:")
for i, line_metadata in enumerate(datasets_metadata):
line_metadata = line_metadata.decode("utf-8")
# Skip blank lines and comment lines, and decode each line only once.
if not line_metadata or line_metadata.startswith("#"):
continue
print("{0}) {1} : {4} \n Downloaded from {3} \n Stored as {2}".format(i, *line_metadata.split("\t")))
|
import random
import sys
import numpy as np
import math
import datetime  # used directly below (datetime.datetime / datetime.timedelta); imported explicitly rather than relying on the NormalizationUtils star import
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.dates as mdate
from NormalizationUtils import *
def generate_workload_by_point_dataset(input_path, output_path, lon_width, lat_width, time_width, sample_rate = 0.2):
"""
generate one specific query workload
:param input_path: point dataset path
:param output_path: generated workload output;
query_string: format -> "-73.920000, -73.910000, 40.762000,40.772000, timestamp1, timestamp2"
-73.953418,-73.94341800000001,40.71959,40.72959,1262275200.0,1262278800.0
:param sample_rate:
:param lon_width:
:param lat_width:
:param time_width: min
:return:
"""
file_fd = open(input_path, 'r')
line = file_fd.readline()
count = 1
batch_lines = []
batch_size = 8192
while line:
# if count > 2957579:
# # one week
# break
print("current count in the workload: " + str(count))
items = line.strip().split(',')
pickup_time_string = items[2].strip()
pickup_longitude = float(items[5].strip())
pickup_latitude = float(items[6].strip())
pickup_timestamp = normalize_date(pickup_time_string)
pickup_date = datetime.datetime.fromtimestamp(pickup_timestamp)
random_value = random.random()
output_line = ""
if (random_value > (1 - sample_rate)):
longitude_lower = pickup_longitude - lon_width / 2.0
longitude_upper = pickup_longitude + lon_width / 2.0
latitude_lower = pickup_latitude - lat_width / 2.0
latitude_upper = pickup_latitude + lat_width / 2.0
date_lower = pickup_date
date_upper = pickup_date + datetime.timedelta(minutes=time_width)
output_line = "%s,%s,%s,%s,%s,%s\n" % (longitude_lower, longitude_upper, latitude_lower, latitude_upper, date_lower.timestamp(), date_upper.timestamp())
print(output_line)
count = count + 1
batch_lines.append(output_line)
if count % batch_size == 0:
with open(output_path, mode='a') as output_fd:
output_fd.writelines(batch_lines)
batch_lines.clear()
line = file_fd.readline()
with open(output_path, mode='a') as output_fd:
output_fd.writelines(batch_lines)
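# Hedged usage sketch (not part of the original script): generating one workload from a
# point dataset. The paths are placeholders; the widths mirror the commented-out calls in
# generate_time_precedence_query_workload() below (0.01 deg x 0.01 deg, 60-minute windows,
# 20% sample rate).
def _demo_generate_workload():
    generate_workload_by_point_dataset(
        input_path="data/taxi_points.csv",                              # placeholder input
        output_path="workload/time_precedence_workload_pattern_1.csv",  # placeholder output
        lon_width=0.01,
        lat_width=0.01,
        time_width=60,
        sample_rate=0.2,
    )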
def generate_query_frequency_per_region(input_path, time_interval, spatial_interval, longitude_min, longitude_max, latitude_min, latitude_max, timestamp_min, timestamp_max):
"""
generate frequency result for one specific query workload
:param input_path: workload file path (only contains one kind of query workload)
:param time_interval: second
:param spatial_interval: float
:param longitude_min:
:param longitude_max:
:param latitude_min:
:param latitude_max:
:param timestamp_min:
:param timestamp_max:
:return:
"""
longitude_len = int((longitude_max - longitude_min) / spatial_interval + 1)
latitude_len = int((latitude_max - latitude_min) / spatial_interval + 1)
time_len = int((timestamp_max - timestamp_min) / time_interval + 1)
frequency_result = np.zeros((time_len, longitude_len, latitude_len))
file_fd = open(input_path, 'r')
line = file_fd.readline()
count = 1
while line:
print("current count when generating frequency: " + str(count))
items = line.strip().split(',')
# longitude_lower = float(items[0])
# longitude_upper = float(items[1])
# latitude_lower = float(items[2])
# latitude_upper = float(items[3])
# date_lower = float(items[4])
# date_upper = float(items[5])
# new record format: 2010-01-01 00:18:01,2009-12-31 00:18:01,2009-12-31 01:18:01,-74.074684,-73.574684,40.477577,40.977577
longitude_lower = float(items[3])
longitude_upper = float(items[4])
latitude_lower = float(items[5])
latitude_upper = float(items[6])
date_lower = float(normalize_to_utc_date(items[1]))
date_upper = float(normalize_to_utc_date(items[2]))
count += 1
if (longitude_lower >= longitude_min
and longitude_upper <= longitude_max
and latitude_lower >= latitude_min
and latitude_upper <= latitude_max
and date_lower >= timestamp_min
and date_upper <= timestamp_max):
temporal_start_index = int((date_lower - timestamp_min) / time_interval)
temporal_stop_index = int((date_upper - timestamp_min) / time_interval)
longitude_start_index = int((longitude_lower - longitude_min) / spatial_interval)
longitude_stop_index = int((longitude_upper - longitude_min) / spatial_interval)
latitude_start_index = int((latitude_lower - latitude_min) / spatial_interval)
latitude_stop_index = int((latitude_upper - latitude_min) / spatial_interval)
for i in range(int(temporal_stop_index - temporal_start_index + 1)):
for j in range(int(longitude_stop_index - longitude_start_index + 1)):
for k in range(int(latitude_stop_index - latitude_start_index + 1)):
frequency_result[temporal_start_index + i, longitude_start_index + j, latitude_start_index + k] += 1
line = file_fd.readline()
return frequency_result
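# Hedged worked example (not part of the original script): the index arithmetic used inside
# generate_query_frequency_per_region() to map one query rectangle onto cells of the
# frequency tensor; all numbers are made up for illustration.
def _demo_query_to_cell_indices():
    spatial_interval, time_interval = 0.02, 60 * 60
    longitude_min, timestamp_min = -74.05, 0.0
    # A query covering longitude [-74.05, -74.01] during the first 90 minutes:
    lon_lo, lon_hi = -74.05, -74.01
    t_lo, t_hi = 0.0, 90 * 60.0
    lon_start = int((lon_lo - longitude_min) / spatial_interval)  # 0
    lon_stop = int((lon_hi - longitude_min) / spatial_interval)   # 2
    t_start = int((t_lo - timestamp_min) / time_interval)         # 0
    t_stop = int((t_hi - timestamp_min) / time_interval)          # 1
    # generate_query_frequency_per_region() increments every cell in this
    # (time x longitude x latitude) block, for each latitude index in range.
    return (t_start, t_stop), (lon_start, lon_stop)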
def generate_time_precedence_query_workload(input_path, config_map):
"""
at this stage, two different regions has different pattern
:param input_path:
:param config_map: ['time_precedence_workload_pattern_1.csv'] = [0.01, 0.01, 60]
:return:
"""
#generate_workload_by_point_dataset(input_path, "time_precedence_workload_pattern_1.csv", 0.01, 0.01, 60)
#generate_workload_by_point_dataset(input_path, "time_precedence_workload_pattern_2.csv", 0.1, 0.1, 60)
for key in config_map.keys():
config_param_list = config_map.get(key)
generate_workload_by_point_dataset(input_path, key, config_param_list[0], config_param_list[1], config_param_list[2], config_param_list[3])
print("finish time-precedence query workload")
def generate_frequency_result_for_time_precedence_query_workload(config_map, time_interval, spatial_interval):
"""
:param config_map:
:param time_interval:
:param spatial_interval:
:return:
"""
frequency_result = {}
for key in config_map.keys():
region_param_list = config_map.get(key)
lon_min = region_param_list[4]
lon_max = region_param_list[5]
lat_min = region_param_list[6]
lat_max = region_param_list[7]
time_min = normalize_to_utc_date(region_param_list[8])
time_max = normalize_to_utc_date(region_param_list[9])
frequency_result[key] = generate_query_frequency_per_region(key, time_interval, spatial_interval, lon_min, lon_max, lat_min, lat_max, time_min, time_max)
print("finish frequency result for query")
return frequency_result
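# Hedged sketch (not part of the original script): the 10-element config_map entry format
# consumed by the two functions above. Indices 0-3 (lon_width, lat_width, time_width in
# minutes, sample_rate) drive workload generation; indices 4-9 (spatial bounds and time
# range) drive frequency counting. The workload file name is a placeholder.
_EXAMPLE_CONFIG_MAP = {
    "workload/example_workload.csv": [
        0.05, 0.05, 60, 0.2,           # lon_width, lat_width, time_width (minutes), sample_rate
        -74.05, -73.75, 40.60, 40.90,  # longitude_min/max, latitude_min/max
        "2010-01-01 00:00:00",         # timestamp_min (parsed by normalize_to_utc_date)
        "2010-01-31 23:59:59",         # timestamp_max
    ]
}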
from HBaseDriver import *
import happybase  # Connection() is used directly below; imported explicitly rather than relying on the star import
def put_to_hbase(table, frequency_result_map, spatial_interval, longitude_min, latitude_min, time_min):
"""
hbase table schema
rowkey = longitude_offset,latitude_offset,day_offset
column family = workload type
qualifier = hour offset
value = frequency value
time unit is hour in our prediction model
:param frequency: map: key -> workload name, value -> frequency values (3d array)
:param time_interval: unit is hour
:param spatial_interval:
:param longitude_min:
:param longitude_max:
:param latitude_min:
:param latitude_max:
:param timestamp_min:
:param timestamp_max:
:return:
"""
hbase_put_datas = dict()
count = 0
batch_size = 1024
connection = happybase.Connection('127.0.0.1')
for key in frequency_result_map.keys():
frequency_result = frequency_result_map.get(key)
normalized_longitude_width = get_normalized_longitude_width(spatial_interval)
normalized_latitude_width = get_normalized_latitude_width(spatial_interval)
normalized_longitude_min = normalize_longitude(longitude_min)
normalized_latitude_min = normalize_latitude(latitude_min)
normalized_timestamp_min = normalize_to_utc_date(time_min)
longitude_offset_min = math.floor(normalized_longitude_min / normalized_longitude_width)
latitude_offset_min = math.floor(normalized_latitude_min / normalized_latitude_width)
timestamp_hour_offset_min = get_hour_offset(normalized_timestamp_min)
for time_index, time_item in enumerate(frequency_result):
for lon_index, lon_item in enumerate(time_item):
for lat_index, frequency in enumerate(lon_item):
longitude_offset = longitude_offset_min + lon_index
latitude_offset = latitude_offset_min + lat_index
timestamp_hour_offset = timestamp_hour_offset_min + time_index
timestamp_day_offset = math.floor(timestamp_hour_offset / 24)
rowkey = str(longitude_offset) + "," + str(latitude_offset) + "," + str(timestamp_day_offset)
cf_name = key.split(".")[0].split("/")[1]
cf_qualifier = cf_name + ":" + str(timestamp_hour_offset - timestamp_day_offset * 24)
#print(timestamp_hour_offset - timestamp_day_offset * 24)
if rowkey not in hbase_put_datas:
hbase_put_datas[rowkey] = {cf_qualifier: str(frequency)}
else:
hbase_put_datas[rowkey].update({cf_qualifier: str(frequency)})
count = count + 1
if (count % batch_size == 0):
put_batch(hbase_put_datas, connection, table)
hbase_put_datas.clear()
print(count)
#create_table(connection, "prediction_frequency_test", "workload_1_next_passenger_sample")
put_batch(hbase_put_datas, connection, table)
print()
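# Hedged sketch (not part of the original script): the row key / qualifier layout written by
# put_to_hbase(), reproduced with plain arithmetic; the offsets below are made up for illustration.
def _demo_hbase_cell_layout():
    longitude_offset, latitude_offset = 120, 45
    timestamp_hour_offset = 50                                     # hours since the workload origin
    timestamp_day_offset = math.floor(timestamp_hour_offset / 24)  # -> 2
    rowkey = "{},{},{}".format(longitude_offset, latitude_offset, timestamp_day_offset)
    # The column family is derived from the workload file name; the qualifier is the hour of day.
    qualifier = "workload_2_heatmap_airport:{}".format(timestamp_hour_offset - timestamp_day_offset * 24)
    return rowkey, qualifier  # ("120,45,2", "workload_2_heatmap_airport:2")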
def new_test():
time_interval = 60 * 60 # 1hour
spatial_interval = 0.02
time_precedence_query_config_map = {}
time_precedence_query_config_map['workload/workload_2_heatmap_airport.csv'] = [0.05, 0.05, 60, 0.2,
-74.05, -73.75, 40.60, 40.90,
"2010-01-01 00:00:00",
"2010-01-31 00:00:00"]
result = generate_frequency_result_for_time_precedence_query_workload(time_precedence_query_config_map,
time_interval, spatial_interval)
for key in time_precedence_query_config_map.keys():
frequency_result = result.get(key)
print(key)
print(frequency_result.shape)
interval_len = len(frequency_result)
x_values = np.arange(0, interval_len, 1)
s, w, h = frequency_result.shape
for m in range(w - 1):
for n in range(h - 1):
draw_plot(x_values, frequency_result[:, m:m + 1, n:n + 1].reshape(-1))
def test_put_for_real(table):
time_interval = 60 * 60 # 1hour
spatial_interval = 0.02
time_precedence_query_config_map = {}
time_precedence_query_config_map['workload/workload_1_next_passenger.csv'] = [0.05, 0.05, 60, 0.2,
-74.05, -73.75, 40.60, 40.90,
"2010-01-01 00:00:00",
"2010-01-31 23:59:59"]
time_precedence_query_config_map['workload/workload_2_heatmap_multiple_5x.csv'] = [0.05, 0.05, 60, 0.2,
-74.05, -73.75, 40.60,
40.90,
"2010-01-01 00:00:00",
"2010-01-31 23:59:59"]
result = generate_frequency_result_for_time_precedence_query_workload(time_precedence_query_config_map,
time_interval, spatial_interval)
print()
put_to_hbase(table, result, spatial_interval, -74.05, 40.60, "2010-01-01 00:00:00")
def test_put_for_predicted(table):
time_interval = 60 * 60 # 1hour
spatial_interval = 0.02
time_precedence_query_config_map = {}
time_precedence_query_config_map['workload/workload_2_heatmap_multiple.csv'] = [0.05, 0.05, 60, 0.2,
-74.05, -73.75, 40.60,
40.90,
"2010-01-22 00:00:00",
"2010-01-29 23:59:59"]
result = generate_frequency_result_for_time_precedence_query_workload(time_precedence_query_config_map,
time_interval, spatial_interval)
print()
put_to_hbase(table, result, spatial_interval, -74.05, 40.60, "2010-01-22 00:00:00")
if __name__ == "__main__":
table = "frequency_real_test_month1_workload_multiple_ratio-12-5"
#create_table_with_2cf(connection, table, "workload_1_next_passenger", "workload_2_heatmap_multiple")
test_put_for_real(table)
|