Dataset schema (column | dtype | observed range):

| column | dtype | range / length |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: b7b04a9512b8ef2440e846a6b55a9e187eb94b01 | size: 653 | ext: py | lang: Python
max_stars: agent/deployment_stages/linux_utils.py | Suremaker/consul-deployment-agent | 466c36d3fcb9f8bfa144299dde7cb94f4341907b | ["Apache-2.0"] | count: 6 | 2016-10-10T09:26:07.000Z to 2018-09-20T08:59:42.000Z
max_issues: agent/deployment_stages/linux_utils.py | Suremaker/consul-deployment-agent | 466c36d3fcb9f8bfa144299dde7cb94f4341907b | ["Apache-2.0"] | count: 11 | 2016-10-10T12:11:07.000Z to 2018-05-09T22:11:02.000Z
max_forks: agent/deployment_stages/linux_utils.py | Suremaker/consul-deployment-agent | 466c36d3fcb9f8bfa144299dde7cb94f4341907b | ["Apache-2.0"] | count: 16 | 2016-09-28T16:00:58.000Z to 2019-02-25T16:52:12.000Z
content:
# Copyright (c) Trainline Limited, 2016-2017. All rights reserved. See LICENSE.txt in the project root for license information.
# grp and pwd are POSIX-only modules; guarding the import keeps the
# "is None" checks below meaningful on platforms where they are unavailable.
try:
    from grp import getgrnam
    from pwd import getpwnam
except ImportError:
    getgrnam = None
    getpwnam = None
def get_gid(name):
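    """Return the numeric group id for the given group name, or None if the lookup fails."""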
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result.gr_gid
return None
def get_uid(name):
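    """Return the numeric user id for the given user name, or None if the lookup fails."""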
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result.pw_uid
return None
avg_line_length: 24.185185 | max_line_length: 127 | alphanum_fraction: 0.653905
hexsha: b6e4ad5e918b30d470d6ed99b1642e96932b94e3 | size: 3,323 | ext: py | lang: Python
max_stars: 3rdparty/webkit/Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_internal_header.py | mchiasson/PhaserNative | f867454602c395484bf730a7c43b9c586c102ac2 | ["MIT"] | count: null | null | null
max_issues: 3rdparty/webkit/Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_internal_header.py | mchiasson/PhaserNative | f867454602c395484bf730a7c43b9c586c102ac2 | ["MIT"] | count: 9 | 2020-04-18T18:47:18.000Z to 2020-04-18T18:52:41.000Z
max_forks: Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_internal_header.py | ijsf/DeniseEmbeddableWebKit | 57dfc6783d60f8f59b7129874e60f84d8c8556c9 | ["BSD-3-Clause"] | count: 1 | 2019-01-25T13:55:25.000Z to 2019-01-25T13:55:25.000Z
content:
#!/usr/bin/env python
#
# Copyright (c) 2014, 2016 Apple Inc. All rights reserved.
# Copyright (c) 2014 University of Washington. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import logging
import string
from string import Template
from generator import Generator, ucfirst
from objc_generator import ObjCGenerator
from objc_generator_templates import ObjCGeneratorTemplates as ObjCTemplates
log = logging.getLogger('global')
class ObjCInternalHeaderGenerator(ObjCGenerator):
def __init__(self, *args, **kwargs):
ObjCGenerator.__init__(self, *args, **kwargs)
def output_filename(self):
return '%sInternal.h' % self.protocol_name()
def generate_output(self):
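        # Assemble the generated header: license block, prelude, private
        # event-dispatcher interfaces for each event-emitting domain, postlude.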
headers = set([
'"%s.h"' % self.protocol_name(),
'"%sJSONObjectPrivate.h"' % self.protocol_name(),
'<JavaScriptCore/AugmentableInspectorController.h>',
'<wtf/JSONValues.h>',
])
header_args = {
'includes': '\n'.join(['#import ' + header for header in sorted(headers)]),
}
event_domains = filter(self.should_generate_events_for_domain, self.domains_to_generate())
sections = []
sections.append(self.generate_license())
sections.append(Template(ObjCTemplates.GenericHeaderPrelude).substitute(None, **header_args))
sections.append('\n\n'.join(filter(None, map(self._generate_event_dispatcher_private_interfaces, event_domains))))
sections.append(Template(ObjCTemplates.GenericHeaderPostlude).substitute(None, **header_args))
return '\n\n'.join(sections)
def _generate_event_dispatcher_private_interfaces(self, domain):
lines = []
if len(self.events_for_domain(domain)):
objc_name = '%s%sDomainEventDispatcher' % (self.objc_prefix(), domain.domain_name)
lines.append('@interface %s (Private)' % objc_name)
lines.append('- (instancetype)initWithController:(Inspector::AugmentableInspectorController*)controller;')
lines.append('@end')
return '\n'.join(lines)
avg_line_length: 44.306667 | max_line_length: 122 | alphanum_fraction: 0.723443
hexsha: 0e68c6a2505b4e2a02ff00f86229c9fe27b62016 | size: 12,793 | ext: py | lang: Python
max_stars: forest/main.py | kaedonkers/forest | d087636a7eaf09098f245aa2b83e25e98d29f6b7 | ["BSD-3-Clause"] | count: null | null | null
max_issues: forest/main.py | kaedonkers/forest | d087636a7eaf09098f245aa2b83e25e98d29f6b7 | ["BSD-3-Clause"] | count: null | null | null
max_forks: forest/main.py | kaedonkers/forest | d087636a7eaf09098f245aa2b83e25e98d29f6b7 | ["BSD-3-Clause"] | count: null | null | null
content:
import bokeh.plotting
import bokeh.models
import bokeh.events
import bokeh.colors
import os
from forest import _profile as profile
from forest import (
drivers,
dimension,
screen,
tools,
series,
data,
geo,
colors,
layers,
db,
keys,
plugin,
presets,
redux,
rx,
navigate,
parse_args)
import forest.app
import forest.actions
import forest.components
import forest.components.borders
import forest.components.title
from forest.components import tiles, html_ready
import forest.config as cfg
import forest.middlewares as mws
import forest.gallery
from forest.db.util import autolabel
def map_figure(x_range, y_range):
"""Adjust Figure settings to present web map tiles"""
figure = bokeh.plotting.figure(
x_range=x_range,
y_range=y_range,
x_axis_type="mercator",
y_axis_type="mercator",
active_scroll="wheel_zoom")
figure.axis.visible = False
figure.toolbar.logo = None
figure.toolbar_location = None
figure.min_border = 0
return figure
def configure(argv=None):
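    """Parse command-line arguments and build the application Config,
    either from explicit FILE arguments or from --config-file."""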
args = parse_args.parse_args(argv)
data.AUTO_SHUTDOWN = args.auto_shutdown
if len(args.files) > 0:
if args.config_file is not None:
raise Exception('--config-file and [FILE [FILE ...]] not compatible')
config = cfg.from_files(args.files, args.file_type)
else:
config = cfg.Config.load(
args.config_file,
variables=cfg.combine_variables(
os.environ,
args.variables))
return config
def main(argv=None):
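    """Assemble the FOREST bokeh application: build figures, layers and
    navigation/tool components, connect them to the redux Store, and add
    the resulting roots to the current bokeh document."""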
config = configure(argv=argv)
# Feature toggles
if "feature" in config.plugins:
features = plugin.call(config.plugins["feature"].entry_point)
else:
features = config.features
data.FEATURE_FLAGS = features
# Full screen map
viewport = config.default_viewport
x_range, y_range = geo.web_mercator(
viewport.lon_range,
viewport.lat_range)
figure = map_figure(x_range, y_range)
figures = [figure]
for _ in range(2):
f = map_figure(figure.x_range, figure.y_range)
figures.append(f)
figure_row = layers.FigureRow(figures)
color_mapper = bokeh.models.LinearColorMapper(
low=0,
high=1,
palette=bokeh.palettes.Plasma[256])
# Convert config to datasets
datasets = {}
datasets_by_pattern = {}
label_to_pattern = {}
for group, dataset in zip(config.file_groups, config.datasets):
datasets[group.label] = dataset
datasets_by_pattern[group.pattern] = dataset
label_to_pattern[group.label] = group.pattern
layers_ui = layers.LayersUI()
# Add optional sub-navigators
sub_navigators = {
key: dataset.navigator()
for key, dataset in datasets_by_pattern.items()
if hasattr(dataset, "navigator")
}
navigator = navigate.Navigator(sub_navigators)
middlewares = [
keys.navigate,
db.InverseCoordinate("pressure"),
db.next_previous,
db.Controls(navigator), # TODO: Deprecate this middleware
colors.palettes,
colors.middleware(),
presets.Middleware(presets.proxy_storage(config.presets_file)),
presets.middleware,
layers.middleware,
navigator,
mws.echo,
]
store = redux.Store(
forest.reducer,
middlewares=middlewares)
app = forest.app.Application()
app.add_component(forest.components.title.Title())
# Coastlines, borders, lakes and disputed borders
view = forest.components.borders.View()
for figure in figures:
view.add_figure(figure)
view.connect(store)
border_ui = forest.components.borders.UI()
border_ui.connect(store)
# Colorbar user interface
component = forest.components.ColorbarUI()
app.add_component(component)
# Add time user interface
if config.defaults.timeui:
component = forest.components.TimeUI()
component.layout = bokeh.layouts.row(component.layout, name="time")
app.add_component(component)
# Connect MapView orchestration to store
opacity_slider = forest.layers.OpacitySlider()
source_limits = colors.SourceLimits().connect(store)
factory_class = forest.layers.factory(color_mapper,
figures,
source_limits,
opacity_slider)
gallery = forest.gallery.Gallery.map_view(datasets, factory_class)
gallery.connect(store)
# Connect layers controls
layers_ui.add_subscriber(store.dispatch)
layers_ui.connect(store)
# Connect tools controls
display_names = {
"time_series": "Display Time Series",
"profile": "Display Profile"
}
available_features = {k: display_names[k]
for k in display_names.keys() if data.FEATURE_FLAGS[k]}
tools_panel = tools.ToolsPanel(available_features)
tools_panel.connect(store)
# Navbar components
navbar = Navbar(show_diagram_button=len(available_features) > 0)
navbar.connect(store)
# Connect tap listener
tap_listener = screen.TapListener()
tap_listener.connect(store)
# Connect figure controls/views
if config.defaults.figures.ui:
figure_ui = layers.FigureUI(config.defaults.figures.maximum)
figure_ui.connect(store)
figure_row.connect(store)
# Tiling picker
if config.use_web_map_tiles:
tile_picker = forest.components.TilePicker()
for figure in figures:
tile_picker.add_figure(figure)
tile_picker.connect(store)
if not data.FEATURE_FLAGS["multiple_colorbars"]:
# Connect color palette controls
colors.ColorMapperView(color_mapper).connect(store)
color_palette = colors.ColorPalette().connect(store)
# Connect limit controllers to store
user_limits = colors.UserLimits().connect(store)
# Preset
if config.defaults.presetui:
preset_ui = presets.PresetUI().connect(store)
# Connect navigation controls
controls = db.ControlView()
controls.connect(store)
# Add support for a modal dialogue
if data.FEATURE_FLAGS["multiple_colorbars"]:
view = forest.components.modal.Tabbed()
else:
view = forest.components.modal.Default()
modal = forest.components.Modal(view=view)
modal.connect(store)
# Connect components to Store
app.connect(store)
# Set initial state
store.dispatch(forest.actions.set_state(config.state).to_dict())
# Pre-select menu choices (if any)
for pattern, _ in sub_navigators.items():
state = db.initial_state(navigator, pattern=pattern)
store.dispatch(forest.actions.update_state(state).to_dict())
break
# Set default time series visibility
store.dispatch(tools.on_toggle_tool("time_series", False))
# Set default profile visibility
store.dispatch(tools.on_toggle_tool("profile", False))
# Set top-level navigation
store.dispatch(db.set_value("patterns", config.patterns))
# Pre-select first map_view layer
for label, dataset in datasets.items():
pattern = label_to_pattern[label]
for variable in navigator.variables(pattern):
spec = {"label": label,
"dataset": label,
"variable": variable,
"active": [0]}
store.dispatch(forest.layers.save_layer(0, spec))
break
break
# Set variable dimensions (needed by modal dialogue)
for label, dataset in datasets.items():
pattern = label_to_pattern[label]
values = navigator.variables(pattern)
store.dispatch(dimension.set_variables(label, values))
# Organise controls/settings
layouts = {}
layouts["controls"] = []
if config.defaults.figures.ui:
layouts["controls"] += [
bokeh.models.Div(text="Layout:"),
figure_ui.layout]
layouts["controls"] += [
bokeh.models.Div(text="Navigate:"),
controls.layout,
bokeh.models.Div(text="Compare:"),
layers_ui.layout
]
layouts["settings"] = [
bokeh.models.Div(text="Borders, coastlines and lakes:"),
border_ui.layout,
opacity_slider.layout,
]
if not data.FEATURE_FLAGS["multiple_colorbars"]:
layouts["settings"].append(color_palette.layout)
layouts["settings"].append(user_limits.layout)
if config.defaults.presetui:
layouts["settings"].append(preset_ui.layout)
if config.use_web_map_tiles:
layouts["settings"].append(bokeh.models.Div(text="Tiles:"))
layouts["settings"].append(tile_picker.layout)
tabs = bokeh.models.Tabs(tabs=[
bokeh.models.Panel(
child=bokeh.layouts.column(*layouts["controls"]),
title="Control"
),
bokeh.models.Panel(
child=bokeh.layouts.column(*layouts["settings"]),
title="Settings")
])
tool_figures = {}
if data.FEATURE_FLAGS["time_series"]:
# Series sub-figure widget
series_figure = bokeh.plotting.figure(
plot_width=400,
plot_height=200,
x_axis_type="datetime",
toolbar_location=None,
border_fill_alpha=0)
series_figure.toolbar.logo = None
gallery = forest.gallery.Gallery.series_view(datasets,
series_figure)
gallery.connect(store)
tool_figures["series_figure"] = series_figure
if data.FEATURE_FLAGS["profile"]:
# Profile sub-figure widget
profile_figure = bokeh.plotting.figure(
plot_width=300,
plot_height=450,
toolbar_location=None,
border_fill_alpha=0)
profile_figure.toolbar.logo = None
profile_figure.y_range.flipped = True
gallery = forest.gallery.Gallery.profile_view(datasets,
profile_figure)
gallery.connect(store)
tool_figures["profile_figure"] = profile_figure
tool_layout = tools.ToolLayout(**tool_figures)
tool_layout.connect(store)
for f in figures:
f.on_event(bokeh.events.Tap, tap_listener.update_xy)
marker = screen.MarkDraw(f).connect(store)
control_root = bokeh.layouts.column(
tabs,
name="controls")
# Add key press support
key_press = keys.KeyPress()
key_press.add_subscriber(store.dispatch)
# Add HTML ready support
obj = html_ready.HTMLReady(key_press.hidden_button)
obj.connect(store)
document = bokeh.plotting.curdoc()
document.title = "FOREST"
document.add_root(control_root)
document.add_root(
bokeh.layouts.column(
tools_panel.layout,
tool_layout.layout,
width=400,
name="series"))
for root in navbar.roots:
document.add_root(root)
for root in app.roots:
document.add_root(root)
document.add_root(figure_row.layout)
document.add_root(key_press.hidden_button)
document.add_root(modal.layout)
class Navbar:
"""Collection of navbar components"""
def __init__(self, show_diagram_button=True):
self.headline = forest.components.Headline()
self.headline.layout.name = "headline"
self.buttons = {}
# Add button to control left drawer
key = "sidenav_button"
self.buttons[key] = bokeh.models.Button(
label="Settings",
name=key)
custom_js = bokeh.models.CustomJS(code="""
openId("sidenav");
""")
self.buttons[key].js_on_click(custom_js)
# Add button to control right drawer
key = "diagrams_button"
self.buttons[key] = bokeh.models.Button(
label="Diagrams",
css_classes=["float-right"],
name=key)
custom_js = bokeh.models.CustomJS(code="""
openId("diagrams");
""")
self.buttons[key].js_on_click(custom_js)
roots = [
self.buttons["sidenav_button"],
self.headline.layout,
]
if show_diagram_button:
roots.append(self.buttons["diagrams_button"])
self.roots = roots
def connect(self, store):
self.headline.connect(store)
def any_none(obj, attrs):
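    """Return True if any of the named attributes on obj is None."""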
return any([getattr(obj, x) is None for x in attrs])
if __name__.startswith("bokeh"):
main()
avg_line_length: 30.678657 | max_line_length: 81 | alphanum_fraction: 0.628234
hexsha: ca059fb7bdd7ac838d72f1ce9aa23ec2448608ff | size: 1,215 | ext: py | lang: Python
max_stars: python_resumable/models/chunk.py | akaushik759/python-resumable | 0c3a6d908b2c850ea45bfe2a5434ef3cacaa9c51 | ["MIT"] | count: 6 | 2017-10-19T20:27:46.000Z to 2020-02-24T09:26:25.000Z
max_issues: python_resumable/models/chunk.py | akaushik759/python-resumable | 0c3a6d908b2c850ea45bfe2a5434ef3cacaa9c51 | ["MIT"] | count: 1 | 2020-09-10T13:30:33.000Z to 2020-09-10T13:30:33.000Z
max_forks: python_resumable/models/chunk.py | akaushik759/python-resumable | 0c3a6d908b2c850ea45bfe2a5434ef3cacaa9c51 | ["MIT"] | count: 2 | 2020-09-24T03:41:46.000Z to 2020-10-15T17:52:09.000Z
content:
from os import path
from base64 import decodebytes
class Chunk(object):
'''
Contains all the chunk info.
Attributes:
chunk_number
chunk_data
chunk_name
chunk_path
chunk_name is (repo_dict['file_id'] + '.' +
resumable_dict['resumableChunkNumber'])
'''
def __init__(self, chunk_dict):
self.chunk_number = chunk_dict['chunk_number']
self.chunk_data = chunk_dict['chunk_data']
self.chunk_name = chunk_dict['chunk_name']
self.chunk_path = chunk_dict['chunk_path']
def save(self):
'''
This writes chunk data to disk, while decoding it.
'''
self.chunk_data = self.chunk_data.encode()
        # Open in binary write mode (the default read-only text mode cannot be
        # written to) and close the handle when done.
        with open(path.join(self.chunk_path, self.chunk_name), 'wb') as chunk_file:
            chunk_file.write(decodebytes(self.chunk_data))
def exists(self):
'''
This checks if chunk already exists or not.
'''
return path.exists(path.join(self.chunk_path, self.chunk_name))
class FlaskChunk(Chunk):
def save(self):
'''
This writes flask file object to disk.
'''
self.chunk_data.save(path.join(self.chunk_path, self.chunk_name))
avg_line_length: 26.413043 | max_line_length: 73 | alphanum_fraction: 0.61893
hexsha: 36d218598720ae23c4f20bb0da8e17393d85f5bf | size: 410 | ext: py | lang: Python
max_stars: psydac/__init__.py | mayuri-dhote/psydac | 01ddbe2d049a599684c45060912d01c2658160a3 | ["MIT"] | count: 20 | 2019-07-30T12:37:57.000Z to 2022-03-09T11:35:04.000Z
max_issues: psydac/__init__.py | mayuri-dhote/psydac | 01ddbe2d049a599684c45060912d01c2658160a3 | ["MIT"] | count: 98 | 2019-04-01T16:32:27.000Z to 2022-03-21T19:30:35.000Z
max_forks: psydac/__init__.py | mayuri-dhote/psydac | 01ddbe2d049a599684c45060912d01c2658160a3 | ["MIT"] | count: 7 | 2019-10-03T03:49:47.000Z to 2022-03-01T09:11:49.000Z
content:
# -*- coding: UTF-8 -*-
__all__ = ['__version__', 'api', 'cad', 'core', 'ddm', 'feec', 'fem',
'linalg', 'mapping', 'utilities']
from psydac.version import __version__
from psydac import api
from psydac import cad
from psydac import core
from psydac import ddm
from psydac import feec
from psydac import fem
from psydac import linalg
from psydac import mapping
from psydac import utilities
avg_line_length: 25.625 | max_line_length: 73 | alphanum_fraction: 0.707317
hexsha: 07fad8996ad382fce895e3c17d8abd9a53f79850 | size: 112,633 | ext: py | lang: Python
max_stars: sdk/python/pulumi_aws_native/cloudfront/outputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | ["Apache-2.0"] | count: 29 | 2021-09-30T19:32:07.000Z to 2022-03-22T21:06:08.000Z
max_issues: sdk/python/pulumi_aws_native/cloudfront/outputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | ["Apache-2.0"] | count: 232 | 2021-09-30T19:26:26.000Z to 2022-03-31T23:22:06.000Z
max_forks: sdk/python/pulumi_aws_native/cloudfront/outputs.py | AaronFriel/pulumi-aws-native | 5621690373ac44accdbd20b11bae3be1baf022d1 | ["Apache-2.0"] | count: 4 | 2021-11-10T19:42:01.000Z to 2022-02-05T10:15:49.000Z
content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'CachePolicyConfig',
'CachePolicyCookiesConfig',
'CachePolicyHeadersConfig',
'CachePolicyParametersInCacheKeyAndForwardedToOrigin',
'CachePolicyQueryStringsConfig',
'CloudFrontOriginAccessIdentityConfig',
'DistributionCacheBehavior',
'DistributionConfig',
'DistributionCookies',
'DistributionCustomErrorResponse',
'DistributionCustomOriginConfig',
'DistributionDefaultCacheBehavior',
'DistributionForwardedValues',
'DistributionFunctionAssociation',
'DistributionGeoRestriction',
'DistributionLambdaFunctionAssociation',
'DistributionLegacyCustomOrigin',
'DistributionLegacyS3Origin',
'DistributionLogging',
'DistributionOrigin',
'DistributionOriginCustomHeader',
'DistributionOriginGroup',
'DistributionOriginGroupFailoverCriteria',
'DistributionOriginGroupMember',
'DistributionOriginGroupMembers',
'DistributionOriginGroups',
'DistributionOriginShield',
'DistributionRestrictions',
'DistributionS3OriginConfig',
'DistributionStatusCodes',
'DistributionTag',
'DistributionViewerCertificate',
'FunctionConfig',
'FunctionMetadata',
'KeyGroupConfig',
'OriginRequestPolicyConfig',
'OriginRequestPolicyCookiesConfig',
'OriginRequestPolicyHeadersConfig',
'OriginRequestPolicyQueryStringsConfig',
'PublicKeyConfig',
'RealtimeLogConfigEndPoint',
'RealtimeLogConfigKinesisStreamConfig',
'ResponseHeadersPolicyAccessControlAllowHeaders',
'ResponseHeadersPolicyAccessControlAllowMethods',
'ResponseHeadersPolicyAccessControlAllowOrigins',
'ResponseHeadersPolicyAccessControlExposeHeaders',
'ResponseHeadersPolicyConfig',
'ResponseHeadersPolicyContentSecurityPolicy',
'ResponseHeadersPolicyContentTypeOptions',
'ResponseHeadersPolicyCorsConfig',
'ResponseHeadersPolicyCustomHeader',
'ResponseHeadersPolicyCustomHeadersConfig',
'ResponseHeadersPolicyFrameOptions',
'ResponseHeadersPolicyReferrerPolicy',
'ResponseHeadersPolicySecurityHeadersConfig',
'ResponseHeadersPolicyStrictTransportSecurity',
'ResponseHeadersPolicyXSSProtection',
'StreamingDistributionConfig',
'StreamingDistributionLogging',
'StreamingDistributionS3Origin',
'StreamingDistributionTag',
'StreamingDistributionTrustedSigners',
]
@pulumi.output_type
class CachePolicyConfig(dict):
@staticmethod
def __key_warning(key: str):
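        # Generated compatibility shim: map camelCase payload keys to their
        # snake_case property getters and warn when the camelCase form is used.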
suggest = None
if key == "defaultTTL":
suggest = "default_ttl"
elif key == "maxTTL":
suggest = "max_ttl"
elif key == "minTTL":
suggest = "min_ttl"
elif key == "parametersInCacheKeyAndForwardedToOrigin":
suggest = "parameters_in_cache_key_and_forwarded_to_origin"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CachePolicyConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CachePolicyConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CachePolicyConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
default_ttl: float,
max_ttl: float,
min_ttl: float,
name: str,
parameters_in_cache_key_and_forwarded_to_origin: 'outputs.CachePolicyParametersInCacheKeyAndForwardedToOrigin',
comment: Optional[str] = None):
pulumi.set(__self__, "default_ttl", default_ttl)
pulumi.set(__self__, "max_ttl", max_ttl)
pulumi.set(__self__, "min_ttl", min_ttl)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "parameters_in_cache_key_and_forwarded_to_origin", parameters_in_cache_key_and_forwarded_to_origin)
if comment is not None:
pulumi.set(__self__, "comment", comment)
@property
@pulumi.getter(name="defaultTTL")
def default_ttl(self) -> float:
return pulumi.get(self, "default_ttl")
@property
@pulumi.getter(name="maxTTL")
def max_ttl(self) -> float:
return pulumi.get(self, "max_ttl")
@property
@pulumi.getter(name="minTTL")
def min_ttl(self) -> float:
return pulumi.get(self, "min_ttl")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="parametersInCacheKeyAndForwardedToOrigin")
def parameters_in_cache_key_and_forwarded_to_origin(self) -> 'outputs.CachePolicyParametersInCacheKeyAndForwardedToOrigin':
return pulumi.get(self, "parameters_in_cache_key_and_forwarded_to_origin")
@property
@pulumi.getter
def comment(self) -> Optional[str]:
return pulumi.get(self, "comment")
@pulumi.output_type
class CachePolicyCookiesConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cookieBehavior":
suggest = "cookie_behavior"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CachePolicyCookiesConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CachePolicyCookiesConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CachePolicyCookiesConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
cookie_behavior: str,
cookies: Optional[Sequence[str]] = None):
pulumi.set(__self__, "cookie_behavior", cookie_behavior)
if cookies is not None:
pulumi.set(__self__, "cookies", cookies)
@property
@pulumi.getter(name="cookieBehavior")
def cookie_behavior(self) -> str:
return pulumi.get(self, "cookie_behavior")
@property
@pulumi.getter
def cookies(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "cookies")
@pulumi.output_type
class CachePolicyHeadersConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "headerBehavior":
suggest = "header_behavior"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CachePolicyHeadersConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CachePolicyHeadersConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CachePolicyHeadersConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
header_behavior: str,
headers: Optional[Sequence[str]] = None):
pulumi.set(__self__, "header_behavior", header_behavior)
if headers is not None:
pulumi.set(__self__, "headers", headers)
@property
@pulumi.getter(name="headerBehavior")
def header_behavior(self) -> str:
return pulumi.get(self, "header_behavior")
@property
@pulumi.getter
def headers(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "headers")
@pulumi.output_type
class CachePolicyParametersInCacheKeyAndForwardedToOrigin(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cookiesConfig":
suggest = "cookies_config"
elif key == "enableAcceptEncodingGzip":
suggest = "enable_accept_encoding_gzip"
elif key == "headersConfig":
suggest = "headers_config"
elif key == "queryStringsConfig":
suggest = "query_strings_config"
elif key == "enableAcceptEncodingBrotli":
suggest = "enable_accept_encoding_brotli"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CachePolicyParametersInCacheKeyAndForwardedToOrigin. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CachePolicyParametersInCacheKeyAndForwardedToOrigin.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CachePolicyParametersInCacheKeyAndForwardedToOrigin.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
cookies_config: 'outputs.CachePolicyCookiesConfig',
enable_accept_encoding_gzip: bool,
headers_config: 'outputs.CachePolicyHeadersConfig',
query_strings_config: 'outputs.CachePolicyQueryStringsConfig',
enable_accept_encoding_brotli: Optional[bool] = None):
pulumi.set(__self__, "cookies_config", cookies_config)
pulumi.set(__self__, "enable_accept_encoding_gzip", enable_accept_encoding_gzip)
pulumi.set(__self__, "headers_config", headers_config)
pulumi.set(__self__, "query_strings_config", query_strings_config)
if enable_accept_encoding_brotli is not None:
pulumi.set(__self__, "enable_accept_encoding_brotli", enable_accept_encoding_brotli)
@property
@pulumi.getter(name="cookiesConfig")
def cookies_config(self) -> 'outputs.CachePolicyCookiesConfig':
return pulumi.get(self, "cookies_config")
@property
@pulumi.getter(name="enableAcceptEncodingGzip")
def enable_accept_encoding_gzip(self) -> bool:
return pulumi.get(self, "enable_accept_encoding_gzip")
@property
@pulumi.getter(name="headersConfig")
def headers_config(self) -> 'outputs.CachePolicyHeadersConfig':
return pulumi.get(self, "headers_config")
@property
@pulumi.getter(name="queryStringsConfig")
def query_strings_config(self) -> 'outputs.CachePolicyQueryStringsConfig':
return pulumi.get(self, "query_strings_config")
@property
@pulumi.getter(name="enableAcceptEncodingBrotli")
def enable_accept_encoding_brotli(self) -> Optional[bool]:
return pulumi.get(self, "enable_accept_encoding_brotli")
@pulumi.output_type
class CachePolicyQueryStringsConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "queryStringBehavior":
suggest = "query_string_behavior"
elif key == "queryStrings":
suggest = "query_strings"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CachePolicyQueryStringsConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CachePolicyQueryStringsConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CachePolicyQueryStringsConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
query_string_behavior: str,
query_strings: Optional[Sequence[str]] = None):
pulumi.set(__self__, "query_string_behavior", query_string_behavior)
if query_strings is not None:
pulumi.set(__self__, "query_strings", query_strings)
@property
@pulumi.getter(name="queryStringBehavior")
def query_string_behavior(self) -> str:
return pulumi.get(self, "query_string_behavior")
@property
@pulumi.getter(name="queryStrings")
def query_strings(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "query_strings")
@pulumi.output_type
class CloudFrontOriginAccessIdentityConfig(dict):
def __init__(__self__, *,
comment: str):
pulumi.set(__self__, "comment", comment)
@property
@pulumi.getter
def comment(self) -> str:
return pulumi.get(self, "comment")
@pulumi.output_type
class DistributionCacheBehavior(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "pathPattern":
suggest = "path_pattern"
elif key == "targetOriginId":
suggest = "target_origin_id"
elif key == "viewerProtocolPolicy":
suggest = "viewer_protocol_policy"
elif key == "allowedMethods":
suggest = "allowed_methods"
elif key == "cachePolicyId":
suggest = "cache_policy_id"
elif key == "cachedMethods":
suggest = "cached_methods"
elif key == "defaultTTL":
suggest = "default_ttl"
elif key == "fieldLevelEncryptionId":
suggest = "field_level_encryption_id"
elif key == "forwardedValues":
suggest = "forwarded_values"
elif key == "functionAssociations":
suggest = "function_associations"
elif key == "lambdaFunctionAssociations":
suggest = "lambda_function_associations"
elif key == "maxTTL":
suggest = "max_ttl"
elif key == "minTTL":
suggest = "min_ttl"
elif key == "originRequestPolicyId":
suggest = "origin_request_policy_id"
elif key == "realtimeLogConfigArn":
suggest = "realtime_log_config_arn"
elif key == "responseHeadersPolicyId":
suggest = "response_headers_policy_id"
elif key == "smoothStreaming":
suggest = "smooth_streaming"
elif key == "trustedKeyGroups":
suggest = "trusted_key_groups"
elif key == "trustedSigners":
suggest = "trusted_signers"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionCacheBehavior. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionCacheBehavior.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionCacheBehavior.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
path_pattern: str,
target_origin_id: str,
viewer_protocol_policy: str,
allowed_methods: Optional[Sequence[str]] = None,
cache_policy_id: Optional[str] = None,
cached_methods: Optional[Sequence[str]] = None,
compress: Optional[bool] = None,
default_ttl: Optional[float] = None,
field_level_encryption_id: Optional[str] = None,
forwarded_values: Optional['outputs.DistributionForwardedValues'] = None,
function_associations: Optional[Sequence['outputs.DistributionFunctionAssociation']] = None,
lambda_function_associations: Optional[Sequence['outputs.DistributionLambdaFunctionAssociation']] = None,
max_ttl: Optional[float] = None,
min_ttl: Optional[float] = None,
origin_request_policy_id: Optional[str] = None,
realtime_log_config_arn: Optional[str] = None,
response_headers_policy_id: Optional[str] = None,
smooth_streaming: Optional[bool] = None,
trusted_key_groups: Optional[Sequence[str]] = None,
trusted_signers: Optional[Sequence[str]] = None):
pulumi.set(__self__, "path_pattern", path_pattern)
pulumi.set(__self__, "target_origin_id", target_origin_id)
pulumi.set(__self__, "viewer_protocol_policy", viewer_protocol_policy)
if allowed_methods is not None:
pulumi.set(__self__, "allowed_methods", allowed_methods)
if cache_policy_id is not None:
pulumi.set(__self__, "cache_policy_id", cache_policy_id)
if cached_methods is not None:
pulumi.set(__self__, "cached_methods", cached_methods)
if compress is not None:
pulumi.set(__self__, "compress", compress)
if default_ttl is not None:
pulumi.set(__self__, "default_ttl", default_ttl)
if field_level_encryption_id is not None:
pulumi.set(__self__, "field_level_encryption_id", field_level_encryption_id)
if forwarded_values is not None:
pulumi.set(__self__, "forwarded_values", forwarded_values)
if function_associations is not None:
pulumi.set(__self__, "function_associations", function_associations)
if lambda_function_associations is not None:
pulumi.set(__self__, "lambda_function_associations", lambda_function_associations)
if max_ttl is not None:
pulumi.set(__self__, "max_ttl", max_ttl)
if min_ttl is not None:
pulumi.set(__self__, "min_ttl", min_ttl)
if origin_request_policy_id is not None:
pulumi.set(__self__, "origin_request_policy_id", origin_request_policy_id)
if realtime_log_config_arn is not None:
pulumi.set(__self__, "realtime_log_config_arn", realtime_log_config_arn)
if response_headers_policy_id is not None:
pulumi.set(__self__, "response_headers_policy_id", response_headers_policy_id)
if smooth_streaming is not None:
pulumi.set(__self__, "smooth_streaming", smooth_streaming)
if trusted_key_groups is not None:
pulumi.set(__self__, "trusted_key_groups", trusted_key_groups)
if trusted_signers is not None:
pulumi.set(__self__, "trusted_signers", trusted_signers)
@property
@pulumi.getter(name="pathPattern")
def path_pattern(self) -> str:
return pulumi.get(self, "path_pattern")
@property
@pulumi.getter(name="targetOriginId")
def target_origin_id(self) -> str:
return pulumi.get(self, "target_origin_id")
@property
@pulumi.getter(name="viewerProtocolPolicy")
def viewer_protocol_policy(self) -> str:
return pulumi.get(self, "viewer_protocol_policy")
@property
@pulumi.getter(name="allowedMethods")
def allowed_methods(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "allowed_methods")
@property
@pulumi.getter(name="cachePolicyId")
def cache_policy_id(self) -> Optional[str]:
return pulumi.get(self, "cache_policy_id")
@property
@pulumi.getter(name="cachedMethods")
def cached_methods(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "cached_methods")
@property
@pulumi.getter
def compress(self) -> Optional[bool]:
return pulumi.get(self, "compress")
@property
@pulumi.getter(name="defaultTTL")
def default_ttl(self) -> Optional[float]:
return pulumi.get(self, "default_ttl")
@property
@pulumi.getter(name="fieldLevelEncryptionId")
def field_level_encryption_id(self) -> Optional[str]:
return pulumi.get(self, "field_level_encryption_id")
@property
@pulumi.getter(name="forwardedValues")
def forwarded_values(self) -> Optional['outputs.DistributionForwardedValues']:
return pulumi.get(self, "forwarded_values")
@property
@pulumi.getter(name="functionAssociations")
def function_associations(self) -> Optional[Sequence['outputs.DistributionFunctionAssociation']]:
return pulumi.get(self, "function_associations")
@property
@pulumi.getter(name="lambdaFunctionAssociations")
def lambda_function_associations(self) -> Optional[Sequence['outputs.DistributionLambdaFunctionAssociation']]:
return pulumi.get(self, "lambda_function_associations")
@property
@pulumi.getter(name="maxTTL")
def max_ttl(self) -> Optional[float]:
return pulumi.get(self, "max_ttl")
@property
@pulumi.getter(name="minTTL")
def min_ttl(self) -> Optional[float]:
return pulumi.get(self, "min_ttl")
@property
@pulumi.getter(name="originRequestPolicyId")
def origin_request_policy_id(self) -> Optional[str]:
return pulumi.get(self, "origin_request_policy_id")
@property
@pulumi.getter(name="realtimeLogConfigArn")
def realtime_log_config_arn(self) -> Optional[str]:
return pulumi.get(self, "realtime_log_config_arn")
@property
@pulumi.getter(name="responseHeadersPolicyId")
def response_headers_policy_id(self) -> Optional[str]:
return pulumi.get(self, "response_headers_policy_id")
@property
@pulumi.getter(name="smoothStreaming")
def smooth_streaming(self) -> Optional[bool]:
return pulumi.get(self, "smooth_streaming")
@property
@pulumi.getter(name="trustedKeyGroups")
def trusted_key_groups(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "trusted_key_groups")
@property
@pulumi.getter(name="trustedSigners")
def trusted_signers(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "trusted_signers")
@pulumi.output_type
class DistributionConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cNAMEs":
suggest = "c_names"
elif key == "cacheBehaviors":
suggest = "cache_behaviors"
elif key == "customErrorResponses":
suggest = "custom_error_responses"
elif key == "customOrigin":
suggest = "custom_origin"
elif key == "defaultCacheBehavior":
suggest = "default_cache_behavior"
elif key == "defaultRootObject":
suggest = "default_root_object"
elif key == "httpVersion":
suggest = "http_version"
elif key == "iPV6Enabled":
suggest = "i_pv6_enabled"
elif key == "originGroups":
suggest = "origin_groups"
elif key == "priceClass":
suggest = "price_class"
elif key == "s3Origin":
suggest = "s3_origin"
elif key == "viewerCertificate":
suggest = "viewer_certificate"
elif key == "webACLId":
suggest = "web_acl_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enabled: bool,
aliases: Optional[Sequence[str]] = None,
c_names: Optional[Sequence[str]] = None,
cache_behaviors: Optional[Sequence['outputs.DistributionCacheBehavior']] = None,
comment: Optional[str] = None,
custom_error_responses: Optional[Sequence['outputs.DistributionCustomErrorResponse']] = None,
custom_origin: Optional['outputs.DistributionLegacyCustomOrigin'] = None,
default_cache_behavior: Optional['outputs.DistributionDefaultCacheBehavior'] = None,
default_root_object: Optional[str] = None,
http_version: Optional[str] = None,
i_pv6_enabled: Optional[bool] = None,
logging: Optional['outputs.DistributionLogging'] = None,
origin_groups: Optional['outputs.DistributionOriginGroups'] = None,
origins: Optional[Sequence['outputs.DistributionOrigin']] = None,
price_class: Optional[str] = None,
restrictions: Optional['outputs.DistributionRestrictions'] = None,
s3_origin: Optional['outputs.DistributionLegacyS3Origin'] = None,
viewer_certificate: Optional['outputs.DistributionViewerCertificate'] = None,
web_acl_id: Optional[str] = None):
pulumi.set(__self__, "enabled", enabled)
if aliases is not None:
pulumi.set(__self__, "aliases", aliases)
if c_names is not None:
pulumi.set(__self__, "c_names", c_names)
if cache_behaviors is not None:
pulumi.set(__self__, "cache_behaviors", cache_behaviors)
if comment is not None:
pulumi.set(__self__, "comment", comment)
if custom_error_responses is not None:
pulumi.set(__self__, "custom_error_responses", custom_error_responses)
if custom_origin is not None:
pulumi.set(__self__, "custom_origin", custom_origin)
if default_cache_behavior is not None:
pulumi.set(__self__, "default_cache_behavior", default_cache_behavior)
if default_root_object is not None:
pulumi.set(__self__, "default_root_object", default_root_object)
if http_version is not None:
pulumi.set(__self__, "http_version", http_version)
if i_pv6_enabled is not None:
pulumi.set(__self__, "i_pv6_enabled", i_pv6_enabled)
if logging is not None:
pulumi.set(__self__, "logging", logging)
if origin_groups is not None:
pulumi.set(__self__, "origin_groups", origin_groups)
if origins is not None:
pulumi.set(__self__, "origins", origins)
if price_class is not None:
pulumi.set(__self__, "price_class", price_class)
if restrictions is not None:
pulumi.set(__self__, "restrictions", restrictions)
if s3_origin is not None:
pulumi.set(__self__, "s3_origin", s3_origin)
if viewer_certificate is not None:
pulumi.set(__self__, "viewer_certificate", viewer_certificate)
if web_acl_id is not None:
pulumi.set(__self__, "web_acl_id", web_acl_id)
@property
@pulumi.getter
def enabled(self) -> bool:
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def aliases(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "aliases")
@property
@pulumi.getter(name="cNAMEs")
def c_names(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "c_names")
@property
@pulumi.getter(name="cacheBehaviors")
def cache_behaviors(self) -> Optional[Sequence['outputs.DistributionCacheBehavior']]:
return pulumi.get(self, "cache_behaviors")
@property
@pulumi.getter
def comment(self) -> Optional[str]:
return pulumi.get(self, "comment")
@property
@pulumi.getter(name="customErrorResponses")
def custom_error_responses(self) -> Optional[Sequence['outputs.DistributionCustomErrorResponse']]:
return pulumi.get(self, "custom_error_responses")
@property
@pulumi.getter(name="customOrigin")
def custom_origin(self) -> Optional['outputs.DistributionLegacyCustomOrigin']:
return pulumi.get(self, "custom_origin")
@property
@pulumi.getter(name="defaultCacheBehavior")
def default_cache_behavior(self) -> Optional['outputs.DistributionDefaultCacheBehavior']:
return pulumi.get(self, "default_cache_behavior")
@property
@pulumi.getter(name="defaultRootObject")
def default_root_object(self) -> Optional[str]:
return pulumi.get(self, "default_root_object")
@property
@pulumi.getter(name="httpVersion")
def http_version(self) -> Optional[str]:
return pulumi.get(self, "http_version")
@property
@pulumi.getter(name="iPV6Enabled")
def i_pv6_enabled(self) -> Optional[bool]:
return pulumi.get(self, "i_pv6_enabled")
@property
@pulumi.getter
def logging(self) -> Optional['outputs.DistributionLogging']:
return pulumi.get(self, "logging")
@property
@pulumi.getter(name="originGroups")
def origin_groups(self) -> Optional['outputs.DistributionOriginGroups']:
return pulumi.get(self, "origin_groups")
@property
@pulumi.getter
def origins(self) -> Optional[Sequence['outputs.DistributionOrigin']]:
return pulumi.get(self, "origins")
@property
@pulumi.getter(name="priceClass")
def price_class(self) -> Optional[str]:
return pulumi.get(self, "price_class")
@property
@pulumi.getter
def restrictions(self) -> Optional['outputs.DistributionRestrictions']:
return pulumi.get(self, "restrictions")
@property
@pulumi.getter(name="s3Origin")
def s3_origin(self) -> Optional['outputs.DistributionLegacyS3Origin']:
return pulumi.get(self, "s3_origin")
@property
@pulumi.getter(name="viewerCertificate")
def viewer_certificate(self) -> Optional['outputs.DistributionViewerCertificate']:
return pulumi.get(self, "viewer_certificate")
@property
@pulumi.getter(name="webACLId")
def web_acl_id(self) -> Optional[str]:
return pulumi.get(self, "web_acl_id")
@pulumi.output_type
class DistributionCookies(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "whitelistedNames":
suggest = "whitelisted_names"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionCookies. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionCookies.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionCookies.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
forward: str,
whitelisted_names: Optional[Sequence[str]] = None):
pulumi.set(__self__, "forward", forward)
if whitelisted_names is not None:
pulumi.set(__self__, "whitelisted_names", whitelisted_names)
@property
@pulumi.getter
def forward(self) -> str:
return pulumi.get(self, "forward")
@property
@pulumi.getter(name="whitelistedNames")
def whitelisted_names(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "whitelisted_names")
@pulumi.output_type
class DistributionCustomErrorResponse(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "errorCode":
suggest = "error_code"
elif key == "errorCachingMinTTL":
suggest = "error_caching_min_ttl"
elif key == "responseCode":
suggest = "response_code"
elif key == "responsePagePath":
suggest = "response_page_path"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionCustomErrorResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionCustomErrorResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionCustomErrorResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
error_code: int,
error_caching_min_ttl: Optional[float] = None,
response_code: Optional[int] = None,
response_page_path: Optional[str] = None):
pulumi.set(__self__, "error_code", error_code)
if error_caching_min_ttl is not None:
pulumi.set(__self__, "error_caching_min_ttl", error_caching_min_ttl)
if response_code is not None:
pulumi.set(__self__, "response_code", response_code)
if response_page_path is not None:
pulumi.set(__self__, "response_page_path", response_page_path)
@property
@pulumi.getter(name="errorCode")
def error_code(self) -> int:
return pulumi.get(self, "error_code")
@property
@pulumi.getter(name="errorCachingMinTTL")
def error_caching_min_ttl(self) -> Optional[float]:
return pulumi.get(self, "error_caching_min_ttl")
@property
@pulumi.getter(name="responseCode")
def response_code(self) -> Optional[int]:
return pulumi.get(self, "response_code")
@property
@pulumi.getter(name="responsePagePath")
def response_page_path(self) -> Optional[str]:
return pulumi.get(self, "response_page_path")
@pulumi.output_type
class DistributionCustomOriginConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "originProtocolPolicy":
suggest = "origin_protocol_policy"
elif key == "hTTPPort":
suggest = "h_ttp_port"
elif key == "hTTPSPort":
suggest = "h_ttps_port"
elif key == "originKeepaliveTimeout":
suggest = "origin_keepalive_timeout"
elif key == "originReadTimeout":
suggest = "origin_read_timeout"
elif key == "originSSLProtocols":
suggest = "origin_ssl_protocols"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionCustomOriginConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionCustomOriginConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionCustomOriginConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
origin_protocol_policy: str,
h_ttp_port: Optional[int] = None,
h_ttps_port: Optional[int] = None,
origin_keepalive_timeout: Optional[int] = None,
origin_read_timeout: Optional[int] = None,
origin_ssl_protocols: Optional[Sequence[str]] = None):
pulumi.set(__self__, "origin_protocol_policy", origin_protocol_policy)
if h_ttp_port is not None:
pulumi.set(__self__, "h_ttp_port", h_ttp_port)
if h_ttps_port is not None:
pulumi.set(__self__, "h_ttps_port", h_ttps_port)
if origin_keepalive_timeout is not None:
pulumi.set(__self__, "origin_keepalive_timeout", origin_keepalive_timeout)
if origin_read_timeout is not None:
pulumi.set(__self__, "origin_read_timeout", origin_read_timeout)
if origin_ssl_protocols is not None:
pulumi.set(__self__, "origin_ssl_protocols", origin_ssl_protocols)
@property
@pulumi.getter(name="originProtocolPolicy")
def origin_protocol_policy(self) -> str:
return pulumi.get(self, "origin_protocol_policy")
@property
@pulumi.getter(name="hTTPPort")
def h_ttp_port(self) -> Optional[int]:
return pulumi.get(self, "h_ttp_port")
@property
@pulumi.getter(name="hTTPSPort")
def h_ttps_port(self) -> Optional[int]:
return pulumi.get(self, "h_ttps_port")
@property
@pulumi.getter(name="originKeepaliveTimeout")
def origin_keepalive_timeout(self) -> Optional[int]:
return pulumi.get(self, "origin_keepalive_timeout")
@property
@pulumi.getter(name="originReadTimeout")
def origin_read_timeout(self) -> Optional[int]:
return pulumi.get(self, "origin_read_timeout")
@property
@pulumi.getter(name="originSSLProtocols")
def origin_ssl_protocols(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "origin_ssl_protocols")
@pulumi.output_type
class DistributionDefaultCacheBehavior(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "targetOriginId":
suggest = "target_origin_id"
elif key == "viewerProtocolPolicy":
suggest = "viewer_protocol_policy"
elif key == "allowedMethods":
suggest = "allowed_methods"
elif key == "cachePolicyId":
suggest = "cache_policy_id"
elif key == "cachedMethods":
suggest = "cached_methods"
elif key == "defaultTTL":
suggest = "default_ttl"
elif key == "fieldLevelEncryptionId":
suggest = "field_level_encryption_id"
elif key == "forwardedValues":
suggest = "forwarded_values"
elif key == "functionAssociations":
suggest = "function_associations"
elif key == "lambdaFunctionAssociations":
suggest = "lambda_function_associations"
elif key == "maxTTL":
suggest = "max_ttl"
elif key == "minTTL":
suggest = "min_ttl"
elif key == "originRequestPolicyId":
suggest = "origin_request_policy_id"
elif key == "realtimeLogConfigArn":
suggest = "realtime_log_config_arn"
elif key == "responseHeadersPolicyId":
suggest = "response_headers_policy_id"
elif key == "smoothStreaming":
suggest = "smooth_streaming"
elif key == "trustedKeyGroups":
suggest = "trusted_key_groups"
elif key == "trustedSigners":
suggest = "trusted_signers"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionDefaultCacheBehavior. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionDefaultCacheBehavior.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionDefaultCacheBehavior.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
target_origin_id: str,
viewer_protocol_policy: str,
allowed_methods: Optional[Sequence[str]] = None,
cache_policy_id: Optional[str] = None,
cached_methods: Optional[Sequence[str]] = None,
compress: Optional[bool] = None,
default_ttl: Optional[float] = None,
field_level_encryption_id: Optional[str] = None,
forwarded_values: Optional['outputs.DistributionForwardedValues'] = None,
function_associations: Optional[Sequence['outputs.DistributionFunctionAssociation']] = None,
lambda_function_associations: Optional[Sequence['outputs.DistributionLambdaFunctionAssociation']] = None,
max_ttl: Optional[float] = None,
min_ttl: Optional[float] = None,
origin_request_policy_id: Optional[str] = None,
realtime_log_config_arn: Optional[str] = None,
response_headers_policy_id: Optional[str] = None,
smooth_streaming: Optional[bool] = None,
trusted_key_groups: Optional[Sequence[str]] = None,
trusted_signers: Optional[Sequence[str]] = None):
pulumi.set(__self__, "target_origin_id", target_origin_id)
pulumi.set(__self__, "viewer_protocol_policy", viewer_protocol_policy)
if allowed_methods is not None:
pulumi.set(__self__, "allowed_methods", allowed_methods)
if cache_policy_id is not None:
pulumi.set(__self__, "cache_policy_id", cache_policy_id)
if cached_methods is not None:
pulumi.set(__self__, "cached_methods", cached_methods)
if compress is not None:
pulumi.set(__self__, "compress", compress)
if default_ttl is not None:
pulumi.set(__self__, "default_ttl", default_ttl)
if field_level_encryption_id is not None:
pulumi.set(__self__, "field_level_encryption_id", field_level_encryption_id)
if forwarded_values is not None:
pulumi.set(__self__, "forwarded_values", forwarded_values)
if function_associations is not None:
pulumi.set(__self__, "function_associations", function_associations)
if lambda_function_associations is not None:
pulumi.set(__self__, "lambda_function_associations", lambda_function_associations)
if max_ttl is not None:
pulumi.set(__self__, "max_ttl", max_ttl)
if min_ttl is not None:
pulumi.set(__self__, "min_ttl", min_ttl)
if origin_request_policy_id is not None:
pulumi.set(__self__, "origin_request_policy_id", origin_request_policy_id)
if realtime_log_config_arn is not None:
pulumi.set(__self__, "realtime_log_config_arn", realtime_log_config_arn)
if response_headers_policy_id is not None:
pulumi.set(__self__, "response_headers_policy_id", response_headers_policy_id)
if smooth_streaming is not None:
pulumi.set(__self__, "smooth_streaming", smooth_streaming)
if trusted_key_groups is not None:
pulumi.set(__self__, "trusted_key_groups", trusted_key_groups)
if trusted_signers is not None:
pulumi.set(__self__, "trusted_signers", trusted_signers)
@property
@pulumi.getter(name="targetOriginId")
def target_origin_id(self) -> str:
return pulumi.get(self, "target_origin_id")
@property
@pulumi.getter(name="viewerProtocolPolicy")
def viewer_protocol_policy(self) -> str:
return pulumi.get(self, "viewer_protocol_policy")
@property
@pulumi.getter(name="allowedMethods")
def allowed_methods(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "allowed_methods")
@property
@pulumi.getter(name="cachePolicyId")
def cache_policy_id(self) -> Optional[str]:
return pulumi.get(self, "cache_policy_id")
@property
@pulumi.getter(name="cachedMethods")
def cached_methods(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "cached_methods")
@property
@pulumi.getter
def compress(self) -> Optional[bool]:
return pulumi.get(self, "compress")
@property
@pulumi.getter(name="defaultTTL")
def default_ttl(self) -> Optional[float]:
return pulumi.get(self, "default_ttl")
@property
@pulumi.getter(name="fieldLevelEncryptionId")
def field_level_encryption_id(self) -> Optional[str]:
return pulumi.get(self, "field_level_encryption_id")
@property
@pulumi.getter(name="forwardedValues")
def forwarded_values(self) -> Optional['outputs.DistributionForwardedValues']:
return pulumi.get(self, "forwarded_values")
@property
@pulumi.getter(name="functionAssociations")
def function_associations(self) -> Optional[Sequence['outputs.DistributionFunctionAssociation']]:
return pulumi.get(self, "function_associations")
@property
@pulumi.getter(name="lambdaFunctionAssociations")
def lambda_function_associations(self) -> Optional[Sequence['outputs.DistributionLambdaFunctionAssociation']]:
return pulumi.get(self, "lambda_function_associations")
@property
@pulumi.getter(name="maxTTL")
def max_ttl(self) -> Optional[float]:
return pulumi.get(self, "max_ttl")
@property
@pulumi.getter(name="minTTL")
def min_ttl(self) -> Optional[float]:
return pulumi.get(self, "min_ttl")
@property
@pulumi.getter(name="originRequestPolicyId")
def origin_request_policy_id(self) -> Optional[str]:
return pulumi.get(self, "origin_request_policy_id")
@property
@pulumi.getter(name="realtimeLogConfigArn")
def realtime_log_config_arn(self) -> Optional[str]:
return pulumi.get(self, "realtime_log_config_arn")
@property
@pulumi.getter(name="responseHeadersPolicyId")
def response_headers_policy_id(self) -> Optional[str]:
return pulumi.get(self, "response_headers_policy_id")
@property
@pulumi.getter(name="smoothStreaming")
def smooth_streaming(self) -> Optional[bool]:
return pulumi.get(self, "smooth_streaming")
@property
@pulumi.getter(name="trustedKeyGroups")
def trusted_key_groups(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "trusted_key_groups")
@property
@pulumi.getter(name="trustedSigners")
def trusted_signers(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "trusted_signers")
@pulumi.output_type
class DistributionForwardedValues(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "queryString":
suggest = "query_string"
elif key == "queryStringCacheKeys":
suggest = "query_string_cache_keys"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionForwardedValues. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionForwardedValues.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionForwardedValues.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
query_string: bool,
cookies: Optional['outputs.DistributionCookies'] = None,
headers: Optional[Sequence[str]] = None,
query_string_cache_keys: Optional[Sequence[str]] = None):
pulumi.set(__self__, "query_string", query_string)
if cookies is not None:
pulumi.set(__self__, "cookies", cookies)
if headers is not None:
pulumi.set(__self__, "headers", headers)
if query_string_cache_keys is not None:
pulumi.set(__self__, "query_string_cache_keys", query_string_cache_keys)
@property
@pulumi.getter(name="queryString")
def query_string(self) -> bool:
return pulumi.get(self, "query_string")
@property
@pulumi.getter
def cookies(self) -> Optional['outputs.DistributionCookies']:
return pulumi.get(self, "cookies")
@property
@pulumi.getter
def headers(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "headers")
@property
@pulumi.getter(name="queryStringCacheKeys")
def query_string_cache_keys(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "query_string_cache_keys")
@pulumi.output_type
class DistributionFunctionAssociation(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "eventType":
suggest = "event_type"
elif key == "functionARN":
suggest = "function_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionFunctionAssociation. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionFunctionAssociation.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionFunctionAssociation.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
event_type: Optional[str] = None,
function_arn: Optional[str] = None):
if event_type is not None:
pulumi.set(__self__, "event_type", event_type)
if function_arn is not None:
pulumi.set(__self__, "function_arn", function_arn)
@property
@pulumi.getter(name="eventType")
def event_type(self) -> Optional[str]:
return pulumi.get(self, "event_type")
@property
@pulumi.getter(name="functionARN")
def function_arn(self) -> Optional[str]:
return pulumi.get(self, "function_arn")
@pulumi.output_type
class DistributionGeoRestriction(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "restrictionType":
suggest = "restriction_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionGeoRestriction. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionGeoRestriction.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionGeoRestriction.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
restriction_type: str,
locations: Optional[Sequence[str]] = None):
pulumi.set(__self__, "restriction_type", restriction_type)
if locations is not None:
pulumi.set(__self__, "locations", locations)
@property
@pulumi.getter(name="restrictionType")
def restriction_type(self) -> str:
return pulumi.get(self, "restriction_type")
@property
@pulumi.getter
def locations(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "locations")
@pulumi.output_type
class DistributionLambdaFunctionAssociation(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "eventType":
suggest = "event_type"
elif key == "includeBody":
suggest = "include_body"
elif key == "lambdaFunctionARN":
suggest = "lambda_function_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionLambdaFunctionAssociation. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionLambdaFunctionAssociation.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionLambdaFunctionAssociation.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
event_type: Optional[str] = None,
include_body: Optional[bool] = None,
lambda_function_arn: Optional[str] = None):
if event_type is not None:
pulumi.set(__self__, "event_type", event_type)
if include_body is not None:
pulumi.set(__self__, "include_body", include_body)
if lambda_function_arn is not None:
pulumi.set(__self__, "lambda_function_arn", lambda_function_arn)
@property
@pulumi.getter(name="eventType")
def event_type(self) -> Optional[str]:
return pulumi.get(self, "event_type")
@property
@pulumi.getter(name="includeBody")
def include_body(self) -> Optional[bool]:
return pulumi.get(self, "include_body")
@property
@pulumi.getter(name="lambdaFunctionARN")
def lambda_function_arn(self) -> Optional[str]:
return pulumi.get(self, "lambda_function_arn")
@pulumi.output_type
class DistributionLegacyCustomOrigin(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "dNSName":
suggest = "d_ns_name"
elif key == "originProtocolPolicy":
suggest = "origin_protocol_policy"
elif key == "originSSLProtocols":
suggest = "origin_ssl_protocols"
elif key == "hTTPPort":
suggest = "h_ttp_port"
elif key == "hTTPSPort":
suggest = "h_ttps_port"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionLegacyCustomOrigin. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionLegacyCustomOrigin.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionLegacyCustomOrigin.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
d_ns_name: str,
origin_protocol_policy: str,
origin_ssl_protocols: Sequence[str],
h_ttp_port: Optional[int] = None,
h_ttps_port: Optional[int] = None):
pulumi.set(__self__, "d_ns_name", d_ns_name)
pulumi.set(__self__, "origin_protocol_policy", origin_protocol_policy)
pulumi.set(__self__, "origin_ssl_protocols", origin_ssl_protocols)
if h_ttp_port is not None:
pulumi.set(__self__, "h_ttp_port", h_ttp_port)
if h_ttps_port is not None:
pulumi.set(__self__, "h_ttps_port", h_ttps_port)
@property
@pulumi.getter(name="dNSName")
def d_ns_name(self) -> str:
return pulumi.get(self, "d_ns_name")
@property
@pulumi.getter(name="originProtocolPolicy")
def origin_protocol_policy(self) -> str:
return pulumi.get(self, "origin_protocol_policy")
@property
@pulumi.getter(name="originSSLProtocols")
def origin_ssl_protocols(self) -> Sequence[str]:
return pulumi.get(self, "origin_ssl_protocols")
@property
@pulumi.getter(name="hTTPPort")
def h_ttp_port(self) -> Optional[int]:
return pulumi.get(self, "h_ttp_port")
@property
@pulumi.getter(name="hTTPSPort")
def h_ttps_port(self) -> Optional[int]:
return pulumi.get(self, "h_ttps_port")
@pulumi.output_type
class DistributionLegacyS3Origin(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "dNSName":
suggest = "d_ns_name"
elif key == "originAccessIdentity":
suggest = "origin_access_identity"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionLegacyS3Origin. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionLegacyS3Origin.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionLegacyS3Origin.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
d_ns_name: str,
origin_access_identity: Optional[str] = None):
pulumi.set(__self__, "d_ns_name", d_ns_name)
if origin_access_identity is not None:
pulumi.set(__self__, "origin_access_identity", origin_access_identity)
@property
@pulumi.getter(name="dNSName")
def d_ns_name(self) -> str:
return pulumi.get(self, "d_ns_name")
@property
@pulumi.getter(name="originAccessIdentity")
def origin_access_identity(self) -> Optional[str]:
return pulumi.get(self, "origin_access_identity")
@pulumi.output_type
class DistributionLogging(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "includeCookies":
suggest = "include_cookies"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionLogging. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionLogging.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionLogging.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
bucket: str,
include_cookies: Optional[bool] = None,
prefix: Optional[str] = None):
pulumi.set(__self__, "bucket", bucket)
if include_cookies is not None:
pulumi.set(__self__, "include_cookies", include_cookies)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
@property
@pulumi.getter
def bucket(self) -> str:
return pulumi.get(self, "bucket")
@property
@pulumi.getter(name="includeCookies")
def include_cookies(self) -> Optional[bool]:
return pulumi.get(self, "include_cookies")
@property
@pulumi.getter
def prefix(self) -> Optional[str]:
return pulumi.get(self, "prefix")
@pulumi.output_type
class DistributionOrigin(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "domainName":
suggest = "domain_name"
elif key == "connectionAttempts":
suggest = "connection_attempts"
elif key == "connectionTimeout":
suggest = "connection_timeout"
elif key == "customOriginConfig":
suggest = "custom_origin_config"
elif key == "originCustomHeaders":
suggest = "origin_custom_headers"
elif key == "originPath":
suggest = "origin_path"
elif key == "originShield":
suggest = "origin_shield"
elif key == "s3OriginConfig":
suggest = "s3_origin_config"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionOrigin. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionOrigin.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionOrigin.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
domain_name: str,
id: str,
connection_attempts: Optional[int] = None,
connection_timeout: Optional[int] = None,
custom_origin_config: Optional['outputs.DistributionCustomOriginConfig'] = None,
origin_custom_headers: Optional[Sequence['outputs.DistributionOriginCustomHeader']] = None,
origin_path: Optional[str] = None,
origin_shield: Optional['outputs.DistributionOriginShield'] = None,
s3_origin_config: Optional['outputs.DistributionS3OriginConfig'] = None):
pulumi.set(__self__, "domain_name", domain_name)
pulumi.set(__self__, "id", id)
if connection_attempts is not None:
pulumi.set(__self__, "connection_attempts", connection_attempts)
if connection_timeout is not None:
pulumi.set(__self__, "connection_timeout", connection_timeout)
if custom_origin_config is not None:
pulumi.set(__self__, "custom_origin_config", custom_origin_config)
if origin_custom_headers is not None:
pulumi.set(__self__, "origin_custom_headers", origin_custom_headers)
if origin_path is not None:
pulumi.set(__self__, "origin_path", origin_path)
if origin_shield is not None:
pulumi.set(__self__, "origin_shield", origin_shield)
if s3_origin_config is not None:
pulumi.set(__self__, "s3_origin_config", s3_origin_config)
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> str:
return pulumi.get(self, "domain_name")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="connectionAttempts")
def connection_attempts(self) -> Optional[int]:
return pulumi.get(self, "connection_attempts")
@property
@pulumi.getter(name="connectionTimeout")
def connection_timeout(self) -> Optional[int]:
return pulumi.get(self, "connection_timeout")
@property
@pulumi.getter(name="customOriginConfig")
def custom_origin_config(self) -> Optional['outputs.DistributionCustomOriginConfig']:
return pulumi.get(self, "custom_origin_config")
@property
@pulumi.getter(name="originCustomHeaders")
def origin_custom_headers(self) -> Optional[Sequence['outputs.DistributionOriginCustomHeader']]:
return pulumi.get(self, "origin_custom_headers")
@property
@pulumi.getter(name="originPath")
def origin_path(self) -> Optional[str]:
return pulumi.get(self, "origin_path")
@property
@pulumi.getter(name="originShield")
def origin_shield(self) -> Optional['outputs.DistributionOriginShield']:
return pulumi.get(self, "origin_shield")
@property
@pulumi.getter(name="s3OriginConfig")
def s3_origin_config(self) -> Optional['outputs.DistributionS3OriginConfig']:
return pulumi.get(self, "s3_origin_config")
@pulumi.output_type
class DistributionOriginCustomHeader(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "headerName":
suggest = "header_name"
elif key == "headerValue":
suggest = "header_value"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionOriginCustomHeader. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionOriginCustomHeader.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionOriginCustomHeader.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
header_name: str,
header_value: str):
pulumi.set(__self__, "header_name", header_name)
pulumi.set(__self__, "header_value", header_value)
@property
@pulumi.getter(name="headerName")
def header_name(self) -> str:
return pulumi.get(self, "header_name")
@property
@pulumi.getter(name="headerValue")
def header_value(self) -> str:
return pulumi.get(self, "header_value")
@pulumi.output_type
class DistributionOriginGroup(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "failoverCriteria":
suggest = "failover_criteria"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionOriginGroup. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionOriginGroup.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionOriginGroup.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
failover_criteria: 'outputs.DistributionOriginGroupFailoverCriteria',
id: str,
members: 'outputs.DistributionOriginGroupMembers'):
pulumi.set(__self__, "failover_criteria", failover_criteria)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "members", members)
@property
@pulumi.getter(name="failoverCriteria")
def failover_criteria(self) -> 'outputs.DistributionOriginGroupFailoverCriteria':
return pulumi.get(self, "failover_criteria")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def members(self) -> 'outputs.DistributionOriginGroupMembers':
return pulumi.get(self, "members")
@pulumi.output_type
class DistributionOriginGroupFailoverCriteria(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "statusCodes":
suggest = "status_codes"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionOriginGroupFailoverCriteria. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionOriginGroupFailoverCriteria.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionOriginGroupFailoverCriteria.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
status_codes: 'outputs.DistributionStatusCodes'):
pulumi.set(__self__, "status_codes", status_codes)
@property
@pulumi.getter(name="statusCodes")
def status_codes(self) -> 'outputs.DistributionStatusCodes':
return pulumi.get(self, "status_codes")
@pulumi.output_type
class DistributionOriginGroupMember(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "originId":
suggest = "origin_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionOriginGroupMember. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionOriginGroupMember.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionOriginGroupMember.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
origin_id: str):
pulumi.set(__self__, "origin_id", origin_id)
@property
@pulumi.getter(name="originId")
def origin_id(self) -> str:
return pulumi.get(self, "origin_id")
@pulumi.output_type
class DistributionOriginGroupMembers(dict):
def __init__(__self__, *,
items: Sequence['outputs.DistributionOriginGroupMember'],
quantity: int):
pulumi.set(__self__, "items", items)
pulumi.set(__self__, "quantity", quantity)
@property
@pulumi.getter
def items(self) -> Sequence['outputs.DistributionOriginGroupMember']:
return pulumi.get(self, "items")
@property
@pulumi.getter
def quantity(self) -> int:
return pulumi.get(self, "quantity")
@pulumi.output_type
class DistributionOriginGroups(dict):
def __init__(__self__, *,
quantity: int,
items: Optional[Sequence['outputs.DistributionOriginGroup']] = None):
pulumi.set(__self__, "quantity", quantity)
if items is not None:
pulumi.set(__self__, "items", items)
@property
@pulumi.getter
def quantity(self) -> int:
return pulumi.get(self, "quantity")
@property
@pulumi.getter
def items(self) -> Optional[Sequence['outputs.DistributionOriginGroup']]:
return pulumi.get(self, "items")
@pulumi.output_type
class DistributionOriginShield(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "originShieldRegion":
suggest = "origin_shield_region"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionOriginShield. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionOriginShield.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionOriginShield.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enabled: Optional[bool] = None,
origin_shield_region: Optional[str] = None):
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if origin_shield_region is not None:
pulumi.set(__self__, "origin_shield_region", origin_shield_region)
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="originShieldRegion")
def origin_shield_region(self) -> Optional[str]:
return pulumi.get(self, "origin_shield_region")
@pulumi.output_type
class DistributionRestrictions(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "geoRestriction":
suggest = "geo_restriction"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionRestrictions. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionRestrictions.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionRestrictions.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
geo_restriction: 'outputs.DistributionGeoRestriction'):
pulumi.set(__self__, "geo_restriction", geo_restriction)
@property
@pulumi.getter(name="geoRestriction")
def geo_restriction(self) -> 'outputs.DistributionGeoRestriction':
return pulumi.get(self, "geo_restriction")
@pulumi.output_type
class DistributionS3OriginConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "originAccessIdentity":
suggest = "origin_access_identity"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionS3OriginConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionS3OriginConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionS3OriginConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
origin_access_identity: Optional[str] = None):
if origin_access_identity is not None:
pulumi.set(__self__, "origin_access_identity", origin_access_identity)
@property
@pulumi.getter(name="originAccessIdentity")
def origin_access_identity(self) -> Optional[str]:
return pulumi.get(self, "origin_access_identity")
@pulumi.output_type
class DistributionStatusCodes(dict):
def __init__(__self__, *,
items: Sequence[int],
quantity: int):
pulumi.set(__self__, "items", items)
pulumi.set(__self__, "quantity", quantity)
@property
@pulumi.getter
def items(self) -> Sequence[int]:
return pulumi.get(self, "items")
@property
@pulumi.getter
def quantity(self) -> int:
return pulumi.get(self, "quantity")
@pulumi.output_type
class DistributionTag(dict):
def __init__(__self__, *,
key: str,
value: str):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@pulumi.output_type
class DistributionViewerCertificate(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "acmCertificateArn":
suggest = "acm_certificate_arn"
elif key == "cloudFrontDefaultCertificate":
suggest = "cloud_front_default_certificate"
elif key == "iamCertificateId":
suggest = "iam_certificate_id"
elif key == "minimumProtocolVersion":
suggest = "minimum_protocol_version"
elif key == "sslSupportMethod":
suggest = "ssl_support_method"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DistributionViewerCertificate. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DistributionViewerCertificate.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DistributionViewerCertificate.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
acm_certificate_arn: Optional[str] = None,
cloud_front_default_certificate: Optional[bool] = None,
iam_certificate_id: Optional[str] = None,
minimum_protocol_version: Optional[str] = None,
ssl_support_method: Optional[str] = None):
if acm_certificate_arn is not None:
pulumi.set(__self__, "acm_certificate_arn", acm_certificate_arn)
if cloud_front_default_certificate is not None:
pulumi.set(__self__, "cloud_front_default_certificate", cloud_front_default_certificate)
if iam_certificate_id is not None:
pulumi.set(__self__, "iam_certificate_id", iam_certificate_id)
if minimum_protocol_version is not None:
pulumi.set(__self__, "minimum_protocol_version", minimum_protocol_version)
if ssl_support_method is not None:
pulumi.set(__self__, "ssl_support_method", ssl_support_method)
@property
@pulumi.getter(name="acmCertificateArn")
def acm_certificate_arn(self) -> Optional[str]:
return pulumi.get(self, "acm_certificate_arn")
@property
@pulumi.getter(name="cloudFrontDefaultCertificate")
def cloud_front_default_certificate(self) -> Optional[bool]:
return pulumi.get(self, "cloud_front_default_certificate")
@property
@pulumi.getter(name="iamCertificateId")
def iam_certificate_id(self) -> Optional[str]:
return pulumi.get(self, "iam_certificate_id")
@property
@pulumi.getter(name="minimumProtocolVersion")
def minimum_protocol_version(self) -> Optional[str]:
return pulumi.get(self, "minimum_protocol_version")
@property
@pulumi.getter(name="sslSupportMethod")
def ssl_support_method(self) -> Optional[str]:
return pulumi.get(self, "ssl_support_method")
@pulumi.output_type
class FunctionConfig(dict):
def __init__(__self__, *,
comment: str,
runtime: str):
pulumi.set(__self__, "comment", comment)
pulumi.set(__self__, "runtime", runtime)
@property
@pulumi.getter
def comment(self) -> str:
return pulumi.get(self, "comment")
@property
@pulumi.getter
def runtime(self) -> str:
return pulumi.get(self, "runtime")
@pulumi.output_type
class FunctionMetadata(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "functionARN":
suggest = "function_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in FunctionMetadata. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
FunctionMetadata.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
FunctionMetadata.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
function_arn: Optional[str] = None):
if function_arn is not None:
pulumi.set(__self__, "function_arn", function_arn)
@property
@pulumi.getter(name="functionARN")
def function_arn(self) -> Optional[str]:
return pulumi.get(self, "function_arn")
@pulumi.output_type
class KeyGroupConfig(dict):
def __init__(__self__, *,
items: Sequence[str],
name: str,
comment: Optional[str] = None):
pulumi.set(__self__, "items", items)
pulumi.set(__self__, "name", name)
if comment is not None:
pulumi.set(__self__, "comment", comment)
@property
@pulumi.getter
def items(self) -> Sequence[str]:
return pulumi.get(self, "items")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def comment(self) -> Optional[str]:
return pulumi.get(self, "comment")
@pulumi.output_type
class OriginRequestPolicyConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cookiesConfig":
suggest = "cookies_config"
elif key == "headersConfig":
suggest = "headers_config"
elif key == "queryStringsConfig":
suggest = "query_strings_config"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in OriginRequestPolicyConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
OriginRequestPolicyConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
OriginRequestPolicyConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
cookies_config: 'outputs.OriginRequestPolicyCookiesConfig',
headers_config: 'outputs.OriginRequestPolicyHeadersConfig',
name: str,
query_strings_config: 'outputs.OriginRequestPolicyQueryStringsConfig',
comment: Optional[str] = None):
pulumi.set(__self__, "cookies_config", cookies_config)
pulumi.set(__self__, "headers_config", headers_config)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "query_strings_config", query_strings_config)
if comment is not None:
pulumi.set(__self__, "comment", comment)
@property
@pulumi.getter(name="cookiesConfig")
def cookies_config(self) -> 'outputs.OriginRequestPolicyCookiesConfig':
return pulumi.get(self, "cookies_config")
@property
@pulumi.getter(name="headersConfig")
def headers_config(self) -> 'outputs.OriginRequestPolicyHeadersConfig':
return pulumi.get(self, "headers_config")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="queryStringsConfig")
def query_strings_config(self) -> 'outputs.OriginRequestPolicyQueryStringsConfig':
return pulumi.get(self, "query_strings_config")
@property
@pulumi.getter
def comment(self) -> Optional[str]:
return pulumi.get(self, "comment")
@pulumi.output_type
class OriginRequestPolicyCookiesConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "cookieBehavior":
suggest = "cookie_behavior"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in OriginRequestPolicyCookiesConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
OriginRequestPolicyCookiesConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
OriginRequestPolicyCookiesConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
cookie_behavior: str,
cookies: Optional[Sequence[str]] = None):
pulumi.set(__self__, "cookie_behavior", cookie_behavior)
if cookies is not None:
pulumi.set(__self__, "cookies", cookies)
@property
@pulumi.getter(name="cookieBehavior")
def cookie_behavior(self) -> str:
return pulumi.get(self, "cookie_behavior")
@property
@pulumi.getter
def cookies(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "cookies")
@pulumi.output_type
class OriginRequestPolicyHeadersConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "headerBehavior":
suggest = "header_behavior"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in OriginRequestPolicyHeadersConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
OriginRequestPolicyHeadersConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
OriginRequestPolicyHeadersConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
header_behavior: str,
headers: Optional[Sequence[str]] = None):
pulumi.set(__self__, "header_behavior", header_behavior)
if headers is not None:
pulumi.set(__self__, "headers", headers)
@property
@pulumi.getter(name="headerBehavior")
def header_behavior(self) -> str:
return pulumi.get(self, "header_behavior")
@property
@pulumi.getter
def headers(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "headers")
@pulumi.output_type
class OriginRequestPolicyQueryStringsConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "queryStringBehavior":
suggest = "query_string_behavior"
elif key == "queryStrings":
suggest = "query_strings"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in OriginRequestPolicyQueryStringsConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
OriginRequestPolicyQueryStringsConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
OriginRequestPolicyQueryStringsConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
query_string_behavior: str,
query_strings: Optional[Sequence[str]] = None):
pulumi.set(__self__, "query_string_behavior", query_string_behavior)
if query_strings is not None:
pulumi.set(__self__, "query_strings", query_strings)
@property
@pulumi.getter(name="queryStringBehavior")
def query_string_behavior(self) -> str:
return pulumi.get(self, "query_string_behavior")
@property
@pulumi.getter(name="queryStrings")
def query_strings(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "query_strings")
@pulumi.output_type
class PublicKeyConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "callerReference":
suggest = "caller_reference"
elif key == "encodedKey":
suggest = "encoded_key"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PublicKeyConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
PublicKeyConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
PublicKeyConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
caller_reference: str,
encoded_key: str,
name: str,
comment: Optional[str] = None):
pulumi.set(__self__, "caller_reference", caller_reference)
pulumi.set(__self__, "encoded_key", encoded_key)
pulumi.set(__self__, "name", name)
if comment is not None:
pulumi.set(__self__, "comment", comment)
@property
@pulumi.getter(name="callerReference")
def caller_reference(self) -> str:
return pulumi.get(self, "caller_reference")
@property
@pulumi.getter(name="encodedKey")
def encoded_key(self) -> str:
return pulumi.get(self, "encoded_key")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def comment(self) -> Optional[str]:
return pulumi.get(self, "comment")
@pulumi.output_type
class RealtimeLogConfigEndPoint(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "kinesisStreamConfig":
suggest = "kinesis_stream_config"
elif key == "streamType":
suggest = "stream_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RealtimeLogConfigEndPoint. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RealtimeLogConfigEndPoint.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RealtimeLogConfigEndPoint.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
kinesis_stream_config: 'outputs.RealtimeLogConfigKinesisStreamConfig',
stream_type: str):
pulumi.set(__self__, "kinesis_stream_config", kinesis_stream_config)
pulumi.set(__self__, "stream_type", stream_type)
@property
@pulumi.getter(name="kinesisStreamConfig")
def kinesis_stream_config(self) -> 'outputs.RealtimeLogConfigKinesisStreamConfig':
return pulumi.get(self, "kinesis_stream_config")
@property
@pulumi.getter(name="streamType")
def stream_type(self) -> str:
return pulumi.get(self, "stream_type")
@pulumi.output_type
class RealtimeLogConfigKinesisStreamConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "roleArn":
suggest = "role_arn"
elif key == "streamArn":
suggest = "stream_arn"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RealtimeLogConfigKinesisStreamConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RealtimeLogConfigKinesisStreamConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RealtimeLogConfigKinesisStreamConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
role_arn: str,
stream_arn: str):
pulumi.set(__self__, "role_arn", role_arn)
pulumi.set(__self__, "stream_arn", stream_arn)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> str:
return pulumi.get(self, "role_arn")
@property
@pulumi.getter(name="streamArn")
def stream_arn(self) -> str:
return pulumi.get(self, "stream_arn")
@pulumi.output_type
class ResponseHeadersPolicyAccessControlAllowHeaders(dict):
def __init__(__self__, *,
items: Sequence[str]):
pulumi.set(__self__, "items", items)
@property
@pulumi.getter
def items(self) -> Sequence[str]:
return pulumi.get(self, "items")
@pulumi.output_type
class ResponseHeadersPolicyAccessControlAllowMethods(dict):
def __init__(__self__, *,
items: Sequence[str]):
pulumi.set(__self__, "items", items)
@property
@pulumi.getter
def items(self) -> Sequence[str]:
return pulumi.get(self, "items")
@pulumi.output_type
class ResponseHeadersPolicyAccessControlAllowOrigins(dict):
def __init__(__self__, *,
items: Sequence[str]):
pulumi.set(__self__, "items", items)
@property
@pulumi.getter
def items(self) -> Sequence[str]:
return pulumi.get(self, "items")
@pulumi.output_type
class ResponseHeadersPolicyAccessControlExposeHeaders(dict):
def __init__(__self__, *,
items: Sequence[str]):
pulumi.set(__self__, "items", items)
@property
@pulumi.getter
def items(self) -> Sequence[str]:
return pulumi.get(self, "items")
@pulumi.output_type
class ResponseHeadersPolicyConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "corsConfig":
suggest = "cors_config"
elif key == "customHeadersConfig":
suggest = "custom_headers_config"
elif key == "securityHeadersConfig":
suggest = "security_headers_config"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ResponseHeadersPolicyConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ResponseHeadersPolicyConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ResponseHeadersPolicyConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
name: str,
comment: Optional[str] = None,
cors_config: Optional['outputs.ResponseHeadersPolicyCorsConfig'] = None,
custom_headers_config: Optional['outputs.ResponseHeadersPolicyCustomHeadersConfig'] = None,
security_headers_config: Optional['outputs.ResponseHeadersPolicySecurityHeadersConfig'] = None):
pulumi.set(__self__, "name", name)
if comment is not None:
pulumi.set(__self__, "comment", comment)
if cors_config is not None:
pulumi.set(__self__, "cors_config", cors_config)
if custom_headers_config is not None:
pulumi.set(__self__, "custom_headers_config", custom_headers_config)
if security_headers_config is not None:
pulumi.set(__self__, "security_headers_config", security_headers_config)
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def comment(self) -> Optional[str]:
return pulumi.get(self, "comment")
@property
@pulumi.getter(name="corsConfig")
def cors_config(self) -> Optional['outputs.ResponseHeadersPolicyCorsConfig']:
return pulumi.get(self, "cors_config")
@property
@pulumi.getter(name="customHeadersConfig")
def custom_headers_config(self) -> Optional['outputs.ResponseHeadersPolicyCustomHeadersConfig']:
return pulumi.get(self, "custom_headers_config")
@property
@pulumi.getter(name="securityHeadersConfig")
def security_headers_config(self) -> Optional['outputs.ResponseHeadersPolicySecurityHeadersConfig']:
return pulumi.get(self, "security_headers_config")
@pulumi.output_type
class ResponseHeadersPolicyContentSecurityPolicy(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "contentSecurityPolicy":
suggest = "content_security_policy"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ResponseHeadersPolicyContentSecurityPolicy. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ResponseHeadersPolicyContentSecurityPolicy.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ResponseHeadersPolicyContentSecurityPolicy.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
content_security_policy: str,
override: bool):
pulumi.set(__self__, "content_security_policy", content_security_policy)
pulumi.set(__self__, "override", override)
@property
@pulumi.getter(name="contentSecurityPolicy")
def content_security_policy(self) -> str:
return pulumi.get(self, "content_security_policy")
@property
@pulumi.getter
def override(self) -> bool:
return pulumi.get(self, "override")
@pulumi.output_type
class ResponseHeadersPolicyContentTypeOptions(dict):
def __init__(__self__, *,
override: bool):
pulumi.set(__self__, "override", override)
@property
@pulumi.getter
def override(self) -> bool:
return pulumi.get(self, "override")
@pulumi.output_type
class ResponseHeadersPolicyCorsConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "accessControlAllowCredentials":
suggest = "access_control_allow_credentials"
elif key == "accessControlAllowHeaders":
suggest = "access_control_allow_headers"
elif key == "accessControlAllowMethods":
suggest = "access_control_allow_methods"
elif key == "accessControlAllowOrigins":
suggest = "access_control_allow_origins"
elif key == "originOverride":
suggest = "origin_override"
elif key == "accessControlExposeHeaders":
suggest = "access_control_expose_headers"
elif key == "accessControlMaxAgeSec":
suggest = "access_control_max_age_sec"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ResponseHeadersPolicyCorsConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ResponseHeadersPolicyCorsConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ResponseHeadersPolicyCorsConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
access_control_allow_credentials: bool,
access_control_allow_headers: 'outputs.ResponseHeadersPolicyAccessControlAllowHeaders',
access_control_allow_methods: 'outputs.ResponseHeadersPolicyAccessControlAllowMethods',
access_control_allow_origins: 'outputs.ResponseHeadersPolicyAccessControlAllowOrigins',
origin_override: bool,
access_control_expose_headers: Optional['outputs.ResponseHeadersPolicyAccessControlExposeHeaders'] = None,
access_control_max_age_sec: Optional[int] = None):
pulumi.set(__self__, "access_control_allow_credentials", access_control_allow_credentials)
pulumi.set(__self__, "access_control_allow_headers", access_control_allow_headers)
pulumi.set(__self__, "access_control_allow_methods", access_control_allow_methods)
pulumi.set(__self__, "access_control_allow_origins", access_control_allow_origins)
pulumi.set(__self__, "origin_override", origin_override)
if access_control_expose_headers is not None:
pulumi.set(__self__, "access_control_expose_headers", access_control_expose_headers)
if access_control_max_age_sec is not None:
pulumi.set(__self__, "access_control_max_age_sec", access_control_max_age_sec)
@property
@pulumi.getter(name="accessControlAllowCredentials")
def access_control_allow_credentials(self) -> bool:
return pulumi.get(self, "access_control_allow_credentials")
@property
@pulumi.getter(name="accessControlAllowHeaders")
def access_control_allow_headers(self) -> 'outputs.ResponseHeadersPolicyAccessControlAllowHeaders':
return pulumi.get(self, "access_control_allow_headers")
@property
@pulumi.getter(name="accessControlAllowMethods")
def access_control_allow_methods(self) -> 'outputs.ResponseHeadersPolicyAccessControlAllowMethods':
return pulumi.get(self, "access_control_allow_methods")
@property
@pulumi.getter(name="accessControlAllowOrigins")
def access_control_allow_origins(self) -> 'outputs.ResponseHeadersPolicyAccessControlAllowOrigins':
return pulumi.get(self, "access_control_allow_origins")
@property
@pulumi.getter(name="originOverride")
def origin_override(self) -> bool:
return pulumi.get(self, "origin_override")
@property
@pulumi.getter(name="accessControlExposeHeaders")
def access_control_expose_headers(self) -> Optional['outputs.ResponseHeadersPolicyAccessControlExposeHeaders']:
return pulumi.get(self, "access_control_expose_headers")
@property
@pulumi.getter(name="accessControlMaxAgeSec")
def access_control_max_age_sec(self) -> Optional[int]:
return pulumi.get(self, "access_control_max_age_sec")
@pulumi.output_type
class ResponseHeadersPolicyCustomHeader(dict):
def __init__(__self__, *,
header: str,
override: bool,
value: str):
pulumi.set(__self__, "header", header)
pulumi.set(__self__, "override", override)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def header(self) -> str:
return pulumi.get(self, "header")
@property
@pulumi.getter
def override(self) -> bool:
return pulumi.get(self, "override")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@pulumi.output_type
class ResponseHeadersPolicyCustomHeadersConfig(dict):
def __init__(__self__, *,
items: Sequence['outputs.ResponseHeadersPolicyCustomHeader']):
pulumi.set(__self__, "items", items)
@property
@pulumi.getter
def items(self) -> Sequence['outputs.ResponseHeadersPolicyCustomHeader']:
return pulumi.get(self, "items")
@pulumi.output_type
class ResponseHeadersPolicyFrameOptions(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "frameOption":
suggest = "frame_option"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ResponseHeadersPolicyFrameOptions. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ResponseHeadersPolicyFrameOptions.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ResponseHeadersPolicyFrameOptions.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
frame_option: str,
override: bool):
pulumi.set(__self__, "frame_option", frame_option)
pulumi.set(__self__, "override", override)
@property
@pulumi.getter(name="frameOption")
def frame_option(self) -> str:
return pulumi.get(self, "frame_option")
@property
@pulumi.getter
def override(self) -> bool:
return pulumi.get(self, "override")
@pulumi.output_type
class ResponseHeadersPolicyReferrerPolicy(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "referrerPolicy":
suggest = "referrer_policy"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ResponseHeadersPolicyReferrerPolicy. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ResponseHeadersPolicyReferrerPolicy.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ResponseHeadersPolicyReferrerPolicy.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
override: bool,
referrer_policy: str):
pulumi.set(__self__, "override", override)
pulumi.set(__self__, "referrer_policy", referrer_policy)
@property
@pulumi.getter
def override(self) -> bool:
return pulumi.get(self, "override")
@property
@pulumi.getter(name="referrerPolicy")
def referrer_policy(self) -> str:
return pulumi.get(self, "referrer_policy")
@pulumi.output_type
class ResponseHeadersPolicySecurityHeadersConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "contentSecurityPolicy":
suggest = "content_security_policy"
elif key == "contentTypeOptions":
suggest = "content_type_options"
elif key == "frameOptions":
suggest = "frame_options"
elif key == "referrerPolicy":
suggest = "referrer_policy"
elif key == "strictTransportSecurity":
suggest = "strict_transport_security"
elif key == "xSSProtection":
suggest = "x_ss_protection"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ResponseHeadersPolicySecurityHeadersConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ResponseHeadersPolicySecurityHeadersConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ResponseHeadersPolicySecurityHeadersConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
content_security_policy: Optional['outputs.ResponseHeadersPolicyContentSecurityPolicy'] = None,
content_type_options: Optional['outputs.ResponseHeadersPolicyContentTypeOptions'] = None,
frame_options: Optional['outputs.ResponseHeadersPolicyFrameOptions'] = None,
referrer_policy: Optional['outputs.ResponseHeadersPolicyReferrerPolicy'] = None,
strict_transport_security: Optional['outputs.ResponseHeadersPolicyStrictTransportSecurity'] = None,
x_ss_protection: Optional['outputs.ResponseHeadersPolicyXSSProtection'] = None):
if content_security_policy is not None:
pulumi.set(__self__, "content_security_policy", content_security_policy)
if content_type_options is not None:
pulumi.set(__self__, "content_type_options", content_type_options)
if frame_options is not None:
pulumi.set(__self__, "frame_options", frame_options)
if referrer_policy is not None:
pulumi.set(__self__, "referrer_policy", referrer_policy)
if strict_transport_security is not None:
pulumi.set(__self__, "strict_transport_security", strict_transport_security)
if x_ss_protection is not None:
pulumi.set(__self__, "x_ss_protection", x_ss_protection)
@property
@pulumi.getter(name="contentSecurityPolicy")
def content_security_policy(self) -> Optional['outputs.ResponseHeadersPolicyContentSecurityPolicy']:
return pulumi.get(self, "content_security_policy")
@property
@pulumi.getter(name="contentTypeOptions")
def content_type_options(self) -> Optional['outputs.ResponseHeadersPolicyContentTypeOptions']:
return pulumi.get(self, "content_type_options")
@property
@pulumi.getter(name="frameOptions")
def frame_options(self) -> Optional['outputs.ResponseHeadersPolicyFrameOptions']:
return pulumi.get(self, "frame_options")
@property
@pulumi.getter(name="referrerPolicy")
def referrer_policy(self) -> Optional['outputs.ResponseHeadersPolicyReferrerPolicy']:
return pulumi.get(self, "referrer_policy")
@property
@pulumi.getter(name="strictTransportSecurity")
def strict_transport_security(self) -> Optional['outputs.ResponseHeadersPolicyStrictTransportSecurity']:
return pulumi.get(self, "strict_transport_security")
@property
@pulumi.getter(name="xSSProtection")
def x_ss_protection(self) -> Optional['outputs.ResponseHeadersPolicyXSSProtection']:
return pulumi.get(self, "x_ss_protection")
@pulumi.output_type
class ResponseHeadersPolicyStrictTransportSecurity(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "accessControlMaxAgeSec":
suggest = "access_control_max_age_sec"
elif key == "includeSubdomains":
suggest = "include_subdomains"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ResponseHeadersPolicyStrictTransportSecurity. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ResponseHeadersPolicyStrictTransportSecurity.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ResponseHeadersPolicyStrictTransportSecurity.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
access_control_max_age_sec: int,
override: bool,
include_subdomains: Optional[bool] = None,
preload: Optional[bool] = None):
pulumi.set(__self__, "access_control_max_age_sec", access_control_max_age_sec)
pulumi.set(__self__, "override", override)
if include_subdomains is not None:
pulumi.set(__self__, "include_subdomains", include_subdomains)
if preload is not None:
pulumi.set(__self__, "preload", preload)
@property
@pulumi.getter(name="accessControlMaxAgeSec")
def access_control_max_age_sec(self) -> int:
return pulumi.get(self, "access_control_max_age_sec")
@property
@pulumi.getter
def override(self) -> bool:
return pulumi.get(self, "override")
@property
@pulumi.getter(name="includeSubdomains")
def include_subdomains(self) -> Optional[bool]:
return pulumi.get(self, "include_subdomains")
@property
@pulumi.getter
def preload(self) -> Optional[bool]:
return pulumi.get(self, "preload")
@pulumi.output_type
class ResponseHeadersPolicyXSSProtection(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "modeBlock":
suggest = "mode_block"
elif key == "reportUri":
suggest = "report_uri"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ResponseHeadersPolicyXSSProtection. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ResponseHeadersPolicyXSSProtection.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ResponseHeadersPolicyXSSProtection.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
override: bool,
protection: bool,
mode_block: Optional[bool] = None,
report_uri: Optional[str] = None):
pulumi.set(__self__, "override", override)
pulumi.set(__self__, "protection", protection)
if mode_block is not None:
pulumi.set(__self__, "mode_block", mode_block)
if report_uri is not None:
pulumi.set(__self__, "report_uri", report_uri)
@property
@pulumi.getter
def override(self) -> bool:
return pulumi.get(self, "override")
@property
@pulumi.getter
def protection(self) -> bool:
return pulumi.get(self, "protection")
@property
@pulumi.getter(name="modeBlock")
def mode_block(self) -> Optional[bool]:
return pulumi.get(self, "mode_block")
@property
@pulumi.getter(name="reportUri")
def report_uri(self) -> Optional[str]:
return pulumi.get(self, "report_uri")
@pulumi.output_type
class StreamingDistributionConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "s3Origin":
suggest = "s3_origin"
elif key == "trustedSigners":
suggest = "trusted_signers"
elif key == "priceClass":
suggest = "price_class"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StreamingDistributionConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StreamingDistributionConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StreamingDistributionConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
comment: str,
enabled: bool,
s3_origin: 'outputs.StreamingDistributionS3Origin',
trusted_signers: 'outputs.StreamingDistributionTrustedSigners',
aliases: Optional[Sequence[str]] = None,
logging: Optional['outputs.StreamingDistributionLogging'] = None,
price_class: Optional[str] = None):
pulumi.set(__self__, "comment", comment)
pulumi.set(__self__, "enabled", enabled)
pulumi.set(__self__, "s3_origin", s3_origin)
pulumi.set(__self__, "trusted_signers", trusted_signers)
if aliases is not None:
pulumi.set(__self__, "aliases", aliases)
if logging is not None:
pulumi.set(__self__, "logging", logging)
if price_class is not None:
pulumi.set(__self__, "price_class", price_class)
@property
@pulumi.getter
def comment(self) -> str:
return pulumi.get(self, "comment")
@property
@pulumi.getter
def enabled(self) -> bool:
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="s3Origin")
def s3_origin(self) -> 'outputs.StreamingDistributionS3Origin':
return pulumi.get(self, "s3_origin")
@property
@pulumi.getter(name="trustedSigners")
def trusted_signers(self) -> 'outputs.StreamingDistributionTrustedSigners':
return pulumi.get(self, "trusted_signers")
@property
@pulumi.getter
def aliases(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "aliases")
@property
@pulumi.getter
def logging(self) -> Optional['outputs.StreamingDistributionLogging']:
return pulumi.get(self, "logging")
@property
@pulumi.getter(name="priceClass")
def price_class(self) -> Optional[str]:
return pulumi.get(self, "price_class")
@pulumi.output_type
class StreamingDistributionLogging(dict):
def __init__(__self__, *,
bucket: str,
enabled: bool,
prefix: str):
pulumi.set(__self__, "bucket", bucket)
pulumi.set(__self__, "enabled", enabled)
pulumi.set(__self__, "prefix", prefix)
@property
@pulumi.getter
def bucket(self) -> str:
return pulumi.get(self, "bucket")
@property
@pulumi.getter
def enabled(self) -> bool:
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def prefix(self) -> str:
return pulumi.get(self, "prefix")
@pulumi.output_type
class StreamingDistributionS3Origin(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "domainName":
suggest = "domain_name"
elif key == "originAccessIdentity":
suggest = "origin_access_identity"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StreamingDistributionS3Origin. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StreamingDistributionS3Origin.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StreamingDistributionS3Origin.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
domain_name: str,
origin_access_identity: str):
pulumi.set(__self__, "domain_name", domain_name)
pulumi.set(__self__, "origin_access_identity", origin_access_identity)
@property
@pulumi.getter(name="domainName")
def domain_name(self) -> str:
return pulumi.get(self, "domain_name")
@property
@pulumi.getter(name="originAccessIdentity")
def origin_access_identity(self) -> str:
return pulumi.get(self, "origin_access_identity")
@pulumi.output_type
class StreamingDistributionTag(dict):
def __init__(__self__, *,
key: str,
value: str):
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@pulumi.output_type
class StreamingDistributionTrustedSigners(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "awsAccountNumbers":
suggest = "aws_account_numbers"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StreamingDistributionTrustedSigners. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StreamingDistributionTrustedSigners.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StreamingDistributionTrustedSigners.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
enabled: bool,
aws_account_numbers: Optional[Sequence[str]] = None):
pulumi.set(__self__, "enabled", enabled)
if aws_account_numbers is not None:
pulumi.set(__self__, "aws_account_numbers", aws_account_numbers)
@property
@pulumi.getter
def enabled(self) -> bool:
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="awsAccountNumbers")
def aws_account_numbers(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "aws_account_numbers")
# File: tests/sc/test_system.py | repo: allenmichael/pyTenable | license: MIT
from tenable.errors import *
from ..checker import check, single
import pytest, zipfile
@pytest.mark.vcr()
def test_system_details(unauth):
s = unauth.system.details()
assert isinstance(s, dict)
check(s, 'ACAS', str)
check(s, 'PasswordComplexity', str)
check(s, 'banner', str)
check(s, 'buildID', str)
check(s, 'freshInstall', str)
check(s, 'headerText', str)
check(s, 'licenseStatus', str)
check(s, 'loginNotifications', str)
check(s, 'logo', str)
check(s, 'releaseID', str)
check(s, 'reportTypes', list)
for i in s['reportTypes']:
check(i, 'attributeSets', list)
check(i, 'enabled', str)
check(i, 'name', str)
check(i, 'type', str)
check(s, 'serverAuth', str)
check(s, 'serverClassification', str)
check(s, 'sessionTimeout', str)
check(s, 'telemetryEnabled', str)
check(s, 'timezones', list)
for i in s['timezones']:
check(i, 'gmtOffset', (int, float))
check(i, 'name', str)
check(s, 'uuid', str)
check(s, 'version', str)
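# For reference, a minimal sketch of what the imported `check` helper is assumed to do
# (the real implementation lives in the test package's checker module): assert that the key
# exists and that its value has the expected type.
def _check_sketch(obj, key, expected_type):
    assert key in obj
    assert isinstance(obj[key], expected_type)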
@pytest.mark.vcr()
def test_system_diagnostics_task_typeerror(admin):
with pytest.raises(TypeError):
admin.system.diagnostics(task=1)
@pytest.mark.vcr()
def test_system_diagnostics_type_unexpectedvalueerror(admin):
with pytest.raises(UnexpectedValueError):
admin.system.diagnostics(task='something else')
@pytest.mark.vcr()
def test_system_diagnostics_options_typeerror(admin):
with pytest.raises(TypeError):
admin.system.diagnostics(options=1)
@pytest.mark.vcr()
def test_system_diagnostics_options_item_typeerror(admin):
with pytest.raises(TypeError):
admin.system.diagnostics(options=[1])
@pytest.mark.vcr()
def test_system_diagnostics_options_item_unexpectedvalueerror(admin):
with pytest.raises(UnexpectedValueError):
admin.system.diagnostics(options=['something else'])
@pytest.mark.vcr()
def test_system_diagnostics_success(admin):
fobj = admin.system.diagnostics()
assert zipfile.is_zipfile(fobj)
@pytest.mark.vcr()
def test_system_current_locale_success(admin):
l = admin.system.current_locale()
assert isinstance(l, dict)
check(l, 'code', str)
check(l, 'description', str)
check(l, 'name', str)
@pytest.mark.vcr()
def test_system_list_locales_success(admin):
l = admin.system.list_locales()
assert isinstance(l, dict)
for key in l.keys():
check(l[key], 'code', str)
check(l[key], 'name', str)
@pytest.mark.vcr()
def test_system_set_locale_locale_typeerror(admin):
with pytest.raises(TypeError):
admin.system.set_locale(1)
@pytest.mark.vcr()
@pytest.mark.skip(reason='This appears to be 1-way, need a sacrificial system to test with')
def test_system_set_locale_success(admin):
locales = admin.system.list_locales()
assert admin.system.set_locale('ja') == 'ja'
@pytest.mark.vcr()
def test_system_status_success(admin):
s = admin.system.status()
assert isinstance(s, dict)
check(s, 'diagnosticsGenerateState', str)
check(s, 'diagnosticsGenerated', int)
check(s, 'statusDisk', str)
check(s, 'statusJava', str)
check(s, 'statusLastChecked', str)
check(s, 'statusRPM', str)
    check(s, 'statusThresholdDisk', str)
# File: services/lib/api.py | repo: jyaganeh/zenircbot | license: MIT
from redis import StrictRedis
import json
from threading import Thread
def send_privmsg(to, message):
if isinstance(to, basestring):
to = (to,)
for channel in to:
get_redis_client().publish('out',
json.dumps({
'version': 1,
'type': 'privmsg',
'data': {
'to': channel,
'message': message,
}
}))
def send_admin_message(message):
config = load_config('../bot.json')
send_privmsg(config['servers'][0]['admin_spew_channels'], message)
def non_blocking_redis_subscribe(func, args=[], kwargs={}):
pubsub = get_redis_client().pubsub()
pubsub.subscribe('in')
for msg in pubsub.listen():
message = json.loads(msg['data'])
func(message=message, *args, **kwargs)
def register_commands(service, commands):
send_admin_message(service + ' online!')
if commands:
def registration_reply(message, service, commands):
if message['version'] == 1 and message['type'] == 'privmsg':
if message['data']['message'] == "commands":
for command in commands:
send_privmsg(message['data']['sender'],
"%s: %s - %s" % (service,
command['name'],
command['description']))
redis_sub = Thread(target=non_blocking_redis_subscribe,
kwargs={'func': registration_reply,
'kwargs': {'service': service,
'commands': commands}})
redis_sub.start()
def load_config(name):
with open(name) as f:
return json.loads(f.read())
def get_redis_client(redis_config=None):
if not redis_config:
redis_config = load_config('../bot.json')['redis']
return StrictRedis(host=redis_config['host'],
port=redis_config['port'],
                       db=redis_config['db'])
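# Minimal usage sketch (assumes a running Redis instance and the ../bot.json config used above;
# the service name and channel are illustrative).
if __name__ == '__main__':
    register_commands('example-service', [
        {'name': 'ping', 'description': 'replies with pong'},
    ])
    send_privmsg('#example-channel', 'example-service loaded')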
# File: src/users/api/urls.py | repo: aliharby12/Simple-vezeeta-project | license: MIT
from django.urls import path, include
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
router.register('users', views.UserView)
router.register('profiles', views.ProfileView)
router.register('comments', views.CommentView)
router.register('reservation', views.ReservationView)
urlpatterns = [
path('', include(router.urls)),
]
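# Hook-up sketch (hypothetical project-level urls.py): this module is typically included as,
# e.g., path('api/', include('users.api.urls')), after which DefaultRouter serves the usual
# list/detail routes such as /api/users/, /api/users/{pk}/, /api/profiles/, /api/comments/
# and /api/reservation/, plus the browsable API root.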
# File: helm/dagster/schema/schema/charts/utils/kubernetes.py | repo: elsenorbw/dagster | license: Apache-2.0
from enum import Enum
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra # pylint: disable=no-name-in-module
from .utils import SupportedKubernetes, create_definition_ref
class Annotations(BaseModel):
__root__: Dict[str, str]
class Config:
schema_extra = {
"$ref": create_definition_ref(
"io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/annotations"
)
}
class Labels(BaseModel):
class Config:
schema_extra = {
"$ref": create_definition_ref(
"io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta/properties/labels"
)
}
class PullPolicy(str, Enum):
ALWAYS = "Always"
IF_NOT_PRESENT = "IfNotPresent"
NEVER = "Never"
class Image(BaseModel):
repository: str
tag: str
pullPolicy: PullPolicy
@property
def name(self) -> str:
return f"{self.repository}:{self.tag}"
class ImageWithRegistry(BaseModel):
registry: str
repository: str
tag: str
pullPolicy: PullPolicy
class Service(BaseModel):
type: str
port: int
annotations: Optional[Annotations]
class Config:
extra = Extra.forbid
class NodeSelector(BaseModel):
__root__: Dict[str, str]
class Config:
schema_extra = {
"$ref": create_definition_ref("io.k8s.api.core.v1.PodSpec/properties/nodeSelector")
}
class Affinity(BaseModel):
__root__: Dict[str, Any]
class Config:
schema_extra = {"$ref": create_definition_ref("io.k8s.api.core.v1.Affinity")}
class Tolerations(BaseModel):
__root__: List[Dict[str, Any]]
class Config:
schema_extra = {
"$ref": create_definition_ref("io.k8s.api.core.v1.PodSpec/properties/tolerations")
}
class PodSecurityContext(BaseModel):
__root__: Dict[str, Any]
class Config:
schema_extra = {"$ref": create_definition_ref("io.k8s.api.core.v1.PodSecurityContext")}
class SecurityContext(BaseModel):
__root__: Dict[str, Any]
class Config:
schema_extra = {"$ref": create_definition_ref("io.k8s.api.core.v1.SecurityContext")}
class Resources(BaseModel):
__root__: Dict[str, Any]
class Config:
schema_extra = {"$ref": create_definition_ref("io.k8s.api.core.v1.ResourceRequirements")}
class LivenessProbe(BaseModel):
class Config:
schema_extra = {"$ref": create_definition_ref("io.k8s.api.core.v1.Probe")}
class StartupProbe(BaseModel):
enabled: bool = True
class Config:
schema_extra = {
"$ref": create_definition_ref(
"io.k8s.api.core.v1.Probe",
version=SupportedKubernetes.V1_16,
),
}
class SecretRef(BaseModel):
class Config:
schema_extra = {"$ref": create_definition_ref("io.k8s.api.core.v1.LocalObjectReference")}
class SecretEnvSource(BaseModel):
class Config:
schema_extra = {"$ref": create_definition_ref("io.k8s.api.core.v1.SecretEnvSource")}
class ConfigMapEnvSource(BaseModel):
class Config:
schema_extra = {"$ref": create_definition_ref("io.k8s.api.core.v1.ConfigMapEnvSource")}
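# Minimal usage sketch (assumes pydantic v1 semantics, which the __root__ fields above imply;
# the repository/tag and resource values are illustrative). Custom-root-type models such as
# Resources validate the raw Helm-values fragment directly.
def _example_models():
    resources = Resources.parse_obj({"limits": {"cpu": "500m", "memory": "128Mi"}})
    image = Image(
        repository="docker.io/example/dagster",  # illustrative image name
        tag="1.0.0",
        pullPolicy=PullPolicy.IF_NOT_PRESENT,
    )
    return resources, image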
# File: src/helheimr_heating/tests/test_broadcast.py | repo: snototter/ragnaroek | license: MIT
import pytest
import numpy as np
from attributedict.collections import AttributeDict
from helheimr_heating.utils import broadcast as bc, img_utils
def test_message_bc():
# Helper to verify number of messages for a specific receiver
def _check(rcv, num):
rsp = bc.peek_broadcasts(rcv)
assert rsp.success
assert len(rsp.broadcasts) == num
assert not bc.info(None, None, None)
assert not bc.warn('test', None, None)
# Start with empty "message board"
assert bc.clear_broadcasts()
_check('both', 0)
_check('display', 0)
_check('bot', 0)
# Basic sanity checks (invalid inputs)
bc.info(None)
_check('both', 0)
_check('display', 0)
_check('bot', 0)
bc.information('msg', 'unknown receiver')
_check('both', 0)
_check('display', 0)
_check('bot', 0)
# Now add some messages
bc.info('msg')
_check('both', 1)
_check('bot', 1)
_check('display', 1)
bc.warn('fubar', 'both')
_check('both', 2)
_check('bot', 2)
_check('display', 2)
bc.error('critical error', 'both')
_check('both', 3)
_check('bot', 3)
_check('display', 3)
bc.warning('not so critical', 'both')
_check('both', 4)
_check('bot', 4)
_check('display', 4)
# Add some receiver-specific messages
bc.info('potential issue', 'bot')
_check('both', 4)
_check('bot', 5)
_check('display', 4)
bc.failure('display only error', 'display')
_check('both', 4)
_check('bot', 5)
_check('display', 5)
bc.info('Something for both again', 'both')
_check('both', 5)
_check('bot', 6)
_check('display', 6)
bc.info('just some visualization', 'display')
_check('both', 5)
_check('bot', 6)
_check('display', 7)
# Pop messages (we should receive them, and they should be removed or
# their receivers should be updated in the message board)
rsp = bc.pop_broadcasts('invalid')
assert not rsp.success
assert not hasattr(rsp, 'broadcasts') and hasattr(rsp, 'message') # Upon errors, we get a message/text back instead of "broadcasts"
assert not bc.pop_broadcasts('both').success # Popping messages for 'both' simultaneously is not allowed
rsp = bc.pop_broadcasts('display')
assert rsp.success
assert len(rsp.broadcasts) == 7
_check('both', 0) # Now there should be no more messages which are marked for both receiver
_check('bot', 6) # Only messages for the bot should be left
_check('display', 0)
# Insert yet again
bc.warn('This test is becoming long', 'display')
_check('both', 0)
_check('bot', 6)
_check('display', 1)
bc.info('Final message for both', 'both')
_check('both', 1)
_check('bot', 7)
_check('display', 2)
# Now pop messages for "the other" receiver
rsp = bc.pop_broadcasts('bot')
assert rsp.success
assert len(rsp.broadcasts) == 7
_check('both', 0) # Now there should be no more messages which are marked for both receiver
_check('bot', 0)
_check('display', 2)
# Test update/delete with a single message each:
bc.clear_broadcasts()
bc.info('Info message', 'both')
_check('both', 1)
_check('bot', 1)
_check('display', 1)
rsp = bc.pop_broadcasts('bot') # Test update of a single message (no deletion)
assert rsp.success
assert len(rsp.broadcasts) == 1
_check('both', 0)
_check('bot', 0)
_check('display', 1)
rsp = bc.pop_broadcasts('display')
assert rsp.success
assert len(rsp.broadcasts) == 1
_check('both', 0)
_check('bot', 0)
_check('display', 0)
def test_image_bc():
# Invalid inputs to encode/decode
assert img_utils.b64encode_image(None) is None
assert img_utils.b64decode_image(None) is None
# Helper to verify number of messages for a specific receiver
def _check(rcv, num):
rsp = bc.peek_broadcasts(rcv)
assert rsp.success
assert len(rsp.broadcasts) == num
# Start with empty "message board"
assert bc.clear_broadcasts()
_check('both', 0)
_check('bot', 0)
_check('display', 0)
img1 = np.zeros((800, 600), dtype=np.uint8)
msg1 = 'All black'
bc.push_image(img1, msg1, 'invalid')
_check('both', 0)
_check('bot', 0)
_check('display', 0)
bc.push_image(img1, None, 'bot') # Caption cannot be empty
_check('both', 0)
_check('bot', 0)
_check('display', 0)
bc.push_image(None, msg1, 'bot') # Invalid image
_check('both', 0)
_check('bot', 0)
_check('display', 0)
bc.push_image(img1, msg1, 'bot') # Everything is valid, should work
_check('both', 0)
_check('bot', 1)
_check('display', 0)
rsp = bc.pop_broadcasts('display')
assert rsp.success
assert len(rsp.broadcasts) == 0
img2 = img1.copy()
img2[:] = 200
msg2 = 'Non-black image'
bc.push_image(img2, msg2, 'bot')
rsp = bc.pop_broadcasts('bot')
assert rsp.success
assert len(rsp.broadcasts) == 2
assert bc.decode_extra_message_data(None) is None
assert rsp.broadcasts[0].message == msg1
dec1 = bc.decode_extra_message_data(rsp.broadcasts[0])
assert np.array_equal(img1, dec1)
assert rsp.broadcasts[1].message == msg2
dec2 = bc.decode_extra_message_data(rsp.broadcasts[1])
assert np.array_equal(img2, dec2)
assert bc.decode_extra_message_data(AttributeDict({'msg_type': 'foo', 'extra': 'bar'})) is None
# Clean up the table after these tests
bc.clear_broadcasts()
_check('both', 0)
_check('bot', 0)
_check('display', 0)
# File: src/controller/python/chip/ble/library_handle.py | repo: SiliconLabs/connectedhomeip-1 | license: Apache-2.0
#
# Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import chip.native
import ctypes
from ctypes import c_bool, c_void_p, c_char_p, c_uint32, py_object
from chip.ble.types import DeviceScannedCallback, ScanDoneCallback
# This prevents python auto-casting c_void_p to integers and
# auto-casting 32/64 bit values to int/long respectively. Without this
# passing in c_void_p does not seem to work well for numbers
# in [0x80000000; 0xFFFFFFFF] (argument will be auto-cast to 64-bit negative)
class VoidPointer(c_void_p):
pass
def _GetBleLibraryHandle() -> ctypes.CDLL:
""" Get the native library handle with BLE method initialization.
    Retrieves the CHIP native library handle and attaches signatures to
native methods.
"""
handle = chip.native.GetLibraryHandle()
# Uses one of the type decorators as an indicator for everything being
# initialized. Native methods default to c_int return types
if handle.pychip_ble_adapter_list_new.restype != VoidPointer:
setter = chip.native.NativeLibraryHandleMethodArguments(handle)
setter.Set('pychip_ble_adapter_list_new', VoidPointer, [])
setter.Set('pychip_ble_adapter_list_next', c_bool, [VoidPointer])
setter.Set('pychip_ble_adapter_list_get_index', c_uint32, [VoidPointer])
setter.Set('pychip_ble_adapter_list_get_address', c_char_p, [VoidPointer])
setter.Set('pychip_ble_adapter_list_get_alias', c_char_p, [VoidPointer])
setter.Set('pychip_ble_adapter_list_get_name', c_char_p, [VoidPointer])
setter.Set('pychip_ble_adapter_list_is_powered', c_bool, [VoidPointer])
setter.Set('pychip_ble_adapter_list_delete', None, [VoidPointer])
setter.Set('pychip_ble_adapter_list_get_raw_adapter', VoidPointer, [VoidPointer])
setter.Set('pychip_ble_start_scanning', VoidPointer, [
py_object, VoidPointer, c_uint32, DeviceScannedCallback, ScanDoneCallback
])
return handle
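# Minimal usage sketch: enumerate BLE adapter addresses using only the symbols whose
# signatures are configured above (error handling omitted; returned values are C strings).
def _list_ble_adapter_addresses() -> list:
    handle = _GetBleLibraryHandle()
    addresses = []
    adapters = handle.pychip_ble_adapter_list_new()
    while handle.pychip_ble_adapter_list_next(adapters):
        addresses.append(handle.pychip_ble_adapter_list_get_address(adapters))
    handle.pychip_ble_adapter_list_delete(adapters)
    return addresses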
# File: kubernetes/test/test_v1_container.py | repo: TomasTomecek/kubernetes-python | license: Apache-2.0
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_container import V1Container
class TestV1Container(unittest.TestCase):
""" V1Container unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1Container(self):
"""
Test V1Container
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_container.V1Container()
pass
if __name__ == '__main__':
unittest.main()
| 20.444444 | 105 | 0.695652 |
1858b27dd7a47120adc7808ad2a025ba6bb75032 | 2,986 | py | Python | waypoint/utils/cookie_fetcher.py | amickael/waypoint | 7e05f58635bdfda32d5a16e28c14cdb4a0fffce9 | [
"MIT"
] | null | null | null | waypoint/utils/cookie_fetcher.py | amickael/waypoint | 7e05f58635bdfda32d5a16e28c14cdb4a0fffce9 | [
"MIT"
] | null | null | null | waypoint/utils/cookie_fetcher.py | amickael/waypoint | 7e05f58635bdfda32d5a16e28c14cdb4a0fffce9 | [
"MIT"
] | null | null | null | from urllib.parse import urlparse, parse_qs, unquote
from typing import Union
from selenium.webdriver import Chrome, ChromeOptions
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
from webdriver_manager.chrome import ChromeDriverManager
class CookieFetcher:
oauth_link = (
"https://login.live.com/oauth20_authorize.srf?client_id=000000004C0BD2F1&scope=xbox.basic+xbox"
".offline_access&response_type=code&redirect_uri=https://www.halowaypoint.com/auth/callback&locale"
"=en-us&display=touch&state=https%3a%2f%2fwww.halowaypoint.com%2fen-us%2fgames%2fhalo-the-master"
"-chief-collection%2fxbox-one%2fgame-history%3fview%3dDataOnly"
)
oauth_dest = unquote(parse_qs(urlparse(oauth_link).query)["state"][0])
def __init__(self, username: str, password: str):
self.username = username
self.password = password
self.webdriver_service = Service(ChromeDriverManager().install())
self.webdriver_options = ChromeOptions()
self.webdriver_options.headless = True
self.webdriver_options.add_argument("--no-sandbox")
self.webdriver_options.add_argument("--disable-dev-shm-usage")
self.webdriver_options.add_argument("--remote-debugging-port=9222")
self.driver: Union[Chrome, None] = None
self.wait: Union[WebDriverWait, None] = None
self.get_driver()
def get_driver(self, timeout: int = 5):
self.cleanup()
self.driver = Chrome(
service=self.webdriver_service,
options=self.webdriver_options,
)
self.wait = WebDriverWait(self.driver, timeout)
def cleanup(self):
if self.driver:
self.driver.quit()
def perform_auth(self) -> str:
# Navigate to OAuth link
self.driver.get(self.oauth_link)
# Fill out username and click to next step
username_el = self.driver.find_element(By.NAME, "loginfmt")
username_el.send_keys(self.username)
next_btn = self.driver.find_element(By.ID, "idSIButton9")
next_btn.click()
# Wait for sign in button to become clickable & fill out password & continue
self.wait.until(ec.element_to_be_clickable((By.ID, "idSIButton9")))
password_el = self.driver.find_element(By.NAME, "passwd")
password_el.send_keys(self.password)
signin_btn = self.driver.find_element(By.ID, "idSIButton9")
signin_btn.click()
# Wait until redirect has finished, then return Auth cookie
self.wait.until(ec.url_to_be(self.oauth_dest))
cookie = self.driver.get_cookie("Auth")
if cookie:
self.cleanup()
return cookie["value"]
# Loop if failure, this is common for this service
self.get_driver()
return self.perform_auth()
| 40.351351 | 107 | 0.689551 |
4c5254ffc3773c3ca91fa76b55ed873e0e71d7bc | 6,970 | py | Python | python/rootba/latex/results_table.py | zeta1999/rootba | d1a680a88980d7ac57cf2ff7459d00ac1cab6c9a | [
"BSD-3-Clause"
] | 139 | 2021-06-20T17:20:44.000Z | 2022-03-30T01:15:38.000Z | python/rootba/latex/results_table.py | zeta1999/rootba | d1a680a88980d7ac57cf2ff7459d00ac1cab6c9a | [
"BSD-3-Clause"
] | 5 | 2021-07-10T11:51:08.000Z | 2022-01-01T00:05:39.000Z | python/rootba/latex/results_table.py | NikolausDemmel/rootba | 0762f36a0afa7196709bd0fa147ae75ee7c7632c | [
"BSD-3-Clause"
] | 20 | 2021-06-22T03:33:30.000Z | 2022-03-28T11:41:54.000Z | #
# BSD 3-Clause License
#
# This file is part of the RootBA project.
# https://github.com/NikolausDemmel/rootba
#
# Copyright (c) 2021, Nikolaus Demmel.
# All rights reserved.
#
import numbers
import os
import math
import numpy as np
from pylatex import Subsection, Tabular, TextColor
from pylatex import MultiRow, FootnoteText
from pylatex.utils import italic, bold, NoEscape, escape_latex, dumps_list
from .containers import ExperimentsTable
from .util import format_ratio_percent
from .util import best_two_non_repeating
class ResultsTable(ExperimentsTable):
def __init__(self, exps, spec, show_values_failed_runs, seq_displayname_mapping, export_basepath):
super().__init__(exps, spec, show_values_failed_runs, seq_displayname_mapping, export_basepath)
self.doit()
def doit(self):
is_multirow = self.num_metrics > 1 and self.spec.multirow
def render_metric(value, best, second, decimals, format_string, highlight_top, relative_to):
if isinstance(value, numbers.Number):
if relative_to is None or relative_to == 0 or not np.isfinite(relative_to):
# absolute number
rendered = format_string.format(value, prec=decimals)
else:
# percent
rendered = format_ratio_percent(value, relative_to, decimals=decimals)
if highlight_top:
if value == best:
rendered = bold(rendered)
elif value == second:
rendered = italic(rendered)
return rendered
else:
return value
if self.spec.export_latex:
row_height = None
else:
row_height = 0.65 if is_multirow and self.num_metrics >= 3 else 1
column_spec = '|r' if self.spec.vertical_bars else 'r'
t = Tabular('l' + column_spec * self.num_exps, row_height=row_height, pos=['t'])
escape_header_fun = lambda text: text if self.spec.escape_latex_header else NoEscape(text)
if self.spec.rotate_header:
t.add_row([''] + [
NoEscape(r"\rotatebox{90}{%s}" % escape_latex(escape_header_fun(s.display_name(self.exps[s.name]))))
for s in self.experiment_specs
])
else:
t.add_row([''] + [escape_header_fun(s.display_name(self.exps[s.name])) for s in self.experiment_specs])
t.add_hline()
for seq in self.seq_names:
fails = [self.is_failed(self.exps[s.name], seq) for s in self.experiment_specs]
failure_strings = [self.render_failure(self.exps[s.name], seq) for s in self.experiment_specs]
values = np.array([self.get_metrics(self.exps[s.name], seq, s.it) for s in self.experiment_specs])
top_values = list(range(self.num_metrics))
for c, m in enumerate(self.metrics):
try:
values[:, c] = np.around(values[:, c], m.decimals)
except IndexError:
pass
non_excluded_values = np.array(values[:, c])
for i in m.exclude_columns_highlight:
non_excluded_values[i] = math.nan
top_values[c] = best_two_non_repeating(non_excluded_values, reverse=m.larger_is_better)
if is_multirow:
rows = [[MultiRow(self.num_metrics, data=self.seq_displayname(seq))]
] + [list(['']) for _ in range(1, self.num_metrics)]
else:
rows = [[self.seq_displayname(seq)]]
for c, (fail, failure_str, value_col) in enumerate(zip(fails, failure_strings, values)):
if failure_str is not None:
if self.spec.color_failed:
failure_str = TextColor(self.spec.color_failed, failure_str)
if is_multirow:
rows[0].append(MultiRow(self.num_metrics, data=failure_str))
for r in range(1, self.num_metrics):
rows[r].append('')
else:
rows[0].append(failure_str)
else:
tmp_data = [None] * self.num_metrics
for r, m in enumerate(self.metrics):
if m.failed_threshold and value_col[r] > m.failed_threshold:
obj = "x"
if self.spec.color_failed:
obj = TextColor(self.spec.color_failed, obj)
else:
relative_to = None
if m.relative_to_column is not None and m.relative_to_column != c:
relative_to = values[m.relative_to_column, r]
obj = render_metric(value_col[r],
top_values[r][0],
top_values[r][1],
m.effective_display_decimals(),
m.format_string,
m.highlight_top,
relative_to=relative_to)
if fail and self.spec.color_failed:
obj = TextColor(self.spec.color_failed, obj)
tmp_data[r] = obj
if self.num_metrics == 1 or is_multirow:
for r, obj in enumerate(tmp_data):
rows[r].append(obj)
else:
entry = []
for v in tmp_data:
entry.append(v)
entry.append(NoEscape("~/~"))
entry.pop()
rows[0].append(dumps_list(entry))
for row in rows:
t.add_row(row)
if is_multirow:
t.add_hline()
if self.spec.export_latex:
os.makedirs(self.export_basepath, exist_ok=True)
t.generate_tex(os.path.join(self.export_basepath, self.spec.export_latex))
with self.create(Subsection(self.spec.name, numbering=False)) as p:
if self.spec.metrics_legend:
legend = Tabular('|c|', row_height=row_height, pos=['t'])
legend.add_hline()
legend.add_row(["Metrics"])
legend.add_hline()
for m in self.metrics:
legend.add_row([m.display_name])
legend.add_hline()
tab = Tabular("ll")
tab.add_row([t, legend])
content = tab
else:
content = t
if True:
content = FootnoteText(content)
p.append(content)
# File: final/170401073/client.py | repo: hasan-se/blm304 | license: Unlicense
#Mehmet Salih Çelik 170401073
import socket
import datetime
import os
import time
import sys
ipiste=str(input("Enter the server IP address: "))
HOST = ipiste
PORT = 142
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
a=time.time()
data = s.recv(1024)
b=time.time()
data=data.decode("utf-8")
def utchesapla():
x = datetime.datetime.now()
y = str(x.astimezone().timetz())
z = y.strip()[15:18]
if z[1]=="0":
z=int(z[0]+z[2])
else:
z=int(z[0]+z[1]+z[2])
return z
z=int(utchesapla())
x=float(data.split()[0])/1000
y=int(data.split()[2])-z
t=y*3600
x=x+t
gecikme=b-a
gecikmelizaman=x+(gecikme)
ayarla=datetime.datetime.fromtimestamp(x)
ayarla=str(ayarla)
ayarla2=datetime.datetime.fromtimestamp(gecikmelizaman)
ayarla2=str(ayarla2)
print("Time value received from the server (ms): ",data.split()[0])
print("Received value converted to a date: ",ayarla)
print("Round-trip delay of the exchange: ",gecikme," seconds")
print("Time computed with the delay added: ",datetime.datetime.fromtimestamp(gecikmelizaman))
komut="sudo date -s "+'"'+ayarla2+'"'
komut2="timedatectl set-ntp false"
komut3="sudo hwclock -w"
os.system(komut)
os.system(komut2)
os.system(komut3)
print("Client clock updated to ",ayarla2)
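# Worked example of the adjustment above (illustrative values): if the server sends
# "1650000000000 <name> +3", then x = 1650000000000 / 1000 = 1650000000.0 s; with a local
# UTC offset of +2 the correction is y = 3 - 2 = 1 hour, so t = 3600 s is added to x, and
# finally the measured round-trip delay (b - a) is added before the system clock is set.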
# File: hca/dss/cli.py | repo: mshadbolt/dcp-cli | license: MIT
from . import DSSClient
def add_commands(subparsers, help_menu=False):
dss_parser = subparsers.add_parser('dss', help="Interact with the HCA Data Storage System")
def help(args):
dss_parser.print_help()
dss_parser.set_defaults(entry_point=help)
dss_subparsers = dss_parser.add_subparsers()
dss_cli_client = DSSClient()
dss_cli_client.build_argparse_subparsers(dss_subparsers, help_menu=help_menu)
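# Wiring sketch (hypothetical host CLI): add_commands expects an argparse subparsers object
# from the top-level program.
def _build_example_parser():
    import argparse
    parser = argparse.ArgumentParser(prog="hca")
    add_commands(parser.add_subparsers(), help_menu=False)
    return parser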
# File: unsupervised_learning/partitioning_around_medoids.py | repo: SuccessionEcologicalServices/ML-From-Scratch | license: MIT
import sys, os, math, random
from sklearn import datasets
import numpy as np
# Import helper functions
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, dir_path + "/../utils")
from data_manipulation import normalize
from data_operation import euclidean_distance
sys.path.insert(0, dir_path + "/../unsupervised_learning/")
from principal_component_analysis import PCA
class PAM():
def __init__(self, k=2):
self.k = k
# Initialize the medoids as random samples
def _init_random_medoids(self, X):
n_samples = np.shape(X)[0]
n_features = np.shape(X)[1]
medoids = np.zeros((self.k, n_features))
for i in range(self.k):
medoid = X[np.random.choice(range(n_samples))]
medoids[i] = medoid
return medoids
# Return the index of the closest medoid to the sample
def _closest_medoid(self, sample, medoids):
closest_i = None
closest_distance = float("inf")
for i, medoid in enumerate(medoids):
distance = euclidean_distance(sample, medoid)
if distance < closest_distance:
closest_i = i
closest_distance = distance
return closest_i
# Assign the samples to the closest medoids to create clusters
def _create_clusters(self, X, medoids):
clusters = [[] for _ in range(self.k)]
for sample_i, sample in enumerate(X):
medoid_i = self._closest_medoid(sample, medoids)
clusters[medoid_i].append(sample_i)
return clusters
# Calculate the cost (total distance between samples and their medoids)
def _calculate_cost(self, X, clusters, medoids):
cost = 0
# For each cluster
for i, cluster in enumerate(clusters):
medoid = medoids[i]
for sample_i in cluster:
# Add distance between sample and medoid as cost
cost += euclidean_distance(X[sample_i], medoid)
return cost
# Returns a list of all samples that are not currently medoids
def _get_non_medoids(self, X, medoids):
non_medoids = []
for sample in X:
if not sample in medoids:
non_medoids.append(sample)
return non_medoids
# Classify samples as the index of their clusters
def _get_cluster_labels(self, clusters, X):
# One prediction for each sample
y_pred = np.zeros(np.shape(X)[0])
for cluster_i in range(len(clusters)):
cluster = clusters[cluster_i]
for sample_i in cluster:
y_pred[sample_i] = cluster_i
return y_pred
# Do Partitioning Around Medoids and return the cluster labels
def predict(self, X):
# Initialize medoids randomly
medoids = self._init_random_medoids(X)
# Assign samples to closest medoids
clusters = self._create_clusters(X, medoids)
# Set initial cost to inf
cost = float("inf")
# Calculate the initial cost (total distance between samples and corresponding medoids)
new_cost = self._calculate_cost(X, clusters, medoids)
swap = False
# Iterate until we no longer have a cheaper cost
while new_cost < cost:
cost = new_cost
for medoid in medoids:
# Get all non-medoid samples
non_medoids = self._get_non_medoids(X, medoids)
# Calculate the cost when swapping medoid and samples
for sample in non_medoids:
# Swap sample with the medoid
new_medoids = medoids.copy()
new_medoids[medoids == medoid] = sample
# Assign samples to new medoids
new_clusters = self._create_clusters(X, new_medoids)
# Calculate the cost with the new set of medoids
_new_cost = self._calculate_cost(X, new_clusters, new_medoids)
# If the swap gives us a lower cost start over with new medoids
if _new_cost < new_cost:
new_cost = _new_cost
medoids = new_medoids
swap = True
break
# If there was a swap start over
if swap:
swap = False
break
# Return the samples cluster indices as labels
return self._get_cluster_labels(clusters, X)
# Demo
def main():
# Load the dataset
X, y = datasets.make_blobs()
# Cluster the data using K-Medoids
clf = PAM(k=3)
y_pred = clf.predict(X)
# Project the data onto the 2 primary principal components
pca = PCA()
pca.plot_in_2d(X, y_pred)
pca.plot_in_2d(X, y)
if __name__ == "__main__": main()
# File: src/python/Problem097.py | repo: mchrzanowski/ProjectEuler | license: MIT
'''
Created on Jan 22, 2012
@author: mchrzanowski
'''
from time import time
LIMIT = 10
def main():
start = time()
print "Last ten digits: ", (28433 * 2 ** 7830457 + 1) % 10 ** LIMIT
end = time()
print "Runtime: ", end - start, " seconds. "
if __name__ == '__main__':
    main()
# File: google/cloud/aiplatform_v1/services/prediction_service/async_client.py | repo: geraint0923/python-aiplatform | license: Apache-2.0
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.aiplatform_v1.types import prediction_service
from google.protobuf import struct_pb2 # type: ignore
from .transports.base import PredictionServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import PredictionServiceGrpcAsyncIOTransport
from .client import PredictionServiceClient
class PredictionServiceAsyncClient:
"""A service for online predictions and explanations."""
_client: PredictionServiceClient
DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT
endpoint_path = staticmethod(PredictionServiceClient.endpoint_path)
parse_endpoint_path = staticmethod(PredictionServiceClient.parse_endpoint_path)
common_billing_account_path = staticmethod(
PredictionServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
PredictionServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(PredictionServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
PredictionServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
PredictionServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
PredictionServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(PredictionServiceClient.common_project_path)
parse_common_project_path = staticmethod(
PredictionServiceClient.parse_common_project_path
)
common_location_path = staticmethod(PredictionServiceClient.common_location_path)
parse_common_location_path = staticmethod(
PredictionServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PredictionServiceAsyncClient: The constructed client.
"""
return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PredictionServiceAsyncClient: The constructed client.
"""
return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> PredictionServiceTransport:
"""Return the transport used by the client instance.
Returns:
PredictionServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, PredictionServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the prediction service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.PredictionServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = PredictionServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def predict(
self,
request: prediction_service.PredictRequest = None,
*,
endpoint: str = None,
instances: Sequence[struct_pb2.Value] = None,
parameters: struct_pb2.Value = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> prediction_service.PredictResponse:
r"""Perform an online prediction.
Args:
request (:class:`google.cloud.aiplatform_v1.types.PredictRequest`):
The request object. Request message for
[PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict].
endpoint (:class:`str`):
Required. The name of the Endpoint requested to serve
the prediction. Format:
``projects/{project}/locations/{location}/endpoints/{endpoint}``
This corresponds to the ``endpoint`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
instances (:class:`Sequence[google.protobuf.struct_pb2.Value]`):
Required. The instances that are the input to the
prediction call. A DeployedModel may have an upper limit
on the number of instances it supports per request, and
when it is exceeded the prediction call errors in case
of AutoML Models, or, in case of customer created
Models, the behaviour is as documented by that Model.
The schema of any single instance may be specified via
Endpoint's DeployedModels'
[Model's][google.cloud.aiplatform.v1.DeployedModel.model]
[PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
[instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri].
This corresponds to the ``instances`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
parameters (:class:`google.protobuf.struct_pb2.Value`):
The parameters that govern the prediction. The schema of
the parameters may be specified via Endpoint's
DeployedModels' [Model's
][google.cloud.aiplatform.v1.DeployedModel.model]
[PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
[parameters_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.parameters_schema_uri].
This corresponds to the ``parameters`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.PredictResponse:
Response message for
[PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([endpoint, instances, parameters])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = prediction_service.PredictRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if endpoint is not None:
request.endpoint = endpoint
if parameters is not None:
request.parameters = parameters
if instances:
request.instances.extend(instances)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.predict,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("PredictionServiceAsyncClient",)
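# Minimal call sketch (the endpoint name and instance payload are placeholders; application
# default credentials are assumed).
async def _example_predict():
    client = PredictionServiceAsyncClient()
    response = await client.predict(
        endpoint="projects/my-project/locations/us-central1/endpoints/1234567890",
        instances=[struct_pb2.Value(string_value="example instance")],
    )
    return response.predictions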
# File: Gems/Atom/RPI/Tools/atom_rpi_tools/utils.py | repo: aaarsene/o3de | license: Apache-2.0 OR MIT
"""
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import os.path
from os import path
import shutil
import json
def find_or_copy_file(destFilePath, sourceFilePath):
if path.exists(destFilePath):
return
if not path.exists(sourceFilePath):
        raise ValueError("find_or_copy_file: source file [{}] doesn't exist".format(sourceFilePath))
dstDir = path.dirname(destFilePath)
if not path.isdir(dstDir):
os.makedirs(dstDir)
shutil.copyfile(sourceFilePath, destFilePath)
def load_json_file(filePath):
file_stream = open(filePath, "r")
return json.load(file_stream)
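# Usage sketch (paths are illustrative): copy a default config into place once, then load it.
def _load_default_settings(project_dir):
    dst = os.path.join(project_dir, 'Config', 'settings.json')
    src = os.path.join(project_dir, 'Config', 'default_settings.json')
    find_or_copy_file(dst, src)
    return load_json_file(dst)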
# File: examples/text_classification/train.py | repo: Qin-Folks/text | license: BSD-3-Clause
import os
import logging
import argparse
import torch
import sys
from torchtext.datasets import text_classification
from torch.utils.data import DataLoader
from model import TextSentiment
from torch.utils.data.dataset import random_split
r"""
This file shows the training process of the text classification model.
"""
def generate_batch(batch):
r"""
Since the text entries have different lengths, a custom function
generate_batch() is used to generate data batches and offsets,
which are compatible with EmbeddingBag. The function is passed
to 'collate_fn' in torch.utils.data.DataLoader. The input to
'collate_fn' is a list of tensors with the size of batch_size,
and the 'collate_fn' function packs them into a mini-batch.
Pay attention here and make sure that 'collate_fn' is declared
as a top level def. This ensures that the function is available
in each worker.
Output:
text: the text entries in the data_batch are packed into a list and
concatenated as a single tensor for the input of nn.EmbeddingBag.
offsets: the offsets is a tensor of delimiters to represent the beginning
index of the individual sequence in the text tensor.
cls: a tensor saving the labels of individual text entries.
"""
label = torch.tensor([entry[0] for entry in batch])
text = [entry[1] for entry in batch]
offsets = [0] + [len(entry) for entry in text]
offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
text = torch.cat(text)
return text, offsets, label
r"""
torch.utils.data.DataLoader is recommended for PyTorch users to load data.
We use DataLoader here to load datasets and send it to the train_and_valid()
and text() functions.
"""
def own_softmax(x, label_proportions):
if not isinstance(label_proportions, torch.Tensor):
label_proportions = torch.tensor(label_proportions).to('cuda')
x_exp = torch.exp(x)
# Switch these two
weighted_x_exp = x_exp * label_proportions
# weighted_x_exp = x_exp
x_exp_sum = torch.sum(weighted_x_exp, 1, keepdim=True)
return x_exp / x_exp_sum
def train_and_valid(lr_, sub_train_, sub_valid_):
r"""
We use a SGD optimizer to train the model here and the learning rate
decreases linearly with the progress of the training process.
Arguments:
lr_: learning rate
sub_train_: the data used to train the model
sub_valid_: the data used for validation
"""
optimizer = torch.optim.SGD(model.parameters(), lr=lr_)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=args.lr_gamma)
train_data = DataLoader(sub_train_, batch_size=batch_size, shuffle=True,
collate_fn=generate_batch, num_workers=args.num_workers)
num_lines = num_epochs * len(train_data)
for epoch in range(num_epochs):
# Train the model
for i, (text, offsets, cls) in enumerate(train_data):
optimizer.zero_grad()
text, offsets, cls = text.to(device), offsets.to(device), cls.to(device)
output = model(text, offsets)
log_softmax_ = torch.log(own_softmax(output, train_label_prop) + 1e-5)
loss = criterion(log_softmax_, cls)
loss.backward()
optimizer.step()
processed_lines = i + len(train_data) * epoch
progress = processed_lines / float(num_lines)
if processed_lines % 128 == 0:
sys.stderr.write(
"\rProgress: {:3.0f}% lr: {:3.3f} loss: {:3.3f}".format(
progress * 100, scheduler.get_lr()[0], loss))
# Adjust the learning rate
scheduler.step()
# Test the model on valid set
print("")
print("Valid - Accuracy: {}".format(test(sub_valid_)))
def test(data_):
r"""
Arguments:
data_: the data used to train the model
"""
confusion_matrix = torch.zeros(train_label_prop.shape[0], train_label_prop.shape[0])
data = DataLoader(data_, batch_size=batch_size, collate_fn=generate_batch)
total_accuracy = []
for text, offsets, cls in data:
text, offsets, cls = text.to(device), offsets.to(device), cls.to(device)
with torch.no_grad():
output = model(text, offsets)
log_softmax_ = torch.log(own_softmax(output, train_label_prop) + 1e-5)
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
accuracy = (log_softmax_.argmax(1) == cls).float().mean().item()
total_accuracy.append(accuracy)
for t, p in zip(cls.view(-1), pred.view(-1)):
confusion_matrix[t.long(), p.long()] += 1
# In case that nothing in the dataset
if total_accuracy == []:
return 0.0
print('confusion_matrix.sum(1): ', confusion_matrix.sum(1))
per_class_acc = confusion_matrix.diag() / confusion_matrix.sum(1)
print('per class accuracy: ', per_class_acc)
print('macro avg accuracy: ', torch.mean(per_class_acc))
return sum(total_accuracy) / len(total_accuracy)
def get_prop_tensor(label_count_dict):
rtn = []
for a_key in sorted(label_count_dict.keys()):
rtn.append(label_count_dict[a_key])
return torch.tensor(rtn).to(device=args.device)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Train a text classification model on text classification datasets.')
parser.add_argument('dataset', choices=text_classification.DATASETS)
parser.add_argument('--num-epochs', type=int, default=5,
help='num epochs (default=5)')
parser.add_argument('--embed-dim', type=int, default=32,
help='embed dim. (default=32)')
parser.add_argument('--batch-size', type=int, default=16,
help='batch size (default=16)')
parser.add_argument('--split-ratio', type=float, default=0.95,
help='train/valid split ratio (default=0.95)')
parser.add_argument('--lr', type=float, default=4.0,
help='learning rate (default=4.0)')
parser.add_argument('--lr-gamma', type=float, default=0.8,
help='gamma value for lr (default=0.8)')
parser.add_argument('--ngrams', type=int, default=2,
help='ngrams (default=2)')
parser.add_argument('--num-workers', type=int, default=1,
help='num of workers (default=1)')
parser.add_argument('--device', default='cpu',
help='device (default=cpu)')
parser.add_argument('--data', default='.data',
help='data directory (default=.data)')
parser.add_argument('--use-sp-tokenizer', type=bool, default=False,
help='use sentencepiece tokenizer (default=False)')
parser.add_argument('--sp-vocab-size', type=int, default=20000,
help='vocab size in sentencepiece model (default=20000)')
parser.add_argument('--dictionary',
help='path to save vocab')
parser.add_argument('--save-model-path',
help='path for saving model')
parser.add_argument('--logging-level', default='WARNING',
help='logging level (default=WARNING)')
args = parser.parse_args()
args.num_epochs = 20
num_epochs = args.num_epochs
embed_dim = args.embed_dim
batch_size = args.batch_size
lr = args.lr
device = args.device
data = args.data
split_ratio = args.split_ratio
# two args for sentencepiece tokenizer
use_sp_tokenizer = args.use_sp_tokenizer
sp_vocab_size = args.sp_vocab_size
logging.basicConfig(level=getattr(logging, args.logging_level))
if not os.path.exists(data):
print("Creating directory {}".format(data))
os.mkdir(data)
if use_sp_tokenizer:
import spm_dataset
train_dataset, test_dataset = spm_dataset.setup_datasets(args.dataset,
root='.data',
vocab_size=sp_vocab_size)
model = TextSentiment(sp_vocab_size, embed_dim,
len(train_dataset.get_labels())).to(device)
else:
train_dataset, test_dataset = text_classification.DATASETS[args.dataset](
root=data, ngrams=args.ngrams)
model = TextSentiment(len(train_dataset.get_vocab()),
embed_dim, len(train_dataset.get_labels())).to(device)
train_cls_prop_dict = train_dataset.cls_prop_dict
test_cls_prop_dict = test_dataset.cls_prop_dict
train_label_prop = get_prop_tensor(train_cls_prop_dict)
test_label_prop = get_prop_tensor(test_cls_prop_dict)
# criterion = torch.nn.CrossEntropyLoss().to(device)
criterion = torch.nn.NLLLoss().to(device)
# split train_dataset into train and valid
train_len = int(len(train_dataset) * split_ratio)
sub_train_, sub_valid_ = \
random_split(train_dataset, [train_len, len(train_dataset) - train_len])
# # Label statistics
# label_dict = {}
# all_train_data = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
# collate_fn=generate_batch, num_workers=args.num_workers)
# for i, (text, offsets, cls) in enumerate(all_train_data):
# for a_cls in cls:
# if a_cls.item() not in label_dict.keys():
# label_dict[a_cls.item()] = 1
# else:
# label_dict[a_cls.item()] += 1
# print(label_dict)
train_and_valid(lr, sub_train_, sub_valid_)
print("Test - Accuracy: {}".format(test(test_dataset)))
if args.save_model_path:
print("Saving model to {}".format(args.save_model_path))
torch.save(model.to('cpu'), args.save_model_path)
if args.dictionary is not None:
print("Save vocab to {}".format(args.dictionary))
torch.save(train_dataset.get_vocab(), args.dictionary)
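# Numerical sketch of the label-weighted softmax defined above (illustrative values): with
# logits x = [2.0, 1.0] and label proportions p = [0.9, 0.1], the weighted partition function
# is 0.9*e^2 + 0.1*e^1 ≈ 6.92, so the first output becomes e^2 / 6.92 ≈ 1.07 (versus 0.73 for
# a plain softmax); log(own_softmax(...) + 1e-5) is what feeds the NLLLoss used above.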
# File: setup.py | repo: adamnovak/PyVCF | license: MIT
from setuptools import setup
from distutils.core import setup
from distutils.extension import Extension
try:
from Cython.Distutils import build_ext
CYTHON = True
except:
CYTHON = False
requires = []
# python 2.6 does not have argparse
try:
import argparse
except ImportError:
requires.append('argparse')
import collections
try:
collections.Counter
except AttributeError:
requires.append('counter')
try:
collections.OrderedDict
except AttributeError:
requires.append('ordereddict')
# get the version without an import
VERSION = "Undefined"
DOC = ""
inside_doc = False
for line in open('vcf/__init__.py'):
if "'''" in line:
inside_doc = not inside_doc
if inside_doc:
DOC += line.replace("'''", "")
if (line.startswith('VERSION')):
exec(line.strip())
extras = {}
if CYTHON:
extras['cmdclass'] = {'build_ext': build_ext}
extras['ext_modules'] = [Extension("vcf.cparse", ["vcf/cparse.pyx"])]
setup(
name='PyVCF',
packages=['vcf', 'vcf.test'],
scripts=['scripts/vcf_melt', 'scripts/vcf_filter.py'],
author='James Casbon and @jdoughertyii',
author_email='casbon@gmail.com',
description='Variant Call Format (VCF) parser for Python',
long_description=DOC,
test_suite='vcf.test.test_vcf.suite',
install_requires=['distribute'],
requires=requires,
entry_points = {
'vcf.filters': [
'site_quality = vcf.filters:SiteQuality',
'vgq = vcf.filters:VariantGenotypeQuality',
'eb = vcf.filters:ErrorBiasFilter',
'dps = vcf.filters:DepthPerSample',
'avg-dps = vcf.filters:AvgDepthPerSample',
'snp-only = vcf.filters:SnpOnly',
]
},
url='https://github.com/jamescasbon/PyVCF',
version=VERSION,
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering',
],
keywords='bioinformatics',
use_2to3=True,
include_package_data=True,
package_data = {
'': ['*.vcf', '*.gz', '*.tbi'],
},
**extras
)
| 26.597701 | 73 | 0.632239 |
90dcb9647b5b1bdc6d25ab689d26c967b9576abb | 1,222 | py | Python | modoboa/lib/migrations/0002_rename_parameters.py | HarshCasper/modoboa | a00baa0593107992f545ee3e89cd4346b9615a96 | [
"0BSD"
] | 1,602 | 2016-12-15T14:25:34.000Z | 2022-03-31T16:49:25.000Z | modoboa/lib/migrations/0002_rename_parameters.py | sebageek/modoboa | 57f5d57ea60a57e8dcac970085dfc07082481fc6 | [
"0BSD"
] | 1,290 | 2016-12-14T15:39:05.000Z | 2022-03-31T13:49:09.000Z | modoboa/lib/migrations/0002_rename_parameters.py | sebageek/modoboa | 57f5d57ea60a57e8dcac970085dfc07082481fc6 | [
"0BSD"
] | 272 | 2016-12-22T11:58:18.000Z | 2022-03-17T15:57:24.000Z | from django.db import models, migrations
APPLICATIONS = [
("admin", "modoboa_admin"),
("amavis", "modoboa_amavis"),
("limits", "modoboa_admin_limits"),
("postfix_autoreply", "modoboa_postfix_autoreply"),
("postfix_relay_domains", "modoboa_admin_relaydomains"),
("radicale", "modoboa_radicale"),
("stats", "modoboa_stats"),
("sievefilters", "modoboa_sievefilters"),
("webmail", "modoboa_webmail"),
]
def rename_app_parameters(app, model):
"""Rename all parameters for a given app."""
qset = model.objects.filter(name__startswith=app[0])
for param in qset:
param.name = param.name.replace(
"{}.".format(app[0]), "{}.".format(app[1])
)
param.save()
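# For illustration (hypothetical parameter name): a Parameter called
# "admin.ENABLE_FOO" would be renamed to "modoboa_admin.ENABLE_FOO" by the
# prefix substitution in rename_app_parameters above.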
def rename_parameters(apps, schema_editor):
"""Rename old parameters."""
Parameter = apps.get_model("lib", "Parameter")
UserParameter = apps.get_model("lib", "UserParameter")
for app in APPLICATIONS:
rename_app_parameters(app, Parameter)
rename_app_parameters(app, UserParameter)
class Migration(migrations.Migration):
dependencies = [
('lib', '0001_initial'),
]
operations = [
migrations.RunPython(rename_parameters)
]
| 28.418605 | 60 | 0.651391 |
ac8e94ec74891883deda87c681e00df6aaccff43 | 523 | py | Python | molecules/utils/helper_functions/clear_dir.py | yngtodd/molecules-deprecated | adf9477e6122fa4e92cedbbba26a358ea903e3a4 | [
"MIT"
] | null | null | null | molecules/utils/helper_functions/clear_dir.py | yngtodd/molecules-deprecated | adf9477e6122fa4e92cedbbba26a358ea903e3a4 | [
"MIT"
] | null | null | null | molecules/utils/helper_functions/clear_dir.py | yngtodd/molecules-deprecated | adf9477e6122fa4e92cedbbba26a358ea903e3a4 | [
"MIT"
] | 1 | 2021-11-19T01:57:35.000Z | 2021-11-19T01:57:35.000Z | import os, shutil;
# Directory paths for extract_native-contact
path_1 = "./native-contact/"
path_2 = "./native-contact/raw/"
path_3 = "./native-contact/data/"
# EFFECTS: Deletes raw and data directories and leaves native-contact directory empty.
def empty_directory(dir_path):
print "Emptying directory ..."
for root, dirs, files in os.walk(dir_path):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
print "Empty!"
| 30.764706 | 86 | 0.665392 |
992228510d8298775eb3ca006b9b560215b268fe | 1,414 | py | Python | Graphic/graphic_image_with_js.py | pyecharts/pyecharts_gallery | 8430c37df923860b36c9d1d86f2adc9d94b9d72c | [
"MIT"
] | 759 | 2019-04-28T22:42:10.000Z | 2022-03-31T12:32:10.000Z | Graphic/graphic_image_with_js.py | pyecharts/pyecharts_gallery | 8430c37df923860b36c9d1d86f2adc9d94b9d72c | [
"MIT"
] | 65 | 2019-06-10T07:38:25.000Z | 2022-03-24T10:10:03.000Z | Graphic/graphic_image_with_js.py | pyecharts/pyecharts_gallery | 8430c37df923860b36c9d1d86f2adc9d94b9d72c | [
"MIT"
] | 505 | 2019-04-28T08:45:33.000Z | 2022-03-29T07:08:33.000Z | from pyecharts import options as opts
from pyecharts.charts import Bar, Grid
from pyecharts.faker import Faker
bar = (
Bar(init_opts=opts.InitOpts(chart_id="1234"))
.add_xaxis(Faker.choose())
.add_yaxis("商家A", Faker.values())
.add_yaxis("商家B", Faker.values())
.set_global_opts(
title_opts=opts.TitleOpts(title="Bar-Graphic Image(旋转功能)组件示例"),
graphic_opts=[
opts.GraphicImage(
graphic_item=opts.GraphicItem(
id_="logo", right=20, top=20, z=-10, bounding="raw", origin=[75, 75]
),
graphic_imagestyle_opts=opts.GraphicImageStyleOpts(
image="https://echarts.apache.org/zh/images/favicon.png",
width=150,
height=150,
opacity=0.4,
),
)
],
)
)
c = (
Grid(init_opts=opts.InitOpts(chart_id="1234"))
.add(
chart=bar,
grid_opts=opts.GridOpts(pos_left="5%", pos_right="4%", pos_bottom="5%"),
)
.add_js_funcs(
"""
var rotation = 0;
setInterval(function () {
chart_1234.setOption({
graphic: {
id: 'logo',
rotation: (rotation += Math.PI / 360) % (Math.PI * 2)
}
});
}, 30);
"""
)
.render("graphic_image_with_js.html")
)
| 29.458333 | 88 | 0.509194 |
43527976603141661888e5adf4dd25fd0d0f9b57 | 9,152 | py | Python | tools/tests/test_template_parser.py | Awawdi/zulip | f2e7b92b024cfec6a9babace8298de9537925347 | [
"Apache-2.0"
] | 3 | 2020-11-03T15:18:04.000Z | 2021-02-16T11:49:51.000Z | tools/tests/test_template_parser.py | adb-web-designs/zulip | 1b303e7b2f0271f81265123ad1e7125ed3914d68 | [
"Apache-2.0"
] | 18 | 2021-08-14T01:12:30.000Z | 2022-03-04T20:28:51.000Z | tools/tests/test_template_parser.py | adb-web-designs/zulip | 1b303e7b2f0271f81265123ad1e7125ed3914d68 | [
"Apache-2.0"
] | 1 | 2021-08-29T11:13:05.000Z | 2021-08-29T11:13:05.000Z | import sys
import unittest
from typing import Optional
try:
from tools.lib.template_parser import (
TemplateParserException,
is_django_block_tag,
tokenize,
validate,
)
except ImportError:
print("ERROR!!! You need to run this via tools/test-tools.")
sys.exit(1)
class ParserTest(unittest.TestCase):
def _assert_validate_error(
self,
error: str,
fn: Optional[str] = None,
text: Optional[str] = None,
) -> None:
with self.assertRaisesRegex(TemplateParserException, error):
validate(fn=fn, text=text)
def test_is_django_block_tag(self) -> None:
self.assertTrue(is_django_block_tag("block"))
self.assertFalse(is_django_block_tag("not a django tag"))
def test_validate_vanilla_html(self) -> None:
"""
Verify that validate() does not raise errors for
well-formed HTML.
"""
my_html = """
<table>
<tr>
<td>foo</td>
</tr>
</table>"""
validate(text=my_html)
def test_validate_handlebars(self) -> None:
my_html = """
{{#with stream}}
<p>{{stream}}</p>
{{/with}}
"""
validate(text=my_html)
def test_validate_comment(self) -> None:
my_html = """
<!---
<h1>foo</h1>
-->"""
validate(text=my_html)
def test_validate_django(self) -> None:
my_html = """
{% include "some_other.html" %}
{% if foo %}
<p>bar</p>
{% endif %}
"""
validate(text=my_html)
my_html = """
{% block "content" %}
{% with className="class" %}
{% include 'foobar' %}
{% endwith %}
{% endblock %}
"""
validate(text=my_html)
def test_validate_no_start_tag(self) -> None:
my_html = """
foo</p>
"""
self._assert_validate_error("No start tag", text=my_html)
def test_validate_mismatched_tag(self) -> None:
my_html = """
<b>foo</i>
"""
self._assert_validate_error(r"Mismatched tags: \(b != i\)", text=my_html)
def test_validate_bad_indentation(self) -> None:
my_html = """
<p>
foo
</p>
"""
self._assert_validate_error("Indentation for start/end tags does not match.", text=my_html)
def test_validate_state_depth(self) -> None:
my_html = """
<b>
"""
self._assert_validate_error("Missing end tag", text=my_html)
def test_validate_incomplete_handlebars_tag_1(self) -> None:
my_html = """
{{# foo
"""
self._assert_validate_error(
'''Tag missing "}}" at line 2 col 13:"{{# foo
"''',
text=my_html,
)
def test_validate_incomplete_handlebars_tag_2(self) -> None:
my_html = """
{{# foo }
"""
self._assert_validate_error('Tag missing "}}" at line 2 col 13:"{{# foo }\n"', text=my_html)
def test_validate_incomplete_django_tag_1(self) -> None:
my_html = """
{% foo
"""
self._assert_validate_error(
'''Tag missing "%}" at line 2 col 13:"{% foo
"''',
text=my_html,
)
def test_validate_incomplete_django_tag_2(self) -> None:
my_html = """
{% foo %
"""
self._assert_validate_error('Tag missing "%}" at line 2 col 13:"{% foo %\n"', text=my_html)
def test_validate_incomplete_html_tag_1(self) -> None:
my_html = """
<b
"""
self._assert_validate_error(
'''Tag missing ">" at line 2 col 13:"<b
"''',
text=my_html,
)
def test_validate_incomplete_html_tag_2(self) -> None:
my_html = """
<a href="
"""
my_html1 = """
<a href=""
"""
self._assert_validate_error(
'''Tag missing ">" at line 2 col 13:"<a href=""
"''',
text=my_html1,
)
self._assert_validate_error(
'''Unbalanced quotes at line 2 col 13:"<a href="
"''',
text=my_html,
)
def test_validate_empty_html_tag(self) -> None:
my_html = """
< >
"""
self._assert_validate_error("Tag name missing", text=my_html)
def test_code_blocks(self) -> None:
# This is fine.
my_html = """
<code>
x = 5
y = x + 1
</code>"""
validate(text=my_html)
# This is also fine.
my_html = "<code>process_widgets()</code>"
validate(text=my_html)
# This is illegal.
my_html = """
<code>x =
5</code>
"""
self._assert_validate_error("Code tag is split across two lines.", text=my_html)
def test_anchor_blocks(self) -> None:
# This is allowed, although strange.
my_html = """
<a hef="/some/url">
Click here
for more info.
</a>"""
validate(text=my_html)
# This is fine.
my_html = '<a href="/some/url">click here</a>'
validate(text=my_html)
# Even this is fine.
my_html = """
<a class="twitter-timeline" href="https://twitter.com/ZulipStatus"
data-widget-id="443457763394334720"
data-screen-name="ZulipStatus"
>@ZulipStatus on Twitter</a>.
"""
validate(text=my_html)
def test_validate_jinja2_whitespace_markers_1(self) -> None:
my_html = """
{% if foo -%}
this is foo
{% endif %}
"""
validate(text=my_html)
def test_validate_jinja2_whitespace_markers_2(self) -> None:
my_html = """
{% if foo %}
this is foo
{%- endif %}
"""
validate(text=my_html)
def test_validate_jinja2_whitespace_markers_3(self) -> None:
my_html = """
{% if foo %}
this is foo
{% endif -%}
"""
validate(text=my_html)
def test_validate_jinja2_whitespace_markers_4(self) -> None:
my_html = """
{%- if foo %}
this is foo
{% endif %}
"""
validate(text=my_html)
def test_validate_mismatch_jinja2_whitespace_markers_1(self) -> None:
my_html = """
{% if foo %}
this is foo
{%- if bar %}
"""
self._assert_validate_error("Missing end tag", text=my_html)
def test_validate_jinja2_whitespace_type2_markers(self) -> None:
my_html = """
{%- if foo -%}
this is foo
{% endif %}
"""
validate(text=my_html)
def test_tokenize(self) -> None:
tag = "<!DOCTYPE html>"
token = tokenize(tag)[0]
self.assertEqual(token.kind, "html_doctype")
tag = "<a>bla"
token = tokenize(tag)[0]
self.assertEqual(token.kind, "html_start")
self.assertEqual(token.tag, "a")
tag = "<br />bla"
token = tokenize(tag)[0]
self.assertEqual(token.kind, "html_singleton")
self.assertEqual(token.tag, "br")
tag = "<input>bla"
token = tokenize(tag)[0]
self.assertEqual(token.kind, "html_start") # We later mark this an error.
self.assertEqual(token.tag, "input")
tag = "<input />bla"
token = tokenize(tag)[0]
self.assertEqual(token.kind, "html_singleton")
self.assertEqual(token.tag, "input")
tag = "</a>bla"
token = tokenize(tag)[0]
self.assertEqual(token.kind, "html_end")
self.assertEqual(token.tag, "a")
tag = "{{#with foo}}bla"
token = tokenize(tag)[0]
self.assertEqual(token.kind, "handlebars_start")
self.assertEqual(token.tag, "with")
tag = "{{/with}}bla"
token = tokenize(tag)[0]
self.assertEqual(token.kind, "handlebars_end")
self.assertEqual(token.tag, "with")
tag = "{% if foo %}bla"
token = tokenize(tag)[0]
self.assertEqual(token.kind, "django_start")
self.assertEqual(token.tag, "if")
tag = "{% endif %}bla"
token = tokenize(tag)[0]
self.assertEqual(token.kind, "django_end")
self.assertEqual(token.tag, "if")
tag = "{% if foo -%}bla"
token = tokenize(tag)[0]
self.assertEqual(token.kind, "jinja2_whitespace_stripped_start")
self.assertEqual(token.tag, "if")
tag = "{%- endif %}bla"
token = tokenize(tag)[0]
self.assertEqual(token.kind, "jinja2_whitespace_stripped_end")
self.assertEqual(token.tag, "if")
tag = "{%- if foo -%}bla"
token = tokenize(tag)[0]
self.assertEqual(token.kind, "jinja2_whitespace_stripped_type2_start")
self.assertEqual(token.tag, "if")
| 28.16 | 100 | 0.518247 |
55646953d75af0537b630bd353627a0b1f625c3f | 1,204 | py | Python | python-demos/read_write_files/write_json_file.py | zdenek-nemec/sandbox | 31572cb790869c4c2036c0a0902c1a054a9043d5 | [
"MIT"
] | 1 | 2020-05-31T09:24:51.000Z | 2020-05-31T09:24:51.000Z | python-demos/read_write_files/write_json_file.py | zdenek-nemec/sandbox | 31572cb790869c4c2036c0a0902c1a054a9043d5 | [
"MIT"
] | 33 | 2019-04-21T13:22:03.000Z | 2022-03-06T12:11:18.000Z | python-demos/read_write_files/write_json_file.py | zdenek-nemec/sandbox | 31572cb790869c4c2036c0a0902c1a054a9043d5 | [
"MIT"
] | null | null | null | import json
DEFAULT_FILENAME = "output_test_file.json"
DEFAULT_CONTENT = [
{
"record_id": 1,
"calling": {
"imsi": "230010000000001",
"msisdn": "+420731000001",
"operator": "T-Mobile CZ",
},
"called": {
"imsi": "230010000000002",
"msisdn": "+420731000002",
"operator": "T-Mobile CZ",
},
"start_time": "2020-07-18 11:47:00.123",
"end_time": "2020-07-18 11:48:30.123",
"call_duration_ms": 90000
}, {
"record_id": 2,
"calling": {
"imsi": "230010000000002",
"msisdn": "+420731000002",
"operator": "T-Mobile CZ",
},
"called": {
"msisdn": "+420721000003",
"operator": "O2 CZ",
},
"start_time": "2020-07-18 11:50:00.123",
"end_time": "2020-07-18 11:55:00.123",
"call_duration_ms": 300000
}
]
def main():
filename = DEFAULT_FILENAME
    content = DEFAULT_CONTENT
with open(filename, "w", encoding="utf-8") as json_file:
json.dump(content, json_file, ensure_ascii=False, indent=4)
if __name__ == "__main__":
main()
| 25.083333 | 67 | 0.505814 |
9563db86f4f40acb75a4897e1831ac76bba49024 | 16,918 | py | Python | opensfm/features.py | YonatanSimson/OpenSfM | 358843738359f4b5d767b22df2f3960ded31c981 | [
"BSD-2-Clause"
] | 1 | 2019-05-31T13:50:41.000Z | 2019-05-31T13:50:41.000Z | opensfm/features.py | Pandinosaurus/OpenSfM | b892ba9fd5e7fd6c7a9e3c81edddca80f71c1cd5 | [
"BSD-2-Clause"
] | null | null | null | opensfm/features.py | Pandinosaurus/OpenSfM | b892ba9fd5e7fd6c7a9e3c81edddca80f71c1cd5 | [
"BSD-2-Clause"
] | 2 | 2017-03-31T16:54:34.000Z | 2018-07-10T11:32:22.000Z | """Tools to extract features."""
import logging
import sys
import time
import cv2
import numpy as np
from opensfm import context, pyfeatures
logger = logging.getLogger(__name__)
def resized_image(image, max_size):
"""Resize image to feature_process_size."""
h, w, _ = image.shape
size = max(w, h)
if 0 < max_size < size:
dsize = w * max_size // size, h * max_size // size
return cv2.resize(image, dsize=dsize, interpolation=cv2.INTER_AREA)
else:
return image
def root_feature(desc, l2_normalization=False):
if l2_normalization:
s2 = np.linalg.norm(desc, axis=1)
desc = (desc.T / s2).T
s = np.sum(desc, 1)
desc = np.sqrt(desc.T / s).T
return desc
def root_feature_surf(desc, l2_normalization=False, partial=False):
"""
    Experimental square-root mapping of SURF-like features; currently only works for 64-dim SURF descriptors.
"""
if desc.shape[1] == 64:
if l2_normalization:
s2 = np.linalg.norm(desc, axis=1)
desc = (desc.T / s2).T
if partial:
ii = np.array([i for i in range(64) if (i % 4 == 2 or i % 4 == 3)])
else:
ii = np.arange(64)
desc_sub = np.abs(desc[:, ii])
desc_sub_sign = np.sign(desc[:, ii])
# s_sub = np.sum(desc_sub, 1) # This partial normalization gives slightly better results for AKAZE surf
s_sub = np.sum(np.abs(desc), 1)
desc_sub = np.sqrt(desc_sub.T / s_sub).T
desc[:, ii] = desc_sub * desc_sub_sign
return desc
def normalized_image_coordinates(pixel_coords, width, height):
size = max(width, height)
p = np.empty((len(pixel_coords), 2))
p[:, 0] = (pixel_coords[:, 0] + 0.5 - width / 2.0) / size
p[:, 1] = (pixel_coords[:, 1] + 0.5 - height / 2.0) / size
return p
def denormalized_image_coordinates(norm_coords, width, height):
size = max(width, height)
p = np.empty((len(norm_coords), 2))
p[:, 0] = norm_coords[:, 0] * size - 0.5 + width / 2.0
p[:, 1] = norm_coords[:, 1] * size - 0.5 + height / 2.0
return p
def normalize_features(points, desc, colors, width, height):
"""Normalize feature coordinates and size."""
points[:, :2] = normalized_image_coordinates(points[:, :2], width, height)
points[:, 2:3] /= max(width, height)
return points, desc, colors
def _in_mask(point, width, height, mask):
"""Check if a point is inside a binary mask."""
u = mask.shape[1] * (point[0] + 0.5) / width
v = mask.shape[0] * (point[1] + 0.5) / height
return mask[int(v), int(u)] != 0
def extract_features_sift(image, config, features_count):
sift_edge_threshold = config["sift_edge_threshold"]
sift_peak_threshold = float(config["sift_peak_threshold"])
# SIFT support is in cv2 main from version 4.4.0
if context.OPENCV44 or context.OPENCV5:
# OpenCV versions concerned /** 3.4.11, >= 4.4.0 **/ ==> Sift became free since March 2020
detector = cv2.SIFT_create(
edgeThreshold=sift_edge_threshold, contrastThreshold=sift_peak_threshold
)
descriptor = detector
elif context.OPENCV3 or context.OPENCV4:
try:
# OpenCV versions concerned /** 3.2.x, 3.3.x, 3.4.0, 3.4.1, 3.4.2, 3.4.10, 4.3.0, 4.4.0 **/
detector = cv2.xfeatures2d.SIFT_create(
edgeThreshold=sift_edge_threshold, contrastThreshold=sift_peak_threshold
)
except AttributeError as ae:
# OpenCV versions concerned /** 3.4.3, 3.4.4, 3.4.5, 3.4.6, 3.4.7, 3.4.8, 3.4.9, 4.0.x, 4.1.x, 4.2.x **/
if "no attribute 'xfeatures2d'" in str(ae):
logger.error(
"OpenCV Contrib modules are required to extract SIFT features"
)
raise
descriptor = detector
else:
detector = cv2.FeatureDetector_create("SIFT")
descriptor = cv2.DescriptorExtractor_create("SIFT")
detector.setDouble("edgeThreshold", sift_edge_threshold)
while True:
logger.debug("Computing sift with threshold {0}".format(sift_peak_threshold))
t = time.time()
# SIFT support is in cv2 main from version 4.4.0
if context.OPENCV44 or context.OPENCV5:
detector = cv2.SIFT_create(
edgeThreshold=sift_edge_threshold, contrastThreshold=sift_peak_threshold
)
elif context.OPENCV3:
detector = cv2.xfeatures2d.SIFT_create(
edgeThreshold=sift_edge_threshold, contrastThreshold=sift_peak_threshold
)
else:
detector.setDouble("contrastThreshold", sift_peak_threshold)
points = detector.detect(image)
logger.debug("Found {0} points in {1}s".format(len(points), time.time() - t))
if len(points) < features_count and sift_peak_threshold > 0.0001:
sift_peak_threshold = (sift_peak_threshold * 2) / 3
logger.debug("reducing threshold")
else:
logger.debug("done")
break
points, desc = descriptor.compute(image, points)
if config["feature_root"]:
desc = root_feature(desc)
points = np.array([(i.pt[0], i.pt[1], i.size, i.angle) for i in points])
return points, desc
def extract_features_surf(image, config, features_count):
surf_hessian_threshold = config["surf_hessian_threshold"]
if context.OPENCV3:
try:
detector = cv2.xfeatures2d.SURF_create()
except AttributeError as ae:
if "no attribute 'xfeatures2d'" in str(ae):
logger.error(
"OpenCV Contrib modules are required to extract SURF features"
)
raise
descriptor = detector
detector.setHessianThreshold(surf_hessian_threshold)
detector.setNOctaves(config["surf_n_octaves"])
detector.setNOctaveLayers(config["surf_n_octavelayers"])
detector.setUpright(config["surf_upright"])
else:
detector = cv2.FeatureDetector_create("SURF")
descriptor = cv2.DescriptorExtractor_create("SURF")
detector.setDouble("hessianThreshold", surf_hessian_threshold)
detector.setDouble("nOctaves", config["surf_n_octaves"])
detector.setDouble("nOctaveLayers", config["surf_n_octavelayers"])
detector.setInt("upright", config["surf_upright"])
while True:
logger.debug("Computing surf with threshold {0}".format(surf_hessian_threshold))
t = time.time()
if context.OPENCV3:
detector.setHessianThreshold(surf_hessian_threshold)
else:
detector.setDouble(
"hessianThreshold", surf_hessian_threshold
) # default: 0.04
points = detector.detect(image)
logger.debug("Found {0} points in {1}s".format(len(points), time.time() - t))
if len(points) < features_count and surf_hessian_threshold > 0.0001:
surf_hessian_threshold = (surf_hessian_threshold * 2) / 3
logger.debug("reducing threshold")
else:
logger.debug("done")
break
points, desc = descriptor.compute(image, points)
if config["feature_root"]:
desc = root_feature_surf(desc, partial=True)
points = np.array([(i.pt[0], i.pt[1], i.size, i.angle) for i in points])
return points, desc
def akaze_descriptor_type(name):
d = pyfeatures.AkazeDescriptorType.__dict__
if name in d:
return d[name]
else:
logger.debug("Wrong akaze descriptor type")
return d["MSURF"]
def extract_features_akaze(image, config, features_count):
options = pyfeatures.AKAZEOptions()
options.omax = config["akaze_omax"]
akaze_descriptor_name = config["akaze_descriptor"]
options.descriptor = akaze_descriptor_type(akaze_descriptor_name)
options.descriptor_size = config["akaze_descriptor_size"]
options.descriptor_channels = config["akaze_descriptor_channels"]
options.dthreshold = config["akaze_dthreshold"]
options.kcontrast_percentile = config["akaze_kcontrast_percentile"]
options.use_isotropic_diffusion = config["akaze_use_isotropic_diffusion"]
options.target_num_features = features_count
options.use_adaptive_suppression = config["feature_use_adaptive_suppression"]
logger.debug("Computing AKAZE with threshold {0}".format(options.dthreshold))
t = time.time()
points, desc = pyfeatures.akaze(image, options)
logger.debug("Found {0} points in {1}s".format(len(points), time.time() - t))
if config["feature_root"]:
if akaze_descriptor_name in ["SURF_UPRIGHT", "MSURF_UPRIGHT"]:
desc = root_feature_surf(desc, partial=True)
elif akaze_descriptor_name in ["SURF", "MSURF"]:
desc = root_feature_surf(desc, partial=False)
points = points.astype(float)
return points, desc
def extract_features_hahog(image, config, features_count):
t = time.time()
points, desc = pyfeatures.hahog(
image.astype(np.float32) / 255, # VlFeat expects pixel values between 0, 1
peak_threshold=config["hahog_peak_threshold"],
edge_threshold=config["hahog_edge_threshold"],
target_num_features=features_count,
use_adaptive_suppression=config["feature_use_adaptive_suppression"],
)
if config["feature_root"]:
desc = np.sqrt(desc)
uchar_scaling = 362 # x * 512 < 256 => sqrt(x) * 362 < 256
else:
uchar_scaling = 512
if config["hahog_normalize_to_uchar"]:
desc = (uchar_scaling * desc).clip(0, 255).round()
logger.debug("Found {0} points in {1}s".format(len(points), time.time() - t))
return points, desc
def extract_features_orb(image, config, features_count):
if context.OPENCV3:
detector = cv2.ORB_create(nfeatures=features_count)
descriptor = detector
else:
detector = cv2.FeatureDetector_create("ORB")
descriptor = cv2.DescriptorExtractor_create("ORB")
detector.setDouble("nFeatures", features_count)
logger.debug("Computing ORB")
t = time.time()
points = detector.detect(image)
points, desc = descriptor.compute(image, points)
points = np.array([(i.pt[0], i.pt[1], i.size, i.angle) for i in points])
logger.debug("Found {0} points in {1}s".format(len(points), time.time() - t))
return points, desc
def extract_features(image, config, is_panorama):
"""Detect features in a color or gray-scale image.
The type of feature detected is determined by the ``feature_type``
config option.
The coordinates of the detected points are returned in normalized
image coordinates.
Parameters:
- image: a color image with shape (h, w, 3) or
gray-scale image with (h, w) or (h, w, 1)
- config: the configuration structure
- is_panorama : if True, alternate settings are used for feature count and extraction size.
Returns:
tuple:
- points: ``x``, ``y``, ``size`` and ``angle`` for each feature
- descriptors: the descriptor of each feature
- colors: the color of the center of each feature
"""
extraction_size = (
config["feature_process_size_panorama"]
if is_panorama
else config["feature_process_size"]
)
features_count = (
config["feature_min_frames_panorama"]
if is_panorama
else config["feature_min_frames"]
)
assert len(image.shape) == 3 or len(image.shape) == 2
if len(image.shape) == 2: # convert (h, w) to (h, w, 1)
image = np.expand_dims(image, axis=2)
image = resized_image(image, extraction_size)
# convert color to gray-scale if necessary
if image.shape[2] == 3:
image_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
else:
image_gray = image
feature_type = config["feature_type"].upper()
if feature_type == "SIFT":
points, desc = extract_features_sift(image_gray, config, features_count)
elif feature_type == "SURF":
points, desc = extract_features_surf(image_gray, config, features_count)
elif feature_type == "AKAZE":
points, desc = extract_features_akaze(image_gray, config, features_count)
elif feature_type == "HAHOG":
points, desc = extract_features_hahog(image_gray, config, features_count)
elif feature_type == "ORB":
points, desc = extract_features_orb(image_gray, config, features_count)
else:
raise ValueError(
"Unknown feature type " "(must be SURF, SIFT, AKAZE, HAHOG or ORB)"
)
xs = points[:, 0].round().astype(int)
ys = points[:, 1].round().astype(int)
colors = image[ys, xs]
if image.shape[2] == 1:
colors = np.repeat(colors, 3).reshape((-1, 3))
return normalize_features(points, desc, colors, image.shape[1], image.shape[0])
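# Minimal usage sketch (not part of the module): calling extract_features as
# described in its docstring. The config values below are assumptions chosen to
# exercise the HAHOG branch, and the image path is hypothetical.
#
#   import cv2
#   config = {
#       "feature_type": "HAHOG",
#       "feature_process_size": 2048,
#       "feature_process_size_panorama": 4096,
#       "feature_min_frames": 4000,
#       "feature_min_frames_panorama": 8000,
#       "feature_root": True,
#       "hahog_peak_threshold": 1e-5,
#       "hahog_edge_threshold": 10,
#       "hahog_normalize_to_uchar": True,
#       "feature_use_adaptive_suppression": False,
#   }
#   image = cv2.imread("example.jpg")[:, :, ::-1]  # BGR -> RGB
#   points, desc, colors = extract_features(image, config, is_panorama=False)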
def build_flann_index(features, config):
# FLANN_INDEX_LINEAR = 0
FLANN_INDEX_KDTREE = 1
FLANN_INDEX_KMEANS = 2
# FLANN_INDEX_COMPOSITE = 3
# FLANN_INDEX_KDTREE_SINGLE = 4
# FLANN_INDEX_HIERARCHICAL = 5
FLANN_INDEX_LSH = 6
if features.dtype.type is np.float32:
algorithm_type = config["flann_algorithm"].upper()
if algorithm_type == "KMEANS":
FLANN_INDEX_METHOD = FLANN_INDEX_KMEANS
elif algorithm_type == "KDTREE":
FLANN_INDEX_METHOD = FLANN_INDEX_KDTREE
else:
raise ValueError("Unknown flann algorithm type " "must be KMEANS, KDTREE")
else:
FLANN_INDEX_METHOD = FLANN_INDEX_LSH
flann_params = {
"algorithm": FLANN_INDEX_METHOD,
"branching": config["flann_branching"],
"iterations": config["flann_iterations"],
"tree": config["flann_tree"],
}
return context.flann_Index(features, flann_params)
FEATURES_VERSION = 2
FEATURES_HEADER = "OPENSFM_FEATURES_VERSION"
def load_features(filepath, config):
""" Load features from filename """
s = np.load(filepath, allow_pickle=True)
version = _features_file_version(s)
return getattr(sys.modules[__name__], "_load_features_v%d" % version)(s, config)
def _features_file_version(obj):
""" Retrieve features file version. Return 0 if none """
if FEATURES_HEADER in obj:
return obj[FEATURES_HEADER]
else:
return 0
def _load_features_v0(s, config):
"""Base version of features file
Scale (desc[2]) set to reprojection_error_sd by default (legacy behaviour)
"""
feature_type = config["feature_type"]
if feature_type == "HAHOG" and config["hahog_normalize_to_uchar"]:
descriptors = s["descriptors"].astype(np.float32)
else:
descriptors = s["descriptors"]
points = s["points"]
points[:, 2:3] = config["reprojection_error_sd"]
return points, descriptors, s["colors"].astype(float), None
def _load_features_v1(s, config):
"""Version 1 of features file
Scale is not properly set higher in the pipeline, default is gone.
"""
feature_type = config["feature_type"]
if feature_type == "HAHOG" and config["hahog_normalize_to_uchar"]:
descriptors = s["descriptors"].astype(np.float32)
else:
descriptors = s["descriptors"]
return s["points"], descriptors, s["colors"].astype(float), None
def _load_features_v2(s, config):
"""Version 2 of features file
Added segmentation and segmentation labels.
"""
feature_type = config["feature_type"]
if feature_type == "HAHOG" and config["hahog_normalize_to_uchar"]:
descriptors = s["descriptors"].astype(np.float32)
else:
descriptors = s["descriptors"]
has_segmentation = s["segmentations"].any()
has_instances = s["instances"].any()
return (
s["points"],
descriptors,
s["colors"].astype(float),
{
"segmentations": s["segmentations"] if has_segmentation else None,
"instances": s["instances"] if has_instances else None,
"segmentation_labels": s["segmentation_labels"],
},
)
def save_features(
filepath,
points,
desc,
colors,
segmentations,
instances,
segmentation_labels,
config,
):
feature_type = config["feature_type"]
if (
(
feature_type == "AKAZE"
and config["akaze_descriptor"] in ["MLDB_UPRIGHT", "MLDB"]
)
or (feature_type == "HAHOG" and config["hahog_normalize_to_uchar"])
or (feature_type == "ORB")
):
feature_data_type = np.uint8
else:
feature_data_type = np.float32
np.savez_compressed(
filepath,
points=points.astype(np.float32),
descriptors=desc.astype(feature_data_type),
colors=colors,
segmentations=segmentations,
instances=instances,
segmentation_labels=segmentation_labels,
OPENSFM_FEATURES_VERSION=FEATURES_VERSION,
allow_pickle=True,
)
| 35.616842 | 116 | 0.645053 |
7be391124b1d1a2a6491b7e4674100439f9b62a9 | 5,867 | py | Python | c2.py | Sigmoid-Frontsquat-LLC/classification-model-backend | 7366302063315a245b7ab20219fb22ecf67bd377 | [
"MIT"
] | null | null | null | c2.py | Sigmoid-Frontsquat-LLC/classification-model-backend | 7366302063315a245b7ab20219fb22ecf67bd377 | [
"MIT"
] | null | null | null | c2.py | Sigmoid-Frontsquat-LLC/classification-model-backend | 7366302063315a245b7ab20219fb22ecf67bd377 | [
"MIT"
] | null | null | null | import sys # this is for extracting command line arguments.
def parse_activator(flag, value):
    if flag[1] == 'a':
        return (True, value)
    else:
        return (False, None)
def parse_optimizer(flag, value):
    if flag[1] == 'o':
        return (True, value)
    else:
        return (False, None)
def parse_source(flag, value):
    if flag[1] == 's':
        return (True, value)
    else:
        return (False, None)
activator = ''
optimizer = ''
source = ''
if len(sys.argv) == 1 or (len(sys.argv) - 1) % 2 != 0:
raise ValueError("Usage: [-s image] [-a activator] [-o optimizer]")
else:
# could this be done better?
# sure, but this works for now...
for i in range(1, len(sys.argv) - 1):
flag = sys.argv[i]
value = sys.argv[i + 1]
isActivator, act = parse_activator(flag, value)
if isActivator:
if act != '-o':
activator = act
continue
isOptimizer, opt = parse_optimizer(flag, value)
if isOptimizer:
optimizer = opt
continue
isSource, so = parse_source(flag, value)
if isSource:
source = so
continue
pass
pass
# naive check to ensure no argument is left unfilled
if len(activator) == 0 or len(optimizer) == 0 or len(source) == 0 :
raise ValueError("Usage: [-s image] [-a activator] [-o optimizer]")
# exit(0)
############# Classification Logic ##################
import pandas as pd
import io
import requests
import numpy as np
import os
import logging
import json
import shutil
from sklearn.model_selection import train_test_split
from sklearn import metrics
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.applications.vgg16 import VGG16
from PIL import Image, ImageFile, ImageEnhance
from matplotlib.pyplot import imshow
import requests
from io import BytesIO
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
####### warning messages not printed #######
logging.disable(logging.WARNING)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class_labels = ['coupe','motorcycle','sedan','suv','truck']
num_classes = 10
# Image preprocessing
img = Image.open(source)
img = img.resize((256,256))
enhancer = ImageEnhance.Sharpness(img)
enhanced_im = enhancer.enhance(10.0)
enhanced_im.save('resized.jpg')
img_array = np.asarray(enhanced_im)
img_array = img_array / 255
input_shape = (256,256,3)
# reshape for model
# original model was trained with (32,32,3)
img_array = img_array.reshape((1,256,256,3))
# modelo = Sequential()
# modelo.add(Conv2D(32, (3, 3), activation=activator, padding='same', input_shape=input_shape))
# modelo.add(Conv2D(32, (3, 3), activation=activator, padding='same'))
# modelo.add(Conv2D(32, (3, 3), activation=activator, padding='same'))
# modelo.add(MaxPooling2D((3, 3)))
# modelo.add(Dropout(0.2))
# modelo.add(Conv2D(64, (3, 3), activation=activator, padding='same'))
# modelo.add(Conv2D(64, (3, 3), activation=activator, padding='same'))
# modelo.add(Conv2D(64, (3, 3), activation=activator, padding='same'))
# modelo.add(MaxPooling2D((3, 3)))
# modelo.add(Dropout(0.2))
# modelo.add(Conv2D(128, (3, 3), activation=activator, padding='same'))
# modelo.add(Conv2D(128, (3, 3), activation=activator, padding='same'))
# modelo.add(MaxPooling2D((3, 3)))
# modelo.add(Flatten())
# modelo.add(Dense(128, activation=activator))
# modelo.add(Dropout(0.2))
# modelo.add(Dense(10, activation='softmax'))
# modelo.compile(loss='categorical_crossentropy',optimizer=optimizer)
model = tf.keras.models.load_model('dnn/model_tl.h5')
model.load_weights('dnn/test2_tl.h5')
model.compile(loss='categorical_crossentropy',optimizer=optimizer)
# validate the 'activator'
pass
# validate the 'optimizer'
pass
# Load weights based on activator and optimizer
# probably not needed as we are already passing the optimizer as a variable
# if optimizer == 'adam':
# # compile with adam
# modelo.compile(loss='categorical_crossentropy',optimizer=optimizer)
# # activator
# if activator == 'relu':
# # load adam-relu
# modelo.load_weights('dnn/relu-adam2.hdf5')
# elif activator == 'sigmoid':
# # load sigmoid-adam
# modelo.load_weights('dnn/sigmoid-adam2.hdf5')
# elif activator == 'tanh':
# # load tanh-adam
# modelo.load_weights('dnn/tanh-adam2.hdf5')
# else:
# print('error')
# elif optimizer == 'sgd':
# # compile with sgd
# modelo.compile(loss='categorical_crossentropy',optimizer=optimizer)
# if activator == 'relu':
# # load relu-sgd
# modelo.load_weights('dnn/relu-sgd2.hdf5')
# elif activator == 'sigmoid':
# # load sigmoid-sgd
# modelo.load_weights('dnn/sigmoid-sgd2.hdf5')
# elif activator == 'tanh':
# # load tanh-sgd
# modelo.load_weights('dnn/tanh-sgd2.hdf5')
# else:
# print('error')
# Get the classification
############# classification ##############
# pred = modelo.predict(img_array)
# pred = pred[0]
# pred_class = class_labels[np.argmax(pred)]
pred = model.predict(img_array)
pred = pred[0]
pred_class = class_labels[np.argmax(pred)]
print(pred_class)
############# JSON ###############
# classification = {k:v for k,v in zip(class_labels,pred)}
classification = [
{
class_labels[0] : pred[0]
},
{
class_labels[1] : pred[1]
},
{
class_labels[2] : pred[2]
},
{
class_labels[3] : pred[3]
},
{
class_labels[4] : pred[4]
}
]
########## output ################
print(classification)
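# Example invocation (file name is hypothetical; the activator/optimizer pair
# should match the weights the model was trained with):
#   python c2.py -s car.jpg -a relu -o adam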
| 25.845815 | 95 | 0.647009 |
3f727d697f7faa9c1f148dfdf8d3d44621a8d10a | 509 | py | Python | my_lambdata/my_script.py | TravisJRCain/lambdata-zmurray | 61a906c896ec629e2cd486b300b04921159840da | [
"MIT"
] | null | null | null | my_lambdata/my_script.py | TravisJRCain/lambdata-zmurray | 61a906c896ec629e2cd486b300b04921159840da | [
"MIT"
] | null | null | null | my_lambdata/my_script.py | TravisJRCain/lambdata-zmurray | 61a906c896ec629e2cd486b300b04921159840da | [
"MIT"
] | null | null | null | import pandas as pd
# import my mod
# from my-mod import flto_nc and dato_cl
from my_lambdata.my_mod import flto_nc
from my_lambdata.my_mod import split_column_date
# Instantiate df to test mod.py
df = pd.DataFrame({"Friends": ['Gene, Aaron, Dom, Scott, Zack'],
"Times won setback": [3, 1, 2, 0, 5072],
"First game won": [12-31-2012, 2-13-2015, 10-9-2008,
5-6-2007, 10-2-1991]})
# List also to test mod.py
l = [5, 10, 15, 20, 25]
| 31.8125 | 71 | 0.591356 |
7e395bf332750acabbf445b806ce6aac0957dbc2 | 1,658 | py | Python | Interface/code.py | vagabond11/Elk-search-analysis | fc7ee38c57bf8eb1abc2ccaaefd75734a5ecc27b | [
"MIT"
] | null | null | null | Interface/code.py | vagabond11/Elk-search-analysis | fc7ee38c57bf8eb1abc2ccaaefd75734a5ecc27b | [
"MIT"
] | null | null | null | Interface/code.py | vagabond11/Elk-search-analysis | fc7ee38c57bf8eb1abc2ccaaefd75734a5ecc27b | [
"MIT"
] | null | null | null | from flask import Flask , render_template , url_for , request, redirect, Markup
import json
import codecs
import pyodbc
from tqdm.notebook import tqdm
from elasticsearch import Elasticsearch
import requests
es = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
app = Flask(__name__ , template_folder = 'templates')
@app.route('/')
def index():
return render_template('index.html')
@app.route("/" , methods=["GET","POST"])
def search():
if request.method == "POST" :
search_input = request.form["search-query"]
if request.form['action'] == 'search quran':
query_body = {
"size":1000,
"query": {
"match": {
"aya_no_chakl.shingle_nostem": {
"query":search_input
}
}
}
}
result = es.search(index="quran_final_mapping", body=query_body)
return render_template('search.html',result=result)
if request.form['action'] == 'search hadith':
query_body = {
"size":1000,
"query": {
"match": {
"hadith_arabic.shingle_nostem": {
"query":search_input
}
}
}
}
result = es.search(index="hadith_with_mapping", body=query_body)
return render_template('search_hadith.html',result=result)
if __name__ == "__main__":
app.run() | 29.087719 | 79 | 0.486731 |
27d8c98d1bc0c2ae241f07a40306c297317d6c3b | 5,333 | py | Python | tangos/web/views/halo_view.py | TobiBu/tangos | decab8c892c5937fd68474a375089abef198dba2 | [
"BSD-3-Clause"
] | null | null | null | tangos/web/views/halo_view.py | TobiBu/tangos | decab8c892c5937fd68474a375089abef198dba2 | [
"BSD-3-Clause"
] | null | null | null | tangos/web/views/halo_view.py | TobiBu/tangos | decab8c892c5937fd68474a375089abef198dba2 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from pyramid.view import view_config
import tangos
from tangos import core
import numpy as np
from .halo_data import format_number, _relative_description
import sqlalchemy, sqlalchemy.orm
from six.moves import zip
from . import halo_from_request
class TimestepInfo(object):
def __init__(self, ts):
self.z = "%.2f"%ts.redshift
self.t = "%.2e Gyr"%ts.time_gyr
class TimeLinks(object):
def __init__(self, request, halo):
link_names = ['earliest', '-10', '-1', '+1', '+10', 'latest']
route_names = ['halo_earlier']*3 + ['halo_later']*3
ns = ['inf',10,1,1,10,'inf']
urls = [
request.route_url(r, simid=halo.timestep.simulation.escaped_basename,
timestepid=halo.timestep.escaped_extension,
halonumber=halo.basename,
n=n)
for r,n in zip(route_names, ns)
]
self.urls = urls
self.names = link_names
class DisplayProperty(object):
def __init__(self, property):
self.name = property.name.text
self.value = format_property_data(property)
self.is_array = property.data_is_array()
class TimeProperty(DisplayProperty):
def __init__(self, halo):
self.name = "t()"
self.value = format_number(halo.timestep.time_gyr) + " Gyr"
self.is_array = False
class RedshiftProperty(DisplayProperty):
def __init__(self, halo):
self.name = "z()"
self.value = format_number(halo.timestep.redshift)
self.is_array = False
def default_properties(halo):
properties = [TimeProperty(halo), RedshiftProperty(halo)]
for property in halo.properties.options(sqlalchemy.orm.joinedload(core.HaloProperty.name)):
properties.append(DisplayProperty(property))
return properties
def format_property_data(property):
if property.data_is_array():
"""
data = property.data_raw
if len(data)>5 or len(data.shape)>1:
return "size "+(" x ".join([str(s) for s in data.shape]))+" array"
else:
return "["+(",".join([_number_format(d) for d in data]))+"]"
"""
return "Array"
else:
return format_number(property.data)
class SimulationInfo(object):
def __init__(self, sim, request):
self.name = sim.basename
self.url = request.route_url('halo_in',simid=request.matchdict['simid'],
timestepid=request.matchdict['timestepid'],
halonumber=request.matchdict['halonumber'],
n=sim.basename)
class HaloLinkInfo(object):
def __init__(self, link, request):
halo_source = link.halo_from
halo_dest = link.halo_to
weight_text = "( %.2f)"%link.weight if link.weight else ""
self.name = "%s%s: %s"%(link.relation.text,weight_text,_relative_description(halo_source, halo_dest))
self.url = request.route_url('halo_view', simid=halo_dest.timestep.simulation.escaped_basename,
timestepid=halo_dest.timestep.escaped_extension,
halonumber=halo_dest.basename)
def all_simulations(request):
return [SimulationInfo(x,request) for x in tangos.all_simulations(request.dbsession)]
def halo_links(halo, request):
links = []
links_query = request.dbsession.query(core.HaloLink).filter_by(halo_from_id=halo.id).\
order_by(core.HaloLink.weight.desc()).\
options(sqlalchemy.orm.joinedload(core.HaloLink.halo_to).joinedload(core.Halo.timestep).joinedload(core.TimeStep.simulation))
for lk in links_query.all():
links.append(HaloLinkInfo(lk, request))
return links
@view_config(route_name='halo_view', renderer='../templates/halo_view.jinja2')
def halo_view(request):
halo = halo_from_request(request)
ts = halo.timestep
sim = ts.simulation
return {'ts_info': TimestepInfo(ts),
'this_id': halo.id,
'halo_number': halo.halo_number,
'halo_typetag': halo.tag,
'timestep': ts.extension,
'simulation': sim.basename,
'all_simulations': all_simulations(request),
'halo_links': halo_links(halo, request),
'time_links': TimeLinks(request, halo),
'properties': default_properties(halo),
'halo_path': halo.path,
'finder_id': halo.finder_id,
'calculate_url': request.route_url('get_property',simid=request.matchdict['simid'],
timestepid=request.matchdict['timestepid'],
halonumber=request.matchdict['halonumber'],
nameid="")[:-5],
'tree_url': request.route_url('merger_tree',simid=request.matchdict['simid'],
timestepid=request.matchdict['timestepid'],
halonumber=request.matchdict['halonumber']),
'gather_url': "/%s/%s/"%(sim.escaped_basename,ts.escaped_extension),
'cascade_url': "/%s/%s/%s/"%(sim.escaped_basename,ts.escaped_extension,halo.basename)} | 40.709924 | 137 | 0.606413 |
f8800b71b2af30725ff065514dd022c3834aa1f4 | 8,943 | py | Python | gslib/addlhelp/metadata.py | maxshine/gsutil | c81d67f2286402accfcdf79f0199844949bebefc | [
"Apache-2.0"
] | 1,894 | 2015-04-17T18:29:53.000Z | 2022-03-28T22:41:06.000Z | gslib/addlhelp/metadata.py | maxshine/gsutil | c81d67f2286402accfcdf79f0199844949bebefc | [
"Apache-2.0"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | gslib/addlhelp/metadata.py | maxshine/gsutil | c81d67f2286402accfcdf79f0199844949bebefc | [
"Apache-2.0"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | # -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help about object metadata."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW OF METADATA</B>
Objects can have associated metadata, which control aspects of how
GET requests are handled, including Content-Type, Cache-Control,
Content-Disposition, and Content-Encoding (discussed in more detail in
the subsections below). In addition, you can set custom metadata that
can be used by applications (e.g., tagging that particular objects possess
some property).
There are two ways to set metadata on objects:
- At upload time you can specify one or more metadata properties to
associate with objects, using the gsutil -h option. For example, the
following command would cause gsutil to set the Content-Type and
Cache-Control for each of the files being uploaded:
gsutil -h "Content-Type:text/html" \\
-h "Cache-Control:public, max-age=3600" cp -r images \\
gs://bucket/images
Note that -h is an option on the gsutil command, not the cp sub-command.
- You can set or remove metadata fields from already uploaded objects using
the gsutil setmeta command. See "gsutil help setmeta".
More details about specific pieces of metadata are discussed below.
<B>CONTENT-TYPE</B>
The most commonly set metadata is Content-Type (also known as MIME type),
which allows browsers to render the object properly. gsutil sets the
Content-Type automatically at upload time, based on each filename extension.
For example, uploading files with names ending in .txt will set Content-Type
to text/plain. If you're running gsutil on Linux or macOS and would prefer to
have content type set based on naming plus content examination, see the
use_magicfile configuration variable in the .boto configuration file (See
also "gsutil help config"). In general, using use_magicfile is more robust
and configurable, but is not available on Windows.
If you specify Content-Type with -h when uploading content (like the
example gsutil command given in the previous section), it overrides the
Content-Type that would have been set based on filename extension or content.
This can be useful if the Content-Type detection algorithm doesn't work as
desired for some of your files.
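  For example, a command along the following lines (the file and bucket names
  are placeholders) would force a specific Content-Type regardless of the
  filename extension:
    gsutil -h "Content-Type:application/octet-stream" cp data.bin gs://your-bucket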
<B>CACHE-CONTROL</B>
Another commonly set piece of metadata is Cache-Control, which allows
you to control whether and for how long browser and Internet caches are
allowed to cache your objects. Cache-Control only applies to objects with
a public-read ACL. Non-public data are not cacheable.
Here's an example of uploading a set of objects to allow caching:
gsutil -h "Cache-Control:public,max-age=3600" cp -a public-read \\
-r html gs://bucket/html
This command would upload all files in the html directory (and subdirectories)
and make them publicly readable and cacheable, with cache expiration of
one hour.
Note that if you allow caching, at download time you may see older versions
of objects after uploading a newer replacement object. Note also that because
objects can be cached at various places on the Internet there is no way to
force a cached object to expire globally (unlike the way you can force your
browser to refresh its cache). If you want to prevent serving cached versions
of publicly readable objects, set "Cache-Control:no-cache, max-age=0" on the
object. You can do this with a command such as:
gsutil -h "Cache-Control:no-cache,max-age=0" \\
cp -a public-read file.png gs://your-bucket
Another use of Cache-Control is through the "no-transform" value,
which instructs Google Cloud Storage to not apply any content transformations
based on specifics of a download request, such as removing gzip
content-encoding for incompatible clients. Note that this parameter is only
respected by the XML API. The Google Cloud Storage JSON API respects only the
public, private, no-cache, and max-age Cache-Control parameters.
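  When using the XML API, a command like the following (file and bucket names
  are placeholders) would mark an object so that such transformations are not
  applied:
    gsutil -h "Cache-Control:no-transform" cp data.csv gs://your-bucket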
For details about how to set the Cache-Control metadata see
"gsutil help setmeta".
<B>CONTENT-ENCODING</B>
You can specify a Content-Encoding to indicate that an object is compressed
(for example, with gzip compression) while maintaining its Content-Type.
You will need to ensure that the files have been compressed using the
specified Content-Encoding before using gsutil to upload them. Consider the
following example for Linux:
echo "Highly compressible text" | gzip > foo.txt
gsutil -h "Content-Encoding:gzip" \\
-h "Content-Type:text/plain" \\
cp foo.txt gs://bucket/compressed
Note that this is different from uploading a gzipped object foo.txt.gz with
Content-Type: application/x-gzip because most browsers are able to
dynamically decompress and process objects served with Content-Encoding: gzip
based on the underlying Content-Type.
For compressible content, using Content-Encoding: gzip saves network and
storage costs, and improves content serving performance. However, for content
that is already inherently compressed (archives and many media formats, for
instance) applying another level of compression via Content-Encoding is
typically detrimental to both object size and performance and should be
avoided.
Note also that gsutil provides an easy way to cause content to be compressed
and stored with Content-Encoding: gzip: see the -z and -Z options in
"gsutil help cp".
<B>CONTENT-DISPOSITION</B>
You can set Content-Disposition on your objects, to specify presentation
information about the data being transmitted. Here's an example:
gsutil -h 'Content-Disposition:attachment; filename=filename.ext' \\
cp -r attachments gs://bucket/attachments
Setting the Content-Disposition allows you to control presentation style
of the content, for example determining whether an attachment should be
automatically displayed vs should require some form of action from the user to
open it. See https://tools.ietf.org/html/rfc6266
for more details about the meaning of Content-Disposition.
<B>CUSTOM METADATA</B>
You can add your own custom metadata (e.g,. for use by your application)
to a Google Cloud Storage object by using "x-goog-meta" with -h. For example:
gsutil -h x-goog-meta-reviewer:jane cp mycode.java gs://bucket/reviews
You can add multiple differently-named custom metadata fields to each object.
<B>SETTABLE FIELDS; FIELD VALUES</B>
You can't set some metadata fields, such as ETag and Content-Length. The
fields you can set are:
- Cache-Control
- Content-Disposition
- Content-Encoding
- Content-Language
- Content-Type
- Custom metadata
Field names are case-insensitive.
All fields and their values must consist only of ASCII characters, with the
exception of values for x-goog-meta- fields, which may contain arbitrary
Unicode values. Note that when setting metadata using the XML API, which sends
custom metadata as HTTP headers, Unicode characters will be encoded using
UTF-8, then url-encoded to ASCII. For example:
gsutil setmeta -h "x-goog-meta-foo: ã" gs://bucket/object
would store the custom metadata key-value pair of "foo" and "%C3%A3".
Subsequently, running "ls -L" using the JSON API to list the object's metadata
would print "%C3%A3", while "ls -L" using the XML API would url-decode this
value automatically, printing the character "ã".
<B>VIEWING CURRENTLY SET METADATA</B>
You can see what metadata is currently set on an object by using:
gsutil ls -L gs://the_bucket/the_object
""")
class CommandOptions(HelpProvider):
"""Additional help about object metadata."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='metadata',
help_name_aliases=[
'cache-control',
'caching',
'content type',
'mime type',
'mime',
'type',
],
help_type='additional_help',
help_one_line_summary='Working With Object Metadata',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
| 42.183962 | 80 | 0.74986 |
cd0e17de71b2819051cf2e6b0a19521b1e984dce | 52 | py | Python | old_lambda/lambda_function/__init__.py | jdkandersson/cloudformation-kubernetes | 8bd14379540bd2d122283c74166883e375cb348e | [
"Apache-2.0"
] | null | null | null | old_lambda/lambda_function/__init__.py | jdkandersson/cloudformation-kubernetes | 8bd14379540bd2d122283c74166883e375cb348e | [
"Apache-2.0"
] | null | null | null | old_lambda/lambda_function/__init__.py | jdkandersson/cloudformation-kubernetes | 8bd14379540bd2d122283c74166883e375cb348e | [
"Apache-2.0"
] | null | null | null | """Custom cloudformation handler for Kubernetes."""
| 26 | 51 | 0.769231 |
1c609eeb2ae4ca6a6e95e8ddada6e94504cbcf2d | 451 | py | Python | boost_adaptbx/tests/tst_swig_args.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 155 | 2016-11-23T12:52:16.000Z | 2022-03-31T15:35:44.000Z | boost_adaptbx/tests/tst_swig_args.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 590 | 2016-12-10T11:31:18.000Z | 2022-03-30T23:10:09.000Z | boost_adaptbx/tests/tst_swig_args.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 115 | 2016-11-15T08:17:28.000Z | 2022-02-09T15:30:14.000Z | from __future__ import absolute_import, division, print_function
import example
import boost_python_swig_args_ext
import sys
def exercise():
c = example.Circle(10)
c.x = 20
c.y = 30
s = example.Square(10)
s.x = -10
s.y = 5
forever = "--forever" in sys.argv[1:]
while True:
boost_python_swig_args_ext.show(c.this)
boost_python_swig_args_ext.show(s.this)
if (not forever): break
if (__name__ == "__main__"):
exercise()
| 19.608696 | 64 | 0.698448 |
1af78bae29c96149df5fcc66b322cdc7b576767e | 12,920 | py | Python | sdks/python/appcenter_sdk/models/StackFrame.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | null | null | null | sdks/python/appcenter_sdk/models/StackFrame.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 6 | 2019-10-23T06:38:53.000Z | 2022-01-22T07:57:58.000Z | sdks/python/appcenter_sdk/models/StackFrame.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 2 | 2019-10-23T06:31:05.000Z | 2021-08-21T17:32:47.000Z | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class StackFrame(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
JavaScript = "JavaScript"
CSharp = "CSharp"
Objective-C = "Objective-C"
Objective-Cpp = "Objective-Cpp"
Cpp = "Cpp"
C = "C"
Swift = "Swift"
Java = "Java"
Unknown = "Unknown"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'address': 'string',
'class_name': 'string',
'method': 'string',
'class_method': 'boolean',
'file': 'string',
'line': 'integer',
'app_code': 'boolean',
'framework_name': 'string',
'code_raw': 'string',
'code_formatted': 'string',
'language': 'string',
'relevant': 'boolean',
'method_params': 'string'
}
attribute_map = {
'address': 'address',
'class_name': 'class_name',
'method': 'method',
'class_method': 'class_method',
'file': 'file',
'line': 'line',
'app_code': 'app_code',
'framework_name': 'framework_name',
'code_raw': 'code_raw',
'code_formatted': 'code_formatted',
'language': 'language',
'relevant': 'relevant',
'method_params': 'method_params'
}
def __init__(self, address=None, class_name=None, method=None, class_method=None, file=None, line=None, app_code=None, framework_name=None, code_raw=None, code_formatted=None, language=None, relevant=None, method_params=None): # noqa: E501
"""StackFrame - a model defined in Swagger""" # noqa: E501
self._address = None
self._class_name = None
self._method = None
self._class_method = None
self._file = None
self._line = None
self._app_code = None
self._framework_name = None
self._code_raw = None
self._code_formatted = None
self._language = None
self._relevant = None
self._method_params = None
self.discriminator = None
if address is not None:
self.address = address
if class_name is not None:
self.class_name = class_name
if method is not None:
self.method = method
if class_method is not None:
self.class_method = class_method
if file is not None:
self.file = file
if line is not None:
self.line = line
self.app_code = app_code
if framework_name is not None:
self.framework_name = framework_name
self.code_raw = code_raw
self.code_formatted = code_formatted
if language is not None:
self.language = language
if relevant is not None:
self.relevant = relevant
if method_params is not None:
self.method_params = method_params
@property
def address(self):
"""Gets the address of this StackFrame. # noqa: E501
address of the frame # noqa: E501
:return: The address of this StackFrame. # noqa: E501
:rtype: string
"""
return self._address
@address.setter
def address(self, address):
"""Sets the address of this StackFrame.
address of the frame # noqa: E501
:param address: The address of this StackFrame. # noqa: E501
:type: string
"""
self._address = address
@property
def class_name(self):
"""Gets the class_name of this StackFrame. # noqa: E501
name of the class # noqa: E501
:return: The class_name of this StackFrame. # noqa: E501
:rtype: string
"""
return self._class_name
@class_name.setter
def class_name(self, class_name):
"""Sets the class_name of this StackFrame.
name of the class # noqa: E501
:param class_name: The class_name of this StackFrame. # noqa: E501
:type: string
"""
self._class_name = class_name
@property
def method(self):
"""Gets the method of this StackFrame. # noqa: E501
name of the method # noqa: E501
:return: The method of this StackFrame. # noqa: E501
:rtype: string
"""
return self._method
@method.setter
def method(self, method):
"""Sets the method of this StackFrame.
name of the method # noqa: E501
:param method: The method of this StackFrame. # noqa: E501
:type: string
"""
self._method = method
@property
def class_method(self):
"""Gets the class_method of this StackFrame. # noqa: E501
is a class method # noqa: E501
:return: The class_method of this StackFrame. # noqa: E501
:rtype: boolean
"""
return self._class_method
@class_method.setter
def class_method(self, class_method):
"""Sets the class_method of this StackFrame.
is a class method # noqa: E501
:param class_method: The class_method of this StackFrame. # noqa: E501
:type: boolean
"""
self._class_method = class_method
@property
def file(self):
"""Gets the file of this StackFrame. # noqa: E501
name of the file # noqa: E501
:return: The file of this StackFrame. # noqa: E501
:rtype: string
"""
return self._file
@file.setter
def file(self, file):
"""Sets the file of this StackFrame.
name of the file # noqa: E501
:param file: The file of this StackFrame. # noqa: E501
:type: string
"""
self._file = file
@property
def line(self):
"""Gets the line of this StackFrame. # noqa: E501
line number # noqa: E501
:return: The line of this StackFrame. # noqa: E501
:rtype: integer
"""
return self._line
@line.setter
def line(self, line):
"""Sets the line of this StackFrame.
line number # noqa: E501
:param line: The line of this StackFrame. # noqa: E501
:type: integer
"""
self._line = line
@property
def app_code(self):
"""Gets the app_code of this StackFrame. # noqa: E501
this line isn't from any framework # noqa: E501
:return: The app_code of this StackFrame. # noqa: E501
:rtype: boolean
"""
return self._app_code
@app_code.setter
def app_code(self, app_code):
"""Sets the app_code of this StackFrame.
this line isn't from any framework # noqa: E501
:param app_code: The app_code of this StackFrame. # noqa: E501
:type: boolean
"""
if app_code is None:
raise ValueError("Invalid value for `app_code`, must not be `None`") # noqa: E501
self._app_code = app_code
@property
def framework_name(self):
"""Gets the framework_name of this StackFrame. # noqa: E501
Name of the framework # noqa: E501
:return: The framework_name of this StackFrame. # noqa: E501
:rtype: string
"""
return self._framework_name
@framework_name.setter
def framework_name(self, framework_name):
"""Sets the framework_name of this StackFrame.
Name of the framework # noqa: E501
:param framework_name: The framework_name of this StackFrame. # noqa: E501
:type: string
"""
self._framework_name = framework_name
@property
def code_raw(self):
"""Gets the code_raw of this StackFrame. # noqa: E501
Raw frame string # noqa: E501
:return: The code_raw of this StackFrame. # noqa: E501
:rtype: string
"""
return self._code_raw
@code_raw.setter
def code_raw(self, code_raw):
"""Sets the code_raw of this StackFrame.
Raw frame string # noqa: E501
:param code_raw: The code_raw of this StackFrame. # noqa: E501
:type: string
"""
if code_raw is None:
raise ValueError("Invalid value for `code_raw`, must not be `None`") # noqa: E501
self._code_raw = code_raw
@property
def code_formatted(self):
"""Gets the code_formatted of this StackFrame. # noqa: E501
Formatted frame string # noqa: E501
:return: The code_formatted of this StackFrame. # noqa: E501
:rtype: string
"""
return self._code_formatted
@code_formatted.setter
def code_formatted(self, code_formatted):
"""Sets the code_formatted of this StackFrame.
Formatted frame string # noqa: E501
:param code_formatted: The code_formatted of this StackFrame. # noqa: E501
:type: string
"""
if code_formatted is None:
raise ValueError("Invalid value for `code_formatted`, must not be `None`") # noqa: E501
self._code_formatted = code_formatted
@property
def language(self):
"""Gets the language of this StackFrame. # noqa: E501
programming language of the frame # noqa: E501
:return: The language of this StackFrame. # noqa: E501
:rtype: string
"""
return self._language
@language.setter
def language(self, language):
"""Sets the language of this StackFrame.
programming language of the frame # noqa: E501
:param language: The language of this StackFrame. # noqa: E501
:type: string
"""
        allowed_values = ["JavaScript", "CSharp", "Objective-C", "Objective-Cpp", "Cpp", "C", "Swift", "Java", "Unknown"]  # noqa: E501
        if language is not None and language not in allowed_values:
            raise ValueError("Invalid value for `language`, must be one of {0}".format(allowed_values))  # noqa: E501
        self._language = language
@property
def relevant(self):
"""Gets the relevant of this StackFrame. # noqa: E501
frame should be shown always # noqa: E501
:return: The relevant of this StackFrame. # noqa: E501
:rtype: boolean
"""
return self._relevant
@relevant.setter
def relevant(self, relevant):
"""Sets the relevant of this StackFrame.
frame should be shown always # noqa: E501
:param relevant: The relevant of this StackFrame. # noqa: E501
:type: boolean
"""
self._relevant = relevant
@property
def method_params(self):
"""Gets the method_params of this StackFrame. # noqa: E501
parameters of the frames method # noqa: E501
:return: The method_params of this StackFrame. # noqa: E501
:rtype: string
"""
return self._method_params
@method_params.setter
def method_params(self, method_params):
"""Sets the method_params of this StackFrame.
parameters of the frames method # noqa: E501
:param method_params: The method_params of this StackFrame. # noqa: E501
:type: string
"""
self._method_params = method_params
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StackFrame):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.844828 | 244 | 0.583359 |
9ff28a71dc4a95cbff431c182b9284a7de2461b3 | 14,701 | py | Python | preprocess.py | xkortex/Siraj_Chatbot_Challenge | 3e2aaafb70afd3301ab79d74c08d6fb714c224c8 | [
"Apache-2.0"
] | 60 | 2017-04-08T05:30:21.000Z | 2021-06-19T17:40:11.000Z | preprocess.py | xkortex/Siraj_Chatbot_Challenge | 3e2aaafb70afd3301ab79d74c08d6fb714c224c8 | [
"Apache-2.0"
] | 1 | 2017-04-09T06:03:18.000Z | 2017-04-11T03:38:07.000Z | preprocess.py | xkortex/Siraj_Chatbot_Challenge | 3e2aaafb70afd3301ab79d74c08d6fb714c224c8 | [
"Apache-2.0"
] | 39 | 2017-04-11T08:49:34.000Z | 2019-10-07T14:38:46.000Z |
'''Preprocessing code for network on bAbI dataset.
References:
- Jason Weston, Antoine Bordes, Sumit Chopra, Tomas Mikolov, Alexander M. Rush,
"Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks",
http://arxiv.org/abs/1502.05698
- Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, Rob Fergus,
"End-To-End Memory Networks",
http://arxiv.org/abs/1503.08895
'''
from functools import reduce
import tarfile
import re
import glob
import numpy as np
import pandas as pd
from keras.utils.data_utils import get_file
from keras.preprocessing.sequence import pad_sequences
def charvectorize(word, lower=True, setsize=128):
"""
Convert a word (sequence of characters) to a n-vector of length setsize, using one-hot encoding
:param word: Word to vectorize
:param lower: Render word lowercase first before vectorizing
:param setsize: Size of character set
:return:
>>> charvectorize('Mary')
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
"""
if lower:
word = word.lower()
vec = np.zeros(setsize, int)
for c in word:
vec[ord(c)] = 1
return vec
def dist(v1, v2):
"""
Euclidean distance
:param v1: Vector
:param v2: Vector or list of vectors
:return:
>>> dist(0.5, 0.25)
0.25
>>> dist((.5, .6, .7), (.3, .3, .3))
0.53851648071345037
"""
v1 = np.array(v1)
v2 = np.array(v2)
dv = v2 - v1
dv = dv ** 2
dv = np.sum(dv, axis=-1)
return dv ** 0.5
def matchnocase(word, vocab):
"""
Match a word to a vocabulary while ignoring case
:param word: Word to try to match
:param vocab: Valid vocabulary
:return:
>>> matchnocase('mary', {'Alice', 'Bob', 'Mary'})
'Mary'
"""
lword = word.lower()
listvocab = list(vocab) # this trick catches dict and set in addition to list
lvocab = [w.lower() for w in listvocab]
if lword in lvocab:
return listvocab[lvocab.index(lword)]
return None
def softmatch(word, vocab, lower=True, cutoff=2.):
"""
Try to soft-match to catch various typos.
:param word: Word to try to match
:param vocab: Valid vocabulary
:param cutoff: Maximum distance (exclusive) to return match
:return: Corrected word
>>> softmatch('mbry', {'Alice', 'Bob', 'Mary'})
'Mary'
"""
listvocab = list(vocab)
vw = charvectorize(word)
vecs = np.array([charvectorize(w, lower=lower) for w in listvocab])
distances = dist(vw, vecs)
idx = np.argmin(distances)
confidence = distances[idx]
if confidence < cutoff:
return listvocab[idx]
return None
def tokenize(sent):
'''Return the tokens of a sentence including punctuation.
>>> tokenize('Bob dropped the apple. Where is the apple?')
['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
'''
    return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]
def parse_stories(lines, only_supporting=False):
"""
Parse stories provided in the bAbi tasks format
If only_supporting is true, only the sentences
that support the answer are kept.
    :param lines: iterable of raw bAbI-format lines (byte strings)
    :param only_supporting: if True, keep only the sentences that support the answer
    :return: list of (substory, question, answer) tuples
"""
data = []
story = []
for line in lines:
line = line.decode('utf-8').strip()
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
if '\t' in line:
q, a, supporting = line.split('\t')
q = tokenize(q)
substory = None
if only_supporting:
# Only select the related substory
supporting = map(int, supporting.split())
substory = [story[i - 1] for i in supporting]
else:
# Provide all the substories
substory = [x for x in story if x]
data.append((substory, q, a))
story.append('')
else:
sent = tokenize(line)
story.append(sent)
return data
def get_stories(f, only_supporting=False, max_length=None):
"""
Given a file name, read the file,
retrieve the stories,
and then convert the sentences into a single story.
If max_length is supplied,
any stories longer than max_length tokens will be discarded.
    :param f: file-like object containing bAbI task data
    :param only_supporting: passed through to parse_stories(); keep only supporting sentences
    :param max_length: if given, discard stories longer than max_length tokens
    :return: list of (story, question, answer) tuples, with each story flattened
"""
data = parse_stories(f.readlines(), only_supporting=only_supporting)
flatten = lambda data: reduce(lambda x, y: x + y, data)
data = [(flatten(story), q, answer) for story, q, answer in data if not max_length or len(flatten(story)) < max_length]
return data
class BabiVectorizer:
allow_case_insensitive = True
allow_softmatch = False
ignore_keyerror = True
basedir = 'tasks_1-20_v1-2/en-10k/'
challenge_files = glob.glob(basedir + 'qa*.txt')
# challenges = {
# # QA1 with 10,000 samples
# 'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt',
# # QA2 with 10,000 samples
# 'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt',
# }
challenges = {1: '{}qa1_single-supporting-fact_{}.txt',
2: '{}qa2_two-supporting-facts_{}.txt',
3: '{}qa3_three-supporting-facts_{}.txt',
4: '{}qa4_two-arg-relations_{}.txt',
5: '{}qa5_three-arg-relations_{}.txt',
6: '{}qa6_yes-no-questions_{}.txt',
7: '{}qa7_counting_{}.txt',
8: '{}qa8_lists-sets_{}.txt',
9: '{}qa9_simple-negation_{}.txt',
10: '{}qa10_indefinite-knowledge_{}.txt',
11: '{}qa11_basic-coreference_{}.txt',
12: '{}qa12_conjunction_{}.txt',
13: '{}qa13_compound-coreference_{}.txt',
14: '{}qa14_time-reasoning_{}.txt',
15: '{}qa15_basic-deduction_{}.txt',
16: '{}qa16_basic-induction_{}.txt',
17: '{}qa17_positional-reasoning_{}.txt',
18: '{}qa18_size-reasoning_{}.txt',
19: '{}qa19_path-finding_{}.txt',
20: '{}qa20_agents-motivations_{}.txt'}
lookup_challenge = {1:'single_supporting_fact_10k', 2: 'two_supporting_facts_10k' }
def __init__(self, challenge_num=1):
"""
Word Vectorizer for for Babi Dataset. Handles loading data, parsing, converting to int index, maintaining the
vocabulary, and converting back from vectors to sentences.
:param challenge_num: {1|2} Specify the challenge which to load.
1 = One supporting fact
2 = Two supporting facts
"""
try:
path = get_file('babi-tasks-v1-2.tar.gz',
origin='https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz')
except:
print('Error downloading dataset, please download it manually:\n'
'$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz\n'
'$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz')
raise
tar = tarfile.open(path)
challenge = self.challenges[challenge_num]
print('Loading: {}'.format(challenge))
train_records = get_stories(tar.extractfile(challenge.format(self.basedir, 'train')))
test_records = get_stories(tar.extractfile(challenge.format(self.basedir, 'test')))
vocab = set()
for story, q, answer in train_records + test_records:
vocab |= set(story + q + [answer])
vocab = sorted(vocab)
vocab_size = len(vocab) + 1
story_maxlen = max(map(len, (x for x, _, _ in train_records + test_records)))
query_maxlen = max(map(len, (x for _, x, _ in train_records + test_records)))
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
idx_word = {value: key for (key, value) in word_idx.items()} # reverse lookup
idx_word.update({0: ''})
stories, queries, answers = zip(*test_records)
self._vocab = vocab
self._vocab_size = vocab_size
self._word_idx = word_idx
self._idx_word = idx_word
self.story_maxlen = story_maxlen
self.query_maxlen = query_maxlen
self._train_records = train_records
self._test_records = test_records
self._lookup = dict(word_idx) # deal with null cases if necessary
self.stories = stories
self.answers = answers
def deindex_sentence(self, ary, prettify=True):
"""
Take a list of ints and return a sentence of words
:param ary: array-like, List of ints (vectorized sentence)
:param prettify: Clean up the sentence, e.g. trim extra spaces, add line breaks
:return: Sentence
:rtype: str
"""
sentence = []
for scalar in ary:
try:
word = self.idx_word[scalar]
if word:
sentence.append(word)
except KeyError:
print('Index not found in vocab: {}'.format(scalar))
sentence = ' '.join(sentence)
if prettify: # just tidy up a bit
sentence = sentence.replace(' . ', '.\n').replace(' .', '.')
return sentence
def vectorize_all(self, datatype='train'):
"""
Vectorize all items in the dataset
:param datatype: {'train'|'test'} specify the dataset to use
:return: (stories, queries, answers) each is a numpy array
:rtype: tuple
"""
if datatype == 'train':
data = self.train_records
elif datatype == 'test':
data = self.test_records
else:
raise ValueError('Invalid argument "datatype" specified: {}'.format(datatype))
X = []
Xq = []
Y = []
for story, query, answer in data:
x = [self.word_idx[w] for w in story]
xq = [self.word_idx[w] for w in query]
# let's not forget that index 0 is reserved
y = np.zeros(len(self.word_idx) + 1)
y[self.word_idx[answer]] = 1
X.append(x)
Xq.append(xq)
Y.append(y)
return (pad_sequences(X, maxlen=self.story_maxlen),
pad_sequences(Xq, maxlen=self.query_maxlen), np.array(Y))
def vectorize_story(self, story):
"""
Take a "story" and convert it to a sequence of ints using the vocab list
:param story:
:type story: list
:return: list of ints
"""
story = [self[w] for w in story]
return pad_sequences([story], maxlen=self.story_maxlen) # note: this expects a sequence
def vectorize_query(self, query, verbose=False):
"""
Take a query as a sentence string and return the vector in int-list form
:param query:
:type query: str
:param verbose:
:return: list of ints
"""
query = query.replace('?', ' ?')
query = query.split(' ')
exclude = ['', ' ']
query = [q for q in query if q not in exclude]
query = [self[q] for q in query]
if verbose: print('<v>Vectorize_query(): {}'.format(query))
queryvec = pad_sequences([query], maxlen=self.query_maxlen)
return queryvec
def devectorize_ans(self, ansvec, show_conf=False):
"""
Take a vector from NN answer and convert it back to word form
:param ansvec: n-dim vector, n=vocab size
:param show_conf: print out the confidence of the top few potential matches
:return:
"""
idx = np.argmax(ansvec)
if show_conf:
conf = list(ansvec.ravel())
vocab = [self.idx_word[i] for i in range(len(conf))]
df = pd.DataFrame(list(zip(vocab, conf )), columns=['vocab', 'conf'])
df = df.sort_values(by='conf', ascending=False)
df['conf'] = pd.Series(["{0:.2f}%".format(val * 100) for val in df['conf']], index=df.index)
print(df.head().to_string(index=False))
return self.idx_word[idx], ansvec.ravel()[idx]
def format_story(self, story):
print('-' * 30)
print(' '.join(story).replace(' . ', '.\n').replace(' .', '.'))
print('-' * 30)
def get_random_story(self, show=False):
"""Migrating this over to the StoryHandler, where it belongs"""
story = np.random.choice(self.stories)
if show:
self.format_story(story)
return story
@property
def vocab(self): return self._vocab
@property
def vocab_size(self): return self._vocab_size
@property
def word_idx(self): return self._word_idx
@property
def idx_word(self): return self._idx_word
@property
def train_records(self): return self._train_records
@property
def test_records(self): return self._test_records
@property
def lookup(self): return self._lookup
def __getitem__(self, item):
"""Allows us to use the vectorizer object itself to do lookups. Clever, perhaps too clever.
Only does word_to_index lookups. index_to_word lookups must be invoked with self.idx_word
If allow_case_insensitive is specified, try to do a match with all lower case.
If that fails, flag the error."""
try:
return self.lookup[item]
except KeyError:
pass
if self.allow_case_insensitive:
correctitem = matchnocase(item, self.word_idx)
try:
return self.lookup[correctitem]
except KeyError:
pass
if self.allow_softmatch:
correctitem = softmatch(item, self.word_idx, lower=True, cutoff=2.)
try:
return self.lookup[correctitem]
except KeyError:
pass
# fallthrough condition. Key not found with soft matches
if self.ignore_keyerror:
print('<!> Value not found in lookup: {}'.format(item))
return 0
else:
raise KeyError('Value not found in lookup: {}'.format(item))
| 35.424096 | 123 | 0.577308 |
8ad69fcbd4f1ee977e00b5b94eacf77b3737ab0a | 9,237 | py | Python | nova/notifications/objects/base.py | karimull/nova | 9dcff4d4ed3e5ed5c0f58638c863562f4761495c | [
"Apache-2.0"
] | null | null | null | nova/notifications/objects/base.py | karimull/nova | 9dcff4d4ed3e5ed5c0f58638c863562f4761495c | [
"Apache-2.0"
] | null | null | null | nova/notifications/objects/base.py | karimull/nova | 9dcff4d4ed3e5ed5c0f58638c863562f4761495c | [
"Apache-2.0"
] | null | null | null | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_versionedobjects import exception as ovo_exception
from nova import exception
from nova.objects import base
from nova.objects import fields
from nova import rpc
LOG = logging.getLogger(__name__)
@base.NovaObjectRegistry.register_if(False)
class NotificationObject(base.NovaObject):
"""Base class for every notification related versioned object."""
# Version 1.0: Initial version
VERSION = '1.0'
def __init__(self, **kwargs):
super(NotificationObject, self).__init__(**kwargs)
# The notification objects are created on the fly when nova emits the
# notification. This causes that every object shows every field as
# changed. We don't want to send this meaningless information so we
# reset the object after creation.
self.obj_reset_changes(recursive=False)
@base.NovaObjectRegistry.register_notification
class EventType(NotificationObject):
# Version 1.0: Initial version
# Version 1.1: New valid actions values are added to the
# NotificationActionField enum
# Version 1.2: DELETE value is added to the NotificationActionField enum
# Version 1.3: Set of new values are added to NotificationActionField enum
# Version 1.4: Another set of new values are added to
# NotificationActionField enum
# Version 1.5: Aggregate related values have been added to
# NotificationActionField enum
# Version 1.6: ADD_FIX_IP replaced with INTERFACE_ATTACH in
# NotificationActionField enum
# Version 1.7: REMOVE_FIXED_IP replaced with INTERFACE_DETACH in
# NotificationActionField enum
# Version 1.8: IMPORT value is added to NotificationActionField enum
# Version 1.9: ADD_MEMBER value is added to NotificationActionField enum
VERSION = '1.9'
fields = {
'object': fields.StringField(nullable=False),
'action': fields.NotificationActionField(nullable=False),
'phase': fields.NotificationPhaseField(nullable=True),
}
def __init__(self, object, action, phase=None):
super(EventType, self).__init__()
self.object = object
self.action = action
self.phase = phase
def to_notification_event_type_field(self):
"""Serialize the object to the wire format."""
s = '%s.%s' % (self.object, self.action)
if self.phase:
s += '.%s' % self.phase
return s
@base.NovaObjectRegistry.register_if(False)
class NotificationPayloadBase(NotificationObject):
"""Base class for the payload of versioned notifications."""
# SCHEMA defines how to populate the payload fields. It is a dictionary
# where every key value pair has the following format:
# <payload_field_name>: (<data_source_name>,
# <field_of_the_data_source>)
# The <payload_field_name> is the name where the data will be stored in the
# payload object, this field has to be defined as a field of the payload.
# The <data_source_name> shall refer to name of the parameter passed as
# kwarg to the payload's populate_schema() call and this object will be
# used as the source of the data. The <field_of_the_data_source> shall be
# a valid field of the passed argument.
# The SCHEMA needs to be applied with the populate_schema() call before the
# notification can be emitted.
# The value of the payload.<payload_field_name> field will be set by the
# <data_source_name>.<field_of_the_data_source> field. The
# <data_source_name> will not be part of the payload object internal or
# external representation.
# Payload fields that are not set by the SCHEMA can be filled in the same
# way as in any versioned object.
SCHEMA = {}
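    # Illustrative example (field and object names here are assumptions, not
    # from an actual payload class): a payload that declares
    #     SCHEMA = {'uuid': ('instance', 'uuid'),
    #               'host': ('instance', 'host')}
    # and also defines `uuid` and `host` as payload fields would be filled by
    # calling payload.populate_schema(instance=instance), which copies
    # instance.uuid into payload.uuid and instance.host into payload.host.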
# Version 1.0: Initial version
VERSION = '1.0'
def __init__(self):
super(NotificationPayloadBase, self).__init__()
self.populated = not self.SCHEMA
@rpc.if_notifications_enabled
def populate_schema(self, **kwargs):
"""Populate the object based on the SCHEMA and the source objects
:param kwargs: A dict contains the source object at the key defined in
the SCHEMA
"""
for key, (obj, field) in self.SCHEMA.items():
source = kwargs[obj]
# trigger lazy-load if possible
try:
setattr(self, key, getattr(source, field))
# ObjectActionError - not lazy loadable field
# NotImplementedError - obj_load_attr() is not even defined
# OrphanedObjectError - lazy loadable field but context is None
except (exception.ObjectActionError,
NotImplementedError,
exception.OrphanedObjectError,
ovo_exception.OrphanedObjectError) as e:
LOG.debug(("Defaulting the value of the field '%(field)s' "
"to None in %(payload)s due to '%(exception)s'"),
{'field': key,
'payload': self.__class__.__name__,
'exception': e})
# NOTE(gibi): This will fail if the payload field is not
# nullable, but that means that either the source object is not
# properly initialized or the payload field needs to be defined
# as nullable
setattr(self, key, None)
self.populated = True
# the schema population will create changed fields but we don't need
# this information in the notification
self.obj_reset_changes(recursive=True)
@base.NovaObjectRegistry.register_notification
class NotificationPublisher(NotificationObject):
# Version 1.0: Initial version
# 2.0: The binary field has been renamed to source
# 2.1: The type of the source field changed from string to enum.
# This only needs a minor bump as the enum uses the possible
# values of the previous string field
# 2.2: New enum for source fields added
VERSION = '2.2'
fields = {
'host': fields.StringField(nullable=False),
'source': fields.NotificationSourceField(nullable=False),
}
def __init__(self, host, source):
super(NotificationPublisher, self).__init__()
self.host = host
self.source = source
@classmethod
def from_service_obj(cls, service):
source = fields.NotificationSource.get_source_by_binary(service.binary)
return cls(host=service.host, source=source)
@base.NovaObjectRegistry.register_if(False)
class NotificationBase(NotificationObject):
"""Base class for versioned notifications.
Every subclass shall define a 'payload' field.
"""
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'priority': fields.NotificationPriorityField(),
'event_type': fields.ObjectField('EventType'),
'publisher': fields.ObjectField('NotificationPublisher'),
}
def _emit(self, context, event_type, publisher_id, payload):
notifier = rpc.get_versioned_notifier(publisher_id)
notify = getattr(notifier, self.priority)
notify(context, event_type=event_type, payload=payload)
@rpc.if_notifications_enabled
def emit(self, context):
"""Send the notification."""
assert self.payload.populated
# Note(gibi): notification payload will be a newly populated object
# therefore every field of it will look changed so this does not carry
# any extra information so we drop this from the payload.
self.payload.obj_reset_changes(recursive=True)
self._emit(context,
event_type=
self.event_type.to_notification_event_type_field(),
publisher_id='%s:%s' %
(self.publisher.source,
self.publisher.host),
payload=self.payload.obj_to_primitive())
def notification_sample(sample):
"""Class decorator to attach the notification sample information
to the notification object for documentation generation purposes.
:param sample: the path of the sample json file relative to the
doc/notification_samples/ directory in the nova repository
root.
"""
def wrap(cls):
if not getattr(cls, 'samples', None):
cls.samples = [sample]
else:
cls.samples.append(sample)
return cls
return wrap
| 41.236607 | 79 | 0.658439 |
568e879c9c5d983665843ace84cf11dcffa25212 | 118,721 | py | Python | tensorflow/python/keras/layers/recurrent.py | TaeWoo21/Tensorflow | 7b9f418c374b3e919fe88e22ddcaf5528222eef7 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/layers/recurrent.py | TaeWoo21/Tensorflow | 7b9f418c374b3e919fe88e22ddcaf5528222eef7 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/layers/recurrent.py | TaeWoo21/Tensorflow | 7b9f418c374b3e919fe88e22ddcaf5528222eef7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Recurrent layers and their base classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_cudnn_rnn_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.StackedRNNCells')
class StackedRNNCells(Layer):
"""Wrapper allowing a stack of RNN cells to behave as a single cell.
Used to implement efficient stacked RNNs.
Arguments:
cells: List of RNN cell instances.
Examples:
```python
cells = [
keras.layers.LSTMCell(output_dim),
keras.layers.LSTMCell(output_dim),
keras.layers.LSTMCell(output_dim),
]
inputs = keras.Input((timesteps, input_dim))
x = keras.layers.RNN(cells)(inputs)
```
"""
def __init__(self, cells, **kwargs):
for cell in cells:
if not hasattr(cell, 'call'):
raise ValueError('All cells must have a `call` method. '
'received cells:', cells)
if not hasattr(cell, 'state_size'):
raise ValueError('All cells must have a '
'`state_size` attribute. '
'received cells:', cells)
self.cells = cells
# reverse_state_order determines whether the state size will be in a reverse
# order of the cells' state. User might want to set this to True to keep the
# existing behavior. This is only useful when use RNN(return_state=True)
# since the state will be returned as the same order of state_size.
self.reverse_state_order = kwargs.pop('reverse_state_order', False)
if self.reverse_state_order:
logging.warning('reverse_state_order=True in StackedRNNCells will soon '
'be deprecated. Please update the code to work with the '
'natural order of states if you reply on the RNN states, '
'eg RNN(return_state=True).')
super(StackedRNNCells, self).__init__(**kwargs)
@property
def state_size(self):
return tuple(c.state_size for c in
(self.cells[::-1] if self.reverse_state_order else self.cells))
@property
def output_size(self):
if getattr(self.cells[-1], 'output_size', None) is not None:
return self.cells[-1].output_size
elif _is_multiple_state(self.cells[-1].state_size):
return self.cells[-1].state_size[0]
else:
return self.cells[-1].state_size
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
initial_states = []
for cell in self.cells[::-1] if self.reverse_state_order else self.cells:
get_initial_state_fn = getattr(cell, 'get_initial_state', None)
if get_initial_state_fn:
initial_states.append(get_initial_state_fn(
inputs=inputs, batch_size=batch_size, dtype=dtype))
else:
initial_states.append(_generate_zero_filled_state_for_cell(
cell, inputs, batch_size, dtype))
return tuple(initial_states)
def call(self, inputs, states, constants=None, **kwargs):
# Recover per-cell states.
state_size = (self.state_size[::-1]
if self.reverse_state_order else self.state_size)
nested_states = nest.pack_sequence_as(state_size, nest.flatten(states))
# Call the cells in order and store the returned states.
new_nested_states = []
for cell, states in zip(self.cells, nested_states):
states = states if nest.is_sequence(states) else [states]
if generic_utils.has_arg(cell.call, 'constants'):
inputs, states = cell.call(inputs, states, constants=constants,
**kwargs)
else:
inputs, states = cell.call(inputs, states, **kwargs)
new_nested_states.append(states)
return inputs, nest.pack_sequence_as(state_size,
nest.flatten(new_nested_states))
@tf_utils.shape_type_conversion
def build(self, input_shape):
if isinstance(input_shape, list):
constants_shape = input_shape[1:]
input_shape = input_shape[0]
for cell in self.cells:
if isinstance(cell, Layer):
if generic_utils.has_arg(cell.call, 'constants'):
cell.build([input_shape] + constants_shape)
else:
cell.build(input_shape)
if getattr(cell, 'output_size', None) is not None:
output_dim = cell.output_size
elif _is_multiple_state(cell.state_size):
output_dim = cell.state_size[0]
else:
output_dim = cell.state_size
input_shape = tuple([input_shape[0]] +
tensor_shape.as_shape(output_dim).as_list())
self.built = True
def get_config(self):
cells = []
for cell in self.cells:
cells.append({
'class_name': cell.__class__.__name__,
'config': cell.get_config()
})
config = {'cells': cells}
base_config = super(StackedRNNCells, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
cells = []
for cell_config in config.pop('cells'):
cells.append(
deserialize_layer(cell_config, custom_objects=custom_objects))
return cls(cells, **config)
@property
def trainable_weights(self):
if not self.trainable:
return []
weights = []
for cell in self.cells:
if isinstance(cell, Layer):
weights += cell.trainable_weights
return weights
@property
def non_trainable_weights(self):
weights = []
for cell in self.cells:
if isinstance(cell, Layer):
weights += cell.non_trainable_weights
if not self.trainable:
trainable_weights = []
for cell in self.cells:
if isinstance(cell, Layer):
trainable_weights += cell.trainable_weights
return trainable_weights + weights
return weights
def get_weights(self):
"""Retrieves the weights of the model.
Returns:
A flat list of Numpy arrays.
"""
weights = []
for cell in self.cells:
if isinstance(cell, Layer):
weights += cell.weights
return K.batch_get_value(weights)
def set_weights(self, weights):
"""Sets the weights of the model.
Arguments:
weights: A list of Numpy arrays with shapes and types matching
the output of `model.get_weights()`.
"""
tuples = []
for cell in self.cells:
if isinstance(cell, Layer):
num_param = len(cell.weights)
weights = weights[:num_param]
for sw, w in zip(cell.weights, weights):
tuples.append((sw, w))
weights = weights[num_param:]
K.batch_set_value(tuples)
@property
def losses(self):
losses = []
for cell in self.cells:
if isinstance(cell, Layer):
losses += cell.losses
return losses + self._losses
@property
def updates(self):
updates = []
for cell in self.cells:
if isinstance(cell, Layer):
updates += cell.updates
return updates + self._updates
@tf_export('keras.layers.RNN')
class RNN(Layer):
"""Base class for recurrent layers.
Arguments:
cell: A RNN cell instance or a list of RNN cell instances.
A RNN cell is a class that has:
- a `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- a `state_size` attribute. This can be a single integer
(single state) in which case it is the size of the recurrent
state. This can also be a list/tuple of integers (one size per
state).
The `state_size` can also be TensorShape or tuple/list of
TensorShape, to represent high dimension state.
- a `output_size` attribute. This can be a single integer or a
TensorShape, which represent the shape of the output. For backward
compatible reason, if this attribute is not available for the
cell, the value will be inferred by the first element of the
`state_size`.
- a `get_initial_state(inputs=None, batch_size=None, dtype=None)`
method that creates a tensor meant to be fed to `call()` as the
initial state, if user didn't specify any initial state via other
means. The returned initial state should be in shape of
[batch, cell.state_size]. Cell might choose to create zero filled
tensor, or with other values based on the cell implementations.
`inputs` is the input tensor to the RNN layer, which should
contain the batch size as its shape[0], and also dtype. Note that
the shape[0] might be None during the graph construction. Either
the `inputs` or the pair of `batch` and `dtype `are provided.
`batch` is a scalar tensor that represent the batch size
of the input. `dtype` is `tf.dtype` that represent the dtype of
the input.
For backward compatible reason, if this method is not implemented
by the cell, RNN layer will create a zero filled tensors with the
size of [batch, cell.state_size].
In the case that `cell` is a list of RNN cell instances, the cells
will be stacked on after the other in the RNN, implementing an
efficient stacked RNN.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
input_dim: dimensionality of the input (integer or tuple of integers).
This argument (or alternatively, the keyword argument `input_shape`)
is required when using this layer as the first layer in a model.
input_length: Length of input sequences, to be specified
when it is constant.
This argument is required if you are going to connect
`Flatten` then `Dense` layers upstream
(without it, the shape of the dense outputs cannot be computed).
Note that if the recurrent layer is not the first layer
in your model, you would need to specify the input length
at the level of the first layer
(e.g. via the `input_shape` argument)
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
Input shape:
N-D tensor with shape `(batch_size, timesteps, ...)` or
`(timesteps, batch_size, ...)` when time_major is True.
Output shape:
- if `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, state_size)`, where `state_size` could
be a high dimension tensor shape.
- if `return_sequences`: N-D tensor with shape
`(batch_size, timesteps, output_size)`, where `output_size` could
be a high dimension tensor shape, or
`(timesteps, batch_size, output_size)` when `time_major` is True.
- else, N-D tensor with shape `(batch_size, output_size)`, where
`output_size` could be a high dimension tensor shape.
# Masking
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
# Note on using statefulness in RNNs
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- specify `stateful=True` in the layer constructor.
- specify a fixed batch size for your model, by passing
if sequential model:
`batch_input_shape=(...)` to the first layer in your model.
else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
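  For example (a minimal sketch; the data tensors `x_train`/`y_train` and the
  chosen sizes are illustrative assumptions):
  ```python
  model = keras.Sequential()
  model.add(keras.layers.LSTM(32, stateful=True,
                              batch_input_shape=(32, 10, 100)))
  model.compile(optimizer='adam', loss='mse')
  model.fit(x_train, y_train, batch_size=32, shuffle=False)
  model.reset_states()  # clear the carried-over states when starting new sequences
  ```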
# Note on specifying the initial state of RNNs
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
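  For example (a sketch; `encoder_state_h` and `encoder_state_c` are assumed
  tensors of shape `(batch_size, units)`):
  ```python
  decoder = keras.layers.LSTM(units, return_sequences=True)
  outputs = decoder(decoder_inputs,
                    initial_state=[encoder_state_h, encoder_state_c])
  ```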
# Note on passing external constants to RNNs
You can pass "external" constants to the cell using the `constants`
keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
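  For instance (a schematic sketch; `MyAttentionCell` and `attention_memory`
  are assumed to be defined elsewhere and are not part of this module):
  ```python
  cell = MyAttentionCell(32)   # its call() signature accepts `constants`
  layer = RNN(cell)
  y = layer(x, constants=attention_memory)
  ```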
Examples:
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
"""
def __init__(self,
cell,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
**kwargs):
if isinstance(cell, (list, tuple)):
cell = StackedRNNCells(cell)
if not hasattr(cell, 'call'):
raise ValueError('`cell` should have a `call` method. '
'The RNN was passed:', cell)
if not hasattr(cell, 'state_size'):
raise ValueError('The RNN cell should have '
'an attribute `state_size` '
'(tuple of integers, '
'one integer per RNN state).')
# If True, the output for masked timestep will be zeros, whereas in the
# False case, output from previous timestep is returned for masked timestep.
self.zero_output_for_mask = kwargs.pop('zero_output_for_mask', False)
super(RNN, self).__init__(**kwargs)
self.cell = cell
if isinstance(cell, checkpointable.CheckpointableBase):
self._track_checkpointable(self.cell, name='cell')
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
self.stateful = stateful
self.unroll = unroll
self.time_major = time_major
self.supports_masking = True
# The input shape is unknown yet, it could have nested tensor inputs, and
# the input spec will be the list of specs for flattened inputs.
self.input_spec = None
self.state_spec = None
self._states = None
self.constants_spec = None
self._num_constants = None
self._num_inputs = None
@property
def states(self):
if self._states is None:
state = nest.map_structure(lambda _: None, self.cell.state_size)
return state if nest.is_sequence(self.cell.state_size) else [state]
return self._states
@states.setter
def states(self, states):
self._states = states
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
# Check whether the input shape contains any nested shapes. It could be
# (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy
# inputs.
try:
input_shape = tensor_shape.as_shape(input_shape)
except (ValueError, TypeError):
# A nested tensor input
input_shape = nest.flatten(input_shape)[0]
batch = input_shape[0]
time_step = input_shape[1]
if self.time_major:
batch, time_step = time_step, batch
if _is_multiple_state(self.cell.state_size):
state_size = self.cell.state_size
else:
state_size = [self.cell.state_size]
def _get_output_shape(flat_output_size):
output_dim = tensor_shape.as_shape(flat_output_size).as_list()
if self.return_sequences:
if self.time_major:
output_shape = tensor_shape.as_shape([time_step, batch] + output_dim)
else:
output_shape = tensor_shape.as_shape([batch, time_step] + output_dim)
else:
output_shape = tensor_shape.as_shape([batch] + output_dim)
return output_shape
if getattr(self.cell, 'output_size', None) is not None:
# cell.output_size could be nested structure.
output_shape = nest.flatten(nest.map_structure(
_get_output_shape, self.cell.output_size))
output_shape = output_shape[0] if len(output_shape) == 1 else output_shape
else:
# Note that state_size[0] could be a tensor_shape or int.
output_shape = _get_output_shape(state_size[0])
if self.return_state:
def _get_state_shape(flat_state):
state_shape = [batch] + tensor_shape.as_shape(flat_state).as_list()
return tensor_shape.as_shape(state_shape)
state_shape = nest.map_structure(_get_state_shape, state_size)
return generic_utils.to_list(output_shape) + nest.flatten(state_shape)
else:
return output_shape
def compute_mask(self, inputs, mask):
if isinstance(mask, list):
mask = mask[0]
output_mask = mask if self.return_sequences else None
if self.return_state:
state_mask = [None for _ in self.states]
return [output_mask] + state_mask
else:
return output_mask
def build(self, input_shape):
# Note input_shape will be list of shapes of initial states and
# constants if these are passed in __call__.
if self._num_constants is not None:
constants_shape = input_shape[-self._num_constants:] # pylint: disable=invalid-unary-operand-type
constants_shape = nest.map_structure(
lambda s: tuple(tensor_shape.TensorShape(s).as_list()),
constants_shape)
else:
constants_shape = None
if isinstance(input_shape, list):
input_shape = input_shape[0]
# The input_shape here could be a nest structure.
# do the tensor_shape to shapes here. The input could be single tensor, or a
# nested structure of tensors.
def get_input_spec(shape):
if isinstance(shape, tensor_shape.TensorShape):
input_spec_shape = shape.as_list()
else:
input_spec_shape = list(shape)
batch_index, time_step_index = (1, 0) if self.time_major else (0, 1)
if not self.stateful:
input_spec_shape[batch_index] = None
input_spec_shape[time_step_index] = None
return InputSpec(shape=tuple(input_spec_shape))
def get_step_input_shape(shape):
if isinstance(shape, tensor_shape.TensorShape):
shape = tuple(shape.as_list())
# remove the timestep from the input_shape
return shape[1:] if self.time_major else (shape[0],) + shape[2:]
# Check whether the input shape contains any nested shapes. It could be
# (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy
# inputs.
try:
input_shape = tensor_shape.as_shape(input_shape)
except (ValueError, TypeError):
# A nested tensor input
pass
if not nest.is_sequence(input_shape):
# This indicates the there is only one input.
if self.input_spec is not None:
self.input_spec[0] = get_input_spec(input_shape)
else:
self.input_spec = [get_input_spec(input_shape)]
step_input_shape = get_step_input_shape(input_shape)
else:
flat_input_shapes = nest.flatten(input_shape)
flat_input_shapes = nest.map_structure(get_input_spec, flat_input_shapes)
assert len(flat_input_shapes) == self._num_inputs
if self.input_spec is not None:
self.input_spec[:self._num_inputs] = flat_input_shapes
else:
self.input_spec = flat_input_shapes
step_input_shape = nest.map_structure(get_step_input_shape, input_shape)
# allow cell (if layer) to build before we set or validate state_spec
if isinstance(self.cell, Layer):
if constants_shape is not None:
self.cell.build([step_input_shape] + constants_shape)
else:
self.cell.build(step_input_shape)
# set or validate state_spec
if _is_multiple_state(self.cell.state_size):
state_size = list(self.cell.state_size)
else:
state_size = [self.cell.state_size]
if self.state_spec is not None:
# initial_state was passed in call, check compatibility
self._validate_state_spec(state_size, self.state_spec)
else:
self.state_spec = [
InputSpec(shape=[None] + tensor_shape.as_shape(dim).as_list())
for dim in state_size
]
if self.stateful:
self.reset_states()
self.built = True
@staticmethod
def _validate_state_spec(cell_state_sizes, init_state_specs):
"""Validate the state spec between the initial_state and the state_size.
Args:
cell_state_sizes: list, the `state_size` attribute from the cell.
init_state_specs: list, the `state_spec` from the initial_state that is
passed in call()
Raises:
ValueError: When initial state spec is not compatible with the state size.
"""
validation_error = ValueError(
'An `initial_state` was passed that is not compatible with '
'`cell.state_size`. Received `state_spec`={}; '
'however `cell.state_size` is '
'{}'.format(init_state_specs, cell_state_sizes))
if len(cell_state_sizes) == len(init_state_specs):
for i in range(len(cell_state_sizes)):
if not tensor_shape.TensorShape(
# Ignore the first axis for init_state which is for batch
init_state_specs[i].shape[1:]).is_compatible_with(
tensor_shape.TensorShape(cell_state_sizes[i])):
raise validation_error
else:
raise validation_error
def get_initial_state(self, inputs):
get_initial_state_fn = getattr(self.cell, 'get_initial_state', None)
if nest.is_sequence(inputs):
# The input are nested sequences. Use the first element in the seq to get
# batch size and dtype.
inputs = nest.flatten(inputs)[0]
input_shape = array_ops.shape(inputs)
batch_size = input_shape[1] if self.time_major else input_shape[0]
dtype = inputs.dtype
if get_initial_state_fn:
init_state = get_initial_state_fn(
inputs=None, batch_size=batch_size, dtype=dtype)
else:
init_state = _generate_zero_filled_state(batch_size, self.cell.state_size,
dtype)
# Keras RNN expect the states in a list, even if it's a single state tensor.
if not nest.is_sequence(init_state):
init_state = [init_state]
# Force the state to be a list in case it is a namedtuple eg LSTMStateTuple.
return list(init_state)
def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
inputs, initial_state, constants = _standardize_args(inputs,
initial_state,
constants,
self._num_constants,
self._num_inputs)
# in case the real inputs is a nested structure, set the size of flatten
# input so that we can distinguish between real inputs, initial_state and
# constants.
self._num_inputs = len(nest.flatten(inputs))
if initial_state is None and constants is None:
return super(RNN, self).__call__(inputs, **kwargs)
# If any of `initial_state` or `constants` are specified and are Keras
# tensors, then add them to the inputs and temporarily modify the
# input_spec to include them.
additional_inputs = []
additional_specs = []
if initial_state is not None:
additional_inputs += initial_state
self.state_spec = [
InputSpec(shape=K.int_shape(state)) for state in initial_state
]
additional_specs += self.state_spec
if constants is not None:
additional_inputs += constants
self.constants_spec = [
InputSpec(shape=K.int_shape(constant)) for constant in constants
]
self._num_constants = len(constants)
additional_specs += self.constants_spec
# at this point additional_inputs cannot be empty
is_keras_tensor = K.is_keras_tensor(additional_inputs[0])
for tensor in additional_inputs:
if K.is_keras_tensor(tensor) != is_keras_tensor:
raise ValueError('The initial state or constants of an RNN'
' layer cannot be specified with a mix of'
' Keras tensors and non-Keras tensors'
' (a "Keras tensor" is a tensor that was'
' returned by a Keras layer, or by `Input`)')
if is_keras_tensor:
# Compute the full input spec, including state and constants
full_input = [inputs] + additional_inputs
# The original input_spec is None since there could be a nested tensor
# input. Update the input_spec to match the inputs.
full_input_spec = [None for _ in range(len(nest.flatten(inputs)))
] + additional_specs
# Perform the call with temporarily replaced input_spec
original_input_spec = self.input_spec
self.input_spec = full_input_spec
output = super(RNN, self).__call__(full_input, **kwargs)
self.input_spec = original_input_spec
return output
else:
if initial_state is not None:
kwargs['initial_state'] = initial_state
if constants is not None:
kwargs['constants'] = constants
return super(RNN, self).__call__(inputs, **kwargs)
def call(self,
inputs,
mask=None,
training=None,
initial_state=None,
constants=None):
inputs, initial_state, constants = self._process_inputs(
inputs, initial_state, constants)
if isinstance(mask, list):
mask = mask[0]
if nest.is_sequence(inputs):
# In the case of nested input, use the first element for shape check.
input_shape = K.int_shape(nest.flatten(inputs)[0])
else:
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if self.time_major else input_shape[1]
if self.unroll and timesteps in [None, 1]:
raise ValueError('Cannot unroll a RNN if the '
'time dimension is undefined or equal to 1. \n'
'- If using a Sequential model, '
'specify the time dimension by passing '
'an `input_shape` or `batch_input_shape` '
'argument to your first layer. If your '
'first layer is an Embedding, you can '
'also use the `input_length` argument.\n'
'- If using the functional API, specify '
'the time dimension by passing a `shape` '
'or `batch_shape` argument to your Input layer.')
kwargs = {}
if generic_utils.has_arg(self.cell.call, 'training'):
kwargs['training'] = training
# TF RNN cells expect single tensor as state instead of list wrapped tensor.
is_tf_rnn_cell = getattr(self.cell, '_is_tf_rnn_cell', None) is not None
if constants:
if not generic_utils.has_arg(self.cell.call, 'constants'):
raise ValueError('RNN cell does not support constants')
def step(inputs, states):
constants = states[-self._num_constants:] # pylint: disable=invalid-unary-operand-type
states = states[:-self._num_constants] # pylint: disable=invalid-unary-operand-type
states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
output, new_states = self.cell.call(
inputs, states, constants=constants, **kwargs)
if not nest.is_sequence(new_states):
new_states = [new_states]
return output, new_states
else:
def step(inputs, states):
states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
output, new_states = self.cell.call(inputs, states, **kwargs)
if not nest.is_sequence(new_states):
new_states = [new_states]
return output, new_states
last_output, outputs, states = K.rnn(
step,
inputs,
initial_state,
constants=constants,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=timesteps,
time_major=self.time_major,
zero_output_for_mask=self.zero_output_for_mask)
if self.stateful:
updates = []
for i in range(len(states)):
updates.append(state_ops.assign(self.states[i], states[i]))
self.add_update(updates, inputs)
if self.return_sequences:
output = outputs
else:
output = last_output
if self.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return generic_utils.to_list(output) + states
else:
return output
def _process_inputs(self, inputs, initial_state, constants):
# input shape: `(samples, time (padded with zeros), input_dim)`
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
if isinstance(inputs, list):
      # Get initial_state from the full input spec, as it could be copied to
      # multiple GPUs.
if self._num_constants is None:
initial_state = inputs[1:]
else:
initial_state = inputs[1:-self._num_constants]
constants = inputs[-self._num_constants:]
if len(initial_state) == 0:
initial_state = None
inputs = inputs[0]
if initial_state is not None:
pass
elif self.stateful:
initial_state = self.states
else:
initial_state = self.get_initial_state(inputs)
if len(initial_state) != len(self.states):
raise ValueError('Layer has ' + str(len(self.states)) +
' states but was passed ' + str(len(initial_state)) +
' initial states.')
return inputs, initial_state, constants
def reset_states(self, states=None):
if not self.stateful:
raise AttributeError('Layer must be stateful.')
if self.time_major:
batch_size = self.input_spec[0].shape[1]
else:
batch_size = self.input_spec[0].shape[0]
if not batch_size:
      raise ValueError('If an RNN is stateful, it needs to know '
'its batch size. Specify the batch size '
'of your input tensors: \n'
'- If using a Sequential model, '
'specify the batch size by passing '
'a `batch_input_shape` '
'argument to your first layer.\n'
'- If using the functional API, specify '
'the batch size by passing a '
'`batch_shape` argument to your Input layer.')
# initialize state if None
if self.states[0] is None:
if _is_multiple_state(self.cell.state_size):
self.states = [
K.zeros([batch_size] + tensor_shape.as_shape(dim).as_list())
for dim in self.cell.state_size
]
else:
self.states = [
K.zeros([batch_size] +
tensor_shape.as_shape(self.cell.state_size).as_list())
]
elif states is None:
if _is_multiple_state(self.cell.state_size):
for state, dim in zip(self.states, self.cell.state_size):
K.set_value(state,
np.zeros([batch_size] +
tensor_shape.as_shape(dim).as_list()))
else:
K.set_value(self.states[0], np.zeros(
[batch_size] +
tensor_shape.as_shape(self.cell.state_size).as_list()))
else:
if not isinstance(states, (list, tuple)):
states = [states]
if len(states) != len(self.states):
raise ValueError('Layer ' + self.name + ' expects ' +
str(len(self.states)) + ' states, '
'but it received ' + str(len(states)) +
' state values. Input received: ' + str(states))
for index, (value, state) in enumerate(zip(states, self.states)):
if _is_multiple_state(self.cell.state_size):
dim = self.cell.state_size[index]
else:
dim = self.cell.state_size
if value.shape != tuple([batch_size] +
tensor_shape.as_shape(dim).as_list()):
raise ValueError(
'State ' + str(index) + ' is incompatible with layer ' +
self.name + ': expected shape=' + str(
(batch_size, dim)) + ', found shape=' + str(value.shape))
# TODO(fchollet): consider batch calls to `set_value`.
K.set_value(state, value)
def get_config(self):
config = {
'return_sequences': self.return_sequences,
'return_state': self.return_state,
'go_backwards': self.go_backwards,
'stateful': self.stateful,
'unroll': self.unroll,
'time_major': self.time_major
}
if self._num_constants is not None:
config['num_constants'] = self._num_constants
if self.zero_output_for_mask:
config['zero_output_for_mask'] = self.zero_output_for_mask
cell_config = self.cell.get_config()
config['cell'] = {
'class_name': self.cell.__class__.__name__,
'config': cell_config
}
base_config = super(RNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
cell = deserialize_layer(config.pop('cell'), custom_objects=custom_objects)
num_constants = config.pop('num_constants', None)
layer = cls(cell, **config)
layer._num_constants = num_constants
return layer
@property
def trainable_weights(self):
if not self.trainable:
return []
if isinstance(self.cell, Layer):
return self.cell.trainable_weights
return []
@property
def non_trainable_weights(self):
if isinstance(self.cell, Layer):
if not self.trainable:
return self.cell.weights
return self.cell.non_trainable_weights
return []
@property
def losses(self):
layer_losses = super(RNN, self).losses
if isinstance(self.cell, Layer):
return self.cell.losses + layer_losses
return layer_losses
@property
def updates(self):
updates = []
if isinstance(self.cell, Layer):
updates += self.cell.updates
return updates + self._updates
@tf_export('keras.layers.SimpleRNNCell')
class SimpleRNNCell(Layer):
"""Cell class for SimpleRNN.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
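  Example (a minimal sketch assuming `import tensorflow as tf`; shapes are
  illustrative):
  ```python
  # A SimpleRNNCell handles one timestep; wrap it in an RNN layer to
  # iterate over a whole sequence.
  cell = tf.keras.layers.SimpleRNNCell(32)
  layer = tf.keras.layers.RNN(cell)
  x = tf.keras.Input(shape=(10, 8))   # (timesteps=10, input_dim=8)
  y = layer(x)                        # output shape: (batch, 32)
  ```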
"""
def __init__(self,
units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(SimpleRNNCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_size = self.units
self.output_size = self.units
self._dropout_mask = None
self._recurrent_dropout_mask = None
@tf_utils.shape_type_conversion
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.units,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
prev_output = states[0]
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
array_ops.ones_like(inputs),
self.dropout,
training=training)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
array_ops.ones_like(prev_output),
self.recurrent_dropout,
training=training)
dp_mask = self._dropout_mask
rec_dp_mask = self._recurrent_dropout_mask
if dp_mask is not None:
h = K.dot(inputs * dp_mask, self.kernel)
else:
h = K.dot(inputs, self.kernel)
if self.bias is not None:
h = K.bias_add(h, self.bias)
if rec_dp_mask is not None:
prev_output *= rec_dp_mask
output = h + K.dot(prev_output, self.recurrent_kernel)
if self.activation is not None:
output = self.activation(output)
return output, [output]
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout
}
base_config = super(SimpleRNNCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_export('keras.layers.SimpleRNN')
class SimpleRNN(RNN):
"""Fully-connected RNN where the output is to be fed back to input.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
      Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
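  Example (a minimal sketch assuming `import tensorflow as tf`; shapes are
  illustrative):
  ```python
  # Return the full output sequence so another recurrent layer can be
  # stacked on top.
  layer = tf.keras.layers.SimpleRNN(64, return_sequences=True)
  x = tf.keras.Input(shape=(10, 8))   # (timesteps=10, input_dim=8)
  y = layer(x)                        # output shape: (batch, 10, 64)
  ```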
"""
def __init__(self,
units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if 'implementation' in kwargs:
kwargs.pop('implementation')
logging.warning('The `implementation` argument '
'in `SimpleRNN` has been deprecated. '
'Please remove it from your layer call.')
cell = SimpleRNNCell(
units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout)
super(SimpleRNN, self).__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
self.cell._dropout_mask = None
self.cell._recurrent_dropout_mask = None
return super(SimpleRNN, self).call(
inputs, mask=mask, training=training, initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout
}
base_config = super(SimpleRNN, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config:
config.pop('implementation')
return cls(**config)
@tf_export('keras.layers.GRUCell')
class GRUCell(Layer):
"""Cell class for the GRU layer.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before" (default),
True = "after" (CuDNN compatible).
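  Example (a minimal sketch assuming `import tensorflow as tf`; shapes are
  illustrative):
  ```python
  # Stack two GRU cells inside a single RNN layer; the layer output has
  # the size of the last cell.
  cells = [tf.keras.layers.GRUCell(32), tf.keras.layers.GRUCell(64)]
  layer = tf.keras.layers.RNN(cells)
  x = tf.keras.Input(shape=(10, 8))
  y = layer(x)                        # output shape: (batch, 64)
  ```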
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
reset_after=False,
**kwargs):
super(GRUCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.implementation = implementation
self.reset_after = reset_after
self.state_size = self.units
self.output_size = self.units
self._dropout_mask = None
self._recurrent_dropout_mask = None
@tf_utils.shape_type_conversion
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(input_dim, self.units * 3),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if not self.reset_after:
bias_shape = (3 * self.units,)
else:
# separate biases for input and recurrent kernels
# Note: the shape is intentionally different from CuDNNGRU biases
# `(2 * 3 * self.units,)`, so that we can distinguish the classes
# when loading and converting saved weights.
bias_shape = (2, 3 * self.units)
self.bias = self.add_weight(shape=bias_shape,
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
h_tm1 = states[0] # previous memory
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
array_ops.ones_like(inputs),
self.dropout,
training=training,
count=3)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
array_ops.ones_like(h_tm1),
self.recurrent_dropout,
training=training,
count=3)
# dropout matrices for input units
dp_mask = self._dropout_mask
# dropout matrices for recurrent units
rec_dp_mask = self._recurrent_dropout_mask
if self.use_bias:
if not self.reset_after:
input_bias, recurrent_bias = self.bias, None
else:
input_bias, recurrent_bias = array_ops.unstack(self.bias)
if self.implementation == 1:
if 0. < self.dropout < 1.:
inputs_z = inputs * dp_mask[0]
inputs_r = inputs * dp_mask[1]
inputs_h = inputs * dp_mask[2]
else:
inputs_z = inputs
inputs_r = inputs
inputs_h = inputs
x_z = K.dot(inputs_z, self.kernel[:, :self.units])
x_r = K.dot(inputs_r, self.kernel[:, self.units:self.units * 2])
x_h = K.dot(inputs_h, self.kernel[:, self.units * 2:])
if self.use_bias:
x_z = K.bias_add(x_z, input_bias[:self.units])
x_r = K.bias_add(x_r, input_bias[self.units: self.units * 2])
x_h = K.bias_add(x_h, input_bias[self.units * 2:])
if 0. < self.recurrent_dropout < 1.:
h_tm1_z = h_tm1 * rec_dp_mask[0]
h_tm1_r = h_tm1 * rec_dp_mask[1]
h_tm1_h = h_tm1 * rec_dp_mask[2]
else:
h_tm1_z = h_tm1
h_tm1_r = h_tm1
h_tm1_h = h_tm1
recurrent_z = K.dot(h_tm1_z, self.recurrent_kernel[:, :self.units])
recurrent_r = K.dot(h_tm1_r,
self.recurrent_kernel[:, self.units:self.units * 2])
if self.reset_after and self.use_bias:
recurrent_z = K.bias_add(recurrent_z, recurrent_bias[:self.units])
recurrent_r = K.bias_add(recurrent_r,
recurrent_bias[self.units:self.units * 2])
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
# reset gate applied after/before matrix multiplication
if self.reset_after:
recurrent_h = K.dot(h_tm1_h, self.recurrent_kernel[:, self.units * 2:])
if self.use_bias:
recurrent_h = K.bias_add(recurrent_h, recurrent_bias[self.units * 2:])
recurrent_h = r * recurrent_h
else:
recurrent_h = K.dot(r * h_tm1_h,
self.recurrent_kernel[:, self.units * 2:])
hh = self.activation(x_h + recurrent_h)
else:
if 0. < self.dropout < 1.:
inputs *= dp_mask[0]
# inputs projected by all gate matrices at once
matrix_x = K.dot(inputs, self.kernel)
if self.use_bias:
# biases: bias_z_i, bias_r_i, bias_h_i
matrix_x = K.bias_add(matrix_x, input_bias)
x_z = matrix_x[:, :self.units]
x_r = matrix_x[:, self.units: 2 * self.units]
x_h = matrix_x[:, 2 * self.units:]
if 0. < self.recurrent_dropout < 1.:
h_tm1 *= rec_dp_mask[0]
if self.reset_after:
# hidden state projected by all gate matrices at once
matrix_inner = K.dot(h_tm1, self.recurrent_kernel)
if self.use_bias:
matrix_inner = K.bias_add(matrix_inner, recurrent_bias)
else:
# hidden state projected separately for update/reset and new
matrix_inner = K.dot(h_tm1, self.recurrent_kernel[:, :2 * self.units])
recurrent_z = matrix_inner[:, :self.units]
recurrent_r = matrix_inner[:, self.units:2 * self.units]
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
if self.reset_after:
recurrent_h = r * matrix_inner[:, 2 * self.units:]
else:
recurrent_h = K.dot(r * h_tm1,
self.recurrent_kernel[:, 2 * self.units:])
hh = self.activation(x_h + recurrent_h)
# previous and candidate state mixed by update gate
h = z * h_tm1 + (1 - z) * hh
return h, [h]
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation,
'reset_after': self.reset_after
}
base_config = super(GRUCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
@tf_export('keras.layers.GRU')
class GRU(RNN):
"""Gated Recurrent Unit - Cho et al. 2014.
  There are two variants. The default one is based on 1406.1078v3 and
  has the reset gate applied to the hidden state before the matrix
  multiplication. The other one is based on the original 1406.1078v1 and
  has the order reversed.
  The second variant is compatible with CuDNNGRU (GPU-only) and allows
  inference on CPU. Thus it has separate biases for `kernel` and
  `recurrent_kernel`. Use `reset_after=True` and
  `recurrent_activation='sigmoid'`.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
      Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before" (default),
True = "after" (CuDNN compatible).
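  Example (a minimal sketch assuming `import tensorflow as tf`; shapes are
  illustrative):
  ```python
  # reset_after=True with recurrent_activation='sigmoid' selects the
  # CuDNN-compatible variant described above.
  layer = tf.keras.layers.GRU(
      64, reset_after=True, recurrent_activation='sigmoid',
      return_state=True)
  x = tf.keras.Input(shape=(10, 8))
  output, state = layer(x)            # each has shape (batch, 64)
  ```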
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
reset_after=False,
**kwargs):
if implementation == 0:
      logging.warning('`implementation=0` has been deprecated, '
                      'and now defaults to `implementation=1`. '
                      'Please update your layer call.')
cell = GRUCell(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
reset_after=reset_after)
super(GRU, self).__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
self.cell._dropout_mask = None
self.cell._recurrent_dropout_mask = None
return super(GRU, self).call(
inputs, mask=mask, training=training, initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
@property
def reset_after(self):
return self.cell.reset_after
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation,
'reset_after':
self.reset_after
}
base_config = super(GRU, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
@tf_export('keras.layers.LSTMCell')
class LSTMCell(Layer):
"""Cell class for the LSTM layer.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
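  Example (a minimal sketch assuming `import tensorflow as tf`; shapes are
  illustrative):
  ```python
  # The cell keeps two states per sample (hidden state h and carry
  # state c), so return_state yields three tensors from the RNN layer.
  layer = tf.keras.layers.RNN(tf.keras.layers.LSTMCell(32),
                              return_state=True)
  x = tf.keras.Input(shape=(10, 8))
  output, state_h, state_c = layer(x)
  ```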
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
**kwargs):
super(LSTMCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.implementation = implementation
self.state_size = [self.units, self.units]
self.output_size = self.units
self._dropout_mask = None
self._recurrent_dropout_mask = None
@tf_utils.shape_type_conversion
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.units,), *args, **kwargs),
initializers.Ones()((self.units,), *args, **kwargs),
self.bias_initializer((self.units * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.units * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def _compute_carry_and_output(self, x, h_tm1, c_tm1):
"""Computes carry and output using split kernels."""
x_i, x_f, x_c, x_o = x
h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
i = self.recurrent_activation(
x_i + K.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]))
f = self.recurrent_activation(x_f + K.dot(
h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]))
c = f * c_tm1 + i * self.activation(x_c + K.dot(
h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))
o = self.recurrent_activation(
x_o + K.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]))
return c, o
def _compute_carry_and_output_fused(self, z, c_tm1):
"""Computes carry and output using fused kernels."""
z0, z1, z2, z3 = z
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
return c, o
def call(self, inputs, states, training=None):
if 0 < self.dropout < 1 and self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
array_ops.ones_like(inputs),
self.dropout,
training=training,
count=4)
if (0 < self.recurrent_dropout < 1 and
self._recurrent_dropout_mask is None):
self._recurrent_dropout_mask = _generate_dropout_mask(
array_ops.ones_like(states[0]),
self.recurrent_dropout,
training=training,
count=4)
# dropout matrices for input units
dp_mask = self._dropout_mask
# dropout matrices for recurrent units
rec_dp_mask = self._recurrent_dropout_mask
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
if self.implementation == 1:
if 0 < self.dropout < 1.:
inputs_i = inputs * dp_mask[0]
inputs_f = inputs * dp_mask[1]
inputs_c = inputs * dp_mask[2]
inputs_o = inputs * dp_mask[3]
else:
inputs_i = inputs
inputs_f = inputs
inputs_c = inputs
inputs_o = inputs
x_i = K.dot(inputs_i, self.kernel[:, :self.units])
x_f = K.dot(inputs_f, self.kernel[:, self.units:self.units * 2])
x_c = K.dot(inputs_c, self.kernel[:, self.units * 2:self.units * 3])
x_o = K.dot(inputs_o, self.kernel[:, self.units * 3:])
if self.use_bias:
x_i = K.bias_add(x_i, self.bias[:self.units])
x_f = K.bias_add(x_f, self.bias[self.units:self.units * 2])
x_c = K.bias_add(x_c, self.bias[self.units * 2:self.units * 3])
x_o = K.bias_add(x_o, self.bias[self.units * 3:])
if 0 < self.recurrent_dropout < 1.:
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
x = (x_i, x_f, x_c, x_o)
h_tm1 = (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o)
c, o = self._compute_carry_and_output(x, h_tm1, c_tm1)
else:
if 0. < self.dropout < 1.:
inputs *= dp_mask[0]
z = K.dot(inputs, self.kernel)
if 0. < self.recurrent_dropout < 1.:
h_tm1 *= rec_dp_mask[0]
z += K.dot(h_tm1, self.recurrent_kernel)
if self.use_bias:
z = K.bias_add(z, self.bias)
z0 = z[:, :self.units]
z1 = z[:, self.units:2 * self.units]
z2 = z[:, 2 * self.units:3 * self.units]
z3 = z[:, 3 * self.units:]
z = (z0, z1, z2, z3)
c, o = self._compute_carry_and_output_fused(z, c_tm1)
h = o * self.activation(c)
return h, [h, c]
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'unit_forget_bias':
self.unit_forget_bias,
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation
}
base_config = super(LSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return list(_generate_zero_filled_state_for_cell(
self, inputs, batch_size, dtype))
@tf_export('keras.experimental.PeepholeLSTMCell')
class PeepholeLSTMCell(LSTMCell):
"""Equivalent to LSTMCell class but adds peephole connections.
Peephole connections allow the gates to utilize the previous internal state as
well as the previous hidden state (which is what LSTMCell is limited to).
  This allows PeepholeLSTMCell to learn precise timings better than LSTMCell.
From [Gers et al.](http://www.jmlr.org/papers/volume3/gers02a/gers02a.pdf):
"We find that LSTM augmented by 'peephole connections' from its internal
cells to its multiplicative gates can learn the fine distinction between
sequences of spikes spaced either 50 or 49 time steps apart without the help
of any short training exemplars."
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
Example:
```python
# Create 2 PeepholeLSTMCells
peephole_lstm_cells = [PeepholeLSTMCell(size) for size in [128, 256]]
# Create a layer composed sequentially of the peephole LSTM cells.
layer = RNN(peephole_lstm_cells)
input = keras.Input((timesteps, input_dim))
output = layer(input)
```
"""
def build(self, input_shape):
super(PeepholeLSTMCell, self).build(input_shape)
# The following are the weight matrices for the peephole connections. These
# are multiplied with the previous internal state during the computation of
# carry and output.
self.input_gate_peephole_weights = self.add_weight(
shape=(self.units,),
name='input_gate_peephole_weights',
initializer=self.kernel_initializer)
self.forget_gate_peephole_weights = self.add_weight(
shape=(self.units,),
name='forget_gate_peephole_weights',
initializer=self.kernel_initializer)
self.output_gate_peephole_weights = self.add_weight(
shape=(self.units,),
name='output_gate_peephole_weights',
initializer=self.kernel_initializer)
def _compute_carry_and_output(self, x, h_tm1, c_tm1):
x_i, x_f, x_c, x_o = x
h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
i = self.recurrent_activation(
x_i + K.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]) +
self.input_gate_peephole_weights * c_tm1)
f = self.recurrent_activation(x_f + K.dot(
h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]) +
self.forget_gate_peephole_weights * c_tm1)
c = f * c_tm1 + i * self.activation(x_c + K.dot(
h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))
o = self.recurrent_activation(
x_o + K.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]) +
self.output_gate_peephole_weights * c)
return c, o
def _compute_carry_and_output_fused(self, z, c_tm1):
z0, z1, z2, z3 = z
i = self.recurrent_activation(z0 +
self.input_gate_peephole_weights * c_tm1)
f = self.recurrent_activation(z1 +
self.forget_gate_peephole_weights * c_tm1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3 + self.output_gate_peephole_weights * c)
return c, o
@tf_export(v1=['keras.layers.LSTM'])
class LSTM(RNN):
"""Long Short-Term Memory layer - Hochreiter 1997.
  Note that this layer is not optimized for performance on GPU. Please use
`tf.keras.layers.CuDNNLSTM` for better performance on GPU.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
      used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
    return_sequences: Boolean. Whether to return the last output
      in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
      Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
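  Example (a minimal sketch assuming `import tensorflow as tf`; shapes are
  illustrative):
  ```python
  # Two stacked LSTM layers: the first returns the full sequence so the
  # second can consume it timestep by timestep.
  x = tf.keras.Input(shape=(10, 8))
  h = tf.keras.layers.LSTM(64, return_sequences=True)(x)
  y = tf.keras.layers.LSTM(32)(h)     # output shape: (batch, 32)
  ```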
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if implementation == 0:
      logging.warning('`implementation=0` has been deprecated, '
                      'and now defaults to `implementation=1`. '
                      'Please update your layer call.')
if context.executing_eagerly() and context.num_gpus() > 0:
logging.warn('%s: Note that this layer is not optimized for performance. '
'Please use tf.keras.layers.CuDNNLSTM for better '
'performance on GPU.', self)
cell = LSTMCell(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
unit_forget_bias=unit_forget_bias,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation)
super(LSTM, self).__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
self.cell._dropout_mask = None
self.cell._recurrent_dropout_mask = None
return super(LSTM, self).call(
inputs, mask=mask, training=training, initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'unit_forget_bias':
self.unit_forget_bias,
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation
}
base_config = super(LSTM, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
@tf_export('keras.layers.LSTM', v1=[])
class UnifiedLSTM(LSTM):
"""Long Short-Term Memory layer - Hochreiter 1997.
  `UnifiedLSTM` unifies the implementations of the standard `LSTM` layer and
  the `CuDNNLSTM` layer. Based on the available runtime hardware and
  constraints, `UnifiedLSTM` will choose different implementations to maximize
  the performance. For instance, if a GPU is available and all the parameters
  meet the requirements of the CuDNN kernel, `UnifiedLSTM` will use the CuDNN
  kernel for the calculation.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`). If you pass `None`, no activation
is applied (ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use for the recurrent step.
Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
applied (ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix, used for
      the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
      matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean. If True, add 1 to the bias of the forget gate at
initialization. Setting it to true will also force
`bias_initializer="zeros"`. This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the
      layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
recurrent_constraint: Constraint function applied to the `recurrent_kernel`
weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1. Fraction of the units to drop for the linear
transformation of the inputs.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2. Mode 1 will structure
its operations as a larger number of smaller dot products and additions,
whereas mode 2 will batch them into fewer, larger operations. These modes
will have different performance profiles on different hardware and for
different applications.
    return_sequences: Boolean. Whether to return the last output in the output
      sequence, or the full sequence.
return_state: Boolean. Whether to return the last state in addition to the
output.
go_backwards: Boolean (default False). If True, process the input sequence
backwards and return the reversed sequence.
stateful: Boolean (default False). If True, the last state for each sample
at index i in a batch will be used as initial state for the sample of
index i in the following batch.
unroll: Boolean (default False). If True, the network will be unrolled, else
a symbolic loop will be used. Unrolling can speed-up a RNN, although it
tends to be more memory-intensive. Unrolling is only suitable for short
sequences.
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
time_major=False,
unroll=False,
**kwargs):
# return_runtime is a flag for testing, which shows the real backend
# implementation chosen by grappler in graph mode.
self.return_runtime = kwargs.pop('return_runtime', False)
super(UnifiedLSTM, self).__init__(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
time_major=time_major,
unroll=unroll,
**kwargs)
self.state_spec = [
InputSpec(shape=(None, dim)) for dim in (self.units, self.units)
]
self._dropout_mask = None
self.could_use_cudnn = (
activation == 'tanh' and recurrent_activation == 'sigmoid' and
recurrent_dropout == 0 and not unroll and use_bias)
def call(self, inputs, mask=None, training=None, initial_state=None):
# LSTM does not support constants. Ignore them during processing.
inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None)
if isinstance(mask, list):
mask = mask[0]
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if self.time_major else input_shape[1]
if mask is not None or not self.could_use_cudnn:
# CuDNN does not support masking, fall back to use the normal LSTM.
kwargs = {'training': training}
def step(inputs, states):
return self.cell.call(inputs, states, **kwargs)
last_output, outputs, states = K.rnn(
step,
inputs,
initial_state,
constants=None,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=timesteps,
time_major=self.time_major,
zero_output_for_mask=self.zero_output_for_mask)
runtime = constant_op.constant(
'unknown', dtype=dtypes.string, name='runtime')
else:
# Use the new defun approach for backend implementation swap.
# Note that different implementations need to have the same function
# signature, e.g., the tensor parameters need to have the same shapes and dtypes.
# Since CuDNN has an extra set of biases, those biases will be passed to
# both normal and CuDNN implementations.
if self.go_backwards:
# Reverse time axis.
inputs = K.reverse(inputs, 0 if self.time_major else 1)
if 0 < self.dropout < 1:
if self._dropout_mask is None:
self._dropout_mask = _generate_dropout_mask(
array_ops.ones_like(inputs),
self.dropout,
training=training,
count=4)
inputs *= self._dropout_mask[0]
# Each time a defun function is called, we will give a unique identifiable
# API name, so that the grappler won't get confused when it sees multiple
# LSTM layers added into the same graph, and it will be able to pair up the
# different implementations across them.
experimental_api_name = 'lstm_' + str(uuid.uuid4())
standard_lstm_attributes = {
'experimental_api_implements': experimental_api_name,
'experimental_api_preferred_device': 'CPU',
}
cudnn_lstm_attributes = {
'experimental_api_implements': experimental_api_name,
'experimental_api_preferred_device': 'GPU',
}
defun_standard_lstm = function.defun_with_attributes(
standard_lstm, attributes=standard_lstm_attributes)
defun_cudnn_lstm = function.defun_with_attributes(
cudnn_lstm, attributes=cudnn_lstm_attributes)
if ops.executing_eagerly_outside_functions():
# Under eager context, the device placement is already known. Prefer the
# GPU implementation here.
if context.num_gpus() > 0:
last_output, outputs, new_h, new_c, runtime = defun_cudnn_lstm(
inputs, initial_state[0], initial_state[1], self.cell.kernel,
self.cell.recurrent_kernel, self.cell.bias, self.time_major)
else:
last_output, outputs, new_h, new_c, runtime = defun_standard_lstm(
inputs, initial_state[0], initial_state[1], self.cell.kernel,
self.cell.recurrent_kernel, self.cell.bias, self.activation,
self.recurrent_activation, self.time_major)
else:
# Call the normal LSTM impl and register the CuDNN impl function. The
# grappler will kick in during session execution to optimize the graph.
last_output, outputs, new_h, new_c, runtime = defun_standard_lstm(
inputs, initial_state[0], initial_state[1], self.cell.kernel,
self.cell.recurrent_kernel, self.cell.bias, self.activation,
self.recurrent_activation, self.time_major)
function.register(defun_cudnn_lstm, inputs, initial_state[0],
initial_state[1], self.cell.kernel,
self.cell.recurrent_kernel, self.cell.bias,
self.time_major)
states = [new_h, new_c]
if self.stateful:
updates = []
for i in range(len(states)):
updates.append(state_ops.assign(self.states[i], states[i]))
self.add_update(updates, inputs)
if self.return_sequences:
output = outputs
else:
output = last_output
if self.return_state:
return [output] + states
elif self.return_runtime:
return output, runtime
else:
return output
def _canonical_to_params(weights, biases, shape, transpose_weights=False):
"""Utility function convert variable to CuDNN compatible parameter.
Note that Keras weights for kernels are different from the CuDNN format. Eg.:
```
Keras CuDNN
[[0, 1, 2], <---> [[0, 2, 4],
[3, 4, 5]] [1, 3, 5]]
```
If the input weights need to be in a unified format, then set
`transpose_weights=True` to convert the weights.
Args:
weights: list of weights for the individual kernels and recurrent kernels.
biases: list of biases for the individual gates.
shape: the shape for the converted variables that will be fed to CuDNN.
transpose_weights: boolean, whether to transpose the weights.
Returns:
The converted weights that can be fed to CuDNN ops as params.
"""
def convert(w):
return array_ops.transpose(w) if transpose_weights else w
weights = [array_ops.reshape(convert(x), shape) for x in weights]
biases = [array_ops.reshape(x, shape) for x in biases]
return array_ops.concat(weights + biases, axis=0)
def standard_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias,
activation, recurrent_activation, time_major):
"""LSTM with standard kernel implementation.
This implementation can be run on all types of hardware.
This implementation lifts out all the layer weights and makes them function
parameters. It has the same number of tensor input params as the CuDNN
counterpart. The RNN step logic has been simplified, e.g. dropout and masking are
removed since the CuDNN implementation does not support them.
Note that the first half of the bias tensor should be ignored by this impl.
The CuDNN impl needs an extra set of input gate biases. In order to make both
functions take the same shape of parameters, that extra set of biases is also fed
here.
Args:
inputs: input tensor of LSTM layer.
init_h: initial state tensor for the cell output.
init_c: initial state tensor for the cell hidden state.
kernel: weights for cell kernel.
recurrent_kernel: weights for cell recurrent kernel.
bias: weights for cell kernel bias and recurrent bias. Only recurrent bias
is used in this case.
activation: Activation function to use for output.
recurrent_activation: Activation function to use for hidden recurrent state.
time_major: boolean, whether the inputs are in the format of
[time, batch, feature] or [batch, time, feature].
Returns:
last_output: output tensor for the last timestep, which has shape
[batch, units].
outputs: output tensor for all timesteps, which has shape
[batch, time, units].
state_0: the cell output, which has the same shape as init_h.
state_1: the cell hidden state, which has the same shape as init_c.
runtime: constant string tensor which indicates the real runtime hardware. This
value is for testing purposes and should be used by the user.
"""
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if time_major else input_shape[1]
def step(cell_inputs, cell_states):
"""Step function that will be used by Keras RNN backend."""
h_tm1 = cell_states[0] # previous memory state
c_tm1 = cell_states[1] # previous carry state
z = K.dot(cell_inputs, kernel)
z += K.dot(h_tm1, recurrent_kernel)
z = K.bias_add(z, bias)
z0, z1, z2, z3 = array_ops.split(z, 4, axis=1)
i = recurrent_activation(z0)
f = recurrent_activation(z1)
c = f * c_tm1 + i * activation(z2)
o = recurrent_activation(z3)
h = o * activation(c)
return h, [h, c]
last_output, outputs, new_states = K.rnn(
step,
inputs, [init_h, init_c],
constants=None,
unroll=False,
time_major=time_major,
input_length=timesteps)
return last_output, outputs, new_states[0], new_states[
1], constant_op.constant('cpu', dtype=dtypes.string, name='runtime')
def cudnn_lstm(inputs, input_h, input_c, kernel, recurrent_kernel, bias,
time_major):
"""LSTM with CuDNN implementation which is only available for GPU."""
if not time_major:
inputs = array_ops.transpose(inputs, perm=(1, 0, 2))
input_h = array_ops.expand_dims(input_h, axis=0)
input_c = array_ops.expand_dims(input_c, axis=0)
weights = array_ops.split(kernel, 4, axis=1)
weights += array_ops.split(recurrent_kernel, 4, axis=1)
# CuDNN has an extra set of biases for the inputs; we disable them (setting them to 0)
# so that mathematically it is the same as the canonical LSTM implementation.
full_bias = array_ops.concat((array_ops.zeros_like(bias), bias), 0)
params = _canonical_to_params(
weights=weights,
biases=array_ops.split(full_bias, 8),
shape=constant_op.constant([-1]),
transpose_weights=True)
outputs, h, c, _ = gen_cudnn_rnn_ops.cudnn_rnn(
inputs, input_h=input_h, input_c=input_c, params=params, is_training=True)
last_output = outputs[-1]
if not time_major:
outputs = array_ops.transpose(outputs, perm=[1, 0, 2])
h = h[0]
c = c[0]
return last_output, outputs, h, c, constant_op.constant(
'cudnn', dtype=dtypes.string, name='runtime')
def _generate_dropout_mask(ones, rate, training=None, count=1):
def dropped_inputs():
return K.dropout(ones, rate)
if count > 1:
return [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(count)
]
return K.in_train_phase(dropped_inputs, ones, training=training)
def _standardize_args(
inputs, initial_state, constants, num_constants, num_inputs=1):
"""Standardizes `__call__` to a single list of tensor inputs.
When running a model loaded from a file, the input tensors
`initial_state` and `constants` can be passed to `RNN.__call__()` as part
of `inputs` instead of by the dedicated keyword arguments. This method
makes sure the arguments are separated and that `initial_state` and
`constants` are lists of tensors (or None).
Arguments:
inputs: Tensor or list/tuple of tensors, which may include constants
and initial states. In that case `num_constants` must be specified.
initial_state: Tensor or list of tensors or None, initial states.
constants: Tensor or list of tensors or None, constant tensors.
num_constants: Expected number of constants (if constants are passed as
part of the `inputs` list).
num_inputs: Expected number of real input tensors (excluding initial_states
and constants).
Returns:
inputs: Single tensor or tuple of tensors.
initial_state: List of tensors or None.
constants: List of tensors or None.
"""
if isinstance(inputs, list):
# There are several situations here:
# In graph mode, __call__ will only be called once. The initial_state
# and constants could be in inputs (from file loading).
# In eager mode, __call__ will be called twice, once during
# rnn_layer(inputs=input_t, constants=c_t, ...), and the second time will be
# model.fit/train_on_batch/predict with real np data. In the second case,
# the inputs will contain initial_state and constants, and more importantly,
# the real inputs will be in a flat list, instead of a nested tuple.
#
# For either case, we will use num_inputs to split the input list, and
# restructure the real input into tuple.
assert initial_state is None and constants is None
if num_constants is not None:
constants = inputs[-num_constants:]
inputs = inputs[:-num_constants]
if num_inputs is None:
num_inputs = 1
if len(inputs) > num_inputs:
initial_state = inputs[num_inputs:]
inputs = inputs[:num_inputs]
if len(inputs) > 1:
inputs = tuple(inputs)
else:
inputs = inputs[0]
def to_list_or_none(x):
if x is None or isinstance(x, list):
return x
if isinstance(x, tuple):
return list(x)
return [x]
initial_state = to_list_or_none(initial_state)
constants = to_list_or_none(constants)
return inputs, initial_state, constants
def _is_multiple_state(state_size):
"""Check whether the state_size contains multiple states."""
return (hasattr(state_size, '__len__') and
not isinstance(state_size, tensor_shape.TensorShape))
def _generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype):
if inputs is not None:
batch_size = array_ops.shape(inputs)[0]
dtype = inputs.dtype
return _generate_zero_filled_state(batch_size, cell.state_size, dtype)
def _generate_zero_filled_state(batch_size_tensor, state_size, dtype):
"""Generate a zero filled tensor with shape [batch_size, state_size]."""
if batch_size_tensor is None or dtype is None:
raise ValueError(
'batch_size and dtype cannot be None while constructing initial state: '
'batch_size={}, dtype={}'.format(batch_size_tensor, dtype))
def create_zeros(unnested_state_size):
flat_dims = tensor_shape.as_shape(unnested_state_size).as_list()
init_state_size = [batch_size_tensor] + flat_dims
return array_ops.zeros(init_state_size, dtype=dtype)
if nest.is_sequence(state_size):
return nest.map_structure(create_zeros, state_size)
else:
return create_zeros(state_size)
| 39.337641 | 118 | 0.662006 |
30daa693b3201b2fc95673daa82c2cad624596b4 | 937 | py | Python | var/spack/repos/builtin/packages/r-cpp11/package.py | varioustoxins/spack | cab0e4cb240f34891a6d753f3393e512f9a99e9a | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null | var/spack/repos/builtin/packages/r-cpp11/package.py | varioustoxins/spack | cab0e4cb240f34891a6d753f3393e512f9a99e9a | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 6 | 2022-01-08T08:41:11.000Z | 2022-03-14T19:28:07.000Z | var/spack/repos/builtin/packages/r-cpp11/package.py | foeroyingur/spack | 5300cbbb2e569190015c72d0970d25425ea38647 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RCpp11(RPackage):
"""cpp11: A C++11 Interface for R's C Interface
Provides a header only, C++11 interface to R's C interface. Compared to
other approaches 'cpp11' strives to be safe against long jumps from the C
API as well as C++ exceptions, conform to normal R function semantics and
supports interaction with 'ALTREP' vectors."""
homepage = "https://github.com/r-lib/cpp11"
cran = "cpp11"
version('0.4.2', sha256='403ce0bf82358d237176053b0fb1e958cb6bfa4d0fb3555bf5801db6a6939b99')
version('0.4.0', sha256='1768fd07dc30dfbbf8f3fb1a1183947cb7e1dfd909165c4d612a63c163a41e87')
version('0.2.5', sha256='6fef9306c0c3043252c987e77c99ef679b2ea46dffafae318dbeb38ad21a2e20')
| 40.73913 | 95 | 0.75667 |
7dcea4aeafabb9c481dccdc1aaf9bd153572bb7d | 8,040 | py | Python | tools/wptrunner/wptrunner/environment.py | nordzilla/wpt | 38ecde806a5f1710d9e5beba700cef7352f7570e | ["BSD-3-Clause"] | null | null | null | tools/wptrunner/wptrunner/environment.py | nordzilla/wpt | 38ecde806a5f1710d9e5beba700cef7352f7570e | ["BSD-3-Clause"] | null | null | null | tools/wptrunner/wptrunner/environment.py | nordzilla/wpt | 38ecde806a5f1710d9e5beba700cef7352f7570e | ["BSD-3-Clause"] | null | null | null |
import json
import os
import multiprocessing
import signal
import socket
import sys
import time
from six import iteritems
from mozlog import get_default_logger, handlers, proxy
from .wptlogging import LogLevelRewriter
here = os.path.split(__file__)[0]
repo_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir, os.pardir))
sys.path.insert(0, repo_root)
from tools import localpaths # noqa: F401
from wptserve.handlers import StringHandler
serve = None
def do_delayed_imports(logger, test_paths):
global serve
serve_root = serve_path(test_paths)
sys.path.insert(0, serve_root)
failed = []
try:
from tools.serve import serve
except ImportError:
failed.append("serve")
if failed:
logger.critical(
"Failed to import %s. Ensure that tests path %s contains web-platform-tests" %
(", ".join(failed), serve_root))
sys.exit(1)
def serve_path(test_paths):
return test_paths["/"]["tests_path"]
class TestEnvironmentError(Exception):
pass
class TestEnvironment(object):
"""Context manager that owns the test environment i.e. the http and
websockets servers"""
def __init__(self, test_paths, testharness_timeout_multipler, pause_after_test, debug_info, options, ssl_config, env_extras):
self.test_paths = test_paths
self.server = None
self.config_ctx = None
self.config = None
self.testharness_timeout_multipler = testharness_timeout_multipler
self.pause_after_test = pause_after_test
self.test_server_port = options.pop("test_server_port", True)
self.debug_info = debug_info
self.options = options if options is not None else {}
self.cache_manager = multiprocessing.Manager()
self.stash = serve.stash.StashServer()
self.env_extras = env_extras
self.env_extras_cms = None
self.ssl_config = ssl_config
def __enter__(self):
self.config_ctx = self.build_config()
self.config = self.config_ctx.__enter__()
self.stash.__enter__()
self.cache_manager.__enter__()
self.setup_server_logging()
assert self.env_extras_cms is None, (
"A TestEnvironment object cannot be nested")
self.env_extras_cms = []
for env in self.env_extras:
cm = env(self.options, self.config)
cm.__enter__()
self.env_extras_cms.append(cm)
self.servers = serve.start(self.config,
self.get_routes())
if self.options.get("supports_debugger") and self.debug_info and self.debug_info.interactive:
self.ignore_interrupts()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.process_interrupts()
for scheme, servers in iteritems(self.servers):
for port, server in servers:
server.kill()
for cm in self.env_extras_cms:
cm.__exit__(exc_type, exc_val, exc_tb)
self.env_extras_cms = None
self.cache_manager.__exit__(exc_type, exc_val, exc_tb)
self.stash.__exit__()
self.config_ctx.__exit__(exc_type, exc_val, exc_tb)
def ignore_interrupts(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
def process_interrupts(self):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def build_config(self):
override_path = os.path.join(serve_path(self.test_paths), "config.json")
config = serve.ConfigBuilder()
config.ports = {
"http": [8000, 8001],
"https": [8443],
"ws": [8888],
"wss": [8889],
}
if os.path.exists(override_path):
with open(override_path) as f:
override_obj = json.load(f)
config.update(override_obj)
config.check_subdomains = False
ssl_config = self.ssl_config.copy()
ssl_config["encrypt_after_connect"] = self.options.get("encrypt_after_connect", False)
config.ssl = ssl_config
if "browser_host" in self.options:
config.browser_host = self.options["browser_host"]
if "bind_address" in self.options:
config.bind_address = self.options["bind_address"]
config.server_host = self.options.get("server_host", None)
config.doc_root = serve_path(self.test_paths)
return config
def setup_server_logging(self):
server_logger = get_default_logger(component="wptserve")
assert server_logger is not None
log_filter = handlers.LogLevelFilter(lambda x:x, "info")
# Downgrade errors to warnings for the server
log_filter = LogLevelRewriter(log_filter, ["error"], "warning")
server_logger.component_filter = log_filter
server_logger = proxy.QueuedProxyLogger(server_logger)
try:
# Set as the default logger for wptserve
serve.set_logger(server_logger)
serve.logger = server_logger
except Exception:
# This happens if logging has already been set up for wptserve
pass
def get_routes(self):
route_builder = serve.RoutesBuilder()
for path, format_args, content_type, route in [
("testharness_runner.html", {}, "text/html", "/testharness_runner.html"),
(self.options.get("testharnessreport", "testharnessreport.js"),
{"output": self.pause_after_test,
"timeout_multiplier": self.testharness_timeout_multipler,
"explicit_timeout": "true" if self.debug_info is not None else "false"},
"text/javascript;charset=utf8",
"/resources/testharnessreport.js")]:
path = os.path.normpath(os.path.join(here, path))
# Note that .headers files don't apply to static routes, so we need to
# re-add any static headers here.
headers = {"Cache-Control": "max-age=3600"}
route_builder.add_static(path, format_args, content_type, route,
headers=headers)
data = b""
with open(os.path.join(repo_root, "resources", "testdriver.js"), "rb") as fp:
data += fp.read()
with open(os.path.join(here, "testdriver-extra.js"), "rb") as fp:
data += fp.read()
route_builder.add_handler(b"GET", b"/resources/testdriver.js",
StringHandler(data, "text/javascript"))
for url_base, paths in iteritems(self.test_paths):
if url_base == "/":
continue
route_builder.add_mount_point(url_base, paths["tests_path"])
if "/" not in self.test_paths:
del route_builder.mountpoint_routes["/"]
return route_builder.get_routes()
def ensure_started(self):
# Pause for a while to ensure that the server has a chance to start
total_sleep_secs = 30
each_sleep_secs = 0.5
end_time = time.time() + total_sleep_secs
while time.time() < end_time:
failed = self.test_servers()
if not failed:
return
time.sleep(each_sleep_secs)
raise EnvironmentError("Servers failed to start: %s" %
", ".join("%s:%s" % item for item in failed))
def test_servers(self):
failed = []
host = self.config["server_host"]
for scheme, servers in iteritems(self.servers):
for port, server in servers:
if self.test_server_port:
s = socket.socket()
s.settimeout(0.1)
try:
s.connect((host, port))
except socket.error:
failed.append((host, port))
finally:
s.close()
if not server.is_alive():
failed.append((scheme, port))
return failed
| 33.640167 | 129 | 0.611194 |
ef688174f13fe69b9f91fd37ffd18e508e73d731 | 9,956 | py | Python | monai/utils/misc.py | Irme/MONAI | dc4bf661831b14f4231cb325cc1b15d38c1e406c | ["Apache-2.0"] | 1 | 2020-12-03T21:28:09.000Z | 2020-12-03T21:28:09.000Z | monai/utils/misc.py | Irme/MONAI | dc4bf661831b14f4231cb325cc1b15d38c1e406c | ["Apache-2.0"] | null | null | null | monai/utils/misc.py | Irme/MONAI | dc4bf661831b14f4231cb325cc1b15d38c1e406c | ["Apache-2.0"] | 1 | 2020-06-11T13:03:02.000Z | 2020-06-11T13:03:02.000Z |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
import itertools
import random
import time
from ast import literal_eval
from distutils.util import strtobool
from typing import Any, Callable, Optional, Sequence, Tuple, Union
import numpy as np
import torch
_seed = None
_flag_deterministic = torch.backends.cudnn.deterministic
_flag_cudnn_benchmark = torch.backends.cudnn.benchmark
MAX_SEED = np.iinfo(np.uint32).max + 1 # 2**32, the actual seed should be in [0, MAX_SEED - 1] for uint32
def zip_with(op, *vals, mapfunc=map):
"""
Map `op`, using `mapfunc`, to each tuple derived from zipping the iterables in `vals`.
"""
return mapfunc(op, zip(*vals))
def star_zip_with(op, *vals):
"""
Use starmap as the mapping function in zipWith.
"""
return zip_with(op, *vals, mapfunc=itertools.starmap)
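# Example usage (added comment): both helpers map a function over the zipped
# iterables; `zip_with` passes each zipped tuple as a single argument, while
# `star_zip_with` unpacks it.
#   list(zip_with(sum, [1, 2], [3, 4]))                      -> [4, 6]
#   list(star_zip_with(lambda a, b: a * b, [1, 2], [3, 4]))  -> [3, 8]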
def first(iterable, default=None):
"""
Returns the first item in the given iterable or `default` if empty, meaningful mostly with 'for' expressions.
"""
for i in iterable:
return i
return default
def issequenceiterable(obj: Any) -> bool:
"""
Determine if the object is an iterable sequence and is not a string.
"""
if torch.is_tensor(obj):
return int(obj.dim()) > 0 # a 0-d tensor is not iterable
return isinstance(obj, collections.abc.Iterable) and not isinstance(obj, str)
def ensure_tuple(vals: Any) -> Tuple[Any, ...]:
"""
Returns a tuple of `vals`.
"""
if not issequenceiterable(vals):
vals = (vals,)
return tuple(vals)
def ensure_tuple_size(tup: Any, dim: int, pad_val: Any = 0) -> Tuple[Any, ...]:
"""
Returns a copy of `tup` with `dim` values, either shortened or padded with `pad_val` as necessary.
"""
tup = ensure_tuple(tup) + (pad_val,) * dim
return tuple(tup[:dim])
def ensure_tuple_rep(tup: Any, dim: int) -> Tuple[Any, ...]:
"""
Returns a copy of `tup` with `dim` values, obtained by either shortening or duplicating the input.
Raises:
ValueError: When ``tup`` is a sequence and ``tup`` length is not ``dim``.
Examples::
>>> ensure_tuple_rep(1, 3)
(1, 1, 1)
>>> ensure_tuple_rep(None, 3)
(None, None, None)
>>> ensure_tuple_rep('test', 3)
('test', 'test', 'test')
>>> ensure_tuple_rep([1, 2, 3], 3)
(1, 2, 3)
>>> ensure_tuple_rep(range(3), 3)
(0, 1, 2)
>>> ensure_tuple_rep([1, 2], 3)
ValueError: Sequence must have length 3, got length 2.
"""
if not issequenceiterable(tup):
return (tup,) * dim
elif len(tup) == dim:
return tuple(tup)
raise ValueError(f"Sequence must have length {dim}, got {len(tup)}.")
def fall_back_tuple(user_provided: Any, default: Sequence, func: Callable = lambda x: x and x > 0) -> Tuple[Any, ...]:
"""
Refine `user_provided` according to the `default`, and returns as a validated tuple.
The validation is done for each element in `user_provided` using `func`.
If `func(user_provided[idx])` returns False, the corresponding `default[idx]` will be used
as the fallback.
Typically used when `user_provided` is a tuple of window sizes provided by the user and
`default` is defined by the data; this function returns an updated `user_provided` with its non-positive
components replaced by the corresponding components from `default`.
Args:
user_provided: item to be validated.
default: a sequence used to provide the fallbacks.
func: a Callable to validate every component of `user_provided`.
Examples::
>>> fall_back_tuple((1, 2), (32, 32))
(1, 2)
>>> fall_back_tuple(None, (32, 32))
(32, 32)
>>> fall_back_tuple((-1, 10), (32, 32))
(32, 10)
>>> fall_back_tuple((-1, None), (32, 32))
(32, 32)
>>> fall_back_tuple((1, None), (32, 32))
(1, 32)
>>> fall_back_tuple(0, (32, 32))
(32, 32)
>>> fall_back_tuple(range(3), (32, 64, 48))
(32, 1, 2)
>>> fall_back_tuple([0], (32, 32))
ValueError: Sequence must have length 2, got length 1.
"""
ndim = len(default)
user = ensure_tuple_rep(user_provided, ndim)
return tuple( # use the default values if user provided is not valid
user_c if func(user_c) else default_c for default_c, user_c in zip(default, user)
)
def is_scalar_tensor(val: Any) -> bool:
if torch.is_tensor(val) and val.ndim == 0:
return True
return False
def is_scalar(val: Any) -> bool:
if torch.is_tensor(val) and val.ndim == 0:
return True
return bool(np.isscalar(val))
def progress_bar(index: int, count: int, desc: Optional[str] = None, bar_len: int = 30, newline: bool = False) -> None:
"""print a progress bar to track some time consuming task.
Args:
index: current status in progress.
count: total steps of the progress.
desc: description of the progress bar; if not None, shown before the progress bar.
bar_len: the total length of the bar on screen, default is 30 char.
newline: whether to print in a new line for every index.
"""
end = "\r" if newline is False else "\r\n"
filled_len = int(bar_len * index // count)
bar = f"{desc} " if desc is not None else ""
bar += "[" + "=" * filled_len + " " * (bar_len - filled_len) + "]"
print(f"{index}/{count} {bar}", end=end)
if index == count:
print("")
def get_seed() -> Optional[int]:
return _seed
def set_determinism(
seed: Optional[int] = np.iinfo(np.uint32).max,
additional_settings: Optional[Union[Sequence[Callable[[int], Any]], Callable[[int], Any]]] = None,
) -> None:
"""
Set random seed for modules to enable or disable deterministic training.
Args:
seed: the random seed to use, default is np.iinfo(np.uint32).max.
It is recommended to set a large seed, i.e. a number that has a good balance
of 0 and 1 bits. Avoid having many 0 bits in the seed.
if set to None, will disable deterministic training.
additional_settings: additional settings
that need to set random seed.
"""
if seed is None:
# cast to 32 bit seed for CUDA
seed_ = torch.default_generator.seed() % (np.iinfo(np.int32).max + 1)
if not torch.cuda._is_in_bad_fork():
torch.cuda.manual_seed_all(seed_)
else:
seed = int(seed) % MAX_SEED
torch.manual_seed(seed)
global _seed
_seed = seed
random.seed(seed)
np.random.seed(seed)
if additional_settings is not None:
additional_settings = ensure_tuple(additional_settings)
for func in additional_settings:
func(seed)
if seed is not None:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
else: # restore the original flags
torch.backends.cudnn.deterministic = _flag_deterministic
torch.backends.cudnn.benchmark = _flag_cudnn_benchmark
def list_to_dict(items):
"""
To convert a list of "key=value" pairs into a dictionary.
For example: items: `["a=1", "b=2", "c=3"]`, return: {"a": 1, "b": 2, "c": 3}.
If there is no "=" in the pair, use None as the value, for example: ["a"], return: {"a": None}.
Note that it will remove the blanks around keys and values.
"""
def _parse_var(s):
items = s.split("=", maxsplit=1)
key = items[0].strip(" \n\r\t'")
value = None
if len(items) > 1:
value = items[1].strip(" \n\r\t'")
return key, value
d = dict()
if items:
for item in items:
key, value = _parse_var(item)
try:
if key in d:
raise KeyError(f"encounter duplicated key {key}.")
d[key] = literal_eval(value)
except ValueError:
try:
d[key] = bool(strtobool(str(value)))
except ValueError:
d[key] = value
return d
_torch_to_np_dtype = {
torch.bool: np.bool,
torch.uint8: np.uint8,
torch.int8: np.int8,
torch.int16: np.int16,
torch.int32: np.int32,
torch.int64: np.int64,
torch.float16: np.float16,
torch.float32: np.float32,
torch.float64: np.float64,
torch.complex64: np.complex64,
torch.complex128: np.complex128,
}
_np_to_torch_dtype = {value: key for key, value in _torch_to_np_dtype.items()}
def dtype_torch_to_numpy(dtype):
"""Convert a torch dtype to its numpy equivalent."""
return _torch_to_np_dtype[dtype]
def dtype_numpy_to_torch(dtype):
"""Convert a numpy dtype to its torch equivalent."""
return _np_to_torch_dtype[dtype]
class PerfContext:
"""
Context manager for tracking how much time is spent within context blocks. This uses `time.perf_counter` to
accumulate the total amount of time in seconds in the attribute `total_time` over however many context blocks
the object is used in.
"""
def __init__(self):
self.total_time = 0
self.start_time = None
def __enter__(self):
self.start_time = time.perf_counter()
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.total_time += time.perf_counter() - self.start_time
self.start_time = None
| 32.116129 | 119 | 0.631579 |
277552e7f0ed111e6836a8d5bcdd67db7aecfaad | 6,233 | py | Python | model/general_recommender/MF.py | wubinzzu/TNNLS | 1811091edaf4926f202a5a0cdab31814c0310a9e | ["MIT"] | null | null | null | model/general_recommender/MF.py | wubinzzu/TNNLS | 1811091edaf4926f202a5a0cdab31814c0310a9e | ["MIT"] | null | null | null | model/general_recommender/MF.py | wubinzzu/TNNLS | 1811091edaf4926f202a5a0cdab31814c0310a9e | ["MIT"] | null | null | null |
"""
Reference: Steffen Rendle et al., "BPR: Bayesian Personalized Ranking from Implicit Feedback." in UAI 2009.
GMF: Xiangnan He et al., "Neural Collaborative Filtering." in WWW 2017.
@author: wubin
"""
import tensorflow as tf
import numpy as np
from time import time
from util import learner, tool
from model.AbstractRecommender import AbstractRecommender
from util import timer
from util import l2_loss
from data import PairwiseSampler, PointwiseSampler
class MF(AbstractRecommender):
def __init__(self, sess, dataset, conf):
super(MF, self).__init__(dataset, conf)
self.lr = conf["lr"]
self.embedding_size = conf["embedding_size"]
self.learner = conf["learner"]
self.loss_function = conf["loss_function"]
self.is_pairwise = conf["is_pairwise"]
self.num_epochs = conf["epochs"]
self.reg_mf = conf["reg_mf"]
self.batch_size = conf["batch_size"]
self.verbose = conf["verbose"]
self.num_negatives = conf["num_negatives"]
self.init_method = conf["init_method"]
self.stddev = conf["stddev"]
self.dataset = dataset
self.num_users = dataset.num_users
self.num_items = dataset.num_items
self.sess = sess
def _create_placeholders(self):
with tf.name_scope("input_data"):
self.user_input = tf.placeholder(tf.int32, shape=[None], name="user_input")
self.item_input = tf.placeholder(tf.int32, shape=[None], name="item_input")
if self.is_pairwise is True:
self.item_input_neg = tf.placeholder(tf.int32, shape=[None], name="item_input_neg")
else:
self.labels = tf.placeholder(tf.float32, shape=[None], name="labels")
def _create_variables(self):
with tf.name_scope("embedding"):
initializer = tool.get_initializer(self.init_method, self.stddev)
self.user_embeddings = tf.Variable(initializer([self.num_users, self.embedding_size]),
name='user_embeddings', dtype=tf.float32) # (users, embedding_size)
self.item_embeddings = tf.Variable(initializer([self.num_items, self.embedding_size]),
name='item_embeddings', dtype=tf.float32) # (items, embedding_size)
def _create_inference(self, item_input):
with tf.name_scope("inference"):
# embedding look up
user_embedding = tf.nn.embedding_lookup(self.user_embeddings, self.user_input)
item_embedding = tf.nn.embedding_lookup(self.item_embeddings, item_input)
predict = tf.reduce_sum(tf.multiply(user_embedding, item_embedding), 1)
return user_embedding, item_embedding, predict
def _create_loss(self):
with tf.name_scope("loss"):
# loss for L(Theta)
p1, q1, self.output = self._create_inference(self.item_input)
if self.is_pairwise is True:
_, q2, self.output_neg = self._create_inference(self.item_input_neg)
result = self.output - self.output_neg
self.loss = learner.pairwise_loss(self.loss_function, result) + self.reg_mf * l2_loss(p1, q2, q1)
else:
self.loss = learner.pointwise_loss(self.loss_function, self.labels, self.output) + \
self.reg_mf * l2_loss(p1, q1)
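# Note (added comment): with the BPR reference in the module docstring, the
# pairwise branch above typically corresponds to minimizing
# -log(sigmoid(score(u, i) - score(u, j))) plus an L2 penalty on the involved
# embeddings; the exact form depends on the configured `loss_function` handled
# by `learner.pairwise_loss`, which is an assumption about this project's helper.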
def _create_optimizer(self):
with tf.name_scope("learner"):
self.optimizer = learner.optimizer(self.learner, self.loss, self.lr)
def build_graph(self):
self._create_placeholders()
self._create_variables()
self._create_loss()
self._create_optimizer()
# ---------- training process -------
def train_model(self):
self.logger.info(self.evaluator.metrics_info())
if self.is_pairwise is True:
data_iter = PairwiseSampler(self.dataset, neg_num=1, batch_size=self.batch_size, shuffle=True)
else:
data_iter = PointwiseSampler(self.dataset, neg_num=self.num_negatives, batch_size=self.batch_size, shuffle=True)
for epoch in range(1, self.num_epochs+1):
total_loss = 0.0
training_start_time = time()
if self.is_pairwise is True:
for bat_users, bat_items_pos, bat_items_neg in data_iter:
feed_dict = {self.user_input: bat_users,
self.item_input: bat_items_pos,
self.item_input_neg: bat_items_neg}
loss, _ = self.sess.run((self.loss, self.optimizer), feed_dict=feed_dict)
total_loss += loss
else:
for bat_users, bat_items, bat_labels in data_iter:
feed_dict = {self.user_input: bat_users,
self.item_input: bat_items,
self.labels: bat_labels}
loss, _ = self.sess.run((self.loss, self.optimizer), feed_dict=feed_dict)
total_loss += loss
self.logger.info("[iter %d : loss : %f, time: %f]" % (epoch, total_loss/len(data_iter),
time()-training_start_time))
if epoch % self.verbose == 0:
self.logger.info("epoch %d:\t%s" % (epoch, self.evaluate()))
@timer
def evaluate(self):
self._cur_user_embeddings, self._cur_item_embeddings = self.sess.run([self.user_embeddings, self.item_embeddings])
return self.evaluator.evaluate(self)
def predict(self, user_ids, candidate_items=None):
if candidate_items is None:
user_embed = self._cur_user_embeddings[user_ids]
ratings = np.matmul(user_embed, self._cur_item_embeddings.T)
else:
ratings = []
for user_id, items_by_user_id in zip(user_ids, candidate_items):
user_embed = self._cur_user_embeddings[user_id]
items_embed = self._cur_item_embeddings[items_by_user_id]
ratings.append(np.squeeze(np.matmul(user_embed, items_embed.T)))
return ratings
| 47.219697 | 124 | 0.611744 |
fe9f5dcc9042b0bf3815baef85f2b61379b6efa1 | 4,147 | py | Python | analyses/fire_season_dataset_diffs.py | akuhnregnier/wildfire-analysis | a04deada145cec864051d2fb15aec1a53a0246b9 | ["MIT"] | null | null | null | analyses/fire_season_dataset_diffs.py | akuhnregnier/wildfire-analysis | a04deada145cec864051d2fb15aec1a53a0246b9 | ["MIT"] | null | null | null | analyses/fire_season_dataset_diffs.py | akuhnregnier/wildfire-analysis | a04deada145cec864051d2fb15aec1a53a0246b9 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""Investigate how fire season estimates differ between datasets.
"""
import logging
import math
import os
import warnings
import iris
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from joblib import parallel_backend
from wildfires.analysis import *
from wildfires.data import *
from wildfires.logging_config import enable_logging
from wildfires.qstat import get_ncpus
from wildfires.utils import *
logger = logging.getLogger(__name__)
enable_logging()
warnings.filterwarnings("ignore", ".*Collapsing a non-contiguous coordinate.*")
warnings.filterwarnings("ignore", ".*DEFAULT_SPHERICAL_EARTH_RADIUS*")
warnings.filterwarnings("ignore", ".*guessing contiguous bounds*")
memory = get_memory("analysis_fire_season_dataset_diffs", verbose=100)
FigureSaver.debug = True
FigureSaver.directory = os.path.expanduser(
os.path.join("~", "tmp", "fire_season_dataset_diffs")
)
os.makedirs(FigureSaver.directory, exist_ok=True)
normal_coast_linewidth = 0.5
mpl.rc("figure", figsize=(14, 6))
mpl.rc("font", size=9.0)
np.random.seed(1)
n_jobs = 5
with parallel_backend(
"loky", n_jobs=n_jobs, inner_max_num_threads=math.floor(get_ncpus() / n_jobs)
):
outputs = thres_fire_season_stats(0.1)
dataset_names = [output[0] for output in outputs]
lengths = [output[3].reshape(1, *output[3].shape) for output in outputs]
# Stack the lengths into one array.
lengths = np.ma.vstack(lengths)
mean_length = np.ma.mean(lengths, axis=0)
# Mean BAs
ba_variable_names = (
"CCI MERIS BA",
"CCI MODIS BA",
"GFED4 BA",
"GFED4s BA",
"MCD64CMQ BA",
)
mean_ba_cubes = (
prepare_selection(
Datasets([globals()[name]() for name in dataset_names]), which="mean"
)
.select_variables(ba_variable_names)
.cubes
)
mean_bas = []
for mean_ba_cube in mean_ba_cubes:
mean_bas.append(
mean_ba_cube.collapsed(
("latitude", "longitude"),
iris.analysis.MEAN,
weights=iris.analysis.cartography.area_weights(mean_ba_cube),
).data
)
# Diffs from the mean.
deviations_cube = dummy_lat_lon_cube(lengths - mean_length.reshape(1, 720, 1440))
deviations = deviations_cube.collapsed(
["latitude", "longitude"],
iris.analysis.MEAN,
weights=iris.analysis.cartography.area_weights(deviations_cube),
).data
deviation_df = pd.DataFrame(
[
(name, deviation, mean_ba)
for name, deviation, mean_ba in zip(dataset_names, deviations, mean_bas)
],
columns=("Name", "Deviation from Mean", "Mean BA"),
).sort_values("Deviation from Mean", ascending=False)
print(
deviation_df.to_string(index=False, float_format="{:0.3f}".format, line_width=200)
)
deviation_df.to_csv(
os.path.join(FigureSaver.directory, f"season_length_mean_deviations.csv"),
index=False,
)
with FigureSaver("mean_length"):
cube_plotting(
mean_length,
coastline_kwargs=dict(linewidth=0.5),
cmap="brewer_RdYlBu_11_r",
label="length (months)",
title="Mean Length",
boundaries=np.arange(0, 12),
)
std_length = np.ma.std(lengths, axis=0)
with FigureSaver("std_length"):
cube_plotting(
std_length,
coastline_kwargs=dict(linewidth=0.5),
cmap="inferno",
label="length (months)",
title="Std Length (Between Datasets)",
)
mean_ba = prepare_selection(Datasets(GFEDv4s()), which="mean").cube
with FigureSaver("std_length_corr_mean_ba_gfedv4s"):
combined_mask = mean_ba.data.mask | std_length.mask
mean_ba.data.mask = combined_mask
std_length.mask = combined_mask
plt.figure()
plt.hexbin(np.log(get_unmasked(mean_ba.data)), get_unmasked(std_length), bins="log")
plt.xlabel("log(Mean BA GFEDv4s)")
plt.ylabel("STD Fire Season Length")
# plt.xscale('log')
plt.show()
with FigureSaver("gfedv4s_length_deviation"):
cube_plotting(
lengths[dataset_names.index("GFEDv4s")] - mean_length,
coastline_kwargs=dict(linewidth=0.5),
cmap="inferno",
label="length (months)",
title="GFED4s Fire Season Length - Mean",
)
| 26.754839 | 88 | 0.701471 |
031b40d3ba85bd6d4106bedb422c96a2015c7f4f | 34,196 | py | Python | tests/test_study.py | liori/optuna | 9f5c5cf5c36df1628567f0b4a9873892d148ad40 | ["MIT"] | null | null | null | tests/test_study.py | liori/optuna | 9f5c5cf5c36df1628567f0b4a9873892d148ad40 | ["MIT"] | null | null | null | tests/test_study.py | liori/optuna | 9f5c5cf5c36df1628567f0b4a9873892d148ad40 | ["MIT"] | null | null | null |
import copy
import itertools
import multiprocessing
import pickle
import threading
import time
from unittest.mock import Mock # NOQA
from unittest.mock import patch
import uuid
import _pytest.capture
import joblib
import pandas as pd
import pytest
import optuna
from optuna.testing.storage import StorageSupplier
from optuna import type_checking
if type_checking.TYPE_CHECKING:
from typing import Any # NOQA
from typing import Callable # NOQA
from typing import Dict # NOQA
from typing import Optional # NOQA
from typing import Tuple # NOQA
from _pytest.recwarn import WarningsRecorder # NOQA
CallbackFuncType = Callable[[optuna.study.Study, optuna.trial.FrozenTrial], None]
# TODO(ytsmiling) Add tests for multi-worker settings.
STORAGE_MODES = [
"inmemory",
"sqlite",
"redis",
]
def func(trial, x_max=1.0):
# type: (optuna.trial.Trial, float) -> float
x = trial.suggest_uniform("x", -x_max, x_max)
y = trial.suggest_loguniform("y", 20, 30)
z = trial.suggest_categorical("z", (-1.0, 1.0))
assert isinstance(z, float)
return (x - 2) ** 2 + (y - 25) ** 2 + z
class Func(object):
def __init__(self, sleep_sec=None):
# type: (Optional[float]) -> None
self.n_calls = 0
self.sleep_sec = sleep_sec
self.lock = threading.Lock()
self.x_max = 10.0
def __call__(self, trial):
# type: (optuna.trial.Trial) -> float
with self.lock:
self.n_calls += 1
x_max = self.x_max
self.x_max *= 0.9
# Sleep for testing parallelism
if self.sleep_sec is not None:
time.sleep(self.sleep_sec)
value = func(trial, x_max)
check_params(trial.params)
return value
def check_params(params):
# type: (Dict[str, Any]) -> None
assert sorted(params.keys()) == ["x", "y", "z"]
def check_value(value):
# type: (Optional[float]) -> None
assert isinstance(value, float)
assert -1.0 <= value <= 12.0 ** 2 + 5.0 ** 2 + 1.0
def check_frozen_trial(frozen_trial):
# type: (optuna.trial.FrozenTrial) -> None
if frozen_trial.state == optuna.trial.TrialState.COMPLETE:
check_params(frozen_trial.params)
check_value(frozen_trial.value)
def check_study(study):
# type: (optuna.Study) -> None
for trial in study.trials:
check_frozen_trial(trial)
complete_trials = [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE]
if len(complete_trials) == 0:
with pytest.raises(ValueError):
study.best_params
with pytest.raises(ValueError):
study.best_value
with pytest.raises(ValueError):
study.best_trial
else:
check_params(study.best_params)
check_value(study.best_value)
check_frozen_trial(study.best_trial)
def test_optimize_trivial_in_memory_new():
# type: () -> None
study = optuna.create_study()
study.optimize(func, n_trials=10)
check_study(study)
def test_optimize_trivial_in_memory_resume():
# type: () -> None
study = optuna.create_study()
study.optimize(func, n_trials=10)
study.optimize(func, n_trials=10)
check_study(study)
def test_optimize_trivial_rdb_resume_study():
# type: () -> None
study = optuna.create_study("sqlite:///:memory:")
study.optimize(func, n_trials=10)
check_study(study)
def test_optimize_with_direction():
# type: () -> None
study = optuna.create_study(direction="minimize")
study.optimize(func, n_trials=10)
assert study.direction == optuna.study.StudyDirection.MINIMIZE
check_study(study)
study = optuna.create_study(direction="maximize")
study.optimize(func, n_trials=10)
assert study.direction == optuna.study.StudyDirection.MAXIMIZE
check_study(study)
with pytest.raises(ValueError):
optuna.create_study(direction="test")
@pytest.mark.parametrize(
"n_trials, n_jobs, storage_mode",
itertools.product(
(0, 1, 20), (1, 2, -1), STORAGE_MODES, # n_trials # n_jobs # storage_mode
),
)
def test_optimize_parallel(n_trials, n_jobs, storage_mode):
# type: (int, int, str)-> None
f = Func()
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.optimize(f, n_trials=n_trials, n_jobs=n_jobs)
assert f.n_calls == len(study.trials) == n_trials
check_study(study)
@pytest.mark.parametrize(
"n_trials, n_jobs, storage_mode",
itertools.product(
(0, 1, 20, None), (1, 2, -1), STORAGE_MODES, # n_trials # n_jobs # storage_mode
),
)
def test_optimize_parallel_timeout(n_trials, n_jobs, storage_mode):
# type: (int, int, str) -> None
sleep_sec = 0.1
timeout_sec = 1.0
f = Func(sleep_sec=sleep_sec)
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.optimize(f, n_trials=n_trials, n_jobs=n_jobs, timeout=timeout_sec)
assert f.n_calls == len(study.trials)
if n_trials is not None:
assert f.n_calls <= n_trials
# A thread can process at most (timeout_sec / sleep_sec + 1) trials.
n_jobs_actual = n_jobs if n_jobs != -1 else multiprocessing.cpu_count()
max_calls = (timeout_sec / sleep_sec + 1) * n_jobs_actual
assert f.n_calls <= max_calls
check_study(study)
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_optimize_with_catch(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
def func_value_error(_):
# type: (optuna.trial.Trial) -> float
raise ValueError
# Test default exceptions.
with pytest.raises(ValueError):
study.optimize(func_value_error, n_trials=20)
assert len(study.trials) == 1
assert all(trial.state == optuna.trial.TrialState.FAIL for trial in study.trials)
# Test acceptable exception.
study.optimize(func_value_error, n_trials=20, catch=(ValueError,))
assert len(study.trials) == 21
assert all(trial.state == optuna.trial.TrialState.FAIL for trial in study.trials)
# Test trial with unacceptable exception.
with pytest.raises(ValueError):
study.optimize(func_value_error, n_trials=20, catch=(ArithmeticError,))
assert len(study.trials) == 22
assert all(trial.state == optuna.trial.TrialState.FAIL for trial in study.trials)
@pytest.mark.parametrize("catch", [[], [Exception], None, 1])
def test_optimize_with_catch_invalid_type(catch):
# type: (Any) -> None
study = optuna.create_study()
def func_value_error(_):
# type: (optuna.trial.Trial) -> float
raise ValueError
with pytest.raises(TypeError):
study.optimize(func_value_error, n_trials=20, catch=catch)
def test_optimize_parallel_storage_warning(recwarn):
# type: (WarningsRecorder) -> None
study = optuna.create_study()
# Default joblib backend is threading and no warnings will be captured.
study.optimize(lambda t: t.suggest_uniform("x", 0, 1), n_trials=20, n_jobs=2)
assert len(recwarn) == 0
with pytest.warns(UserWarning):
with joblib.parallel_backend("loky"):
study.optimize(lambda t: t.suggest_uniform("x", 0, 1), n_trials=20, n_jobs=2)
@pytest.mark.parametrize(
"n_jobs, storage_mode", itertools.product((2, -1), STORAGE_MODES,), # n_jobs # storage_mode
)
def test_optimize_with_reseeding(n_jobs, storage_mode):
# type: (int, str)-> None
f = Func()
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
sampler = study.sampler
with patch.object(sampler, "reseed_rng", wraps=sampler.reseed_rng) as mock_object:
study.optimize(f, n_trials=1, n_jobs=2)
assert mock_object.call_count == 1
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_study_set_and_get_user_attrs(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.set_user_attr("dataset", "MNIST")
assert study.user_attrs["dataset"] == "MNIST"
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_study_set_and_get_system_attrs(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.set_system_attr("system_message", "test")
assert study.system_attrs["system_message"] == "test"
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_trial_set_and_get_user_attrs(storage_mode):
# type: (str) -> None
def f(trial):
# type: (optuna.trial.Trial) -> float
trial.set_user_attr("train_accuracy", 1)
assert trial.user_attrs["train_accuracy"] == 1
return 0.0
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.optimize(f, n_trials=1)
frozen_trial = study.trials[0]
assert frozen_trial.user_attrs["train_accuracy"] == 1
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_trial_set_and_get_system_attrs(storage_mode):
# type: (str) -> None
def f(trial):
# type: (optuna.trial.Trial) -> float
trial.set_system_attr("system_message", "test")
assert trial.system_attrs["system_message"] == "test"
return 0.0
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.optimize(f, n_trials=1)
frozen_trial = study.trials[0]
assert frozen_trial.system_attrs["system_message"] == "test"
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_get_all_study_summaries(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.optimize(Func(), n_trials=5)
summaries = optuna.get_all_study_summaries(study._storage)
summary = [s for s in summaries if s._study_id == study._study_id][0]
assert summary.study_name == study.study_name
assert summary.n_trials == 5
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_get_all_study_summaries_with_no_trials(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
summaries = optuna.get_all_study_summaries(study._storage)
summary = [s for s in summaries if s._study_id == study._study_id][0]
assert summary.study_name == study.study_name
assert summary.n_trials == 0
assert summary.datetime_start is None
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_run_trial(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
# Test trial without exception.
study._run_trial(func, catch=(Exception,), gc_after_trial=True)
check_study(study)
# Test trial with acceptable exception.
def func_value_error(_):
# type: (optuna.trial.Trial) -> float
raise ValueError
trial = study._run_trial(func_value_error, catch=(ValueError,), gc_after_trial=True)
frozen_trial = study._storage.get_trial(trial._trial_id)
expected_message = "Trial 1 failed because of the following error: ValueError()"
assert frozen_trial.state == optuna.trial.TrialState.FAIL
assert frozen_trial.system_attrs["fail_reason"] == expected_message
# Test trial with unacceptable exception.
with pytest.raises(ValueError):
study._run_trial(func_value_error, catch=(ArithmeticError,), gc_after_trial=True)
# Test trial with invalid objective value: None
def func_none(_):
# type: (optuna.trial.Trial) -> float
return None # type: ignore
trial = study._run_trial(func_none, catch=(Exception,), gc_after_trial=True)
frozen_trial = study._storage.get_trial(trial._trial_id)
expected_message = (
"Trial 3 failed, because the returned "
"value from the objective function cannot be cast to float. "
"Returned value is: None"
)
assert frozen_trial.state == optuna.trial.TrialState.FAIL
assert frozen_trial.system_attrs["fail_reason"] == expected_message
# Test trial with invalid objective value: nan
def func_nan(_):
# type: (optuna.trial.Trial) -> float
return float("nan")
trial = study._run_trial(func_nan, catch=(Exception,), gc_after_trial=True)
frozen_trial = study._storage.get_trial(trial._trial_id)
expected_message = "Trial 4 failed, because the objective function returned nan."
assert frozen_trial.state == optuna.trial.TrialState.FAIL
assert frozen_trial.system_attrs["fail_reason"] == expected_message
# TODO(Yanase): Remove this test function after removing `optuna.structs.TrialPruned`.
@pytest.mark.parametrize(
"trial_pruned_class",
[optuna.TrialPruned, optuna.exceptions.TrialPruned, optuna.structs.TrialPruned],
)
@pytest.mark.parametrize("report_value", [None, 1.2])
def test_run_trial_with_trial_pruned(trial_pruned_class, report_value):
# type: (Callable[[], optuna.exceptions.TrialPruned], Optional[float]) -> None
study = optuna.create_study()
def func_with_trial_pruned(trial):
# type: (optuna.trial.Trial) -> float
if report_value is not None:
trial.report(report_value, 1)
raise trial_pruned_class()
trial = study._run_trial(func_with_trial_pruned, catch=(), gc_after_trial=True)
frozen_trial = study._storage.get_trial(trial._trial_id)
assert frozen_trial.value == report_value
assert frozen_trial.state == optuna.trial.TrialState.PRUNED
def test_study_pickle():
# type: () -> None
study_1 = optuna.create_study()
study_1.optimize(func, n_trials=10)
check_study(study_1)
assert len(study_1.trials) == 10
dumped_bytes = pickle.dumps(study_1)
study_2 = pickle.loads(dumped_bytes)
check_study(study_2)
assert len(study_2.trials) == 10
study_2.optimize(func, n_trials=10)
check_study(study_2)
assert len(study_2.trials) == 20
def test_study_trials_dataframe_with_no_trials():
# type: () -> None
study_with_no_trials = optuna.create_study()
trials_df = study_with_no_trials.trials_dataframe()
assert trials_df.empty
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
@pytest.mark.parametrize(
"attrs",
[
(
"number",
"value",
"datetime_start",
"datetime_complete",
"params",
"user_attrs",
"system_attrs",
"state",
),
(
"number",
"value",
"datetime_start",
"datetime_complete",
"duration",
"params",
"user_attrs",
"system_attrs",
"state",
"intermediate_values",
"_trial_id",
"distributions",
),
],
)
@pytest.mark.parametrize("multi_index", [True, False])
def test_trials_dataframe(storage_mode, attrs, multi_index):
# type: (str, Tuple[str, ...], bool) -> None
def f(trial):
# type: (optuna.trial.Trial) -> float
x = trial.suggest_int("x", 1, 1)
y = trial.suggest_categorical("y", (2.5,))
assert isinstance(y, float)
trial.set_user_attr("train_loss", 3)
trial.set_system_attr("foo", "bar")
value = x + y # 3.5
# Test reported intermediate values, although in practice they are not "intermediate".
trial.report(value, step=0)
return value
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.optimize(f, n_trials=3)
df = study.trials_dataframe(attrs=attrs, multi_index=multi_index)
# Change index to access rows via trial number.
if multi_index:
df.set_index(("number", ""), inplace=True, drop=False)
else:
df.set_index("number", inplace=True, drop=False)
assert len(df) == 3
# Number columns are as follows (total of 13):
# non-nested: 6 (number, value, state, datetime_start, datetime_complete, duration)
# params: 2
# distributions: 2
# user_attrs: 1
# system_attrs: 1
# intermediate_values: 1
expected_n_columns = len(attrs)
if "params" in attrs:
expected_n_columns += 1
if "distributions" in attrs:
expected_n_columns += 1
assert len(df.columns) == expected_n_columns
for i in range(3):
assert df.number[i] == i
assert df.state[i] == "COMPLETE"
assert df.value[i] == 3.5
assert isinstance(df.datetime_start[i], pd.Timestamp)
assert isinstance(df.datetime_complete[i], pd.Timestamp)
if multi_index:
if "distributions" in attrs:
assert ("distributions", "x") in df.columns
assert ("distributions", "y") in df.columns
if "_trial_id" in attrs:
assert ("trial_id", "") in df.columns # trial_id depends on other tests.
if "duration" in attrs:
assert ("duration", "") in df.columns
assert df.params.x[i] == 1
assert df.params.y[i] == 2.5
assert df.user_attrs.train_loss[i] == 3
assert df.system_attrs.foo[i] == "bar"
else:
if "distributions" in attrs:
assert "distributions_x" in df.columns
assert "distributions_y" in df.columns
if "_trial_id" in attrs:
assert "trial_id" in df.columns # trial_id depends on other tests.
if "duration" in attrs:
assert "duration" in df.columns
assert df.params_x[i] == 1
assert df.params_y[i] == 2.5
assert df.user_attrs_train_loss[i] == 3
assert df.system_attrs_foo[i] == "bar"
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_trials_dataframe_with_failure(storage_mode):
# type: (str) -> None
def f(trial):
# type: (optuna.trial.Trial) -> float
x = trial.suggest_int("x", 1, 1)
y = trial.suggest_categorical("y", (2.5,))
trial.set_user_attr("train_loss", 3)
raise ValueError()
return x + y # 3.5
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
study.optimize(f, n_trials=3, catch=(ValueError,))
df = study.trials_dataframe()
# Change index to access rows via trial number.
df.set_index("number", inplace=True, drop=False)
assert len(df) == 3
# non-nested: 6, params: 2, user_attrs: 1 system_attrs: 1
assert len(df.columns) == 10
for i in range(3):
assert df.number[i] == i
assert df.state[i] == "FAIL"
assert df.value[i] is None
assert isinstance(df.datetime_start[i], pd.Timestamp)
assert isinstance(df.datetime_complete[i], pd.Timestamp)
assert isinstance(df.duration[i], pd.Timedelta)
assert df.params_x[i] == 1
assert df.params_y[i] == 2.5
assert df.user_attrs_train_loss[i] == 3
assert "system_attrs_fail_reason" in df.columns
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_create_study(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
# Test creating a new study.
study = optuna.create_study(storage=storage, load_if_exists=False)
# Test `load_if_exists=True` with existing study.
optuna.create_study(study_name=study.study_name, storage=storage, load_if_exists=True)
with pytest.raises(optuna.exceptions.DuplicatedStudyError):
optuna.create_study(study_name=study.study_name, storage=storage, load_if_exists=False)
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_load_study(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
if storage is None:
# `InMemoryStorage` can not be used with `load_study` function.
return
study_name = str(uuid.uuid4())
with pytest.raises(KeyError):
# Test loading an unexisting study.
optuna.study.load_study(study_name=study_name, storage=storage)
# Create a new study.
created_study = optuna.study.create_study(study_name=study_name, storage=storage)
# Test loading an existing study.
loaded_study = optuna.study.load_study(study_name=study_name, storage=storage)
assert created_study._study_id == loaded_study._study_id
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_delete_study(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
# Get storage object because delete_study does not accept None.
storage = optuna.storages.get_storage(storage=storage)
assert storage is not None
# Test deleting a non-existing study.
with pytest.raises(KeyError):
optuna.delete_study("invalid-study-name", storage)
# Test deleting an existing study.
study = optuna.create_study(storage=storage, load_if_exists=False)
optuna.delete_study(study.study_name, storage)
# Test failed to delete the study which is already deleted.
with pytest.raises(KeyError):
optuna.delete_study(study.study_name, storage)
def test_nested_optimization():
# type: () -> None
def objective(trial):
# type: (optuna.trial.Trial) -> float
with pytest.raises(RuntimeError):
trial.study.optimize(lambda _: 0.0, n_trials=1)
return 1.0
study = optuna.create_study()
study.optimize(objective, n_trials=10, catch=())
def test_stop_in_objective() -> None:
def objective(trial: optuna.trial.Trial, threshold_number: int) -> float:
if trial.number >= threshold_number:
trial.study.stop()
return trial.number
# Test stopping the optimization: it should stop once the trial number reaches 4.
study = optuna.create_study()
study.optimize(lambda x: objective(x, 4), n_trials=10)
assert len(study.trials) == 5
# Test calling `optimize` again: it should stop once the trial number reaches 11.
study.optimize(lambda x: objective(x, 11), n_trials=10)
assert len(study.trials) == 12
def test_stop_in_callback() -> None:
def callback(study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:
if trial.number >= 4:
study.stop()
# Test stopping the optimization inside a callback.
study = optuna.create_study()
study.optimize(lambda _: 1.0, n_trials=10, callbacks=[callback])
assert len(study.trials) == 5
def test_stop_n_jobs() -> None:
def callback(study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:
if trial.number >= 4:
study.stop()
study = optuna.create_study()
study.optimize(lambda _: 1.0, n_trials=None, callbacks=[callback], n_jobs=2)
assert 5 <= len(study.trials) <= 6
def test_stop_outside_optimize() -> None:
# Test stopping outside the optimization: it should raise `RuntimeError`.
study = optuna.create_study()
with pytest.raises(RuntimeError):
study.stop()
# Test calling `optimize` after the `RuntimeError` is caught.
study.optimize(lambda _: 1.0, n_trials=1)
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_append_trial(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
assert len(study.trials) == 0
trial_id = study._append_trial(value=0.8)
assert study.trials[0]._trial_id == trial_id
assert len(study.trials) == 1
assert study.best_value == 0.8
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_enqueue_trial_properly_sets_param_values(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
assert len(study.trials) == 0
study.enqueue_trial(params={"x": -5, "y": 5})
study.enqueue_trial(params={"x": -1, "y": 0})
def objective(trial):
# type: (optuna.trial.Trial) -> float
x = trial.suggest_int("x", -10, 10)
y = trial.suggest_int("y", -10, 10)
return x ** 2 + y ** 2
study.optimize(objective, n_trials=2)
t0 = study.trials[0]
assert t0.params["x"] == -5
assert t0.params["y"] == 5
t1 = study.trials[1]
assert t1.params["x"] == -1
assert t1.params["y"] == 0
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_enqueue_trial_with_unfixed_parameters(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
assert len(study.trials) == 0
study.enqueue_trial(params={"x": -5})
def objective(trial):
# type: (optuna.trial.Trial) -> float
x = trial.suggest_int("x", -10, 10)
y = trial.suggest_int("y", -10, 10)
return x ** 2 + y ** 2
study.optimize(objective, n_trials=1)
t = study.trials[0]
assert t.params["x"] == -5
assert -10 <= t.params["y"] <= 10
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_enqueue_trial_with_out_of_range_parameters(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
assert len(study.trials) == 0
study.enqueue_trial(params={"x": 11})
def objective(trial):
# type: (optuna.trial.Trial) -> float
return trial.suggest_int("x", -10, 10)
with pytest.warns(UserWarning):
study.optimize(objective, n_trials=1)
t = study.trials[0]
assert -10 <= t.params["x"] <= 10
# Internal logic might differ when distribution contains a single element.
# Test it explicitly.
with StorageSupplier(storage_mode) as storage:
study = optuna.create_study(storage=storage)
assert len(study.trials) == 0
study.enqueue_trial(params={"x": 11})
def objective(trial):
# type: (optuna.trial.Trial) -> float
return trial.suggest_int("x", 1, 1) # Single element.
with pytest.warns(UserWarning):
study.optimize(objective, n_trials=1)
t = study.trials[0]
assert t.params["x"] == 1
@patch("optuna.study.gc.collect")
def test_optimize_with_gc(collect_mock):
# type: (Mock) -> None
study = optuna.create_study()
study.optimize(func, n_trials=10, gc_after_trial=True)
check_study(study)
assert collect_mock.call_count == 10
@patch("optuna.study.gc.collect")
def test_optimize_without_gc(collect_mock):
# type: (Mock) -> None
study = optuna.create_study()
study.optimize(func, n_trials=10, gc_after_trial=False)
check_study(study)
assert collect_mock.call_count == 0
@pytest.mark.parametrize("n_jobs", [1, 4])
def test_callbacks(n_jobs):
# type: (int) -> None
lock = threading.Lock()
def with_lock(f):
# type: (CallbackFuncType) -> CallbackFuncType
def callback(study, trial):
# type: (optuna.study.Study, optuna.trial.FrozenTrial) -> None
with lock:
f(study, trial)
return callback
study = optuna.create_study()
def objective(trial):
# type: (optuna.trial.Trial) -> float
return trial.suggest_int("x", 1, 1)
# Empty callback list.
study.optimize(objective, callbacks=[], n_trials=10, n_jobs=n_jobs)
# A callback.
values = []
callbacks = [with_lock(lambda study, trial: values.append(trial.value))]
study.optimize(objective, callbacks=callbacks, n_trials=10, n_jobs=n_jobs)
assert values == [1] * 10
# Two callbacks.
values = []
params = []
callbacks = [
with_lock(lambda study, trial: values.append(trial.value)),
with_lock(lambda study, trial: params.append(trial.params)),
]
study.optimize(objective, callbacks=callbacks, n_trials=10, n_jobs=n_jobs)
assert values == [1] * 10
assert params == [{"x": 1}] * 10
# If a trial is failed with an exception and the exception is caught by the study,
# callbacks are invoked.
states = []
callbacks = [with_lock(lambda study, trial: states.append(trial.state))]
study.optimize(
lambda t: 1 / 0,
callbacks=callbacks,
n_trials=10,
n_jobs=n_jobs,
catch=(ZeroDivisionError,),
)
assert states == [optuna.trial.TrialState.FAIL] * 10
# If a trial is failed with an exception and the exception isn't caught by the study,
# callbacks aren't invoked.
states = []
callbacks = [with_lock(lambda study, trial: states.append(trial.state))]
with pytest.raises(ZeroDivisionError):
study.optimize(lambda t: 1 / 0, callbacks=callbacks, n_trials=10, n_jobs=n_jobs, catch=())
assert states == []
@pytest.mark.parametrize("storage_mode", STORAGE_MODES)
def test_get_trials(storage_mode):
# type: (str) -> None
with StorageSupplier(storage_mode) as storage:
storage = optuna.storages.get_storage(storage=storage)
study = optuna.create_study(storage=storage)
study.optimize(lambda t: t.suggest_int("x", 1, 5), n_trials=5)
with patch("copy.deepcopy", wraps=copy.deepcopy) as mock_object:
trials0 = study.get_trials(deepcopy=False)
assert mock_object.call_count == 0
assert len(trials0) == 5
trials1 = study.get_trials(deepcopy=True)
assert mock_object.call_count > 0
assert trials0 == trials1
# `study.trials` is equivalent to `study.get_trials(deepcopy=True)`.
old_count = mock_object.call_count
trials2 = study.trials
assert mock_object.call_count > old_count
assert trials0 == trials2
def test_study_summary_eq_ne():
# type: () -> None
storage = optuna.storages.RDBStorage("sqlite:///:memory:")
optuna.create_study(storage=storage)
study = optuna.create_study(storage=storage)
summaries = study._storage.get_all_study_summaries()
assert len(summaries) == 2
assert summaries[0] == copy.deepcopy(summaries[0])
assert not summaries[0] != copy.deepcopy(summaries[0])
assert not summaries[0] == summaries[1]
assert summaries[0] != summaries[1]
assert not summaries[0] == 1
assert summaries[0] != 1
def test_study_summary_lt_le():
# type: () -> None
storage = optuna.storages.RDBStorage("sqlite:///:memory:")
optuna.create_study(storage=storage)
study = optuna.create_study(storage=storage)
summaries = study._storage.get_all_study_summaries()
assert len(summaries) == 2
summary_0 = summaries[0]
summary_1 = summaries[1]
assert summary_0 < summary_1
assert not summary_1 < summary_0
with pytest.raises(TypeError):
summary_0 < 1
assert summary_0 <= summary_0
assert not summary_1 <= summary_0
with pytest.raises(TypeError):
summary_0 <= 1
# A list of StudySummaries is sortable.
summaries.reverse()
summaries.sort()
assert summaries[0] == summary_0
assert summaries[1] == summary_1
def test_log_completed_trial(capsys: _pytest.capture.CaptureFixture) -> None:
# We need to reconstruct our default handler to properly capture stderr.
optuna.logging._reset_library_root_logger()
optuna.logging.set_verbosity(optuna.logging.INFO)
study = optuna.create_study()
study.optimize(lambda _: 1.0, n_trials=1)
_, err = capsys.readouterr()
assert "Trial 0" in err
optuna.logging.set_verbosity(optuna.logging.WARNING)
study.optimize(lambda _: 1.0, n_trials=1)
_, err = capsys.readouterr()
assert "Trial 1" not in err
optuna.logging.set_verbosity(optuna.logging.DEBUG)
study.optimize(lambda _: 1.0, n_trials=1)
_, err = capsys.readouterr()
assert "Trial 2" in err
def test_log_completed_trial_skip_storage_access() -> None:
study = optuna.create_study()
# Create a trial to retrieve it as the `study.best_trial`.
study.optimize(lambda _: 0.0, n_trials=1)
trial = optuna.Trial(study, study._storage.create_new_trial(study._study_id))
storage = study._storage
with patch.object(storage, "get_best_trial", wraps=storage.get_best_trial) as mock_object:
study._log_completed_trial(trial, 1.0)
# Trial.best_trial and Trial.best_params access storage.
assert mock_object.call_count == 2
optuna.logging.set_verbosity(optuna.logging.WARNING)
with patch.object(storage, "get_best_trial", wraps=storage.get_best_trial) as mock_object:
study._log_completed_trial(trial, 1.0)
assert mock_object.call_count == 0
optuna.logging.set_verbosity(optuna.logging.DEBUG)
with patch.object(storage, "get_best_trial", wraps=storage.get_best_trial) as mock_object:
study._log_completed_trial(trial, 1.0)
assert mock_object.call_count == 2
| 32.199623 | 99 | 0.651626 |
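A minimal sketch of the Optuna workflow the test file above exercises (illustrative only; assumes optuna is installed, and the objective function is a made-up placeholder, not taken from the file):

import optuna

def objective(trial):
    # Hypothetical objective; the suggest-* / report API mirrors the tests above.
    x = trial.suggest_int("x", -10, 10)
    return x ** 2

study = optuna.create_study()
study.optimize(objective, n_trials=10)
df = study.trials_dataframe()  # one row per trial: number, value, state, params, ...
print(study.best_value, study.best_params)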
6ae0041ec06abb5f41acc8d9e0ad54c9727be449 | 39,758 | py | Python | rmgpy/reactionTest.py | Lyle-zhang/RMG-Py | 273eb51fa3c175562056c85d7d61814d5fa2986d | ["MIT"] | null | null | null | rmgpy/reactionTest.py | Lyle-zhang/RMG-Py | 273eb51fa3c175562056c85d7d61814d5fa2986d | ["MIT"] | null | null | null | rmgpy/reactionTest.py | Lyle-zhang/RMG-Py | 273eb51fa3c175562056c85d7d61814d5fa2986d | ["MIT"] | 1 | 2021-08-14T13:47:18.000Z | 2021-08-14T13:47:18.000Z |
#!/usr/bin/env python
# encoding: utf-8 -*-
"""
This module contains unit tests of the rmgpy.reaction module.
"""
import numpy
import unittest
from external.wip import work_in_progress
from rmgpy.species import Species, TransitionState
from rmgpy.reaction import Reaction
from rmgpy.statmech.translation import Translation, IdealGasTranslation
from rmgpy.statmech.rotation import Rotation, LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor
from rmgpy.statmech.vibration import Vibration, HarmonicOscillator
from rmgpy.statmech.torsion import Torsion, HinderedRotor
from rmgpy.statmech.conformer import Conformer
from rmgpy.kinetics import Arrhenius
from rmgpy.thermo import Wilhoit
import rmgpy.constants as constants
################################################################################
class PseudoSpecies:
"""
Can be used in place of a :class:`rmg.species.Species` for isomorphism checks.
PseudoSpecies('a') is isomorphic with PseudoSpecies('A')
but nothing else.
"""
def __init__(self, label):
self.label = label
def __repr__(self):
return "PseudoSpecies('{0}')".format(self.label)
def __str__(self):
return self.label
def isIsomorphic(self, other):
return self.label.lower() == other.label.lower()
class TestReactionIsomorphism(unittest.TestCase):
"""
Contains unit tests of the isomorphism testing of the Reaction class.
"""
def makeReaction(self,reaction_string):
""""
Make a Reaction (containing PseudoSpecies) of from a string like 'Ab=CD'
"""
reactants, products = reaction_string.split('=')
reactants = [PseudoSpecies(i) for i in reactants]
products = [PseudoSpecies(i) for i in products]
return Reaction(reactants=reactants, products=products)
def test1to1(self):
r1 = self.makeReaction('A=B')
self.assertTrue(r1.isIsomorphic(self.makeReaction('a=B')))
self.assertTrue(r1.isIsomorphic(self.makeReaction('b=A')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('B=a'),eitherDirection=False))
self.assertFalse(r1.isIsomorphic(self.makeReaction('A=C')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('A=BB')))
def test1to2(self):
r1 = self.makeReaction('A=BC')
self.assertTrue(r1.isIsomorphic(self.makeReaction('a=Bc')))
self.assertTrue(r1.isIsomorphic(self.makeReaction('cb=a')))
self.assertTrue(r1.isIsomorphic(self.makeReaction('a=cb'),eitherDirection=False))
self.assertFalse(r1.isIsomorphic(self.makeReaction('bc=a'),eitherDirection=False))
self.assertFalse(r1.isIsomorphic(self.makeReaction('a=c')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=c')))
def test2to2(self):
r1 = self.makeReaction('AB=CD')
self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=cd')))
self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=dc'),eitherDirection=False))
self.assertTrue(r1.isIsomorphic(self.makeReaction('dc=ba')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('cd=ab'),eitherDirection=False))
self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=ab')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=cde')))
def test2to3(self):
r1 = self.makeReaction('AB=CDE')
self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=cde')))
self.assertTrue(r1.isIsomorphic(self.makeReaction('ba=edc'),eitherDirection=False))
self.assertTrue(r1.isIsomorphic(self.makeReaction('dec=ba')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('cde=ab'),eitherDirection=False))
self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=abc')))
self.assertFalse(r1.isIsomorphic(self.makeReaction('abe=cde')))
class TestReaction(unittest.TestCase):
"""
Contains unit tests of the Reaction class.
"""
def setUp(self):
"""
A method that is called prior to each unit test in this class.
"""
ethylene = Species(
label = 'C2H4',
conformer = Conformer(
E0 = (44.7127, 'kJ/mol'),
modes = [
IdealGasTranslation(
mass = (28.0313, 'amu'),
),
NonlinearRotor(
inertia = (
[3.41526, 16.6498, 20.065],
'amu*angstrom^2',
),
symmetry = 4,
),
HarmonicOscillator(
frequencies = (
[828.397, 970.652, 977.223, 1052.93, 1233.55, 1367.56, 1465.09, 1672.25, 3098.46, 3111.7, 3165.79, 3193.54],
'cm^-1',
),
),
],
spinMultiplicity = 1,
opticalIsomers = 1,
),
)
hydrogen = Species(
label = 'H',
conformer = Conformer(
E0 = (211.794, 'kJ/mol'),
modes = [
IdealGasTranslation(
mass = (1.00783, 'amu'),
),
],
spinMultiplicity = 2,
opticalIsomers = 1,
),
)
ethyl = Species(
label = 'C2H5',
conformer = Conformer(
E0 = (111.603, 'kJ/mol'),
modes = [
IdealGasTranslation(
mass = (29.0391, 'amu'),
),
NonlinearRotor(
inertia = (
[4.8709, 22.2353, 23.9925],
'amu*angstrom^2',
),
symmetry = 1,
),
HarmonicOscillator(
frequencies = (
[482.224, 791.876, 974.355, 1051.48, 1183.21, 1361.36, 1448.65, 1455.07, 1465.48, 2688.22, 2954.51, 3033.39, 3101.54, 3204.73],
'cm^-1',
),
),
HinderedRotor(
inertia = (1.11481, 'amu*angstrom^2'),
symmetry = 6,
barrier = (0.244029, 'kJ/mol'),
semiclassical = None,
),
],
spinMultiplicity = 2,
opticalIsomers = 1,
),
)
TS = TransitionState(
label = 'TS',
conformer = Conformer(
E0 = (266.694, 'kJ/mol'),
modes = [
IdealGasTranslation(
mass = (29.0391, 'amu'),
),
NonlinearRotor(
inertia = (
[6.78512, 22.1437, 22.2114],
'amu*angstrom^2',
),
symmetry = 1,
),
HarmonicOscillator(
frequencies = (
[412.75, 415.206, 821.495, 924.44, 982.714, 1024.16, 1224.21, 1326.36, 1455.06, 1600.35, 3101.46, 3110.55, 3175.34, 3201.88],
'cm^-1',
),
),
],
spinMultiplicity = 2,
opticalIsomers = 1,
),
frequency = (-750.232, 'cm^-1'),
)
self.reaction = Reaction(
reactants = [hydrogen, ethylene],
products = [ethyl],
kinetics = Arrhenius(
A = (501366000.0, 'cm^3/(mol*s)'),
n = 1.637,
Ea = (4.32508, 'kJ/mol'),
T0 = (1, 'K'),
Tmin = (300, 'K'),
Tmax = (2500, 'K'),
),
transitionState = TS,
)
# CC(=O)O[O]
acetylperoxy = Species(
label='acetylperoxy',
thermo=Wilhoit(Cp0=(4.0*constants.R,"J/(mol*K)"), CpInf=(21.0*constants.R,"J/(mol*K)"), a0=-3.95, a1=9.26, a2=-15.6, a3=8.55, B=(500.0,"K"), H0=(-6.151e+04,"J/mol"), S0=(-790.2,"J/(mol*K)")),
)
# C[C]=O
acetyl = Species(
label='acetyl',
thermo=Wilhoit(Cp0=(4.0*constants.R,"J/(mol*K)"), CpInf=(15.5*constants.R,"J/(mol*K)"), a0=0.2541, a1=-0.4712, a2=-4.434, a3=2.25, B=(500.0,"K"), H0=(-1.439e+05,"J/mol"), S0=(-524.6,"J/(mol*K)")),
)
# [O][O]
oxygen = Species(
label='oxygen',
thermo=Wilhoit(Cp0=(3.5*constants.R,"J/(mol*K)"), CpInf=(4.5*constants.R,"J/(mol*K)"), a0=-0.9324, a1=26.18, a2=-70.47, a3=44.12, B=(500.0,"K"), H0=(1.453e+04,"J/mol"), S0=(-12.19,"J/(mol*K)")),
)
self.reaction2 = Reaction(
reactants=[acetyl, oxygen],
products=[acetylperoxy],
kinetics = Arrhenius(
A = (2.65e12, 'cm^3/(mol*s)'),
n = 0.0,
Ea = (0.0, 'kJ/mol'),
T0 = (1, 'K'),
Tmin = (300, 'K'),
Tmax = (2000, 'K'),
),
)
def testIsIsomerization(self):
"""
Test the Reaction.isIsomerization() method.
"""
isomerization = Reaction(reactants=[Species()], products=[Species()])
association = Reaction(reactants=[Species(),Species()], products=[Species()])
dissociation = Reaction(reactants=[Species()], products=[Species(),Species()])
bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()])
self.assertTrue(isomerization.isIsomerization())
self.assertFalse(association.isIsomerization())
self.assertFalse(dissociation.isIsomerization())
self.assertFalse(bimolecular.isIsomerization())
def testIsAssociation(self):
"""
Test the Reaction.isAssociation() method.
"""
isomerization = Reaction(reactants=[Species()], products=[Species()])
association = Reaction(reactants=[Species(),Species()], products=[Species()])
dissociation = Reaction(reactants=[Species()], products=[Species(),Species()])
bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()])
self.assertFalse(isomerization.isAssociation())
self.assertTrue(association.isAssociation())
self.assertFalse(dissociation.isAssociation())
self.assertFalse(bimolecular.isAssociation())
def testIsDissociation(self):
"""
Test the Reaction.isDissociation() method.
"""
isomerization = Reaction(reactants=[Species()], products=[Species()])
association = Reaction(reactants=[Species(),Species()], products=[Species()])
dissociation = Reaction(reactants=[Species()], products=[Species(),Species()])
bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()])
self.assertFalse(isomerization.isDissociation())
self.assertFalse(association.isDissociation())
self.assertTrue(dissociation.isDissociation())
self.assertFalse(bimolecular.isDissociation())
def testHasTemplate(self):
"""
Test the Reaction.hasTemplate() method.
"""
reactants = self.reaction.reactants[:]
products = self.reaction.products[:]
self.assertTrue(self.reaction.hasTemplate(reactants, products))
self.assertTrue(self.reaction.hasTemplate(products, reactants))
self.assertFalse(self.reaction2.hasTemplate(reactants, products))
self.assertFalse(self.reaction2.hasTemplate(products, reactants))
reactants.reverse()
products.reverse()
self.assertTrue(self.reaction.hasTemplate(reactants, products))
self.assertTrue(self.reaction.hasTemplate(products, reactants))
self.assertFalse(self.reaction2.hasTemplate(reactants, products))
self.assertFalse(self.reaction2.hasTemplate(products, reactants))
reactants = self.reaction2.reactants[:]
products = self.reaction2.products[:]
self.assertFalse(self.reaction.hasTemplate(reactants, products))
self.assertFalse(self.reaction.hasTemplate(products, reactants))
self.assertTrue(self.reaction2.hasTemplate(reactants, products))
self.assertTrue(self.reaction2.hasTemplate(products, reactants))
reactants.reverse()
products.reverse()
self.assertFalse(self.reaction.hasTemplate(reactants, products))
self.assertFalse(self.reaction.hasTemplate(products, reactants))
self.assertTrue(self.reaction2.hasTemplate(reactants, products))
self.assertTrue(self.reaction2.hasTemplate(products, reactants))
def testEnthalpyOfReaction(self):
"""
Test the Reaction.getEnthalpyOfReaction() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Hlist0 = [float(v) for v in ['-146007', '-145886', '-144195', '-141973', '-139633', '-137341', '-135155', '-133093', '-131150', '-129316']]
Hlist = self.reaction2.getEnthalpiesOfReaction(Tlist)
for i in range(len(Tlist)):
self.assertAlmostEqual(Hlist[i] / 1000., Hlist0[i] / 1000., 2)
def testEntropyOfReaction(self):
"""
Test the Reaction.getEntropyOfReaction() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Slist0 = [float(v) for v in ['-156.793', '-156.872', '-153.504', '-150.317', '-147.707', '-145.616', '-143.93', '-142.552', '-141.407', '-140.441']]
Slist = self.reaction2.getEntropiesOfReaction(Tlist)
for i in range(len(Tlist)):
self.assertAlmostEqual(Slist[i], Slist0[i], 2)
def testFreeEnergyOfReaction(self):
"""
Test the Reaction.getFreeEnergyOfReaction() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Glist0 = [float(v) for v in ['-114648', '-83137.2', '-52092.4', '-21719.3', '8073.53', '37398.1', '66346.8', '94990.6', '123383', '151565']]
Glist = self.reaction2.getFreeEnergiesOfReaction(Tlist)
for i in range(len(Tlist)):
self.assertAlmostEqual(Glist[i] / 1000., Glist0[i] / 1000., 2)
def testEquilibriumConstantKa(self):
"""
Test the Reaction.getEquilibriumConstant() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Kalist0 = [float(v) for v in ['8.75951e+29', '7.1843e+10', '34272.7', '26.1877', '0.378696', '0.0235579', '0.00334673', '0.000792389', '0.000262777', '0.000110053']]
Kalist = self.reaction2.getEquilibriumConstants(Tlist, type='Ka')
for i in range(len(Tlist)):
self.assertAlmostEqual(Kalist[i] / Kalist0[i], 1.0, 4)
def testEquilibriumConstantKc(self):
"""
Test the Reaction.getEquilibriumConstant() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Kclist0 = [float(v) for v in ['1.45661e+28', '2.38935e+09', '1709.76', '1.74189', '0.0314866', '0.00235045', '0.000389568', '0.000105413', '3.93273e-05', '1.83006e-05']]
Kclist = self.reaction2.getEquilibriumConstants(Tlist, type='Kc')
for i in range(len(Tlist)):
self.assertAlmostEqual(Kclist[i] / Kclist0[i], 1.0, 4)
def testEquilibriumConstantKp(self):
"""
Test the Reaction.getEquilibriumConstant() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
Kplist0 = [float(v) for v in ['8.75951e+24', '718430', '0.342727', '0.000261877', '3.78696e-06', '2.35579e-07', '3.34673e-08', '7.92389e-09', '2.62777e-09', '1.10053e-09']]
Kplist = self.reaction2.getEquilibriumConstants(Tlist, type='Kp')
for i in range(len(Tlist)):
self.assertAlmostEqual(Kplist[i] / Kplist0[i], 1.0, 4)
def testStoichiometricCoefficient(self):
"""
Test the Reaction.getStoichiometricCoefficient() method.
"""
for reactant in self.reaction.reactants:
self.assertEqual(self.reaction.getStoichiometricCoefficient(reactant), -1)
for product in self.reaction.products:
self.assertEqual(self.reaction.getStoichiometricCoefficient(product), 1)
for reactant in self.reaction2.reactants:
self.assertEqual(self.reaction.getStoichiometricCoefficient(reactant), 0)
for product in self.reaction2.products:
self.assertEqual(self.reaction.getStoichiometricCoefficient(product), 0)
def testRateCoefficient(self):
"""
Test the Reaction.getRateCoefficient() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
self.assertAlmostEqual(self.reaction.getRateCoefficient(T, P) / self.reaction.kinetics.getRateCoefficient(T), 1.0, 6)
def testGenerateReverseRateCoefficient(self):
"""
Test the Reaction.generateReverseRateCoefficient() method.
"""
Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
P = 1e5
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
for T in Tlist:
kr0 = self.reaction2.getRateCoefficient(T, P) / self.reaction2.getEquilibriumConstant(T)
kr = reverseKinetics.getRateCoefficient(T)
self.assertAlmostEqual(kr0 / kr, 1.0, 0)
def testGenerateReverseRateCoefficientArrhenius(self):
"""
Test the Reaction.generateReverseRateCoefficient() method works for the Arrhenius format.
"""
original_kinetics = Arrhenius(
A = (2.65e12, 'cm^3/(mol*s)'),
n = 0.0,
Ea = (0.0, 'kJ/mol'),
T0 = (1, 'K'),
Tmin = (300, 'K'),
Tmax = (2000, 'K'),
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(original_kinetics.Tmin.value_si, original_kinetics.Tmax.value_si, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
@work_in_progress
def testGenerateReverseRateCoefficientArrheniusEP(self):
"""
Test the Reaction.generateReverseRateCoefficient() method works for the ArrheniusEP format.
"""
from rmgpy.kinetics import ArrheniusEP
original_kinetics = ArrheniusEP(
A = (2.65e12, 'cm^3/(mol*s)'),
n = 0.0,
alpha = 0.5,
E0 = (41.84, 'kJ/mol'),
Tmin = (300, 'K'),
Tmax = (2000, 'K'),
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(original_kinetics.Tmin, original_kinetics.Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientPDepArrhenius(self):
"""
Test the Reaction.generateReverseRateCoefficient() method works for the PDepArrhenius format.
"""
from rmgpy.kinetics import PDepArrhenius
arrhenius0 = Arrhenius(
A = (1.0e6,"s^-1"),
n = 1.0,
Ea = (10.0,"kJ/mol"),
T0 = (300.0,"K"),
Tmin = (300.0,"K"),
Tmax = (2000.0,"K"),
comment = """This data is completely made up""",
)
arrhenius1 = Arrhenius(
A = (1.0e12,"s^-1"),
n = 1.0,
Ea = (20.0,"kJ/mol"),
T0 = (300.0,"K"),
Tmin = (300.0,"K"),
Tmax = (2000.0,"K"),
comment = """This data is completely made up""",
)
pressures = numpy.array([0.1, 10.0])
arrhenius = [arrhenius0, arrhenius1]
Tmin = 300.0
Tmax = 2000.0
Pmin = 0.1
Pmax = 10.0
comment = """This data is completely made up"""
original_kinetics = PDepArrhenius(
pressures = (pressures,"bar"),
arrhenius = arrhenius,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
comment = comment,
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientMultiArrhenius(self):
"""
Test the Reaction.generateReverseRateCoefficient() method works for the MultiArrhenius format.
"""
from rmgpy.kinetics import MultiArrhenius
pressures = numpy.array([0.1, 10.0])
Tmin = 300.0
Tmax = 2000.0
Pmin = 0.1
Pmax = 10.0
comment = """This data is completely made up"""
arrhenius = [
Arrhenius(
A = (9.3e-14,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (4740*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
Arrhenius(
A = (1.4e-9,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (11200*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
]
original_kinetics = MultiArrhenius(
arrhenius = arrhenius,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientMultiPDepArrhenius(self):
"""
Test the Reaction.generateReverseRateCoefficient() method works for the MultiPDepArrhenius format.
"""
from rmgpy.kinetics import PDepArrhenius, MultiPDepArrhenius
Tmin = 350.
Tmax = 1500.
Pmin = 1e-1
Pmax = 1e1
pressures = numpy.array([1e-1,1e1])
comment = 'CH3 + C2H6 <=> CH4 + C2H5 (Baulch 2005)'
arrhenius = [
PDepArrhenius(
pressures = (pressures,"bar"),
arrhenius = [
Arrhenius(
A = (9.3e-16,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (4740*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
Arrhenius(
A = (9.3e-14,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (4740*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
],
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
comment = comment,
),
PDepArrhenius(
pressures = (pressures,"bar"),
arrhenius = [
Arrhenius(
A = (1.4e-11,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (11200*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
Arrhenius(
A = (1.4e-9,"cm^3/(molecule*s)"),
n = 0.0,
Ea = (11200*constants.R*0.001,"kJ/mol"),
T0 = (1,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
comment = comment,
),
],
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
comment = comment,
),
]
original_kinetics = MultiPDepArrhenius(
arrhenius = arrhenius,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
comment = comment,
)
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientThirdBody(self):
"""
Test the Reaction.generateReverseRateCoefficient() method works for the ThirdBody format.
"""
from rmgpy.kinetics import ThirdBody
arrheniusLow = Arrhenius(
A = (2.62e+33,"cm^6/(mol^2*s)"),
n = -4.76,
Ea = (10.21,"kJ/mol"),
T0 = (1,"K"),
)
efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2}
Tmin = 300.
Tmax = 2000.
Pmin = 0.01
Pmax = 100.
comment = """H + CH3 -> CH4"""
thirdBody = ThirdBody(
arrheniusLow = arrheniusLow,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
efficiencies = efficiencies,
comment = comment,
)
original_kinetics = thirdBody
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientLindemann(self):
"""
Test the Reaction.generateReverseRateCoefficient() method works for the Lindemann format.
"""
from rmgpy.kinetics import Lindemann
arrheniusHigh = Arrhenius(
A = (1.39e+16,"cm^3/(mol*s)"),
n = -0.534,
Ea = (2.243,"kJ/mol"),
T0 = (1,"K"),
)
arrheniusLow = Arrhenius(
A = (2.62e+33,"cm^6/(mol^2*s)"),
n = -4.76,
Ea = (10.21,"kJ/mol"),
T0 = (1,"K"),
)
efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2}
Tmin = 300.
Tmax = 2000.
Pmin = 0.01
Pmax = 100.
comment = """H + CH3 -> CH4"""
lindemann = Lindemann(
arrheniusHigh = arrheniusHigh,
arrheniusLow = arrheniusLow,
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
efficiencies = efficiencies,
comment = comment,
)
original_kinetics = lindemann
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testGenerateReverseRateCoefficientTroe(self):
"""
Test the Reaction.generateReverseRateCoefficient() method works for the Troe format.
"""
from rmgpy.kinetics import Troe
arrheniusHigh = Arrhenius(
A = (1.39e+16,"cm^3/(mol*s)"),
n = -0.534,
Ea = (2.243,"kJ/mol"),
T0 = (1,"K"),
)
arrheniusLow = Arrhenius(
A = (2.62e+33,"cm^6/(mol^2*s)"),
n = -4.76,
Ea = (10.21,"kJ/mol"),
T0 = (1,"K"),
)
alpha = 0.783
T3 = 74
T1 = 2941
T2 = 6964
efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2}
Tmin = 300.
Tmax = 2000.
Pmin = 0.01
Pmax = 100.
comment = """H + CH3 -> CH4"""
troe = Troe(
arrheniusHigh = arrheniusHigh,
arrheniusLow = arrheniusLow,
alpha = alpha,
T3 = (T3,"K"),
T1 = (T1,"K"),
T2 = (T2,"K"),
Tmin = (Tmin,"K"),
Tmax = (Tmax,"K"),
Pmin = (Pmin,"bar"),
Pmax = (Pmax,"bar"),
efficiencies = efficiencies,
comment = comment,
)
original_kinetics = troe
self.reaction2.kinetics = original_kinetics
reverseKinetics = self.reaction2.generateReverseRateCoefficient()
self.reaction2.kinetics = reverseKinetics
# reverse reactants, products to ensure Keq is correctly computed
self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants
reversereverseKinetics = self.reaction2.generateReverseRateCoefficient()
# check that reverting the reverse yields the original
Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64)
P = 1e5
for T in Tlist:
korig = original_kinetics.getRateCoefficient(T, P)
krevrev = reversereverseKinetics.getRateCoefficient(T, P)
self.assertAlmostEqual(korig / krevrev, 1.0, 0)
def testTSTCalculation(self):
"""
A test of the transition state theory k(T) calculation function,
using the reaction H + C2H4 -> C2H5.
"""
Tlist = 1000.0/numpy.arange(0.4, 3.35, 0.01)
klist = numpy.array([self.reaction.calculateTSTRateCoefficient(T) for T in Tlist])
arrhenius = Arrhenius().fitToData(Tlist, klist, kunits='m^3/(mol*s)')
klist2 = numpy.array([arrhenius.getRateCoefficient(T) for T in Tlist])
# Check that the correct Arrhenius parameters are returned
self.assertAlmostEqual(arrhenius.A.value_si, 2265.2488, delta=1e-2)
self.assertAlmostEqual(arrhenius.n.value_si, 1.45419, delta=1e-4)
self.assertAlmostEqual(arrhenius.Ea.value_si, 6645.24, delta=1e-2)
# Check that the fit is satisfactory (defined here as always within 5%)
for i in range(len(Tlist)):
self.assertAlmostEqual(klist[i], klist2[i], delta=5e-2 * klist[i])
def testPickle(self):
"""
Test that a Reaction object can be successfully pickled and
unpickled with no loss of information.
"""
import cPickle
reaction = cPickle.loads(cPickle.dumps(self.reaction,-1))
self.assertEqual(len(self.reaction.reactants), len(reaction.reactants))
self.assertEqual(len(self.reaction.products), len(reaction.products))
for reactant0, reactant in zip(self.reaction.reactants, reaction.reactants):
self.assertAlmostEqual(reactant0.conformer.E0.value_si / 1e6, reactant.conformer.E0.value_si / 1e6, 2)
self.assertEqual(reactant0.conformer.E0.units, reactant.conformer.E0.units)
for product0, product in zip(self.reaction.products, reaction.products):
self.assertAlmostEqual(product0.conformer.E0.value_si / 1e6, product.conformer.E0.value_si / 1e6, 2)
self.assertEqual(product0.conformer.E0.units, product.conformer.E0.units)
self.assertAlmostEqual(self.reaction.transitionState.conformer.E0.value_si / 1e6, reaction.transitionState.conformer.E0.value_si / 1e6, 2)
self.assertEqual(self.reaction.transitionState.conformer.E0.units, reaction.transitionState.conformer.E0.units)
self.assertAlmostEqual(self.reaction.transitionState.frequency.value_si, reaction.transitionState.frequency.value_si, 2)
self.assertEqual(self.reaction.transitionState.frequency.units, reaction.transitionState.frequency.units)
self.assertAlmostEqual(self.reaction.kinetics.A.value_si, reaction.kinetics.A.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.n.value_si, reaction.kinetics.n.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.T0.value_si, reaction.kinetics.T0.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.Ea.value_si, reaction.kinetics.Ea.value_si, delta=1e-6)
self.assertEqual(self.reaction.kinetics.comment, reaction.kinetics.comment)
self.assertEqual(self.reaction.duplicate, reaction.duplicate)
self.assertEqual(self.reaction.degeneracy, reaction.degeneracy)
def testOutput(self):
"""
Test that a Reaction object can be successfully reconstructed
from its repr() output with no loss of information.
"""
exec('reaction = %r' % (self.reaction))
self.assertEqual(len(self.reaction.reactants), len(reaction.reactants))
self.assertEqual(len(self.reaction.products), len(reaction.products))
for reactant0, reactant in zip(self.reaction.reactants, reaction.reactants):
self.assertAlmostEqual(reactant0.conformer.E0.value_si / 1e6, reactant.conformer.E0.value_si / 1e6, 2)
self.assertEqual(reactant0.conformer.E0.units, reactant.conformer.E0.units)
for product0, product in zip(self.reaction.products, reaction.products):
self.assertAlmostEqual(product0.conformer.E0.value_si / 1e6, product.conformer.E0.value_si / 1e6, 2)
self.assertEqual(product0.conformer.E0.units, product.conformer.E0.units)
self.assertAlmostEqual(self.reaction.transitionState.conformer.E0.value_si / 1e6, reaction.transitionState.conformer.E0.value_si / 1e6, 2)
self.assertEqual(self.reaction.transitionState.conformer.E0.units, reaction.transitionState.conformer.E0.units)
self.assertAlmostEqual(self.reaction.transitionState.frequency.value_si, reaction.transitionState.frequency.value_si, 2)
self.assertEqual(self.reaction.transitionState.frequency.units, reaction.transitionState.frequency.units)
self.assertAlmostEqual(self.reaction.kinetics.A.value_si, reaction.kinetics.A.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.n.value_si, reaction.kinetics.n.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.T0.value_si, reaction.kinetics.T0.value_si, delta=1e-6)
self.assertAlmostEqual(self.reaction.kinetics.Ea.value_si, reaction.kinetics.Ea.value_si, delta=1e-6)
self.assertEqual(self.reaction.kinetics.comment, reaction.kinetics.comment)
self.assertEqual(self.reaction.duplicate, reaction.duplicate)
self.assertEqual(self.reaction.degeneracy, reaction.degeneracy)
################################################################################
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| 42.567452 | 208 | 0.566452 |
ae5d278f6c2333d46da84a2f398579bd3e3fda8c | 6,087 | py | Python | ignoredialog.py | StinGer-ShoGuN/PySpy | 14b1c93baff6a6b93588ca36f9754a3292e12658 | ["MIT"] | null | null | null | ignoredialog.py | StinGer-ShoGuN/PySpy | 14b1c93baff6a6b93588ca36f9754a3292e12658 | ["MIT"] | null | null | null | ignoredialog.py | StinGer-ShoGuN/PySpy | 14b1c93baff6a6b93588ca36f9754a3292e12658 | ["MIT"] | null | null | null |
# !/usr/local/bin/python3.6
# MIT licensed
# Copyright (c) 2018 White Russsian
# Github: <https://github.com/Eve-PySpy/PySpy>**********************
''' Dialog to view and remove entities from PySpy's list of ignored
characters, corporations and alliances.
'''
# **********************************************************************
import logging
import wx
from wx.lib.mixins.listctrl import CheckListCtrlMixin, ListCtrlAutoWidthMixin
import config
import sortarray
import statusmsg
# cSpell Checker - Correct Words****************************************
# // cSpell:words russsian, ccp's, pyperclip, chkversion, clpbd, gui
# **********************************************************************
Logger = logging.getLogger(__name__)
# Example call: Logger.info("Something badhappened", exc_info=True) ****
class CheckListCtrl(wx.ListCtrl, CheckListCtrlMixin, ListCtrlAutoWidthMixin):
def __init__(self, parent):
wx.ListCtrl.__init__(self, parent, wx.ID_ANY, style=wx.LC_REPORT |
wx.SUNKEN_BORDER)
CheckListCtrlMixin.__init__(self)
ListCtrlAutoWidthMixin.__init__(self)
class IgnoreDialog(wx.Frame):
def __init__(self, parent, *args, **kwds):
kwds["style"] = (kwds.get("style", 0) | wx.CAPTION | wx.CLIP_CHILDREN |
wx.CLOSE_BOX | wx.FRAME_FLOAT_ON_PARENT | wx.RESIZE_BORDER)
wx.Frame.__init__(self, parent, *args, **kwds)
self.Font = self.Font.Scaled(config.OPTIONS_OBJECT.Get("FontScale", 1))
self.SetName("IgnoreDialog")
self.SetSize((400, 300))
self.ignoredList = CheckListCtrl(self)
self.ignoredList.InsertColumn(0, 'Name', width=180)
self.ignoredList.InsertColumn(1, 'ID', width=0)
self.ignoredList.InsertColumn(2, 'Type')
self.buttonPanel = wx.Panel(self, wx.ID_ANY)
self.appBtn = wx.Button(self.buttonPanel, wx.ID_OK, "Delete Selected Entries")
self.cnclBtn = wx.Button(self.buttonPanel, wx.ID_CANCEL, "Cancel Changes")
self.Bind(wx.EVT_BUTTON, self.OnApply, id=self.appBtn.GetId())
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=self.cnclBtn.GetId())
self.Bind(wx.EVT_CHAR_HOOK, self.OnHook)
self.ignored_entities = config.OPTIONS_OBJECT.Get("ignoredList", default=[])
self._populateList()
self.__set_properties()
self.__do_layout()
if config.OPTIONS_OBJECT.Get("StayOnTop", True):
self.Parent.ToggleWindowStyle(wx.STAY_ON_TOP)
self.ToggleWindowStyle(wx.STAY_ON_TOP)
def __set_properties(self):
self.SetTitle("Review Ignored Entities")
# Colour Scheme Dictionaries
self.dark_dict = config.DARK_MODE
self.normal_dict = config.NORMAL_MODE
# Colour Scheme
if not config.OPTIONS_OBJECT.Get("DarkMode", False):
self.bg_colour = self.normal_dict["BG"]
self.txt_colour = self.normal_dict["TXT"]
self.lne_colour = self.normal_dict["LNE"]
self.hl1_colour = self.normal_dict["HL1"]
else:
self.bg_colour = self.dark_dict["BG"]
self.txt_colour = self.dark_dict["TXT"]
self.lne_colour = self.dark_dict["LNE"]
self.hl1_colour = self.dark_dict["HL1"]
# Set default colors
self.SetBackgroundColour(self.bg_colour)
self.SetForegroundColour(self.txt_colour)
self.ignoredList.SetBackgroundColour(self.bg_colour)
self.ignoredList.SetForegroundColour(self.txt_colour)
# Window icon
icon = wx.Icon()
icon.CopyFromBitmap(wx.Bitmap(config.ICON_FILE, wx.BITMAP_TYPE_ANY))
self.SetIcon(icon)
def __do_layout(self):
main = wx.BoxSizer(wx.VERTICAL)
buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
instrLbl = wx.StaticText(self, wx.ID_ANY, "Select entities to be removed from ignore list:", style=wx.ALIGN_LEFT)
main.Add(instrLbl, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 10)
main.Add(self.ignoredList, 1, wx.ALL | wx.EXPAND, 10)
buttonSizer.Add(self.appBtn, 1, wx.RIGHT, 5)
buttonSizer.Add(self.cnclBtn, 1, wx.LEFT, 5)
self.buttonPanel.SetSizer(buttonSizer)
if config.wx_minor < 1:
main.Add(self.buttonPanel, 0, wx.ALIGN_BOTTOM | wx.BOTTOM | wx.EXPAND | wx.LEFT | wx.RIGHT, 10)
else:
main.Add(self.buttonPanel, 0, wx.BOTTOM | wx.EXPAND | wx.LEFT | wx.RIGHT, 10)
self.SetSizer(main)
self.Layout()
self.Centre()
def _populateList(self):
idx = 0
if self.ignored_entities == []:
return
if len(self.ignored_entities) > 1:
self.ignored_entities = sortarray.sort_array(self.ignored_entities, 2, 1)
for i in self.ignored_entities:
index = self.ignoredList.InsertItem(idx, i[1])
self.ignoredList.SetItem(index, 1, str(i[0]))
self.ignoredList.SetItem(index, 2, i[2])
idx += 1
def OnHook(self, event):
if event.GetKeyCode() == wx.WXK_ESCAPE:
self.OnCancel(event)
if event.GetKeyCode() == wx.WXK_RETURN:
self.OnApply(event)
else:
event.Skip()
def OnApply(self, event):
num = self.ignoredList.GetItemCount()
for i in range(num):
if self.ignoredList.IsChecked(i):
id = int(self.ignoredList.GetItemText(i, 1))
n = 0
for r in self.ignored_entities:
if r[0] == id:
del self.ignored_entities[n]
n += 1
config.OPTIONS_OBJECT.Set("ignoredList", self.ignored_entities)
self.Parent.updateList(config.OPTIONS_OBJECT.Get("outlist"))
self.Close()
def OnCancel(self, event):
if config.OPTIONS_OBJECT.Get("StayOnTop", True):
self.Parent.ToggleWindowStyle(wx.STAY_ON_TOP)
self.Close()
def showIgnoreDialog(parent, evt=None):
app = wx.App(False)
frame = IgnoreDialog(parent=parent)
app.SetTopWindow(frame)
frame.Show()
app.MainLoop()
| 38.770701 | 121 | 0.617546 |
6d7d207975aa8f93e264abbeee4e37969f9d553f | 393 | py | Python | todjando/wsgi.py | jerhow/todjando | 38d9039ec81425e07f7e031c3fcd1e7c31a40d29 | ["MIT"] | null | null | null | todjando/wsgi.py | jerhow/todjando | 38d9039ec81425e07f7e031c3fcd1e7c31a40d29 | ["MIT"] | null | null | null | todjando/wsgi.py | jerhow/todjando | 38d9039ec81425e07f7e031c3fcd1e7c31a40d29 | ["MIT"] | null | null | null |
"""
WSGI config for todjando project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'todjando.settings')
application = get_wsgi_application()
| 23.117647 | 78 | 0.78626 |
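A short usage note for the WSGI module above: the module-level `application` callable is what a WSGI server imports. A minimal sketch (the server choice and port are assumptions, not part of the project):

# With a production server such as gunicorn (assuming it is installed):
#   gunicorn todjando.wsgi:application
# Or, for local testing only, the stdlib reference server:
from wsgiref.simple_server import make_server
from todjando.wsgi import application  # importing this sets DJANGO_SETTINGS_MODULE

httpd = make_server("127.0.0.1", 8000, application)
httpd.serve_forever()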
903db9a9ddfc85fe40e096e565bf82aba1d383a3 | 1,946 | py | Python | aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/DescribeCustomMetricListRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | ["Apache-2.0"] | null | null | null | aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/DescribeCustomMetricListRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | ["Apache-2.0"] | null | null | null | aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/DescribeCustomMetricListRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeCustomMetricListRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cms', '2019-01-01', 'DescribeCustomMetricList','cms')
def get_GroupId(self):
return self.get_query_params().get('GroupId')
def set_GroupId(self,GroupId):
self.add_query_param('GroupId',GroupId)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_MetricName(self):
return self.get_query_params().get('MetricName')
def set_MetricName(self,MetricName):
self.add_query_param('MetricName',MetricName)
def get_Dimension(self):
return self.get_query_params().get('Dimension')
def set_Dimension(self,Dimension):
self.add_query_param('Dimension',Dimension)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_Md5(self):
return self.get_query_params().get('Md5')
def set_Md5(self,Md5):
self.add_query_param('Md5',Md5)
| 32.433333 | 83 | 0.755396 |
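A minimal sketch of how a request class like the one above is typically sent with the core SDK client (credentials, region and metric name below are placeholders, not taken from the file):

from aliyunsdkcore.client import AcsClient
from aliyunsdkcms.request.v20190101.DescribeCustomMetricListRequest import DescribeCustomMetricListRequest

client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
request = DescribeCustomMetricListRequest()
request.set_MetricName("my_custom_metric")
request.set_PageSize(30)
response = client.do_action_with_exception(request)  # raw JSON bytes on success
print(response)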
c2ac64c028ccfa837d8d90119da88bc174c623ef | 3,290 | py | Python | tests/generate_test_pipelines.py | ramitsurana/zenml | 6994ea7ec0a38e9495306d55c3069107cdd5e65f | ["Apache-2.0"] | 1 | 2021-08-13T03:07:53.000Z | 2021-08-13T03:07:53.000Z | tests/generate_test_pipelines.py | ramitsurana/zenml | 6994ea7ec0a38e9495306d55c3069107cdd5e65f | ["Apache-2.0"] | null | null | null | tests/generate_test_pipelines.py | ramitsurana/zenml | 6994ea7ec0a38e9495306d55c3069107cdd5e65f | ["Apache-2.0"] | null | null | null |
# Copyright (c) maiot GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
from pathlib import Path
import zenml
from zenml.datasources import CSVDatasource
from zenml.exceptions import AlreadyExistsException
from zenml.logger import get_logger
from zenml.pipelines import TrainingPipeline
from zenml.repo import GlobalConfig
from zenml.repo import Repository
from zenml.steps.preprocesser import StandardPreprocesser
from zenml.steps.split import CategoricalDomainSplit
from zenml.steps.trainer import TFFeedForwardTrainer
from zenml.utils import path_utils
logger = get_logger(__name__)
# reset pipeline root to redirect to tests so that it writes the yamls there
ZENML_ROOT = str(Path(zenml.__path__[0]).parent)
TEST_ROOT = os.path.join(ZENML_ROOT, "tests")
# Set analytics to false BEFORE init_repo
global_config = GlobalConfig.get_instance()
global_config.set_analytics_opt_in(False)
Repository.init_repo(TEST_ROOT, analytics_opt_in=False)
pipeline_root = os.path.join(TEST_ROOT, "pipelines")
csv_root = os.path.join(TEST_ROOT, "test_data")
image_root = os.path.join(csv_root, "images")
repo: Repository = Repository.get_instance()
if path_utils.is_dir(pipeline_root):
path_utils.rm_dir(pipeline_root)
repo.zenml_config.set_pipelines_dir(pipeline_root)
try:
for i in range(1, 6):
training_pipeline = TrainingPipeline(name='csvtest{0}'.format(i))
try:
# Add a datasource. This will automatically track and version it.
ds = CSVDatasource(name='my_csv_datasource',
path=os.path.join(csv_root, "my_dataframe.csv"))
except AlreadyExistsException:
ds = repo.get_datasource_by_name("my_csv_datasource")
training_pipeline.add_datasource(ds)
# Add a split
training_pipeline.add_split(CategoricalDomainSplit(
categorical_column="name",
split_map={'train': ["arnold"],
'eval': ["lülük"],
'test': ["nicholas"]}))
# Add a preprocessing unit
training_pipeline.add_preprocesser(
StandardPreprocesser(
features=["name", "age"],
labels=['gpa'],
overwrite={'gpa': {
'transform': [
{'method': 'no_transform', 'parameters': {}}]}}
))
# Add a trainer
training_pipeline.add_trainer(TFFeedForwardTrainer(
batch_size=1,
loss='binary_crossentropy',
last_activation='sigmoid',
output_units=1,
metrics=['accuracy'],
epochs=i))
# Run the pipeline locally
training_pipeline.run()
except Exception as e:
logger.error(e)
| 35 | 79 | 0.682675 |
7cc9e77c46d752ae7017078317b72fc159a5e5eb | 823 | py | Python | p2p/forms.py | kutuhal/oracle-r12-accounting | 4f21368aa3ecbc6c645691f1917c0df6c6f2cbf2 | [
"BSD-3-Clause"
] | null | null | null | p2p/forms.py | kutuhal/oracle-r12-accounting | 4f21368aa3ecbc6c645691f1917c0df6c6f2cbf2 | [
"BSD-3-Clause"
] | null | null | null | p2p/forms.py | kutuhal/oracle-r12-accounting | 4f21368aa3ecbc6c645691f1917c0df6c6f2cbf2 | [
"BSD-3-Clause"
] | null | null | null | from django import forms
class P2PForm(forms.Form):
ITEM_TYPES = ( ('Expense','Expense'), ('Inventory','Inventory'))
YES_OR_NO = ((True, 'Yes'), (False, 'No'))
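    # Note: a plain ChoiceField hands the submitted value back as the string 'True'
    # or 'False', not a Python bool, so callers that need a real boolean have to
    # coerce it (or switch to TypedChoiceField with coerce=...).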
item_type = forms.ChoiceField( label = 'Item Type' ,initial='Expense',
choices=ITEM_TYPES,
widget = forms.Select( attrs={'class': 'form-control'}))
period_end_accrual = forms.ChoiceField(label = 'Period End Accrual', initial=False, required= False,
choices= YES_OR_NO,
widget = forms.Select (attrs={'class': 'form-control'},
))
allow_recon_accounting = forms.ChoiceField(label = 'Allow Reconciliation Accounting', initial=False, required= False,
choices= YES_OR_NO,
widget = forms.Select( attrs={'class': 'form-control'} ))
| 48.411765 | 124 | 0.606318 |
55b5b304aed17ce07ec68a8ff6b39b11eb134806 | 656 | py | Python | tools/create_classes.py | jpsantos-mf/ezdxf | 2b542a551b2cfc3c0920a5dbf302ff58cea90fbd | [
"MIT"
] | 1 | 2021-06-05T09:15:15.000Z | 2021-06-05T09:15:15.000Z | tools/create_classes.py | jpsantos-mf/ezdxf | 2b542a551b2cfc3c0920a5dbf302ff58cea90fbd | [
"MIT"
] | null | null | null | tools/create_classes.py | jpsantos-mf/ezdxf | 2b542a551b2cfc3c0920a5dbf302ff58cea90fbd | [
"MIT"
] | null | null | null | from pathlib import Path
import ezdxf
FILENAME = Path('xxx.dxf')
doc = ezdxf.readfile(FILENAME) if FILENAME.exists() else ezdxf.new('R2018')
FMT = """ '{name}': ['{cpp}', '{app}', {flags}, {proxy}, {entity}],\n"""
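# The generated class_definitions.txt is a Python dict literal named CLASSES with one
# entry per DXF class: name -> [cpp_class_name, app_name, flags, was_a_proxy,
# is_an_entity], matching the placeholders in FMT above.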
with open('class_definitions.txt', mode='wt') as f:
f.write('CLASSES = {\n')
for cls in doc.classes:
f.write(FMT.format(
name=cls.dxf.name,
cpp=cls.dxf.cpp_class_name,
app=cls.dxf.app_name,
flags=cls.get_dxf_attrib('flags', 0),
proxy=cls.get_dxf_attrib('was_a_proxy', 0),
entity=cls.get_dxf_attrib('is_an_entity', 0),
))
f.write('}\n')
| 31.238095 | 75 | 0.579268 |
4721ac1127ea0b4f0336b616720b14c5a59c7e11 | 12,092 | py | Python | build_model_from_species/src/build_model_from_species_BB/assets/build_model_from_species/pypath_functions.py | PerMedCoE/BuildingBlocks | 975780dee7cd6f519238933555b01ed1c46768ee | [
"Apache-2.0"
] | 1 | 2022-02-14T11:14:59.000Z | 2022-02-14T11:14:59.000Z | build_model_from_species/src/build_model_from_species_BB/assets/build_model_from_species/pypath_functions.py | PerMedCoE/BuildingBlocks | 975780dee7cd6f519238933555b01ed1c46768ee | [
"Apache-2.0"
] | null | null | null | build_model_from_species/src/build_model_from_species_BB/assets/build_model_from_species/pypath_functions.py | PerMedCoE/BuildingBlocks | 975780dee7cd6f519238933555b01ed1c46768ee | [
"Apache-2.0"
] | null | null | null | import omnipath as op
from pypath.utils import mapping
import igraph
import itertools
import os
def generate_dict(gene_list, graph):
gene_dict = {}
for gene in gene_list:
        mapped = list(mapping.map_name(gene, 'genesymbol', 'uniprot'))
        if len(mapped) != 0:
            name1 = mapped[0]
            name2 = list(mapping.map_name(name1, 'uniprot', 'genesymbol'))[0]
            try:
                graph.vs.find(name=name1)
                gene_dict[name2] = name1
            except:
                try:
                    name2 = list(mapping.map_name(gene, 'uniprot', 'genesymbol'))[0]
                    name1 = list(mapping.map_name(name2, 'genesymbol', 'uniprot'))[0]
                    graph.vs.find(name=name1)
                    gene_dict[name2] = name1
                except:
                    print(name1, " not present in the databases")
        else:
            # no uniprot mapping found for this symbol
            print("Can't find genesymbol: ", gene, " try to look on genecards for other names")
return gene_dict
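# Minimal usage sketch (gene symbols and UniProt IDs below are illustrative; 'graph'
# is the igraph network loaded from pypath):
#   gene_dict = generate_dict(['TP53', 'EGFR'], graph)
#   # -> e.g. {'TP53': 'P04637', 'EGFR': 'P00533'} for symbols found in the network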
# sometimes the function above does not work; here is an alternative that queries the omnipath database directly:
def get_code_from_annotations(gene_list):
HPA_compare = op.requests.Annotations.get(
proteins=gene_list,
resources='HPA_tissue'
)
genes_code = HPA_compare.groupby(['uniprot', 'genesymbol']).size().reset_index()
dict_gene_code = {}
for i in range(len(genes_code.index)):
# print(genes_code.at[i, 'uniprot'], genes_code.at[i, 'genesymbol'])
dict_gene_code[genes_code.at[i, 'genesymbol']] = genes_code.at[i, 'uniprot']
return dict_gene_code
def load_network_from_pickle(pw_legacy, pickle):
path_to_pickle = os.path.isfile(pickle)
    if not path_to_pickle:
print("Path error: No pickle file found")
return
pw_legacy.init_network(pickle_file=pickle)
return pw_legacy.graph
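# Typical use (the pickle path is hypothetical):
#   graph = load_network_from_pickle(pw_legacy, 'omnipath_network.pickle')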
#the function below returns a consensus of the possible interactions
#taken from the different databases
def get_consensus_edges(direction, gene_dict):
a = direction.consensus_edges()
code1 = a[0][0]
code2 = a[0][1]
for symbol, code in gene_dict.items():
#print(code, code1)
if code==code1:
a[0][0] = symbol
for symbol, code in gene_dict.items():
if code==code2:
a[0][1] = symbol
return a
#it is possible to visualize the dataframe containing all the information about each edge:
def show_edge_dataframe(graph, gene_dict):
df = graph.get_edge_dataframe()
df_vert = graph.get_vertex_dataframe()
for gene in gene_dict.keys():
df_vert = df_vert.replace(gene_dict[gene], gene)
df['source'].replace(df_vert['name'], inplace=True)
df['target'].replace(df_vert['name'], inplace=True)
df_vert.set_index('name', inplace=True) # Optional
return df
#the following function is similar to complete_dict, but I am adding the interactions JUST for the nodes that are NOT already connected
def complete_connection(graph, gene_dict, depth, pw_legacy):
list_genes = list(gene_dict.keys())
for node in graph.vs:
if node.degree() == 0: #select the node with degree == 0
for node2 in graph.vs:
if node2 == node: #loop again for all the other nodes but exclude the node already selected
continue
else:
node_1 = pw_legacy.vs.find(label=node['label'])
node_2 = pw_legacy.vs.find(label=node2['label'])
for paths in pw_legacy.find_all_paths(node_1.index, node_2.index, mode='ALL',
maxlen=depth): # do not use graph index, for each graph the indexes are different
#print(paths)
for i in range(1, len(paths) - 1):
if str(pw_legacy.vs[paths[i]]['name'])[:7] == "COMPLEX":
break
elif pw_legacy.vs[paths[i]]['label'] in list_genes:
break
else:
#print(pw_legacy.vs[paths[i]]['label'], end=' ')
list_genes.append(pw_legacy.vs[paths[i]]['label'])
#new_dict[pw_legacy.vs[paths[i]]['label']] = \
#list(mapping.map_name(pw_legacy.vs[paths[i]]['label'], 'genesymbol', 'uniprot'))[0]
new_dict = generate_dict(list_genes, pw_legacy) #print('\n')
return new_dict
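# In both complete_connection() and get_complete_dict() the 'depth' argument is passed
# to find_all_paths() as maxlen, so connecting nodes are only collected from paths no
# longer than that limit; a typical call is complete_connection(graph, gene_dict, 2, pw_legacy).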
def get_complete_dict(graph, gene_dict, depth, pw_legacy):
complete_dict = gene_dict.copy()
for node1, node2 in itertools.combinations(graph.vs, 2):
#print(graph.are_connected(gene_dict[node1['label']], gene_dict[node2['label']]))
#path = graph.get_all_shortest_paths(gene_dict[node1['label']], gene_dict[node2['label']])
dist = graph.shortest_paths(gene_dict[node1['label']], gene_dict[node2['label']], mode='all')
#print(path)
if dist[0][0] > depth: # if node disconnected, the distance is inf which should be > depth
node_1 = pw_legacy.vs.find(label=node1['label'])
node_2 = pw_legacy.vs.find(label=node2['label'])
for paths in pw_legacy.find_all_paths(node_1.index, node_2.index, mode='ALL', maxlen=depth): #do not use graph index, for each graph the indexes are different
#print(paths)
for i in range(1, len(paths)-1):
if str(pw_legacy.vs[paths[i]]['name'])[:7] == "COMPLEX":
break
if pw_legacy.vs[paths[i]]['label'] in complete_dict.keys():
break
else:
print(pw_legacy.vs[paths[i]]['label'], end=' ')
complete_dict[pw_legacy.vs[paths[i]]['label']] = list(mapping.map_name(pw_legacy.vs[paths[i]]['label'], 'genesymbol', 'uniprot'))[0]
#print('\n')
return complete_dict
def filter_by_node_degree(graph, degree, pw_legacy):
filtered_dict = {}
label_tmp = [node if d > degree else '\n' for node, d in zip(graph.vs['label'], graph.degree())]
labels = [label for label in label_tmp if label != '\n']
for node in labels:
filtered_dict[node] = list(mapping.map_name(node, 'genesymbol', 'uniprot'))[0]
subg = pw_legacy.graph.induced_subgraph([pw_legacy.vs.find(name = filtered_dict[e]) for e in filtered_dict.keys()])
graph_obj = igraph.plot(subg, target='network_degree_filtered.pdf',
layout=subg.layout_auto(),
vertex_size=subg.degree(), edge_width=0.3, edge_color='purple',
vertex_color='#97BE73', vertex_frame_width=0,
vertex_label_size=7,
vertex_label_color='red', inline=True, margin=20)
return graph_obj
#if I want to know which are the neighbors of a particular node in the subgraph that I just built:
def search_neigh_interactions(gene, gene_dict, graph):
for neigh in graph.neighbors(gene_dict[gene]):
eid = graph.get_eid(gene_dict[gene], neigh)
print(gene, ' -- ', graph.vs[neigh]['label'])
print(graph.es[eid]['dirs'])
print(graph.es[eid]['references'])
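# Example (the gene symbol is hypothetical): print every neighbour of TP53 in the
# subgraph together with the direction and references of each edge:
#   search_neigh_interactions('TP53', gene_dict, graph)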
#with this function, you can select two nodes of one of your graphs, and check which are the shortest paths
#that link the two nodes, and then plot them
def write_bnet(graph, gene_dict, name="logic_formula.bnet"):
# database = ['SIGNOR', "Adhesome"] # ==> insert name of the database
edge_df = show_edge_dataframe(graph, gene_dict)
# df_signor = edge_df[pd.DataFrame(edge_df.sources.tolist()).isin(database).any(1).values] # I have filtered the dictionary to have directed interaction from signor
node_list = []
for element in edge_df["attrs"]:
if element.consensus_edges() != []:
node_list.append(element.consensus_edges()[0][
0].label) # I am storing into a list the labels (genesymbol) of the genes in "sources", I will use it later
node_list.append(element.consensus_edges()[0][
1].label) # I am storing into a list the labels (genesymbol) of the genes in "target", I will use it later
node_list = list(dict.fromkeys(
node_list)) # now I have collected in a list all the genes that I have found with directed interactions and without duplicates
# print(node_list)
with open(name, "w") as f:
# f.write("# model in BoolNet format\n")
# f.write("# the header targets, factors is mandatory to be importable in the R package BoolNet\n")
# f.write("\n")
# f.write(
        #     "targets, factors\n") # this is the standard label that I found in the biolqm github, is it ok? time will tell
for node in node_list:
formula_ON = []
formula_OFF = []
for element in edge_df["attrs"]:
interactions = element.consensus_edges()
for interaction in interactions: # iterate over the possible interactions
if interaction: # I don't know why, but some interactions are empty... so I am using this to skip the empty interactions
if interaction[
                            1].label == node:  # that's a tricky one... when you write a bnet file (gene, formula) the gene is not the source, but the target! so I have to iterate over the targets
# print(element.consensus_edges()[0][1].label, " ", node ) # used to check
if interaction[2] == "directed" and interaction[
3] == "positive": # checking if the interaction is positive
source = interaction[0].label # if it is, store the source of the positive interaction
formula_ON.append(source) # append the gene into the list
elif interaction[2] == "directed" and interaction[
3] == "negative": # checking if the interaction is negative
source = interaction[0].label # storing
formula_OFF.append(source) # append it to the formula with "!"
else:
                                print("there is an undirected interaction that was dismissed: ", interaction[0].label,
                                      " and ", interaction[1].label)  # this should never happen, but you never know...
formula = formula_ON + formula_OFF
commons = list(set(formula_ON).intersection(set(formula_OFF)))
# print(shared)
for common in commons:
print("Two possible opposite interactions found for: ", common, " and ", node)
formula_OFF.remove(common)
f.write(node + ",")
offset = 16 - len(node) # nice offset so the visualization is understandable
f.write(" " * offset)
if not formula:
f.write(" ( ")
f.write(node)
f.write(" ) ")
f.write("\n")
if formula_ON:
f.write(" ( ")
f.write(" | ".join(formula_ON)) # writing the first parenthesis with all the positive interactions
f.write(" ) ")
if not formula_OFF:
f.write("\n")
if formula_ON != [] and formula_OFF != []:
f.write(" & ")
f.write(" !( ")
f.write(" | ".join(formula_OFF)) # writing the first parenthesis with all the positive interactions
f.write(" ) ")
f.write("\n")
if formula_ON == [] and formula_OFF != []:
f.write(" !( ")
f.write(" | ".join(formula_OFF)) # writing the first parenthesis with all the positive interactions
f.write(" ) ")
f.write("\n")
        # nothing to close explicitly: the "with" block above closes the file - good to go
return
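# Each line of the resulting .bnet file looks roughly like (illustrative names):
#   GENE_A,          ( GENE_B | GENE_C )  &  !( GENE_D )
# i.e. the target gene, a padded comma, the OR of its activators and a negated OR of
# its inhibitors.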
| 51.675214 | 197 | 0.578234 |
ccb196c86af70b0671efd39c18972e498e4829b4 | 11,538 | py | Python | sqlshare_rest/util/query_queue.py | uw-it-aca/sqlshare-rest | e441ce9286a915586a68a0bfa3105f122d6ae18f | [
"Apache-2.0"
] | null | null | null | sqlshare_rest/util/query_queue.py | uw-it-aca/sqlshare-rest | e441ce9286a915586a68a0bfa3105f122d6ae18f | [
"Apache-2.0"
] | 83 | 2015-05-19T21:47:15.000Z | 2018-03-06T14:40:38.000Z | sqlshare_rest/util/query_queue.py | uw-it-aca/sqlshare-rest | e441ce9286a915586a68a0bfa3105f122d6ae18f | [
"Apache-2.0"
] | 1 | 2015-03-31T22:06:55.000Z | 2015-03-31T22:06:55.000Z | from sqlshare_rest.util.db import get_backend
from sqlshare_rest.models import Query
from sqlshare_rest.dao.dataset import reset_dataset_account_access
from django.utils import timezone
from django.conf import settings
from django import db
from time import sleep
from sqlshare_rest.util.queue_triggers import trigger_query_queue_processing
from sqlshare_rest.util.queue_triggers import QUERY_QUEUE_PORT_NUMBER
from sqlshare_rest.logger import getLogger
from django.db.utils import DatabaseError
from decimal import Decimal
import datetime
import atexit
import signal
import json
import time
import sys
import os
import socket
from threading import Thread
import six
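# The TERMINATE_TRIGGER_FILE defined below is created by kill_query_queue(), which then
# triggers the queue via trigger_query_queue_processing(); the accept() loop inside
# process_queue() checks for the file and exits when it appears.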
TERMINATE_TRIGGER_FILE = getattr(settings,
"SQLSHARE_TERMINATE_QUERY_QUEUE_PATH",
"/tmp/sqlshare_terminate_query_queue")
def process_queue(thread_count=0, run_once=True, verbose=False):
# Make sure only one instance is running at a time:
if trigger_query_queue_processing():
return
def start_query(query, background=True):
query.is_started = True
query.save()
query_id = query.pk
if background:
from django.db import connection
connection.close()
pid1 = os.fork()
if pid1:
os.waitpid(pid1, 0)
# This is the main process
return
os.setsid()
if os.fork():
# Double fork the daemon
sys.exit(0)
try:
process_query(query_id)
except Exception as ex:
try:
query = Query.objects.get(pk=query_id)
query.has_error = True
query.error = str(ex)
query.is_finished = True
query.save()
except:
# That try is just trying to get info out to the user, it's
# relatively ok if that fails
pass
logger = getLogger(__name__)
logger.error("Error on %s: %s" % (query_id, str(ex)))
if background:
sys.exit(0)
def get_column_names_from_cursor(cursor):
index = 0
names = []
for col in cursor.description:
index += 1
column_name = col[0]
if column_name == "":
column_name = "COLUMN%s" % index
names.append(column_name)
return names
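        # Columns that come back without a name (e.g. unnamed computed expressions)
        # are filled in positionally as COLUMN1, COLUMN2, ...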
def custom_encode(obj):
if isinstance(obj, datetime.datetime):
return str(obj)
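        # Used as the json.dumps default= hook below: datetimes are stringified and
        # any other non-serializable value falls through to None (rendered as null).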
def process_query(query_id):
logger = getLogger(__name__)
query = Query.objects.get(pk=query_id)
# queries can be cancelled before we see them. clean it up now.
if query.terminated:
query.is_finished = True
query.has_error = True
query.error = "Query cancelled"
query.save()
return
pid = os.getpid()
query.process_queue_id = pid
query.save()
msg = "Processing query id %s, in process %s" % (
query.pk,
pid
)
logger.info(msg)
if verbose:
print(msg)
user = query.owner
row_count = 0
backend = get_backend()
try:
start = timezone.now()
query_plan = backend.get_query_plan(query.sql, user)
t1 = time.time()
sql = query.sql
if query.is_ui_preview:
sql = backend.get_preview_sql_for_query(sql)
cursor = backend.run_query(sql,
user,
return_cursor=True,
query=query)
t2 = time.time()
try:
all_data = []
decimal_test = type(Decimal())
def map_decimal(d):
if type(d) == decimal_test:
return str(d)
return d
for row in cursor:
# Need to manually map Decimal values to strings,
# otherwise json turns them into None
                    all_data.append(list(map(map_decimal, row)))
row_count += 1
columns = get_column_names_from_cursor(cursor)
formatted = json.dumps({"columns": columns, "data": all_data},
default=custom_encode)
query.preview_content = formatted
t3 = time.time()
query.query_time = t2-t1
query.total_time = t3-t1
query.query_plan = query_plan
query.save()
end = timezone.now()
except:
raise
except Exception as ex:
msg = "Error running query %s: %s" % (query.pk,
str(ex))
logger.error(msg)
query.has_error = True
query.error = str(ex)
finally:
backend.close_user_connection(user)
try:
query.is_finished = True
query.date_finished = timezone.now()
query.rows_total = row_count
query.save()
if query.is_preview_for:
dataset = query.is_preview_for
dataset.preview_is_finished = True
dataset.preview_error = query.error
# Make sure all current users can see the preview table
reset_dataset_account_access(dataset)
dataset.save()
except Exception as ex:
logger.error("Error: %s" % str(ex))
msg = "Finished query id %s." % query.pk
logger.info(msg)
def periodic_check():
"""
Every 5 seconds, do a check for new queries. Just in case something
needs processing, but didn't call trigger_processing() itself.
"""
logger = getLogger(__name__)
while True:
sleep(5)
msg = "Triggering periodic processing."
logger.debug(msg)
if verbose:
print(msg)
trigger_query_queue_processing()
filtered = Query.objects.filter(is_started=False)
if run_once:
try:
oldest_query = filtered.order_by('id')[:1].get()
except Query.DoesNotExist:
return
start_query(oldest_query, background=False)
else:
# Start with any queries already in the queue:
for query in filtered:
start_query(query)
# Just in case things get off the rails - maybe a connection to the
# server gets blocked? - periodically trigger a check for new queries
kicker = Thread(target=periodic_check)
kicker.setDaemon(True)
kicker.start()
# Start the socket server for getting notifications of new queries
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Make it so we can run the server right away after killing it
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('localhost', QUERY_QUEUE_PORT_NUMBER))
# Make sure we close our socket when we're killed.
def close_socket():
server.close()
atexit.register(close_socket)
def kill_query(query):
logger = getLogger(__name__)
pid = query.process_queue_id
query.is_started = True
query.is_finished = True
query.has_error = True
query.error = "Query cancelled"
query.save()
try:
backend = get_backend()
backend.kill_query(query)
except:
# This is optional
pass
logger.info("Cancelling query: %s" % query.pk)
os.kill(pid, signal.SIGKILL)
server.listen(5)
while True:
(clientsocket, address) = server.accept()
# Check to see if we should exit...
if os.path.isfile(TERMINATE_TRIGGER_FILE):
sys.exit(0)
# We don't actually have a protocol to speak...
clientsocket.close()
try:
terminate_list = Query.objects.filter(terminated=True,
is_finished=False)
for query in terminate_list:
kill_query(query)
queries = Query.objects.filter(is_started=False)
for query in queries:
start_query(query)
except Exception as ex:
# This was originally DatabaseError - but then there were also
# pyodbc.Error exceptions... and pyodbc isn't a hard
# requirement.
ex_str = str(ex)
# If there's just, say, a network glitch, carry on.
# Or, say, a server restart
# If it's anything else, re-raise the error.
is_reset_error = False
if ex_str.find("Read from the server failed") >= 0:
is_reset_error = True
if ex_str.find("Write to the server failed") >= 0:
is_reset_error = True
if ex_str.find("Communication link failure") >= 0:
is_reset_error = True
adaptive = "Adaptive Server is unavailable or does not exist"
if ex_str.find(adaptive) >= 0:
is_reset_error = True
if is_reset_error:
try:
db.close_old_connections()
except Exception as ex:
ex_str = str(ex)
is_expected = False
rollback_err = "Could not perform COMMIT or ROLLBACK"
if ex_str.find(rollback_err) >= 0:
# db.close_connection tries to end transactions
# pyodbc was absolutely unable to recover from that
# because it wasn't reconnecting to be able to do
# the rollback...
from django.db import connections
for conn in connections:
connections[conn].close()
else:
logger = getLogger(__name__)
logger.error("Error in query queue: %s" % ex_str)
raise
else:
logger = getLogger(__name__)
logger.error("Error in query queue: %s" % ex_str)
raise
def kill_query_queue():
# Create the file that triggers the termination
f = open(TERMINATE_TRIGGER_FILE, "w")
f.write("OK")
f.close()
# Trigger the check...
trigger_query_queue_processing()
# Just a quick pause before polling
time.sleep(0.3)
# Poll to see if the process is still running...
for i in range(10):
try:
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('localhost', QUERY_QUEUE_PORT_NUMBER))
time.sleep(1)
except socket.error as ex:
os.remove(TERMINATE_TRIGGER_FILE)
return True
os.remove(TERMINATE_TRIGGER_FILE)
return False
| 33.540698 | 79 | 0.530855 |
492019881d10c6aa0734497e5236ed9537bd35b4 | 330 | py | Python | Mundo2/ex053.py | NOBarbosa/Exercicios_Python | f5769d331a38030281b351ee8812e7f75fcf0f87 | [
"MIT"
] | null | null | null | Mundo2/ex053.py | NOBarbosa/Exercicios_Python | f5769d331a38030281b351ee8812e7f75fcf0f87 | [
"MIT"
] | null | null | null | Mundo2/ex053.py | NOBarbosa/Exercicios_Python | f5769d331a38030281b351ee8812e7f75fcf0f87 | [
"MIT"
] | null | null | null | # Palindrome detector
frase = str(input('Type a sentence: ')).strip().upper()
palavra = frase.split()
junto = ''.join(palavra)
inverso = ''
for letra in range(len(junto)-1, -1, -1):
inverso += junto[letra]
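# At this point 'junto' holds the uppercased phrase without spaces and 'inverso' its
# reverse; e.g. (illustrative input) 'A sacada da casa' -> 'ASACADADACASA', which
# reads the same both ways.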
if junto == inverso:
    print('{} is a palindrome'.format(frase))
else:
    print('{} is not a palindrome'.format(frase)) | 27.5 | 56 | 0.651515 |
c4e797972787c68522a3bcf1b2eeee9236434877 | 1,147 | py | Python | configs/deepim/lmCropBlenderSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_lmCropBlender_SO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_Blender_06_cat.py | THU-DA-6D-Pose-Group/self6dpp | c267cfa55e440e212136a5e9940598720fa21d16 | [
"Apache-2.0"
] | 33 | 2021-12-15T07:11:47.000Z | 2022-03-29T08:58:32.000Z | configs/deepim/lmCropBlenderSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_lmCropBlender_SO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_Blender_06_cat.py | THU-DA-6D-Pose-Group/self6dpp | c267cfa55e440e212136a5e9940598720fa21d16 | [
"Apache-2.0"
] | 3 | 2021-12-15T11:39:54.000Z | 2022-03-29T07:24:23.000Z | configs/deepim/lmCropBlenderSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_lmCropBlender_SO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_Blender_06_cat.py | THU-DA-6D-Pose-Group/self6dpp | c267cfa55e440e212136a5e9940598720fa21d16 | [
"Apache-2.0"
] | null | null | null | _base_ = "./FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_Blender_01_ape.py"
OUTPUT_DIR = "output/deepim/lmCropBlenderSO/FlowNet512_1.5AugCosyAAEGray_AggressiveV2_Flat_lmCropBlender_SO/cat"
DATASETS = dict(TRAIN=("lm_blender_cat_train",), TEST=("lm_crop_cat_test",))
# iter0
# objects cat Avg(1)
# ad_2 2.97 2.97
# ad_5 22.46 22.46
# ad_10 35.17 35.17
# rete_2 22.46 22.46
# rete_5 50.42 50.42
# rete_10 66.53 66.53
# re_2 24.58 24.58
# re_5 66.53 66.53
# re_10 84.32 84.32
# te_2 38.56 38.56
# te_5 53.39 53.39
# te_10 69.49 69.49
# proj_2 16.95 16.95
# proj_5 73.73 73.73
# proj_10 91.95 91.95
# re 9.27 9.27
# te 0.07 0.07
# iter4
# objects cat Avg(1)
# ad_2 11.44 11.44
# ad_5 55.08 55.08
# ad_10 83.47 83.47
# rete_2 67.37 67.37
# rete_5 93.64 93.64
# rete_10 96.61 96.61
# re_2 70.76 70.76
# re_5 93.64 93.64
# re_10 96.61 96.61
# te_2 90.68 90.68
# te_5 97.03 97.03
# te_10 97.03 97.03
# proj_2 79.24 79.24
# proj_5 95.76 95.76
# proj_10 96.61 96.61
# re 5.52 5.52
# te 0.02 0.02
| 26.068182 | 112 | 0.600697 |
65defea801213cf3208ece53c555f0c85369ce5f | 36,440 | py | Python | test/dialect/postgresql/test_query.py | atugushev/sqlalchemy | 3cb614009ee87a115ec7230949c031402efb17c1 | [
"MIT"
] | 1 | 2020-02-08T20:04:42.000Z | 2020-02-08T20:04:42.000Z | test/dialect/postgresql/test_query.py | atugushev/sqlalchemy | 3cb614009ee87a115ec7230949c031402efb17c1 | [
"MIT"
] | null | null | null | test/dialect/postgresql/test_query.py | atugushev/sqlalchemy | 3cb614009ee87a115ec7230949c031402efb17c1 | [
"MIT"
] | null | null | null | # coding: utf-8
import datetime
from sqlalchemy import and_
from sqlalchemy import bindparam
from sqlalchemy import Column
from sqlalchemy import Date
from sqlalchemy import DateTime
from sqlalchemy import exc
from sqlalchemy import extract
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import literal
from sqlalchemy import literal_column
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy import select
from sqlalchemy import Sequence
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import Time
from sqlalchemy import tuple_
from sqlalchemy.dialects import postgresql
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import AssertsExecutionResults
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.assertsql import CursorSQL
from sqlalchemy.testing.assertsql import DialectSQL
matchtable = cattable = None
class InsertTest(fixtures.TestBase, AssertsExecutionResults):
__only_on__ = "postgresql"
__backend__ = True
@classmethod
def setup_class(cls):
cls.metadata = MetaData(testing.db)
def teardown(self):
self.metadata.drop_all()
self.metadata.clear()
def test_compiled_insert(self):
table = Table(
"testtable",
self.metadata,
Column("id", Integer, primary_key=True),
Column("data", String(30)),
)
self.metadata.create_all()
ins = table.insert(
inline=True, values={"data": bindparam("x")}
).compile()
ins.execute({"x": "five"}, {"x": "seven"})
eq_(table.select().execute().fetchall(), [(1, "five"), (2, "seven")])
def test_foreignkey_missing_insert(self):
Table("t1", self.metadata, Column("id", Integer, primary_key=True))
t2 = Table(
"t2",
self.metadata,
Column("id", Integer, ForeignKey("t1.id"), primary_key=True),
)
self.metadata.create_all()
# want to ensure that "null value in column "id" violates not-
        # null constraint" is raised (IntegrityError on psycopg2, but
# ProgrammingError on pg8000), and not "ProgrammingError:
        # (ProgrammingError) relation "t2_id_seq" does not exist".
# the latter corresponds to autoincrement behavior, which is not
# the case here due to the foreign key.
for eng in [
engines.testing_engine(options={"implicit_returning": False}),
engines.testing_engine(options={"implicit_returning": True}),
]:
with expect_warnings(
".*has no Python-side or server-side default.*"
):
assert_raises(
(exc.IntegrityError, exc.ProgrammingError),
eng.execute,
t2.insert(),
)
def test_sequence_insert(self):
table = Table(
"testtable",
self.metadata,
Column("id", Integer, Sequence("my_seq"), primary_key=True),
Column("data", String(30)),
)
self.metadata.create_all()
self._assert_data_with_sequence(table, "my_seq")
@testing.requires.returning
def test_sequence_returning_insert(self):
table = Table(
"testtable",
self.metadata,
Column("id", Integer, Sequence("my_seq"), primary_key=True),
Column("data", String(30)),
)
self.metadata.create_all()
self._assert_data_with_sequence_returning(table, "my_seq")
def test_opt_sequence_insert(self):
table = Table(
"testtable",
self.metadata,
Column(
"id",
Integer,
Sequence("my_seq", optional=True),
primary_key=True,
),
Column("data", String(30)),
)
self.metadata.create_all()
self._assert_data_autoincrement(table)
@testing.requires.returning
def test_opt_sequence_returning_insert(self):
table = Table(
"testtable",
self.metadata,
Column(
"id",
Integer,
Sequence("my_seq", optional=True),
primary_key=True,
),
Column("data", String(30)),
)
self.metadata.create_all()
self._assert_data_autoincrement_returning(table)
def test_autoincrement_insert(self):
table = Table(
"testtable",
self.metadata,
Column("id", Integer, primary_key=True),
Column("data", String(30)),
)
self.metadata.create_all()
self._assert_data_autoincrement(table)
@testing.requires.returning
def test_autoincrement_returning_insert(self):
table = Table(
"testtable",
self.metadata,
Column("id", Integer, primary_key=True),
Column("data", String(30)),
)
self.metadata.create_all()
self._assert_data_autoincrement_returning(table)
def test_noautoincrement_insert(self):
table = Table(
"testtable",
self.metadata,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("data", String(30)),
)
self.metadata.create_all()
self._assert_data_noautoincrement(table)
def _assert_data_autoincrement(self, table):
engine = engines.testing_engine(options={"implicit_returning": False})
with self.sql_execution_asserter(engine) as asserter:
with engine.connect() as conn:
# execute with explicit id
r = conn.execute(table.insert(), {"id": 30, "data": "d1"})
eq_(r.inserted_primary_key, [30])
# execute with prefetch id
r = conn.execute(table.insert(), {"data": "d2"})
eq_(r.inserted_primary_key, [1])
# executemany with explicit ids
conn.execute(
table.insert(),
{"id": 31, "data": "d3"},
{"id": 32, "data": "d4"},
)
# executemany, uses SERIAL
conn.execute(table.insert(), {"data": "d5"}, {"data": "d6"})
# single execute, explicit id, inline
conn.execute(
table.insert(inline=True), {"id": 33, "data": "d7"}
)
# single execute, inline, uses SERIAL
conn.execute(table.insert(inline=True), {"data": "d8"})
asserter.assert_(
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
{"id": 30, "data": "d1"},
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
{"id": 1, "data": "d2"},
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
[{"id": 31, "data": "d3"}, {"id": 32, "data": "d4"}],
),
DialectSQL(
"INSERT INTO testtable (data) VALUES (:data)",
[{"data": "d5"}, {"data": "d6"}],
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
[{"id": 33, "data": "d7"}],
),
DialectSQL(
"INSERT INTO testtable (data) VALUES (:data)", [{"data": "d8"}]
),
)
with engine.connect() as conn:
eq_(
conn.execute(table.select()).fetchall(),
[
(30, "d1"),
(1, "d2"),
(31, "d3"),
(32, "d4"),
(2, "d5"),
(3, "d6"),
(33, "d7"),
(4, "d8"),
],
)
conn.execute(table.delete())
# test the same series of events using a reflected version of
# the table
m2 = MetaData(engine)
table = Table(table.name, m2, autoload=True)
with self.sql_execution_asserter(engine) as asserter:
with engine.connect() as conn:
conn.execute(table.insert(), {"id": 30, "data": "d1"})
r = conn.execute(table.insert(), {"data": "d2"})
eq_(r.inserted_primary_key, [5])
conn.execute(
table.insert(),
{"id": 31, "data": "d3"},
{"id": 32, "data": "d4"},
)
conn.execute(table.insert(), {"data": "d5"}, {"data": "d6"})
conn.execute(
table.insert(inline=True), {"id": 33, "data": "d7"}
)
conn.execute(table.insert(inline=True), {"data": "d8"})
asserter.assert_(
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
{"id": 30, "data": "d1"},
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
{"id": 5, "data": "d2"},
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
[{"id": 31, "data": "d3"}, {"id": 32, "data": "d4"}],
),
DialectSQL(
"INSERT INTO testtable (data) VALUES (:data)",
[{"data": "d5"}, {"data": "d6"}],
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
[{"id": 33, "data": "d7"}],
),
DialectSQL(
"INSERT INTO testtable (data) VALUES (:data)", [{"data": "d8"}]
),
)
with engine.connect() as conn:
eq_(
conn.execute(table.select()).fetchall(),
[
(30, "d1"),
(5, "d2"),
(31, "d3"),
(32, "d4"),
(6, "d5"),
(7, "d6"),
(33, "d7"),
(8, "d8"),
],
)
conn.execute(table.delete())
def _assert_data_autoincrement_returning(self, table):
engine = engines.testing_engine(options={"implicit_returning": True})
with self.sql_execution_asserter(engine) as asserter:
with engine.connect() as conn:
# execute with explicit id
r = conn.execute(table.insert(), {"id": 30, "data": "d1"})
eq_(r.inserted_primary_key, [30])
# execute with prefetch id
r = conn.execute(table.insert(), {"data": "d2"})
eq_(r.inserted_primary_key, [1])
# executemany with explicit ids
conn.execute(
table.insert(),
{"id": 31, "data": "d3"},
{"id": 32, "data": "d4"},
)
# executemany, uses SERIAL
conn.execute(table.insert(), {"data": "d5"}, {"data": "d6"})
# single execute, explicit id, inline
conn.execute(
table.insert(inline=True), {"id": 33, "data": "d7"}
)
# single execute, inline, uses SERIAL
conn.execute(table.insert(inline=True), {"data": "d8"})
asserter.assert_(
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
{"id": 30, "data": "d1"},
),
DialectSQL(
"INSERT INTO testtable (data) VALUES (:data) RETURNING "
"testtable.id",
{"data": "d2"},
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
[{"id": 31, "data": "d3"}, {"id": 32, "data": "d4"}],
),
DialectSQL(
"INSERT INTO testtable (data) VALUES (:data)",
[{"data": "d5"}, {"data": "d6"}],
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
[{"id": 33, "data": "d7"}],
),
DialectSQL(
"INSERT INTO testtable (data) VALUES (:data)", [{"data": "d8"}]
),
)
with engine.connect() as conn:
eq_(
conn.execute(table.select()).fetchall(),
[
(30, "d1"),
(1, "d2"),
(31, "d3"),
(32, "d4"),
(2, "d5"),
(3, "d6"),
(33, "d7"),
(4, "d8"),
],
)
conn.execute(table.delete())
# test the same series of events using a reflected version of
# the table
m2 = MetaData(engine)
table = Table(table.name, m2, autoload=True)
with self.sql_execution_asserter(engine) as asserter:
with engine.connect() as conn:
conn.execute(table.insert(), {"id": 30, "data": "d1"})
r = conn.execute(table.insert(), {"data": "d2"})
eq_(r.inserted_primary_key, [5])
conn.execute(
table.insert(),
{"id": 31, "data": "d3"},
{"id": 32, "data": "d4"},
)
conn.execute(table.insert(), {"data": "d5"}, {"data": "d6"})
conn.execute(
table.insert(inline=True), {"id": 33, "data": "d7"}
)
conn.execute(table.insert(inline=True), {"data": "d8"})
asserter.assert_(
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
{"id": 30, "data": "d1"},
),
DialectSQL(
"INSERT INTO testtable (data) VALUES (:data) RETURNING "
"testtable.id",
{"data": "d2"},
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
[{"id": 31, "data": "d3"}, {"id": 32, "data": "d4"}],
),
DialectSQL(
"INSERT INTO testtable (data) VALUES (:data)",
[{"data": "d5"}, {"data": "d6"}],
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
[{"id": 33, "data": "d7"}],
),
DialectSQL(
"INSERT INTO testtable (data) VALUES (:data)", [{"data": "d8"}]
),
)
with engine.connect() as conn:
eq_(
conn.execute(table.select()).fetchall(),
[
(30, "d1"),
(5, "d2"),
(31, "d3"),
(32, "d4"),
(6, "d5"),
(7, "d6"),
(33, "d7"),
(8, "d8"),
],
)
conn.execute(table.delete())
def _assert_data_with_sequence(self, table, seqname):
engine = engines.testing_engine(options={"implicit_returning": False})
with self.sql_execution_asserter(engine) as asserter:
with engine.connect() as conn:
conn.execute(table.insert(), {"id": 30, "data": "d1"})
conn.execute(table.insert(), {"data": "d2"})
conn.execute(
table.insert(),
{"id": 31, "data": "d3"},
{"id": 32, "data": "d4"},
)
conn.execute(table.insert(), {"data": "d5"}, {"data": "d6"})
conn.execute(
table.insert(inline=True), {"id": 33, "data": "d7"}
)
conn.execute(table.insert(inline=True), {"data": "d8"})
asserter.assert_(
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
{"id": 30, "data": "d1"},
),
CursorSQL("select nextval('my_seq')", consume_statement=False),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
{"id": 1, "data": "d2"},
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
[{"id": 31, "data": "d3"}, {"id": 32, "data": "d4"}],
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
":data)" % seqname,
[{"data": "d5"}, {"data": "d6"}],
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
[{"id": 33, "data": "d7"}],
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
":data)" % seqname,
[{"data": "d8"}],
),
)
with engine.connect() as conn:
eq_(
conn.execute(table.select()).fetchall(),
[
(30, "d1"),
(1, "d2"),
(31, "d3"),
(32, "d4"),
(2, "d5"),
(3, "d6"),
(33, "d7"),
(4, "d8"),
],
)
        # can't test reflection here since the Sequence must be
# explicitly specified
def _assert_data_with_sequence_returning(self, table, seqname):
engine = engines.testing_engine(options={"implicit_returning": True})
with self.sql_execution_asserter(engine) as asserter:
with engine.connect() as conn:
conn.execute(table.insert(), {"id": 30, "data": "d1"})
conn.execute(table.insert(), {"data": "d2"})
conn.execute(
table.insert(),
{"id": 31, "data": "d3"},
{"id": 32, "data": "d4"},
)
conn.execute(table.insert(), {"data": "d5"}, {"data": "d6"})
conn.execute(
table.insert(inline=True), {"id": 33, "data": "d7"}
)
conn.execute(table.insert(inline=True), {"data": "d8"})
asserter.assert_(
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
{"id": 30, "data": "d1"},
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES "
"(nextval('my_seq'), :data) RETURNING testtable.id",
{"data": "d2"},
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
[{"id": 31, "data": "d3"}, {"id": 32, "data": "d4"}],
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
":data)" % seqname,
[{"data": "d5"}, {"data": "d6"}],
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (:id, :data)",
[{"id": 33, "data": "d7"}],
),
DialectSQL(
"INSERT INTO testtable (id, data) VALUES (nextval('%s'), "
":data)" % seqname,
[{"data": "d8"}],
),
)
with engine.connect() as conn:
eq_(
conn.execute(table.select()).fetchall(),
[
(30, "d1"),
(1, "d2"),
(31, "d3"),
(32, "d4"),
(2, "d5"),
(3, "d6"),
(33, "d7"),
(4, "d8"),
],
)
        # can't test reflection here since the Sequence must be
# explicitly specified
def _assert_data_noautoincrement(self, table):
engine = engines.testing_engine(options={"implicit_returning": False})
with engine.connect() as conn:
conn.execute(table.insert(), {"id": 30, "data": "d1"})
with expect_warnings(
".*has no Python-side or server-side default.*"
):
assert_raises(
(exc.IntegrityError, exc.ProgrammingError),
conn.execute,
table.insert(),
{"data": "d2"},
)
with expect_warnings(
".*has no Python-side or server-side default.*"
):
assert_raises(
(exc.IntegrityError, exc.ProgrammingError),
conn.execute,
table.insert(),
{"data": "d2"},
{"data": "d3"},
)
with expect_warnings(
".*has no Python-side or server-side default.*"
):
assert_raises(
(exc.IntegrityError, exc.ProgrammingError),
conn.execute,
table.insert(),
{"data": "d2"},
)
with expect_warnings(
".*has no Python-side or server-side default.*"
):
assert_raises(
(exc.IntegrityError, exc.ProgrammingError),
conn.execute,
table.insert(),
{"data": "d2"},
{"data": "d3"},
)
conn.execute(
table.insert(),
{"id": 31, "data": "d2"},
{"id": 32, "data": "d3"},
)
conn.execute(table.insert(inline=True), {"id": 33, "data": "d4"})
eq_(
conn.execute(table.select()).fetchall(),
[(30, "d1"), (31, "d2"), (32, "d3"), (33, "d4")],
)
conn.execute(table.delete())
# test the same series of events using a reflected version of
# the table
m2 = MetaData(engine)
table = Table(table.name, m2, autoload=True)
with engine.connect() as conn:
conn.execute(table.insert(), {"id": 30, "data": "d1"})
with expect_warnings(
".*has no Python-side or server-side default.*"
):
assert_raises(
(exc.IntegrityError, exc.ProgrammingError),
conn.execute,
table.insert(),
{"data": "d2"},
)
with expect_warnings(
".*has no Python-side or server-side default.*"
):
assert_raises(
(exc.IntegrityError, exc.ProgrammingError),
conn.execute,
table.insert(),
{"data": "d2"},
{"data": "d3"},
)
conn.execute(
table.insert(),
{"id": 31, "data": "d2"},
{"id": 32, "data": "d3"},
)
conn.execute(table.insert(inline=True), {"id": 33, "data": "d4"})
eq_(
conn.execute(table.select()).fetchall(),
[(30, "d1"), (31, "d2"), (32, "d3"), (33, "d4")],
)
class MatchTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = "postgresql >= 8.3"
__backend__ = True
@classmethod
def setup_class(cls):
global metadata, cattable, matchtable
metadata = MetaData(testing.db)
cattable = Table(
"cattable",
metadata,
Column("id", Integer, primary_key=True),
Column("description", String(50)),
)
matchtable = Table(
"matchtable",
metadata,
Column("id", Integer, primary_key=True),
Column("title", String(200)),
Column("category_id", Integer, ForeignKey("cattable.id")),
)
metadata.create_all()
cattable.insert().execute(
[
{"id": 1, "description": "Python"},
{"id": 2, "description": "Ruby"},
]
)
matchtable.insert().execute(
[
{
"id": 1,
"title": "Agile Web Development with Rails",
"category_id": 2,
},
{"id": 2, "title": "Dive Into Python", "category_id": 1},
{
"id": 3,
"title": "Programming Matz's Ruby",
"category_id": 2,
},
{
"id": 4,
"title": "The Definitive Guide to Django",
"category_id": 1,
},
{"id": 5, "title": "Python in a Nutshell", "category_id": 1},
]
)
@classmethod
def teardown_class(cls):
metadata.drop_all()
@testing.fails_on("postgresql+pg8000", "uses positional")
def test_expression_pyformat(self):
self.assert_compile(
matchtable.c.title.match("somstr"),
"matchtable.title @@ to_tsquery(%(title_1)s" ")",
)
@testing.fails_on("postgresql+psycopg2", "uses pyformat")
@testing.fails_on("postgresql+pypostgresql", "uses pyformat")
@testing.fails_on("postgresql+pygresql", "uses pyformat")
@testing.fails_on("postgresql+psycopg2cffi", "uses pyformat")
def test_expression_positional(self):
self.assert_compile(
matchtable.c.title.match("somstr"),
"matchtable.title @@ to_tsquery(%s)",
)
def test_simple_match(self):
results = (
matchtable.select()
.where(matchtable.c.title.match("python"))
.order_by(matchtable.c.id)
.execute()
.fetchall()
)
eq_([2, 5], [r.id for r in results])
def test_not_match(self):
results = (
matchtable.select()
.where(~matchtable.c.title.match("python"))
.order_by(matchtable.c.id)
.execute()
.fetchall()
)
eq_([1, 3, 4], [r.id for r in results])
def test_simple_match_with_apostrophe(self):
results = (
matchtable.select()
.where(matchtable.c.title.match("Matz's"))
.execute()
.fetchall()
)
eq_([3], [r.id for r in results])
def test_simple_derivative_match(self):
results = (
matchtable.select()
.where(matchtable.c.title.match("nutshells"))
.execute()
.fetchall()
)
eq_([5], [r.id for r in results])
def test_or_match(self):
results1 = (
matchtable.select()
.where(
or_(
matchtable.c.title.match("nutshells"),
matchtable.c.title.match("rubies"),
)
)
.order_by(matchtable.c.id)
.execute()
.fetchall()
)
eq_([3, 5], [r.id for r in results1])
results2 = (
matchtable.select()
.where(matchtable.c.title.match("nutshells | rubies"))
.order_by(matchtable.c.id)
.execute()
.fetchall()
)
eq_([3, 5], [r.id for r in results2])
def test_and_match(self):
results1 = (
matchtable.select()
.where(
and_(
matchtable.c.title.match("python"),
matchtable.c.title.match("nutshells"),
)
)
.execute()
.fetchall()
)
eq_([5], [r.id for r in results1])
results2 = (
matchtable.select()
.where(matchtable.c.title.match("python & nutshells"))
.execute()
.fetchall()
)
eq_([5], [r.id for r in results2])
def test_match_across_joins(self):
results = (
matchtable.select()
.where(
and_(
cattable.c.id == matchtable.c.category_id,
or_(
cattable.c.description.match("Ruby"),
matchtable.c.title.match("nutshells"),
),
)
)
.order_by(matchtable.c.id)
.execute()
.fetchall()
)
eq_([1, 3, 5], [r.id for r in results])
class TupleTest(fixtures.TestBase):
__only_on__ = "postgresql"
__backend__ = True
def test_tuple_containment(self):
for test, exp in [
([("a", "b")], True),
([("a", "c")], False),
([("f", "q"), ("a", "b")], True),
([("f", "q"), ("a", "c")], False),
]:
eq_(
testing.db.execute(
select(
[
tuple_(
literal_column("'a'"), literal_column("'b'")
).in_(
[
tuple_(
*[
literal_column("'%s'" % letter)
for letter in elem
]
)
for elem in test
]
)
]
)
).scalar(),
exp,
)
class ExtractTest(fixtures.TablesTest):
"""The rationale behind this test is that for many years we've had a system
of embedding type casts into the expressions rendered by visit_extract()
    on the postgresql platform.  The reason for this cast is not clear.
So here we try to produce a wide range of cases to ensure that these casts
are not needed; see [ticket:2740].
"""
__only_on__ = "postgresql"
__backend__ = True
run_inserts = "once"
run_deletes = None
@classmethod
def setup_bind(cls):
from sqlalchemy import event
eng = engines.testing_engine()
@event.listens_for(eng, "connect")
def connect(dbapi_conn, rec):
cursor = dbapi_conn.cursor()
cursor.execute("SET SESSION TIME ZONE 0")
cursor.close()
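            # pinning the session time zone to 0 keeps the extract() expectations
            # in the tests below deterministic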
return eng
@classmethod
def define_tables(cls, metadata):
Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("dtme", DateTime),
Column("dt", Date),
Column("tm", Time),
Column("intv", postgresql.INTERVAL),
Column("dttz", DateTime(timezone=True)),
)
@classmethod
def insert_data(cls):
# TODO: why does setting hours to anything
# not affect the TZ in the DB col ?
class TZ(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(hours=4)
cls.bind.execute(
cls.tables.t.insert(),
{
"dtme": datetime.datetime(2012, 5, 10, 12, 15, 25),
"dt": datetime.date(2012, 5, 10),
"tm": datetime.time(12, 15, 25),
"intv": datetime.timedelta(seconds=570),
"dttz": datetime.datetime(
2012, 5, 10, 12, 15, 25, tzinfo=TZ()
),
},
)
def _test(self, expr, field="all", overrides=None):
t = self.tables.t
if field == "all":
fields = {
"year": 2012,
"month": 5,
"day": 10,
"epoch": 1336652125.0,
"hour": 12,
"minute": 15,
}
elif field == "time":
fields = {"hour": 12, "minute": 15, "second": 25}
elif field == "date":
fields = {"year": 2012, "month": 5, "day": 10}
elif field == "all+tz":
fields = {
"year": 2012,
"month": 5,
"day": 10,
"epoch": 1336637725.0,
"hour": 8,
"timezone": 0,
}
else:
fields = field
if overrides:
fields.update(overrides)
for field in fields:
result = self.bind.scalar(
select([extract(field, expr)]).select_from(t)
)
eq_(result, fields[field])
def test_one(self):
t = self.tables.t
self._test(t.c.dtme, "all")
def test_two(self):
t = self.tables.t
self._test(
t.c.dtme + t.c.intv,
overrides={"epoch": 1336652695.0, "minute": 24},
)
def test_three(self):
self.tables.t
actual_ts = self.bind.scalar(
func.current_timestamp()
) - datetime.timedelta(days=5)
self._test(
func.current_timestamp() - datetime.timedelta(days=5),
{
"hour": actual_ts.hour,
"year": actual_ts.year,
"month": actual_ts.month,
},
)
def test_four(self):
t = self.tables.t
self._test(
datetime.timedelta(days=5) + t.c.dt,
overrides={
"day": 15,
"epoch": 1337040000.0,
"hour": 0,
"minute": 0,
},
)
def test_five(self):
t = self.tables.t
self._test(
func.coalesce(t.c.dtme, func.current_timestamp()),
overrides={"epoch": 1336652125.0},
)
def test_six(self):
t = self.tables.t
self._test(
t.c.tm + datetime.timedelta(seconds=30),
"time",
overrides={"second": 55},
)
def test_seven(self):
self._test(
literal(datetime.timedelta(seconds=10))
- literal(datetime.timedelta(seconds=10)),
"all",
overrides={
"hour": 0,
"minute": 0,
"month": 0,
"year": 0,
"day": 0,
"epoch": 0,
},
)
def test_eight(self):
t = self.tables.t
self._test(
t.c.tm + datetime.timedelta(seconds=30),
{"hour": 12, "minute": 15, "second": 55},
)
def test_nine(self):
self._test(text("t.dt + t.tm"))
def test_ten(self):
t = self.tables.t
self._test(t.c.dt + t.c.tm)
def test_eleven(self):
self._test(
func.current_timestamp() - func.current_timestamp(),
{"year": 0, "month": 0, "day": 0, "hour": 0},
)
def test_twelve(self):
t = self.tables.t
actual_ts = self.bind.scalar(func.current_timestamp()).replace(
tzinfo=None
) - datetime.datetime(2012, 5, 10, 12, 15, 25)
self._test(
func.current_timestamp()
- func.coalesce(t.c.dtme, func.current_timestamp()),
{"day": actual_ts.days},
)
def test_thirteen(self):
t = self.tables.t
self._test(t.c.dttz, "all+tz")
def test_fourteen(self):
t = self.tables.t
self._test(t.c.tm, "time")
def test_fifteen(self):
t = self.tables.t
self._test(
datetime.timedelta(days=5) + t.c.dtme,
overrides={"day": 15, "epoch": 1337084125.0},
)
| 32.477718 | 79 | 0.449259 |
4f252ab67e09c566a17a2653015b63ff47b5509b | 3,839 | py | Python | vmtkScripts/vmtksurfacetransform.py | CemrgDevelopers/vmtk | a555b3c1686e70dcad0346acaaed9b3481dcb338 | [
"Apache-2.0"
] | 1 | 2017-02-23T09:31:53.000Z | 2017-02-23T09:31:53.000Z | vmtkScripts/vmtksurfacetransform.py | haehn/vmtk | e8e2ee9f9bea6a1839a75b57caf82f6a86944db0 | [
"Apache-2.0"
] | null | null | null | vmtkScripts/vmtksurfacetransform.py | haehn/vmtk | e8e2ee9f9bea6a1839a75b57caf82f6a86944db0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtksurfacetransform.py,v $
## Language: Python
## Date: $Date: 2005/09/14 09:49:59 $
## Version: $Revision: 1.4 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENCE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this class was improved by
## Hugo Gratama van Andel
## Academic Medical Centre - University of Amsterdam
## Dept. Biomedical Engineering & Physics
import vtk
import sys
import pypes
vmtksurfacetransform = 'vmtkSurfaceTransform'
class vmtkSurfaceTransform(pypes.pypeScript):
def __init__(self):
pypes.pypeScript.__init__(self)
self.Surface = None
self.MatrixCoefficients = []
self.InvertMatrix = 0
self.Matrix4x4 = None
self.Rotation = [0.0,0.0,0.0]
self.Translation = [0.0,0.0,0.0]
self.Scaling = [1.0,1.0,1.0]
#TODO: define covariant vector array names
self.SetScriptName('vmtksurfacetransform')
self.SetScriptDoc('transform a surface with a provided matrix')
self.SetInputMembers([
['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'],
['Matrix4x4','matrix4x4','vtkMatrix4x4',1,'','the input transform matrix'],
['MatrixCoefficients','matrix','float',16,'','coefficients of transform matrix'],
['InvertMatrix','invert','bool',1,'','invert matrix before applying transformation'],
['Rotation','rotation','float',3,'','rotations around the x-,y- and z-axis'],
['Translation','translation','float',3,'','translation in the x-,y- and z-directions'],
['Scaling','scaling','float',3,'','scaling of the x-,y- and z-directions']
])
self.SetOutputMembers([
['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter']
])
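    # Execute() below builds Matrix4x4 either from the explicit MatrixCoefficients or
    # from Rotation/Translation/Scaling, optionally inverts it, and applies it to the
    # surface through vtkMatrixToLinearTransform + vtkTransformPolyDataFilter.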
def Execute(self):
if (self.Surface == None):
self.PrintError('Error: no Surface.')
if not self.Matrix4x4:
self.Matrix4x4 = vtk.vtkMatrix4x4()
if self.MatrixCoefficients != []:
self.PrintLog('Setting up transform matrix using specified coefficients')
self.Matrix4x4.DeepCopy(self.MatrixCoefficients)
elif self.Translation != [0.0,0.0,0.0] or self.Rotation != [0.0,0.0,0.0] or self.Scaling != [1.0,1.0,1.0]:
self.PrintLog('Setting up transform matrix using specified translation, rotation and/or scaling')
transform = vtk.vtkTransform()
transform.RotateX(self.Rotation[0])
transform.RotateY(self.Rotation[1])
transform.RotateZ(self.Rotation[2])
transform.Translate(self.Translation[0], self.Translation[1], self.Translation[2])
transform.Scale(self.Scaling[0], self.Scaling[1], self.Scaling[2])
self.Matrix4x4.DeepCopy(transform.GetMatrix())
if self.InvertMatrix:
self.Matrix4x4.Invert()
transform = vtk.vtkMatrixToLinearTransform()
transform.SetInput(self.Matrix4x4)
transformFilter = vtk.vtkTransformPolyDataFilter()
transformFilter.SetInput(self.Surface)
transformFilter.SetTransform(transform)
transformFilter.Update()
self.Surface = transformFilter.GetOutput()
if self.Surface.GetSource():
self.Surface.GetSource().UnRegisterAllOutputs()
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
| 37.637255 | 118 | 0.625423 |
78c14979217c58526882004790aaeafd7d53b3d8 | 6,422 | py | Python | model/dcrnn_supervisor.py | lichunown/DCRNN | 847594332b87d3c296dcb8af574f7bb498cb5b32 | [
"MIT"
] | 9 | 2018-06-02T02:42:18.000Z | 2022-02-22T07:52:49.000Z | model/dcrnn_supervisor.py | lichunown/DCRNN | 847594332b87d3c296dcb8af574f7bb498cb5b32 | [
"MIT"
] | 1 | 2019-04-22T05:54:45.000Z | 2019-04-23T15:54:42.000Z | model/dcrnn_supervisor.py | lichunown/DCRNN | 847594332b87d3c296dcb8af574f7bb498cb5b32 | [
"MIT"
] | 3 | 2019-08-07T10:45:07.000Z | 2022-02-22T07:52:31.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import pandas as pd
import tensorflow as tf
from lib.utils import generate_graph_seq2seq_io_data_with_time
from model.dcrnn_model import DCRNNModel
from model.tf_model_supervisor import TFModelSupervisor
class DCRNNSupervisor(TFModelSupervisor):
"""
Do experiments using Graph Random Walk RNN model.
"""
def __init__(self, traffic_reading_df, adj_mx, config):
self._adj_mx = adj_mx
super(DCRNNSupervisor, self).__init__(config, df_data=traffic_reading_df)
def _prepare_train_val_test_data(self):
# Parsing model parameters.
batch_size = self._get_config('batch_size')
horizon = self._get_config('horizon')
seq_len = self._get_config('seq_len')
test_batch_size = 1
add_time_in_day = self._get_config('add_time_in_day')
num_nodes = self._df_train.shape[-1]
x_train, y_train = generate_graph_seq2seq_io_data_with_time(self._df_train,
batch_size=batch_size,
seq_len=seq_len,
horizon=horizon,
num_nodes=num_nodes,
scaler=self._scaler,
add_time_in_day=add_time_in_day,
add_day_in_week=False)
x_val, y_val = generate_graph_seq2seq_io_data_with_time(self._df_val, batch_size=batch_size,
seq_len=seq_len,
horizon=horizon,
num_nodes=num_nodes,
scaler=self._scaler,
add_time_in_day=add_time_in_day,
add_day_in_week=False)
x_test, y_test = generate_graph_seq2seq_io_data_with_time(self._df_test,
batch_size=test_batch_size,
seq_len=seq_len,
horizon=horizon,
num_nodes=num_nodes,
scaler=self._scaler,
add_time_in_day=add_time_in_day,
add_day_in_week=False)
return x_train, y_train, x_val, y_val, x_test, y_test
def _build_train_val_test_models(self):
# Builds the model.
input_dim = self._x_train.shape[-1]
num_nodes = self._df_test.shape[-1]
output_dim = self._y_train.shape[-1]
test_batch_size = self._get_config('test_batch_size')
train_config = dict(self._config)
train_config.update({
'input_dim': input_dim,
'num_nodes': num_nodes,
'output_dim': output_dim,
})
test_config = dict(self._config)
test_config.update({
'batch_size': test_batch_size,
'input_dim': input_dim,
'num_nodes': num_nodes,
'output_dim': output_dim,
})
with tf.name_scope('Train'):
with tf.variable_scope('DCRNN', reuse=False):
train_model = DCRNNModel(is_training=True, config=train_config, scaler=self._scaler,
adj_mx=self._adj_mx)
with tf.name_scope('Val'):
with tf.variable_scope('DCRNN', reuse=True):
val_model = DCRNNModel(is_training=False, config=train_config, scaler=self._scaler,
adj_mx=self._adj_mx)
with tf.name_scope('Test'):
with tf.variable_scope('DCRNN', reuse=True):
test_model = DCRNNModel(is_training=False, config=test_config, scaler=self._scaler,
adj_mx=self._adj_mx)
return train_model, val_model, test_model
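        # The three models share one set of weights: only the Train model creates the
        # variables (reuse=False); Val and Test reuse them under the same 'DCRNN' scope.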
def _convert_model_outputs_to_eval_df(self, y_preds):
y_preds = np.stack(y_preds, axis=1)
# y_preds: (batch_size, epoch_size, horizon, num_nodes, output_dim)
# horizon = y_preds.shape[2]
horizon = self._get_config('horizon')
num_nodes = self._df_train.shape[-1]
df_preds = {}
for horizon_i in range(horizon):
y_pred = np.reshape(y_preds[:, :, horizon_i, :, 0], self._eval_dfs[horizon_i].shape)
df_pred = pd.DataFrame(self._scaler.inverse_transform(y_pred), index=self._eval_dfs[horizon_i].index,
columns=self._eval_dfs[horizon_i].columns)
df_preds[horizon_i] = df_pred
return df_preds
@staticmethod
def _generate_run_id(config):
batch_size = config.get('batch_size')
dropout = config.get('dropout')
learning_rate = config.get('learning_rate')
loss_func = config.get('loss_func')
max_diffusion_step = config['max_diffusion_step']
num_rnn_layers = config.get('num_rnn_layers')
rnn_units = config.get('rnn_units')
seq_len = config.get('seq_len')
structure = '-'.join(
['%d' % rnn_units for _ in range(num_rnn_layers)])
horizon = config.get('horizon')
filter_type = config.get('filter_type')
filter_type_abbr = 'L'
if filter_type == 'random_walk':
filter_type_abbr = 'R'
elif filter_type == 'dual_random_walk':
filter_type_abbr = 'DR'
run_id = 'dcrnn_%s_%d_h_%d_%s_lr_%g_bs_%d_d_%.2f_sl_%d_%s_%s/' % (
filter_type_abbr, max_diffusion_step, horizon,
structure, learning_rate, batch_size,
dropout, seq_len, loss_func,
time.strftime('%m%d%H%M%S'))
return run_id
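    # Illustrative example (hypothetical config values, not from the original project):
    # with filter_type='dual_random_walk', max_diffusion_step=2, horizon=12,
    # num_rnn_layers=2, rnn_units=64, learning_rate=0.01, batch_size=64, dropout=0.0,
    # seq_len=12 and loss_func='mae', the format string above yields a run id such as
    # 'dcrnn_DR_2_h_12_64-64_lr_0.01_bs_64_d_0.00_sl_12_mae_<MMDDHHMMSS>/',
    # where the last field is the time.strftime('%m%d%H%M%S') timestamp.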
| 47.220588 | 113 | 0.52289 |
7e40f4021a086699111334394e58dc7331b654eb | 869 | py | Python | tir/technologies/core/numexec.py | Wilson08/tir | 98fc068ceebe6132831a6fe0c210f6d15fea5026 | [
"MIT"
] | 1 | 2021-03-12T20:08:18.000Z | 2021-03-12T20:08:18.000Z | tir/technologies/core/numexec.py | Wilson08/tir | 98fc068ceebe6132831a6fe0c210f6d15fea5026 | [
"MIT"
] | null | null | null | tir/technologies/core/numexec.py | Wilson08/tir | 98fc068ceebe6132831a6fe0c210f6d15fea5026 | [
"MIT"
] | 1 | 2020-06-11T18:50:40.000Z | 2020-06-11T18:50:40.000Z |
from tir.technologies.core.config import ConfigLoader
import requests
import json
import time
class NumExec:
def __init__(self):
self.config = ConfigLoader()
def post_exec(self, url):
status = None
endtime = time.time() + 120
while(time.time() < endtime and status != 200):
data = {'num_exec': self.config.num_exec,'ip_exec': self.config.ipExec}
response = requests.post(url.strip(), json=data)
json_data = json.loads(response.text)
status = json_data["status"]
if status != 200:
time.sleep(12)
print(f"Num exec. status: {status} Url: {url}")
if status != 200:
with open(f"E:\\smart_test\\logs_tir\\{time.time()}_json_data_response.txt", "w") as json_log:
json_log.write(str(f"STATUS: {status}")) | 25.558824 | 106 | 0.588032 |
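A minimal usage sketch for the NumExec helper above (illustrative only: the endpoint URL is made up, and ConfigLoader() is assumed to expose the num_exec and ipExec values referenced in the class):

from tir.technologies.core.numexec import NumExec

runner = NumExec()
# Posts {"num_exec": ..., "ip_exec": ...} to the endpoint, retrying roughly every 12 s
# for up to ~120 s until the service replies with {"status": 200}.
runner.post_exec("http://example.invalid/api/num_exec")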
9a32134b3a9442a5ffb0b61021dd1c1fdcc80218 | 6,064 | py | Python | learners/abstract_learner.py | Liam-zzZ/PocketFlow | 24122ed5fbadacbf7886d6030ff46ff0e4f8c2f5 | [
"Apache-2.0"
] | 1 | 2019-01-30T02:55:28.000Z | 2019-01-30T02:55:28.000Z | learners/abstract_learner.py | Liam-zzZ/PocketFlow | 24122ed5fbadacbf7886d6030ff46ff0e4f8c2f5 | [
"Apache-2.0"
] | null | null | null | learners/abstract_learner.py | Liam-zzZ/PocketFlow | 24122ed5fbadacbf7886d6030ff46ff0e4f8c2f5 | [
"Apache-2.0"
] | null | null | null |
# Tencent is pleased to support the open source community by making PocketFlow available.
#
# Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Abstract class for learners."""
from abc import ABC
from abc import abstractmethod
import os
import shutil
import subprocess
import tensorflow as tf
from utils.misc_utils import auto_barrier as auto_barrier_impl
from utils.misc_utils import is_primary_worker as is_primary_worker_impl
from utils.multi_gpu_wrapper import MultiGpuWrapper as mgw
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('model_http_url', None, 'HTTP/HTTPS url for remote model files')
tf.app.flags.DEFINE_integer('summ_step', 100, 'summarizaton step size')
tf.app.flags.DEFINE_integer('save_step', 10000, 'model saving step size')
tf.app.flags.DEFINE_string('save_path', './models/model.ckpt', 'model\'s save path')
tf.app.flags.DEFINE_string('save_path_eval', './models_eval/model.ckpt',
'model\'s save path for evaluation')
tf.app.flags.DEFINE_boolean('enbl_dst', False, 'enable the distillation loss for training')
tf.app.flags.DEFINE_boolean('enbl_warm_start', False, 'enable warm start for training')
class AbstractLearner(ABC): # pylint: disable=too-many-instance-attributes
"""Abstract class for learners.
A learner should take a ModelHelper object as input, which includes the data input pipeline and
model definition, and perform either training or evaluation with its specific algorithm.
The execution mode is specified by the <is_train> argument:
* If <is_train> is True, then the learner will train a model with specified data & network
architecture. The model will be saved to local files periodically.
* If <is_train> is False, then the learner will restore a model from local files and
measure its performance on the evaluation subset.
All functions marked with "@abstractmethod" must be explicitly implemented in the sub-class.
"""
def __init__(self, sm_writer, model_helper):
"""Constructor function.
Args:
* sm_writer: TensorFlow's summary writer
* model_helper: model helper with definitions of model & dataset
"""
# initialize attributes
self.sm_writer = sm_writer
self.data_scope = 'data'
self.model_scope = 'model'
# initialize Horovod / TF-Plus for multi-gpu training
if FLAGS.enbl_multi_gpu:
mgw.init()
from mpi4py import MPI
self.mpi_comm = MPI.COMM_WORLD
else:
self.mpi_comm = None
# obtain the function interface provided by the model helper
self.build_dataset_train = model_helper.build_dataset_train
self.build_dataset_eval = model_helper.build_dataset_eval
self.forward_train = model_helper.forward_train
self.forward_eval = model_helper.forward_eval
self.calc_loss = model_helper.calc_loss
self.setup_lrn_rate = model_helper.setup_lrn_rate
self.warm_start = model_helper.warm_start
self.dump_n_eval = model_helper.dump_n_eval
self.model_name = model_helper.model_name
self.dataset_name = model_helper.dataset_name
# checkpoint path determined by model's & dataset's names
self.ckpt_file = 'models_%s_at_%s.tar.gz' % (self.model_name, self.dataset_name)
@abstractmethod
def train(self):
"""Train a model and periodically produce checkpoint files.
Model parameters should be saved periodically for future evaluation.
"""
pass
@abstractmethod
def evaluate(self):
"""Restore a model from the latest checkpoint files and then evaluate it."""
pass
def download_model(self):
"""Download remote model files and then uncompress.
Note: All files in FLAGS.save_path will be removed and replaced by the pre-trained model.
"""
# early return if local model files exist
if tf.train.latest_checkpoint(os.path.dirname(FLAGS.save_path)) is not None:
return
# download remote model files
if FLAGS.model_http_url is None:
raise ValueError('local model files do not exist and <model_http_url> is not set')
subprocess.call(['wget', os.path.join(FLAGS.model_http_url, self.ckpt_file)])
if os.path.exists(self.ckpt_file):
if os.path.isdir(os.path.dirname(FLAGS.save_path)):
shutil.rmtree(os.path.dirname(FLAGS.save_path))
subprocess.call(['tar', '-xvf', self.ckpt_file])
else:
raise FileNotFoundError(
        'pre-trained model not available: {} / {}'.format(self.model_name, self.dataset_name))
def auto_barrier(self):
"""Automatically insert a barrier for multi-GPU training, or pass for single-GPU training."""
auto_barrier_impl(self.mpi_comm)
@classmethod
def is_primary_worker(cls, scope='global'):
"""Check whether is the primary worker of all nodes (global) or the current node (local).
Args:
* scope: check scope ('global' OR 'local')
Returns:
* flag: whether is the primary worker
"""
return is_primary_worker_impl(scope)
@property
def vars(self):
"""List of all global variables."""
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.model_scope)
@property
def trainable_vars(self):
"""List of all trainable variables."""
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.model_scope)
@property
def update_ops(self):
"""List of all update operations."""
return tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=self.model_scope)
| 38.379747 | 97 | 0.728067 |
403b7fa524d9b089c19b541f6daf69673e4b1e70 | 5,648 | py | Python | config.py | alekseynp/ontario_sunshine_list | 1898ec4c8d10964589789d70907db0e698d15d91 | [
"MIT"
] | 3 | 2017-01-29T17:17:26.000Z | 2018-08-09T20:38:45.000Z | config.py | alekseynp/ontario_sunshine_list | 1898ec4c8d10964589789d70907db0e698d15d91 | [
"MIT"
] | 1 | 2015-03-07T23:13:53.000Z | 2015-03-07T23:14:13.000Z | config.py | alekseynp/ontario_sunshine_list | 1898ec4c8d10964589789d70907db0e698d15d91 | [
"MIT"
] | 1 | 2020-09-02T12:24:46.000Z | 2020-09-02T12:24:46.000Z |
configuration = {
2014:{
'html_dir':'scrape/pssd/',
'addenda':['addenda_14.html'],
'core_exclude':[],
},
2013:{
'html_dir':'scrape/pssd/',
'addenda':['addenda_13.html'],
'core_exclude':['nosalaries.php&organization=nosalaries&year=2012'],
},
2012:{
'html_dir':'scrape/2012/',
'addenda':['addenda_12.html'],
'core_exclude':['addenda_12.html', 'nosal12.html', 'pssdguide.html'],
},
2011:{
'html_dir':'scrape/2011/',
'addenda':['addenda_11.html'],
'core_exclude':['addenda_11.html', 'nosal11.html', 'pssdguide.html'],
},
2010:{
'html_dir':'scrape/2010/',
'addenda':['addenda_10.html'],
'core_exclude':['addenda_10.html', 'nosal10.html', 'pssdguide.html'],
},
2009:{
'html_dir':'scrape/2009/',
'addenda':['addenda_09.html'],
'core_exclude':['addenda_09.html', 'nosal09.html', 'pssdguide.html'],
},
2008:{
'html_dir':'scrape/2008/',
'addenda':['addenda_08.html'],
'core_exclude':['addenda_08.html', 'nosal08.html', 'pssdguide.html'],
},
2007:{
'html_dir':'scrape/2007/',
'addenda':['addenda_07.html','addenda_07_2nd.html'],
'core_exclude':['addenda_07.html','addenda_07_2nd.html', 'nosal07.html', 'pssdeg.html'],
},
2006:{
'html_dir':'scrape/2006/',
'addenda':['addenda1_06.html'],
'core_exclude':['addenda1_06.html', 'nosal06.html', 'pssdeg.html'],
},
2005:{
'html_dir':'scrape/2005/',
'addenda':['addenda1_05.html'],
'core_exclude':['addenda1_05.html', 'nosal05.html', 'pssdeg.html'],
},
2004:{
'html_dir':'scrape/2004/',
'addenda':['addenda1_04.html'],
'core_exclude':['addenda1_04.html', 'nosal04.html'],
},
2003:{
'html_dir':'scrape/2003/',
'addenda':['addenda3_03.html'],
'core_exclude':['addenda3_03.html', 'nosal03.html', 'intro03.html'],
},
2002:{
'html_dir':'scrape/2002/',
'addenda':['psadd_02.html'],
'core_exclude':['psadd_02.html', 'nosal02.html', 'intro02.html'],
},
2001:{
'html_dir':'scrape/2001/',
'addenda':['pssdad01.html'],
'core_exclude':['pssdad01.html', 'nosal01.html', 'intro01.html'],
},
2000:{
'html_dir':'scrape/2000/',
'addenda':['adden00.html'],
'core_exclude':['adden00.html', 'nosal00.html', 'intro00.html'],
},
1999:{
'html_dir':'scrape/1999/',
'addenda':['addendum.html'],
'core_exclude':['addendum.html', 'nosalari.html', 'intro99.html'],
},
1998:{
'html_dir':'scrape/1998/',
'addenda':['adden98a.html'],
'core_exclude':['adden98a.html', 'nosal98a.html'],
},
1997:{
'html_dir':'scrape/1997/',
'addenda':['addend1.html','saddend2.html','addend3.html'],
'core_exclude':['addend1.html','addend2.html','addend3.html'],
}
}
column_translator ={
u'Taxable Benefits / Avantages imposables':u'Benefits',
u'Ministry / Minist\xe8re':u'Ministry',
u'Given Name / Pr\xe9nom':u'Given Name',
u'Position / Poste':u'Position',
u'Seconded Position / Poste combl\xe9 par la personne d\xe9tach\xe9e':u'Position',
u'Taxable Benefits / Avantages imposables Avantages imposables*':u'Benefits',
u'Surname / Nom de famille':u'Surname',
u'Salary Paid / Traitement*':u'Salary',
u'Employer / Employeur':u'Employer',
u'Salary Paid / Traitement':u'Salary',
u'Public Sector Organization Employer / Employeur - Organisme du secteur public':u'Employer',
u'Employer':u'Employer',
u'Surname':u'Surname',
u'Given Name':u'Given Name',
u'Salary':u'Salary',
u'Benefits':u'Benefits',
u'Position':u'Position',
u'Ministry':u'Ministry',
u'dummy':u'dummy',
u'Taxable Benefits / Avantages imposables*':u'Benefits',
u'Given\r\n \t\t\t\t\t\t\t\tName / Pr\xe9nom':u'Given Name',
u'Salary\r\n \t\t\t\t\t\t\t\tPaid / Traitement':u'Salary',
u'Sector':u'Category',
}
stripped_contains_column_translator = {
#u'TaxableBenefits':u'Benefits',
u'Benefits':u'Benefits',
u'Ministry':u'Ministry',
u'GivenName':u'Given Name',
u'FirstName':u'Given Name',
u'Position':u'Position',
u'Surname':u'Surname',
#u'SalaryPaid':u'Salary',
u'Salary':u'Salary',
u'Employer':u'Employer',
}
category_translator = {
'agencies':'Crown Agencies',
'colleg':'Colleges',
'crown':'Crown Agencies',
'electric':'Hydro One and Ontario Power Generation',
'hospit':'Hospitals and Boards of Public Health',
'judiciary':'Judiciary',
'legassembly':'Legislative Assembly and Offices',
'legislative':'Legislative Assembly and Offices',
'ministries':'Ministries',
'munic':'Municipalities and Services',
'other':'Other Public Sector Employers',
'schbd':'School Boards',
'schoolboards':'School Boards',
'schoolbd':'School Boards',
'unive':'Universities',
'ontpub':'Ontario Public Service',
'ops':'Ontario Public Services',
'seconded':'Ministries'
}
core_columns = [u'Employer', u'Surname', u'Given Name', u'Position', u'Salary', u'Benefits', u'Category']
| 36.675325 | 105 | 0.556126 |
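For orientation, the translator dictionaries above map the scraped bilingual column headings and URL fragments onto the canonical names listed in core_columns; two illustrative lookups (assuming the module is importable as config, per the path in this row):

from config import column_translator, category_translator

column_translator[u'Salary Paid / Traitement']   # -> u'Salary'
category_translator['schbd']                     # -> 'School Boards'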
f126c1d55e4f9c74a2f558b8d246ed040ecb4279 | 635 | py | Python | src/signature.py | akarinS/aaapi-tipkoto | 58781c8286480351b2653d51a0a0fa7fa2e93094 | [
"MIT"
] | 5 | 2018-07-02T11:08:42.000Z | 2018-08-20T19:03:58.000Z | src/signature.py | akarinS/aaapi-tipkoto | 58781c8286480351b2653d51a0a0fa7fa2e93094 | [
"MIT"
] | null | null | null | src/signature.py | akarinS/aaapi-tipkoto | 58781c8286480351b2653d51a0a0fa7fa2e93094 | [
"MIT"
] | null | null | null |
import base64
import hashlib
import hmac
class Signature(object):
def __init__(self, consumer_secret):
self.consumer_secret = consumer_secret.encode()
def _sign(self, msg): # msg must be "<class 'bytes'>"
sha256_hash_digest = hmac.new(self.consumer_secret, msg = msg, digestmod = hashlib.sha256).digest()
return 'sha256=' + base64.b64encode(sha256_hash_digest).decode()
def get_response_token(self, crc_token):
return self._sign(crc_token)
def validate(self, x_twitter_webhooks_signature, data):
return hmac.compare_digest(x_twitter_webhooks_signature, self._sign(data))
| 31.75 | 107 | 0.72126 |
37a5d9385d5029f003357e1441d7ff533db5b67a | 1,302 | py | Python | main.py | ytyaru/Hatena.Blog.API.Service.Get.201702281505 | cf57019f3d37766852c1f32854970db60e700c0d | [
"CC0-1.0"
] | null | null | null | main.py | ytyaru/Hatena.Blog.API.Service.Get.201702281505 | cf57019f3d37766852c1f32854970db60e700c0d | [
"CC0-1.0"
] | null | null | null | main.py | ytyaru/Hatena.Blog.API.Service.Get.201702281505 | cf57019f3d37766852c1f32854970db60e700c0d | [
"CC0-1.0"
] | null | null | null | #!python3
#encoding:utf-8
import xmltodict
from collections import OrderedDict
from requests_oauthlib import OAuth1Session
from bs4 import BeautifulSoup
import datetime
CREDENTIALS = {
'client_key': 'aaaaaaa',
'client_secret': 'bbbbbbbb',
'resource_owner_key': 'ccccccccc',
'resource_owner_secret': 'ddddddddd'
}
class HatenaClient(object):
ENDPOINT = ('https://blog.hatena.ne.jp/'
'{user}/{blog}/atom/entry')
def __init__(self, **args):
self.set_client(**args)
def set_client(self, **args):
self.client = OAuth1Session(**args)
def get_service(self, user, blog):
url = self.ENDPOINT.format(user=user, blog=blog)
res = self.client.get(url)
self.__check_response(res)
self.__output_file("{0}.{1}.Services.xml".format(user, blog), res.text)
def __check_response(self, response):
if not response.ok:
response.raise_for_status()
print('status code: {}'.format(response.status_code))
def __output_file(self, file_name, content, encoding='utf-8'):
with open(file_name, mode='w', encoding=encoding) as f:
f.write(content)
if __name__ == '__main__':
client = HatenaClient(**CREDENTIALS)
client.get_service('ytyaru', 'ytyaru.hatenablog.com')
| 28.304348 | 79 | 0.66129 |
cf3b96db364acc06901c1e09e133fe07860a43cf | 3,467 | py | Python | widgets/leftdockwidgets/ldw_plot.py | ostash-group/GeneticPlotter | 09389407c6d89b8f525f247e7a8af6ff0de1d665 | [
"MIT"
] | 3 | 2021-02-02T18:10:18.000Z | 2021-07-27T14:20:20.000Z | widgets/leftdockwidgets/ldw_plot.py | ostash-group/GeneticPlotter | 09389407c6d89b8f525f247e7a8af6ff0de1d665 | [
"MIT"
] | null | null | null | widgets/leftdockwidgets/ldw_plot.py | ostash-group/GeneticPlotter | 09389407c6d89b8f525f247e7a8af6ff0de1d665 | [
"MIT"
] | null | null | null |
import random
from PyQt5 import uic
from PyQt5.QtWidgets import QTableWidgetItem, QTableWidget, QWidget, QLabel, QColorDialog
from PyQt5.QtGui import QIcon, QPixmap, QPalette, QColor, QImage
from PyQt5.QtCore import Qt, QAbstractItemModel, QModelIndex, QDir, QSize
from widgets.leftdockwidgets.ui.plot_dockwidget import Ui_mainWidget
import resources
START_POINTS_COUNT=50
class ImgWidget(QLabel):
def __init__(self, path, color):
super(ImgWidget, self).__init__()
icon = QIcon(path)
pic = icon.pixmap(QSize(20, 20))
self.setPixmap(pic)
self.setStyleSheet("background-color: %s; width: 20px; height: 20px;" %
color.name())
class ImgWidgetDel(QLabel):
def __init__(self, path):
super(ImgWidgetDel, self).__init__()
icon = QIcon(path)
pic = icon.pixmap(QSize(16, 16))
self.setPixmap(pic)
self.setStyleSheet("width: 20px; height: 20px; padding:1;")
class LeftDockWidgetPlot(QWidget,Ui_mainWidget):
def __init__(self):
super(LeftDockWidgetPlot,self).__init__()
#uic.loadUi(r'widgets\leftdockwidgets\ui\plot_dockwidget.ui', self)
self.setupUi(self)
self.table.setColumnWidth(1, 20)
self.table.setColumnWidth(2, 20)
self.colors = ["#ff0000","#00aa00","#0000ff","#ff00ff","#aaaa7f","#00ff00","#aa5500",
"#ffaa00","#808000","#008080","#800080","#800000"]
self.color_ind=0
self.spinBox.setValue(START_POINTS_COUNT)
self.pointsSlider.setValue(START_POINTS_COUNT)
self.pointsSlider.sliderMoved.connect(self.spinBox.setValue)
self.spinBox.valueChanged.connect(self.pointsSlider.setValue)
self.table.cellClicked.connect(self.cellWasClicked)
self.addButton.clicked.connect(self.pushSubseqToTable)
def addTableItem(self,str):
#random color
color = QColor( self.colors[self.color_ind] )
self.color_ind = (self.color_ind+1) if (self.color_ind<len(self.colors)-1) else 0
rowPosition = self.table.rowCount()
self.table.insertRow(rowPosition)
self.table.setCellWidget(rowPosition , 0, QLabel(str))
self.table.setCellWidget(rowPosition,1,ImgWidget(":resources/e_round.png",color))
self.table.setCellWidget(rowPosition,2,ImgWidgetDel(":resources/close.png"))
def cellWasClicked(self, row, column):
if (column==1):
color = QColorDialog.getColor()
self.table.setCellWidget(row,1,ImgWidget(":resources/e_round.png",color))
if (column==2):
self.table.removeRow(row)
def pushSubseqToTable(self):
self.addTableItem(self.subseqEdit.text())
self.subseqEdit.clear()
def getSubseqList(self):
self.subseq = []
for row in range(self.table.rowCount()):
t_item = self.table.cellWidget(row,0)
self.subseq.append(t_item.text())
return(self.subseq)
def getColorsList(self):
self.colors_list = []
for row in range(self.table.rowCount()):
w = self.table.cellWidget(row,1)
color = w.palette().color(QPalette.Background)
#print (color.red(), color.green(), color.blue())
self.colors_list.append(color)
return(self.colors_list)
def getPointsCount(self):
return(self.spinBox.value())
| 30.955357 | 93 | 0.640611 |
4b0f976abd938edf139b31916bbc8ab30767901a | 814 | py | Python | tests/xsd/schema_test.py | jocassid/soapfish | 7926335089ec286d3f4f491f72d84ec7096c79c9 | [
"BSD-3-Clause"
] | null | null | null | tests/xsd/schema_test.py | jocassid/soapfish | 7926335089ec286d3f4f491f72d84ec7096c79c9 | [
"BSD-3-Clause"
] | 1 | 2018-02-19T22:40:44.000Z | 2018-02-19T22:40:44.000Z | tests/xsd/schema_test.py | jocassid/soapfish | 7926335089ec286d3f4f491f72d84ec7096c79c9 | [
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from pythonic_testcase import PythonicTestCase, assert_equals, assert_none
from soapfish import xsd
class SchemaTest(PythonicTestCase):
def test_can_lookup_element_by_name(self):
ns = 'http://soap.example/schema.xsd'
class CodeType(xsd.String):
pattern = r'[0-9]{5}'
schema = xsd.Schema(ns,
location=ns,
elementFormDefault=xsd.ElementFormDefault.QUALIFIED,
simpleTypes=[CodeType],
elements={'code': xsd.Element(CodeType)}
)
schema_element = schema.get_element_by_name('code')
assert_equals(CodeType, schema_element._passed_type)
assert_none(schema.get_element_by_name('invalid'))
| 33.916667 | 80 | 0.593366 |
0de3c5cf810fddce635b1baa4b46cb990c9552e8 | 3,313 | py | Python | examples/text_frontend/test_textnorm.py | Jackwaterveg/Parakeet | e75a07076ba5766206a6cd1fb2e5f82b0ba3842c | [
"Apache-2.0"
] | 7 | 2020-11-03T10:05:20.000Z | 2022-03-29T06:37:17.000Z | examples/text_frontend/test_textnorm.py | Jackwaterveg/Parakeet | e75a07076ba5766206a6cd1fb2e5f82b0ba3842c | [
"Apache-2.0"
] | null | null | null | examples/text_frontend/test_textnorm.py | Jackwaterveg/Parakeet | e75a07076ba5766206a6cd1fb2e5f82b0ba3842c | [
"Apache-2.0"
] | 1 | 2022-02-12T08:35:35.000Z | 2022-02-12T08:35:35.000Z |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re
from pathlib import Path
from parakeet.frontend.cn_normalization.text_normlization import TextNormalizer
from parakeet.utils.error_rate import char_errors
# delete english characters
# e.g. "你好aBC" -> "你 好"
def del_en_add_space(input: str):
output = re.sub('[a-zA-Z]', '', input)
output = [char + " " for char in output]
output = "".join(output).strip()
return output
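# Illustrative check of the helper above (hypothetical input, mirroring the example in the
# comment): del_en_add_space("你好aBC") drops the Latin letters and returns "你 好", so the
# CER computation below compares reference and normalized text character by character.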
def get_avg_cer(raw_dict, ref_dict, text_normalizer, output_dir):
edit_distances = []
ref_lens = []
wf_ref = open(output_dir / "text.ref.clean", "w")
wf_tn = open(output_dir / "text.tn", "w")
for text_id in raw_dict:
if text_id not in ref_dict:
continue
raw_text = raw_dict[text_id]
gt_text = ref_dict[text_id]
textnorm_text = text_normalizer.normalize_sentence(raw_text)
gt_text = del_en_add_space(gt_text)
textnorm_text = del_en_add_space(textnorm_text)
wf_ref.write(gt_text + "(" + text_id + ")" + "\n")
wf_tn.write(textnorm_text + "(" + text_id + ")" + "\n")
edit_distance, ref_len = char_errors(gt_text, textnorm_text)
edit_distances.append(edit_distance)
ref_lens.append(ref_len)
return sum(edit_distances) / sum(ref_lens)
def main():
parser = argparse.ArgumentParser(description="text normalization example.")
parser.add_argument(
"--input-dir",
default="data/textnorm",
type=str,
help="directory to preprocessed test data.")
parser.add_argument(
"--output-dir",
default="exp/textnorm",
type=str,
help="directory to save textnorm results.")
args = parser.parse_args()
input_dir = Path(args.input_dir).expanduser()
output_dir = Path(args.output_dir).expanduser()
output_dir.mkdir(parents=True, exist_ok=True)
assert input_dir.is_dir()
raw_dict, ref_dict = dict(), dict()
raw_path = input_dir / "text"
ref_path = input_dir / "text.ref"
with open(raw_path, "r") as rf:
for line in rf:
line = line.strip()
line_list = line.split(" ")
text_id, raw_text = line_list[0], " ".join(line_list[1:])
raw_dict[text_id] = raw_text
with open(ref_path, "r") as rf:
for line in rf:
line = line.strip()
line_list = line.split(" ")
text_id, normed_text = line_list[0], " ".join(line_list[1:])
ref_dict[text_id] = normed_text
text_normalizer = TextNormalizer()
avg_cer = get_avg_cer(raw_dict, ref_dict, text_normalizer, output_dir)
print("The avg CER of text normalization is:", avg_cer)
if __name__ == "__main__":
main()
| 33.806122 | 79 | 0.66073 |
a5e8effc79b2502a552f75713b5ffd7f16dfebae | 3,053 | py | Python | Lib/site-packages/tensorboard/main.py | caiyongji/tf2.3.1-py3.7.9-full-built | ace4efcbf05b2b494388739718a18c13eab83c71 | [
"CNRI-Python-GPL-Compatible"
] | 4 | 2020-09-02T16:13:51.000Z | 2021-06-05T08:45:59.000Z | Lib/site-packages/tensorboard/main.py | caiyongji/tf2.3.1-py3.7.9-full-built | ace4efcbf05b2b494388739718a18c13eab83c71 | [
"CNRI-Python-GPL-Compatible"
] | 1 | 2020-06-30T16:26:47.000Z | 2020-06-30T16:27:38.000Z | Lib/site-packages/tensorboard/main.py | caiyongji/tf2.3.1-py3.7.9-full-built | ace4efcbf05b2b494388739718a18c13eab83c71 | [
"CNRI-Python-GPL-Compatible"
] | 1 | 2020-06-04T23:23:09.000Z | 2020-06-04T23:23:09.000Z |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard main module.
This module ties together `tensorboard.program` and
`tensorboard.default_plugins` to provide standard TensorBoard. It's
meant to be tiny and act as little other than a config file. Those
wishing to customize the set of plugins or static assets that
TensorBoard uses can swap out this file with their own.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# TF versions prior to 1.15.0 included default GCS filesystem caching logic
# that interacted pathologically with the pattern of reads used by TensorBoard
# for logdirs. See: https://github.com/tensorflow/tensorboard/issues/1225
# The problematic behavior was fixed in 1.15.0 by
# https://github.com/tensorflow/tensorflow/commit/e43b94649d3e1ac5d538e4eca9166b899511d681
# but for older versions of TF, we avoid a regression by setting this env var to
# disable the cache, which must be done before the first import of tensorflow.
os.environ["GCS_READ_CACHE_DISABLED"] = "1"
import sys
from tensorboard import default
from tensorboard import program
from tensorboard.compat import tf
from tensorboard.plugins import base_plugin
from tensorboard.uploader import uploader_subcommand
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
def run_main():
"""Initializes flags and calls main()."""
program.setup_environment()
if getattr(tf, "__version__", "stub") == "stub":
print(
"TensorFlow installation not found - running with reduced feature set.",
file=sys.stderr,
)
tensorboard = program.TensorBoard(
default.get_plugins(),
program.get_default_assets_zip_provider(),
subcommands=[uploader_subcommand.UploaderSubcommand()],
)
try:
from absl import app
# Import this to check that app.run() will accept the flags_parser argument.
from absl.flags import argparse_flags
app.run(tensorboard.main, flags_parser=tensorboard.configure)
raise AssertionError("absl.app.run() shouldn't return")
except ImportError:
pass
except base_plugin.FlagsError as e:
print("Error: %s" % e, file=sys.stderr)
sys.exit(1)
tensorboard.configure(sys.argv)
sys.exit(tensorboard.main())
if __name__ == "__main__":
run_main()
| 34.303371 | 90 | 0.724533 |
4d70ede494c9d4f9141f09eb520e3ddb1ceea916 | 15,595 | py | Python | ironic_neutron_plugin/drivers/cisco/driver.py | rackerlabs/ironic-neutron-plugin | 7b3e19840048bc49d846362b84973c2f2b03b05e | [
"Apache-2.0"
] | 10 | 2015-01-21T22:04:40.000Z | 2017-06-29T06:55:45.000Z | ironic_neutron_plugin/drivers/cisco/driver.py | rackerlabs/ironic-neutron-plugin | 7b3e19840048bc49d846362b84973c2f2b03b05e | [
"Apache-2.0"
] | null | null | null | ironic_neutron_plugin/drivers/cisco/driver.py | rackerlabs/ironic-neutron-plugin | 7b3e19840048bc49d846362b84973c2f2b03b05e | [
"Apache-2.0"
] | 8 | 2015-01-30T16:40:30.000Z | 2020-07-23T06:06:53.000Z |
# Copyright (c) 2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implements a Nexus-OS NETCONF over SSHv2 API Client.
This is lifted partially from the cisco ml2 mechanism.
"""
from ironic_neutron_plugin import config
import eventlet
from neutron.openstack.common import importutils
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
from ironic_neutron_plugin.drivers import base as base_driver
from ironic_neutron_plugin.drivers.cisco import commands
from ironic_neutron_plugin.drivers.cisco import utils as cisco_utils
import time
LOG = logging.getLogger(__name__)
RETRYABLE_ERRORS = ['authorization failed',
'permission denied',
'not connected to netconf server']
class CiscoException(base_driver.DriverException):
pass
class CiscoDriver(base_driver.Driver):
def __init__(self, dry_run=None,
save_queue_max_age=None,
save_queue_get_wait=None):
self._config = config.cfg.CONF.ironic
self.connections = {}
self.ncclient = None
self.dry_run = dry_run
if dry_run is None:
self.dry_run = self._config.dry_run
self._save_queue_max_age = save_queue_max_age
if self._save_queue_max_age is None:
self._save_queue_max_age = self._config.save_queue_max_age
self._save_queue_get_wait = save_queue_get_wait
if self._save_queue_get_wait is None:
self._save_queue_get_wait = self._config.save_queue_get_wait
self._save_queue = eventlet.queue.Queue(maxsize=50)
eventlet.spawn(self._process_save_queue)
eventlet.sleep(0)
def _process_save_queue(self):
while True:
start_time = time.time()
elapsed = lambda: time.time() - start_time
# poll save queue
save_queue = {}
LOG.debug('Polling save queue.')
while not save_queue or elapsed() < self._save_queue_max_age:
try:
port = self._save_queue.get(
timeout=self._save_queue_get_wait)
save_queue[port[0].switch_host] = port
LOG.debug(('Queued config save on %s.' %
(port[0].switch_host)))
except eventlet.queue.Empty:
if not save_queue:
start_time = time.time()
# process save queue
LOG.info('Running config save on %s queued items.' % len(save_queue))
for port, attempt in save_queue.values():
attempt = attempt + 1
LOG.debug(('Starting config save on %s (attempt %d/3)' %
(port.switch_host, attempt)))
try:
self._save(port)
LOG.info(('Finished config save on %s (attempt %d/3)' %
(port.switch_host, attempt)))
except Exception as e:
if attempt >= 3:
LOG.error(('Failed config save on %s (attempt: %d/3) '
'Aborting, %s') % (port.switch_host, attempt, e))
else:
LOG.debug(('Failed config save on %s (attempt: %d/3) '
'Retrying, %s') % (port.switch_host, attempt, e))
self._save_queue.put((port, attempt))
eventlet.sleep(0) # yield after each save
def _save(self, port):
cmds = commands.copy_running_config()
self._run_commands(port, cmds)
    # Note: the flag is named 'asynchronous' because 'async' is a reserved word in Python 3.7+.
    def save(self, port, asynchronous=True):
        if asynchronous:
try:
LOG.info('Queuing config save on %s' % (port.switch_host))
self._save_queue.put((port, 0), block=False)
except eventlet.queue.Full:
LOG.error('Failed config save on %s, queue is full.' %
(port.switch_host))
else:
self._save(port)
def show_interface(self, port, type="ethernet"):
LOG.debug("Fetching interface %s %s" % (type, port.interface))
cmds = commands.show_interface(type, port.interface)
result = self._run_commands(port, cmds)
return cisco_utils.parse_interface_status(result)
def show_interface_configuration(self, port, type="ethernet"):
LOG.debug("Fetching interface %s %s" % (type, port.interface))
cmds = commands.show_interface_configuration(type, port.interface)
result = self._run_commands(port, cmds)
return cisco_utils.parse_command_result(result)
def show_dhcp_snooping_configuration(self, port):
LOG.debug("Fetching dhcp snooping entries for int %s" % port.interface)
po_int = commands._make_portchannel_interface(port.interface)
cmds = commands.show_dhcp_snooping_configuration(po_int)
result = self._run_commands(port, cmds)
return cisco_utils.parse_command_result(result)
def _clear(self, port):
"""Remove all configuration for a given interface, which includes
the ethernet interface, related port-channel, and any dhcp snooping
bindings or other port security features.
"""
LOG.debug("clearing interface %s" % (port.interface))
interface = port.interface
po_int = commands._make_portchannel_interface(interface)
eth_int = commands._make_ethernet_interface(interface)
# get and filter relevant dhcp snooping bindings
dhcp_conf = self.show_dhcp_snooping_configuration(port)
dhcp_conf = [cisco_utils.negate_conf(c) for c in dhcp_conf]
# we need to configure the portchannel because there is no
# guarantee that it exists, and you cannot remove snooping
# bindings without the actual interface existing.
cmds = []
if dhcp_conf:
cmds = cmds + commands._configure_interface('port-channel', po_int)
cmds = cmds + dhcp_conf
# for some reason authentication errors happen apparently randomly when
# running commands. All other port creation commands are safe to run
# twice during retry except for removing the dhcp binding, which fails
# with 'ERROR: Entry does not exist'
if cmds:
self._run_commands(port, cmds)
# delete the portchannel and default the eth interface
cmds = commands._delete_port_channel_interface(po_int)
cmds = cmds + commands._delete_ethernet_interface(eth_int)
return self._run_commands(port, cmds)
def create(self, port):
self._clear(port)
LOG.debug("Creating port %s for hardware_id %s"
% (port.interface, port.hardware_id))
LOG.debug("Attaching vlan %s to interface %s"
% (port.vlan_id, port.interface))
cmds = commands.create_port(
hardware_id=port.hardware_id,
interface=port.interface,
vlan_id=port.vlan_id,
ip=port.ip,
mac_address=port.mac_address,
trunked=port.trunked)
res = self._run_commands(port, cmds)
# for some reason authentication errors happen apparently randomly when
        # running commands. All other port creation commands are safe to run
# twice during retry except for adding the vpc to the port-channel, as
# it fails with 'ERROR: Operation failed: [vPC already exists]'
if port.trunked:
interface = port.interface
po_int = commands._make_portchannel_interface(interface)
cmds = commands._configure_interface('port-channel', po_int)
cmds = cmds + commands._add_vpc(po_int)
res = self._run_commands(port, cmds)
self.save(port)
return res
def delete(self, port):
LOG.debug("Deleting port %s for hardware_id %s"
% (port.interface, port.hardware_id))
res = self._clear(port)
self.save(port)
return res
def attach(self, port):
LOG.debug("Attaching vlan %s to interface %s"
% (port.vlan_id, port.interface))
cmds = commands.add_vlan(
interface=port.interface,
vlan_id=port.vlan_id,
ip=port.ip,
mac_address=port.mac_address,
trunked=port.trunked)
res = self._run_commands(port, cmds)
self.save(port)
return res
def detach(self, port):
LOG.debug("Detaching vlan %s from interface %s"
% (port.vlan_id, port.interface))
cmds = commands.remove_vlan(
interface=port.interface,
vlan_id=port.vlan_id,
ip=port.ip,
mac_address=port.mac_address,
trunked=port.trunked)
self._run_commands(port, cmds)
# TODO(morgbara) this is not ideal, but we don't want
        # to fail a vlan removal if the ip binding doesn't exist,
# and there really isn't a way to do this safely without
# checking for it (which takes time). This will be a little
        # better when we differentiate between types of failures when
        # talking to a switch.
cmds = commands.unbind_ip(
interface=port.interface,
vlan_id=port.vlan_id,
ip=port.ip,
mac_address=port.mac_address,
trunked=port.trunked
)
try:
res = self._run_commands(port, cmds)
except CiscoException as e:
LOG.info("Failed to remove ip binding: %s" % str(e))
res = None
self.save(port)
return res
def running_config(self, port):
LOG.debug("Fetching running-config %s" % (port.interface))
switch = {
"interface": port.interface,
"hostname": port.switch_host
}
running_config = {}
running_config['dhcp'] = self.show_dhcp_snooping_configuration(port)
running_config['ethernet'] = self.show_interface_configuration(
port, type="ethernet")
# a port-channel might not be defined
try:
running_config['port-channel'] = self.show_interface_configuration(
port, type="port-channel")
except CiscoException as e:
if ('syntax error' in str(e).lower()):
running_config['port-channel'] = ['no port-channel']
else:
raise e
return {
"switch": switch,
"running-config": running_config
}
def interface_status(self, port):
LOG.debug("Fetching interface status %s" % (port.interface))
switch = {
"interface": port.interface,
"hostname": port.switch_host
}
status = {}
status['ethernet'] = self.show_interface(
port, type="ethernet")
# a port-channel might not be defined
try:
status['port-channel'] = self.show_interface(
port, type="port-channel")
except CiscoException as e:
if ('syntax error' in str(e).lower()):
status['port-channel'] = ['no port-channel']
else:
raise e
return {
"switch": switch,
"interface-status": status
}
def _import_ncclient(self):
"""Import the NETCONF client (ncclient) module.
The ncclient module is not installed as part of the normal Neutron
distributions. It is imported dynamically in this module so that
the import can be mocked, allowing unit testing without requiring
the installation of ncclient.
"""
return importutils.import_module('ncclient.manager')
def _connect(self, port):
c = self.connections.get(port.switch_host)
# TODO(morgabra) connected is updated from a thread, so obviously
# there are some issues with checking this here.
if not c or not c.connected:
LOG.debug("starting session: %s@%s" % (port.switch_username,
port.switch_host))
connect_args = {
"host": port.switch_host,
"port": 22, # TODO(morgabra) configurable
"username": port.switch_username,
"password": port.switch_password,
"timeout": 10 # TOOD(morgabra) configurable
}
c = self.ncclient.connect(**connect_args)
self.connections[port.switch_host] = c
LOG.debug("got session: %s@%s id:%s" % (port.switch_username,
port.switch_host,
c.session_id))
return c
def _retryable_error(self, err, retryable=RETRYABLE_ERRORS):
err = str(err).lower()
for retry_err in retryable:
if retry_err in err:
return True
return False
def _run_commands_inner(self, port, commands):
if not commands:
LOG.debug("No commands to run - %(switch)s %(interface)s" %
(port.switch_host, port.interface))
return
LOG.debug("executing commands - %s %s: %s" %
(port.switch_host, port.interface, commands))
if self.dry_run:
LOG.debug("Dry run is enabled - skipping")
return None
if not self.ncclient:
self.ncclient = self._import_ncclient()
c = None
try:
c = self._connect(port)
return c.command(commands)
except Exception as e:
LOG.debug("Failed running commands - %s %s: %s" %
(port.switch_host, port.interface, e))
if c:
self.connections[port.switch_host] = None
try:
c.close_session()
except Exception as err:
LOG.debug("Failed closing session %(sess)s: %(e)s",
{'sess': c.session_id, 'e': err})
raise CiscoException(e)
def _run_commands(self, port, commands):
num_tries = 0
max_tries = 1 + self._config.auth_failure_retries
sleep_time = self._config.auth_failure_retry_interval
while True:
num_tries += 1
try:
# we must lock during switch communication here because we run
# the save commands in a separate greenthread.
with lockutils.lock('CiscoDriver-%s' % (port.switch_host),
lock_file_prefix='neutron-'):
return self._run_commands_inner(port, commands)
except CiscoException as err:
if (num_tries == max_tries or not self._retryable_error(err)):
raise
LOG.warning("Received retryable failure: %s" % err)
time.sleep(sleep_time)
| 36.099537 | 84 | 0.58942 |
e1e91e2707eeeecbd5b67fe5ea28d8fc076a95d2 | 2,171 | py | Python | timemory/api/gperftools/__init__.py | tkonolige/timemory | 1662925a3a7b6dfb1b7bba08ebd366d3fe38194a | [
"MIT"
] | 284 | 2019-08-06T17:41:39.000Z | 2022-03-25T23:37:47.000Z | timemory/api/gperftools/__init__.py | tkonolige/timemory | 1662925a3a7b6dfb1b7bba08ebd366d3fe38194a | [
"MIT"
] | 123 | 2019-08-06T03:09:54.000Z | 2022-02-26T01:51:36.000Z | timemory/api/gperftools/__init__.py | tkonolige/timemory | 1662925a3a7b6dfb1b7bba08ebd366d3fe38194a | [
"MIT"
] | 26 | 2019-12-06T22:21:09.000Z | 2022-03-23T09:34:29.000Z |
#!/usr/bin/env python
# MIT License
#
# Copyright (c) 2020, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of any
# required approvals from the U.S. Dept. of Energy). All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" @file timemory/gperftools/__init__.py
Plotting routines for gperf
"""
from __future__ import absolute_import
from __future__ import division
import sys
import warnings
__author__ = "Jonathan Madsen"
__copyright__ = "Copyright 2020, The Regents of the University of California"
__credits__ = ["Jonathan Madsen"]
__license__ = "MIT"
__version__ = "@PROJECT_VERSION@"
__maintainer__ = "Jonathan Madsen"
__email__ = "jrmadsen@lbl.gov"
__status__ = "Development"
try:
from .utils import * # noqa: F401
from . import utils as utils # noqa: F401
from . import cpu_profiler as cpu_profiler # noqa: F401
from . import heap_profiler as heap_profiler # noqa: F401
# __all__ = ['utils', 'cpu_profiler', 'heap_profiler']
except Exception as e:
sys.stderr.write("{}\n".format(e))
warnings.warn("timemory.gperftools is disabled")
| 38.087719 | 80 | 0.759097 |
ff455e0dbeddc2ae2add43f51286f663357aa2d5 | 73 | py | Python | ffmpeg_progress_yield/__init__.py | slhck/ffmpeg-progress | dcde02184ca2faf63a24eafd87a8f0845f037dfc | [
"MIT"
] | 13 | 2021-03-01T13:53:44.000Z | 2022-02-17T08:09:01.000Z | ffmpeg_progress_yield/__init__.py | slhck/ffmpeg-progress | dcde02184ca2faf63a24eafd87a8f0845f037dfc | [
"MIT"
] | 2 | 2021-06-30T08:52:40.000Z | 2021-07-01T11:38:03.000Z | ffmpeg_progress_yield/__init__.py | slhck/ffmpeg-progress | dcde02184ca2faf63a24eafd87a8f0845f037dfc | [
"MIT"
] | 2 | 2021-06-27T19:19:51.000Z | 2021-11-02T09:25:01.000Z |
from .ffmpeg_progress_yield import FfmpegProgress
__version__ = "0.2.0"
| 18.25 | 49 | 0.808219 |
1dccfe447438d882af6240a947f81f8bc0432f75 | 848 | py | Python | Utils/Classes/discordleveldisabledchannel.py | The-CJ/Phaazebot | 83a9563d210718071d4e2cdcca3b212c87abaf51 | [
"MIT"
] | 2 | 2017-09-14T08:07:55.000Z | 2021-05-18T05:05:05.000Z | Utils/Classes/discordleveldisabledchannel.py | The-CJ/Phaazebot | 83a9563d210718071d4e2cdcca3b212c87abaf51 | [
"MIT"
] | 111 | 2018-04-15T14:32:14.000Z | 2021-03-28T21:06:29.000Z | Utils/Classes/discordleveldisabledchannel.py | The-CJ/Phaazebot | 83a9563d210718071d4e2cdcca3b212c87abaf51 | [
"MIT"
] | 1 | 2018-04-15T13:24:44.000Z | 2018-04-15T13:24:44.000Z |
from Utils.Classes.undefined import UNDEFINED
from Utils.Classes.contentclass import ContentClass
class DiscordLevelDisabledChannel(ContentClass):
"""
Contains and represents stuff for a level disabled discord channel
"""
def __repr__(self):
return f"<{self.__class__.__name__} server='{self.guild_id}' channel_id='{self.channel_id}'>"
def __init__(self, data:dict):
# key
self.entry_id:int = self.asInteger(data.get("id", UNDEFINED))
self.guild_id:str = self.asString(data.get("guild_id", UNDEFINED))
# vars
self.channel_id:str = self.asString(data.get("channel_id", UNDEFINED))
def toJSON(self) -> dict:
""" Returns a json save dict representation of all values for API, storage, etc... """
j:dict = dict()
j["entry_id"] = self.asString(self.entry_id)
j["channel_id"] = self.asString(self.channel_id)
return j
| 29.241379 | 95 | 0.728774 |
7f5bd36868d5848eabebb687403a70c00767150b | 2,589 | py | Python | .code_snippets/custom_components/buspro.py | neroxps/pybuspro | 47a0a3a7db4e56dea49f0e7aaf46d3fafebf228c | [
"MIT"
] | 2 | 2019-03-15T03:47:10.000Z | 2019-10-30T15:34:09.000Z | .code_snippets/custom_components/buspro.py | neroxps/pybuspro | 47a0a3a7db4e56dea49f0e7aaf46d3fafebf228c | [
"MIT"
] | null | null | null | .code_snippets/custom_components/buspro.py | neroxps/pybuspro | 47a0a3a7db4e56dea49f0e7aaf46d3fafebf228c | [
"MIT"
] | 4 | 2019-01-12T17:50:24.000Z | 2020-01-12T16:56:24.000Z | """
Support for Buspro devices.
For more details about this component, please refer to the documentation at
https://home-assistant.io/...
"""
from uuid import uuid4
import asyncio
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (CONF_HOST, CONF_PORT, CONF_NAME)
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.discovery import load_platform
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'buspro'
DEPENDENCIES = []
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Optional(CONF_NAME, default=''): cv.string
})
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Setup the Buspro component. """
_LOGGER.info("STARTING...")
host = config[DOMAIN][CONF_HOST]
port = config[DOMAIN][CONF_PORT]
name = config[DOMAIN][CONF_NAME]
    controller = Hdlbus(host, port, hass.loop)
hass.data[DOMAIN] = controller
result = await controller.connect()
if not result:
return False
    async def _close(event):
        controller.close()
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close)
_LOGGER.info("CONNECTED...")
#load_platform(hass, 'light', DOMAIN, {'optional': 'arguments'})
#load_platform(hass, 'light', DOMAIN, busprodevice, config)
# Added via configuration.yaml light:
#load_platform(hass, 'light', DOMAIN)
load_platform(hass, 'sensor', DOMAIN)
_LOGGER.info(f"Listening on {host}:{port} with alias '{name}'")
# hass.async_create_task(discovery.async_load_platform(
# hass, 'light', DOMAIN, {'gateway': gateway_id}, hass_config))
# hass.async_create_task(discovery.async_load_platform(
# hass, 'sensor', DOMAIN, {'gateway': gateway_id}, hass_config))
return True
class Hdlbus:
def __init__(self, ip, port, loop=None):
self._value = "_value_"
@property
def value(self):
return self._value
async def connect(self):
return True
def close(self):
return True
async def observe(self, callback=None, err_callback=None, duration=0):
pass
| 20.712 | 75 | 0.651217 |
51b6eff455dc3df50c4a1e2d789191f40d67558a | 5,430 | py | Python | netspot/lib/spotmax/net_collector.py | MaxIV-KitsControls/netspot | 42f505d004bcadcfb32b6ca0511572d38641c23a | [
"MIT"
] | null | null | null | netspot/lib/spotmax/net_collector.py | MaxIV-KitsControls/netspot | 42f505d004bcadcfb32b6ca0511572d38641c23a | [
"MIT"
] | null | null | null | netspot/lib/spotmax/net_collector.py | MaxIV-KitsControls/netspot | 42f505d004bcadcfb32b6ca0511572d38641c23a | [
"MIT"
] | null | null | null |
#!/usr/bin/python -W ignore -tt
"""Module to collect data from network devices."""
from collections import defaultdict
from datetime import datetime
import sys
import re
import warnings
import ipaddress
import helpers
import netspot
import netspot_settings
from napalm import get_network_driver
from spotmax import SpotMAX
# JUNOS Ethernet swtich table RE
RE_VLAN = r'\s+([\w\d-]+)\s+'
RE_MAC = r'\s?([*\w\d:]+)\s+'
RE_TYPE = r'\s?([\w]+) '
RE_AGE = r'\s+([-\d:]+)'
RE_INTERFACE = r'\s+([-.\w\d/]+)'
RE_SWITCHING_TABLE = RE_VLAN + RE_MAC + RE_TYPE + RE_AGE + RE_INTERFACE
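# Illustrative note (simplified, hypothetical output line): RE_SWITCHING_TABLE is meant to match
# one row of JUNOS 'show ethernet-switching table' output, so a line such as
#   "   vlan100   00:11:22:33:44:55   Learn   0   ge-0/0/1.0"
# would yield the groups ('vlan100', '00:11:22:33:44:55', 'Learn', '0', 'ge-0/0/1.0')
# (vlan, mac, type, age, interface), which _get_mac_table() below unpacks by index.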
class NetCollector(object):
"""NetCollector class."""
def __init__(self, hostname, username, password, ssh_keyfile=None):
self.mac_arp_table = defaultdict()
self.hostname = hostname
# Check if hostname is IP address instead of hostname
try:
self.loopback_ip = str(ipaddress.ip_address(unicode(hostname)))
# Hostname is given as IP address - need to find asset name
inventory = netspot.NetSPOT()
assets = inventory.search(hostname, key='loopback')
for asset in assets:
if hostname == asset['loopback']:
self.hostname = asset['asset']
except ValueError:
# Resolve hostname
try:
self.loopback_ip = helpers.resolv(hostname)[0]
except helpers.CouldNotResolv:
self.loopback_ip = None
sys.exit('Could not resolv hostname: %s' % hostname)
self.device_macs = {'asset': self.hostname,
'macs': []}
# Connect and get data from device
driver = get_network_driver('junos')
if ssh_keyfile:
device = driver(self.loopback_ip, username, password, optional_args={'key_file': ssh_keyfile})
else:
device = driver(self.loopback_ip, username, password)
device.open()
# Get MAC and ARP tables
self.macs = device.get_mac_address_table()
self.arps = device.get_arp_table()
# Due to a bug with the JUNOS API some devices returns 0 MACs
if len(self.macs) == 0:
self._get_mac_table(device)
# Close device connection
device.close()
# Analyze collected data
self.analyze_data()
def analyze_data(self):
"""Run methods to generate a common MAC-ARP table."""
self._extract_arp()
self._extract_macs()
def _extract_arp(self):
"""Extract ARPs and creates ARP entries."""
for arp in self.arps:
arp_entry = {'interface': arp['interface'],
'mac': arp['mac'],
'last_move': None,
'moves': None,
'vlan': None,
'static': None,
'ip': arp['ip']}
self.mac_arp_table[arp['mac']] = arp_entry
def _extract_macs(self):
"""Extract MAC addresses and create MAC entries."""
for mac in self.macs:
# Get IP
if self.mac_arp_table.get(mac['mac']):
ip_address = self.mac_arp_table[mac['mac']]['ip']
else:
ip_address = None
# Create entry
mac_entry = {'interface': mac['interface'],
'mac': mac['mac'],
'last_move': mac['last_move'],
'moves': mac['moves'],
'vlan': mac['vlan'],
'static': mac['static'],
'ip': ip_address}
self.device_macs['macs'].append(mac_entry)
def _get_mac_table(self, device):
"""Run CLI command to get ethernet switch table.
Args:
device: NAPALM device object
"""
with warnings.catch_warnings(record=True) as warning:
warnings.filterwarnings('ignore')
macs = device.cli(['show ethernet-switching table'])
mac_entry = re.findall(RE_SWITCHING_TABLE, macs.values()[0])
mac_result = list()
if mac_entry:
for mac in mac_entry:
# Ignore '*' MAC
if mac[1] == '*':
continue
# Check if MAC is static
static = False
if mac[2] == 'Static':
static = True
mac_result.append({'interface': mac[4],
'mac': mac[1].upper(),
'vlan': mac[0],
'moves': None,
'last_move': None,
'static': static})
self.macs = mac_result
class IPUsage(SpotMAX):
"""Class that save IP usage to database."""
def __init__(self, device_macs, database=netspot_settings.DATABASE, collection=netspot_settings.COLL_IP):
SpotMAX.__init__(self, database, collection)
#super(IPUsage, self).__init__(database, collection)
self.device_macs = device_macs
def uppdate_ip(self):
"""Add or update IP address entry in database."""
# Get time and date
now = datetime.now()
for mac in self.device_macs['macs']:
if mac['ip']:
ip_address = {'date': now.strftime("%Y-%m-%d"),
'time': now.strftime("%H:%M"),
'ip': mac['ip'],
'vlan': mac['vlan'],
'mac': mac['mac'],
'asset': self.device_macs['asset'],
'interface': mac['interface']
}
if not self._exist(mac['ip'], key='ip'):
# Add asset to database
self.collection.insert_one(ip_address)
else:
update = {"$set": ip_address}
self.collection.update_one({'ip': mac['ip']}, update)
if __name__ == '__main__':
pass
| 29.037433 | 107 | 0.577532 |
4f4a229f18e25e763756e6ed9d7cced8241cf1c3 | 211 | py | Python | 101notebook/ipython-minibook/chapter2/center.py | OpenBookProjects/ipynb | 72a28109e8e30aea0b9c6713e78821e4affa2e33 | [
"MIT"
] | 6 | 2015-06-08T12:50:14.000Z | 2018-11-20T10:05:01.000Z | 101notebook/ipython-minibook/chapter2/center.py | OpenBookProjects/ipynb | 72a28109e8e30aea0b9c6713e78821e4affa2e33 | [
"MIT"
] | 15 | 2021-09-12T15:06:13.000Z | 2022-03-31T19:02:08.000Z | 101notebook/ipython-minibook/chapter2/center.py | OpenBookProjects/ipynb | 72a28109e8e30aea0b9c6713e78821e4affa2e33 | [
"MIT"
] | 8 | 2016-01-26T14:12:50.000Z | 2021-02-20T14:24:09.000Z |
import networkx as nx
g = nx.read_edgelist('data/facebook/0.edges')
sg = nx.connected_component_subgraphs(g)[0]
center = [node for node in sg.nodes() if nx.eccentricity(sg, node) == nx.radius(sg)]
print(center)
| 35.166667 | 84 | 0.739336 |
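The list comprehension above is the textbook definition of a graph center (nodes whose eccentricity equals the radius). A sketch of the same computation with a current NetworkX release, where connected_component_subgraphs() is no longer available (same edge-list file assumed):

import networkx as nx

g = nx.read_edgelist('data/facebook/0.edges')
comp = next(nx.connected_components(g))   # first component, as in the [0] indexing above
print(nx.center(g.subgraph(comp)))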
5bb64b606b542710d8cdacefd96ceb252274597e | 21,751 | py | Python | tests/tests.py | tylergannon/snakemake | a10a16a5a65e73b1cc7255eedabef8cc31191163 | [
"MIT"
] | 2 | 2020-02-21T08:58:16.000Z | 2020-02-21T08:58:30.000Z | tests/tests.py | xuexiaohua-bio/snakemake | 14f3c5f56ae35bbcc0b1536bacdec34851a7ad8d | [
"MIT"
] | null | null | null | tests/tests.py | xuexiaohua-bio/snakemake | 14f3c5f56ae35bbcc0b1536bacdec34851a7ad8d | [
"MIT"
] | null | null | null | __authors__ = ["Tobias Marschall", "Marcel Martin", "Johannes Köster"]
__copyright__ = "Copyright 2015-2019, Johannes Köster"
__email__ = "koester@jimmy.harvard.edu"
__license__ = "MIT"
import os
import sys
sys.path.insert(0, os.path.dirname(__file__))
from common import *
def test_list_untracked():
run(dpath("test_list_untracked"))
def test_delete_all_output():
run(dpath("test_delete_all_output"))
def test_github_issue_14():
"""Add cleanup_scripts argument to allow the user to keep scripts"""
# Return temporary directory for inspection - we should keep scripts here
tmpdir = run(dpath("test_github_issue_14"), cleanup=False, cleanup_scripts=False)
assert os.listdir(os.path.join(tmpdir, ".snakemake", "scripts"))
shutil.rmtree(tmpdir)
# And not here
tmpdir = run(dpath("test_github_issue_14"), cleanup=False)
assert not os.listdir(os.path.join(tmpdir, ".snakemake", "scripts"))
shutil.rmtree(tmpdir)
def test_issue956():
run(dpath("test_issue956"))
def test01():
run(dpath("test01"))
def test02():
run(dpath("test02"))
def test03():
run(dpath("test03"), targets=["test.out"])
def test04():
run(dpath("test04"), targets=["test.out"])
def test05():
run(dpath("test05"))
def test06():
run(dpath("test06"), targets=["test.bla.out"])
def test07():
run(dpath("test07"), targets=["test.out", "test2.out"])
def test08():
run(dpath("test08"), targets=["test.out", "test2.out"])
def test09():
run(dpath("test09"), shouldfail=True)
def test10():
run(dpath("test10"))
def test11():
run(dpath("test11"))
def test12():
run(dpath("test12"))
def test13():
run(dpath("test13"))
def test14():
run(dpath("test14"), snakefile="Snakefile.nonstandard", cluster="./qsub")
def test15():
run(dpath("test15"))
def test_directory():
run(
dpath("test_directory"),
targets=[
"downstream",
"symlinked_input",
"child_to_input",
"some/dir-child",
"some/shadow",
],
)
run(dpath("test_directory"), targets=["file_expecting_dir"], shouldfail=True)
run(dpath("test_directory"), targets=["dir_expecting_file"], shouldfail=True)
run(dpath("test_directory"), targets=["child_to_other"], shouldfail=True)
def test_ancient():
run(dpath("test_ancient"), targets=["D", "old_file"])
def test_report():
run(dpath("test_report"), report="report.html", check_md5=False)
def test_dynamic():
run(dpath("test_dynamic"))
def test_params():
run(dpath("test_params"))
def test_same_wildcard():
run(dpath("test_same_wildcard"))
def test_conditional():
run(
dpath("test_conditional"),
targets="test.out test.0.out test.1.out test.2.out".split(),
)
def test_unpack_dict():
run(dpath("test_unpack_dict"))
def test_unpack_list():
run(dpath("test_unpack_list"))
def test_shell():
run(dpath("test_shell"))
def test_temp():
run(dpath("test_temp"), cluster="./qsub", targets="test.realigned.bam".split())
def test_keyword_list():
run(dpath("test_keyword_list"))
def test_subworkflows():
run(dpath("test_subworkflows"), subpath=dpath("test02"))
def test_globwildcards():
run(dpath("test_globwildcards"))
def test_local_import():
run(dpath("test_local_import"))
def test_ruledeps():
run(dpath("test_ruledeps"))
def test_persistent_dict():
try:
import pytools
run(dpath("test_persistent_dict"))
except ImportError:
pass
@connected
def test_url_include():
run(dpath("test_url_include"))
def test_touch():
run(dpath("test_touch"))
def test_config():
run(dpath("test_config"))
def test_update_config():
run(dpath("test_update_config"))
def test_wildcard_keyword():
run(dpath("test_wildcard_keyword"))
def test_benchmark():
run(dpath("test_benchmark"), check_md5=False)
def test_temp_expand():
run(dpath("test_temp_expand"))
def test_wildcard_count_ambiguity():
run(dpath("test_wildcard_count_ambiguity"))
def test_srcdir():
run(dpath("test_srcdir"))
def test_multiple_includes():
run(dpath("test_multiple_includes"))
def test_yaml_config():
run(dpath("test_yaml_config"))
def test_remote():
run(dpath("test_remote"), cores=1)
def test_cluster_sync():
run(dpath("test14"), snakefile="Snakefile.nonstandard", cluster_sync="./qsub")
@pytest.mark.skip(reason="This does not work reliably in CircleCI.")
def test_symlink_temp():
run(dpath("test_symlink_temp"), shouldfail=True)
def test_empty_include():
run(dpath("test_empty_include"))
def test_script():
run(dpath("test_script"), use_conda=True)
def test_shadow():
run(dpath("test_shadow"))
def test_shadow_prefix():
run(dpath("test_shadow_prefix"), shadow_prefix="shadowdir")
run(dpath("test_shadow_prefix"), shadow_prefix="shadowdir", cluster="./qsub")
def test_until():
run(
dpath("test_until"),
until=[
"leveltwo_first", # rule name
"leveltwo_second.txt", # file name
"second_wildcard",
],
) # wildcard rule
def test_omitfrom():
run(
dpath("test_omitfrom"),
omit_from=[
"leveltwo_first", # rule name
"leveltwo_second.txt", # file name
"second_wildcard",
],
) # wildcard rule
def test_nonstr_params():
run(dpath("test_nonstr_params"))
def test_delete_output():
run(dpath("test_delete_output"), cores=1)
def test_input_generator():
run(dpath("test_input_generator"))
def test_symlink_time_handling():
# See Snakefile for notes on why this fails on some systems
if os.utime in os.supports_follow_symlinks:
run(dpath("test_symlink_time_handling"))
def test_protected_symlink_output():
run(dpath("test_protected_symlink_output"))
def test_issue328():
try:
import pytools
run(dpath("test_issue328"), forcerun=["split"])
except ImportError:
# skip test if import fails
pass
def test_conda():
if conda_available():
run(dpath("test_conda"), use_conda=True)
def test_conda_custom_prefix():
if conda_available():
run(
dpath("test_conda_custom_prefix"),
use_conda=True,
conda_prefix="custom",
set_pythonpath=False,
)
def test_wrapper():
if conda_available():
run(dpath("test_wrapper"), use_conda=True)
def conda_available():
return which("conda")
def test_get_log_none():
run(dpath("test_get_log_none"))
def test_get_log_both():
run(dpath("test_get_log_both"))
def test_get_log_stderr():
run(dpath("test_get_log_stderr"))
def test_get_log_stdout():
run(dpath("test_get_log_stdout"))
def test_get_log_complex():
run(dpath("test_get_log_complex"))
def test_spaces_in_fnames():
run(
dpath("test_spaces_in_fnames"),
# cluster="./qsub",
targets=["test bam file realigned.bam"],
printshellcmds=True,
)
# TODO deactivate because of problems with moto and boto3.
# def test_static_remote():
# import importlib
# try:
# importlib.reload(boto3)
# importlib.reload(moto)
# # only run the remote file test if the dependencies
# # are installed, otherwise do nothing
# run(dpath("test_static_remote"), cores=1)
# except ImportError:
# pass
@connected
def test_remote_ncbi_simple():
try:
import Bio
# only run the remote file test if the dependencies
# are installed, otherwise do nothing
run(dpath("test_remote_ncbi_simple"))
except ImportError:
pass
@connected
def test_remote_ncbi():
try:
import Bio
# only run the remote file test if the dependencies
# are installed, otherwise do nothing
run(dpath("test_remote_ncbi"))
except ImportError:
pass
@ci
def test_remote_irods():
run(dpath("test_remote_irods"))
def test_deferred_func_eval():
run(dpath("test_deferred_func_eval"))
def test_format_params():
run(dpath("test_format_params"), check_md5=True)
def test_rule_defined_in_for_loop():
# issue 257
run(dpath("test_rule_defined_in_for_loop"))
def test_issue381():
run(dpath("test_issue381"))
def test_format_wildcards():
run(dpath("test_format_wildcards"))
def test_with_parentheses():
run(dpath("test (with parenthese's)"))
def test_dup_out_patterns():
"""Duplicate output patterns should emit an error
Duplicate output patterns can be detected on the rule level
"""
run(dpath("test_dup_out_patterns"), shouldfail=True)
def test_restartable_job_cmd_exit_1_no_restart():
"""Test the restartable job feature on ``exit 1``
The shell snippet in the Snakemake file will fail the first time
and succeed the second time.
"""
run(
dpath("test_restartable_job_cmd_exit_1"),
cluster="./qsub",
restart_times=0,
shouldfail=True,
)
def test_restartable_job_cmd_exit_1_one_restart():
# Restarting once is enough
run(
dpath("test_restartable_job_cmd_exit_1"),
cluster="./qsub",
restart_times=1,
printshellcmds=True,
)
def test_restartable_job_qsub_exit_1():
"""Test the restartable job feature when qsub fails
The qsub in the sub directory will fail the first time and succeed the
second time.
"""
# Even two consecutive times should fail as files are cleared
run(
dpath("test_restartable_job_qsub_exit_1"),
cluster="./qsub",
restart_times=0,
shouldfail=True,
)
run(
dpath("test_restartable_job_qsub_exit_1"),
cluster="./qsub",
restart_times=0,
shouldfail=True,
)
# Restarting once is enough
run(
dpath("test_restartable_job_qsub_exit_1"),
cluster="./qsub",
restart_times=1,
shouldfail=False,
)
def test_threads():
run(dpath("test_threads"), cores=20)
def test_threads0():
run(dpath("test_threads0"))
def test_dynamic_temp():
run(dpath("test_dynamic_temp"))
# TODO this currently hangs. Has to be investigated (issue #660).
# def test_ftp_immediate_close():
# try:
# import ftputil
#
# # only run the remote file test if the dependencies
# # are installed, otherwise do nothing
# run(dpath("test_ftp_immediate_close"))
# except ImportError:
# pass
def test_issue260():
run(dpath("test_issue260"))
@not_ci
def test_default_remote():
run(
dpath("test_default_remote"),
cores=1,
default_remote_provider="S3Mocked",
default_remote_prefix="test-remote-bucket",
)
def test_run_namedlist():
run(dpath("test_run_namedlist"))
@connected
@not_ci
def test_remote_gs():
run(dpath("test_remote_gs"))
@pytest.mark.skip(reason="Need to choose how to provide billable project")
@connected
@not_ci
def test_gs_requester_pays(
requesting_project=None,
requesting_url="gcp-public-data-landsat/LC08/01/001/003/LC08_L1GT_001003_20170430_20170501_01_RT/LC08_L1GT_001003_20170430_20170501_01_RT_MTL.txt",
):
""" Tests pull-request 79 / issue 96 for billable user projects on GS
    If requesting_project is None, behaves as test_remote_gs().
Parameters
----------
    requesting_project: Optional[str]
        User project to bill for the download. If None, no project is passed for
        requester-pays, which is the usual default.
requesting_url: str
URL of bucket to download. Default will match expected output, but is a
bucket that doesn't require requester pays.
"""
# create temporary config file
with tempfile.NamedTemporaryFile(suffix=".yaml") as handle:
# specify project and url for download
if requesting_project is None:
handle.write(b"project: null\n")
else:
handle.write('project: "{}"\n'.format(requesting_project).encode())
handle.write('url: "{}"\n'.format(requesting_url).encode())
# make sure we can read them
handle.flush()
# run the pipeline
run(dpath("test_gs_requester_pays"), configfiles=[handle.name], forceall=True)
@pytest.mark.skip(reason="We need free azure access to test this in CircleCI.")
@connected
@ci
def test_remote_azure():
run(dpath("test_remote_azure"))
def test_remote_log():
run(dpath("test_remote_log"), shouldfail=True)
@connected
@pytest.mark.xfail
def test_remote_http():
run(dpath("test_remote_http"))
@connected
@pytest.mark.xfail
def test_remote_http_cluster():
run(dpath("test_remote_http"), cluster=os.path.abspath(dpath("test14/qsub")))
def test_profile():
run(dpath("test_profile"))
@connected
def test_singularity():
run(dpath("test_singularity"), use_singularity=True)
def test_singularity_invalid():
run(
dpath("test_singularity"),
targets=["invalid.txt"],
use_singularity=True,
shouldfail=True,
)
@connected
def test_singularity_conda():
run(dpath("test_singularity_conda"), use_singularity=True, use_conda=True)
def test_issue612():
run(dpath("test_issue612"), dryrun=True)
def test_bash():
run(dpath("test_bash"))
def test_inoutput_is_path():
run(dpath("test_inoutput_is_path"))
def test_archive():
run(dpath("test_archive"), archive="workflow-archive.tar.gz")
def test_log_input():
run(dpath("test_log_input"))
@pytest.fixture(scope="module")
def gcloud_cluster():
class Cluster:
def __init__(self):
self.cluster = os.environ["GCLOUD_CLUSTER"]
self.bucket_name = "snakemake-testing-{}".format(self.cluster)
shell(
"""
$GCLOUD container clusters create {self.cluster} --num-nodes 3 --scopes storage-rw --zone us-central1-a --machine-type f1-micro
$GCLOUD container clusters get-credentials {self.cluster} --zone us-central1-a
$GSUTIL mb gs://{self.bucket_name}
"""
)
def delete(self):
shell(
"""
$GCLOUD container clusters delete {self.cluster} --zone us-central1-a --quiet || true
$GSUTIL rm -r gs://{self.bucket_name} || true
"""
)
def run(self, test="test_kubernetes", **kwargs):
try:
run(
dpath(test),
kubernetes="default",
default_remote_provider="GS",
default_remote_prefix=self.bucket_name,
no_tmpdir=True,
**kwargs
)
except Exception as e:
shell(
"for p in `kubectl get pods | grep ^snakejob- | cut -f 1 -d ' '`; do kubectl logs $p; done"
)
raise e
def reset(self):
shell("$GSUTIL rm -r gs://{self.bucket_name}/* || true")
cluster = Cluster()
yield cluster
cluster.delete()
@gcloud
@pytest.mark.skip(
reason="reenable once we have figured out how to fail if available core hours per month are exceeded"
)
@pytest.mark.xfail
def test_gcloud_plain(gcloud_cluster):
gcloud_cluster.reset()
gcloud_cluster.run()
@gcloud
@pytest.mark.skip(reason="need a faster cloud compute instance to run this")
def test_gcloud_conda(gcloud_cluster):
gcloud_cluster.reset()
gcloud_cluster.run(use_conda=True)
@gcloud
@pytest.mark.skip(reason="need a faster cloud compute instance to run this")
def test_gcloud_singularity(gcloud_cluster):
gcloud_cluster.reset()
gcloud_cluster.run(use_singularity=True)
@gcloud
@pytest.mark.skip(reason="need a faster cloud compute instance to run this")
def test_gcloud_conda_singularity(gcloud_cluster):
gcloud_cluster.reset()
gcloud_cluster.run(use_singularity=True, use_conda=True)
@gcloud()
@pytest.mark.skip(reason="need a faster cloud compute instance to run this")
def test_issue1041(gcloud_cluster):
gcloud_cluster.reset()
gcloud_cluster.run(test="test_issue1041")
@connected
def test_cwl():
run(dpath("test_cwl"))
@connected
def test_cwl_singularity():
run(dpath("test_cwl"), use_singularity=True)
def test_issue805():
run(dpath("test_issue805"), shouldfail=True)
def test_pathlib():
run(dpath("test_pathlib"))
def test_pathlib_missing_file():
run(dpath("test_pathlib_missing_file"), shouldfail=True)
def test_group_jobs():
run(dpath("test_group_jobs"), cluster="./qsub")
def test_group_job_fail():
run(dpath("test_group_job_fail"), cluster="./qsub", shouldfail=True)
def test_pipes():
run(dpath("test_pipes"))
def test_pipes_fail():
run(dpath("test_pipes_fail"), shouldfail=True)
def test_validate():
run(dpath("test_validate"))
def test_validate_fail():
run(
dpath("test_validate"),
configfiles=[dpath("test_validate/config.fail.yaml")],
shouldfail=True,
)
def test_issue854():
# output and benchmark have inconsistent wildcards
# this should fail when parsing
run(dpath("test_issue854"), shouldfail=True)
def test_issue850():
run(dpath("test_issue850"), cluster="./qsub")
def test_issue860():
run(dpath("test_issue860"), cluster="./qsub", targets=["done"])
def test_issue894():
run(dpath("test_issue894"))
def test_issue584():
run(dpath("test_issue584"))
def test_issue912():
run(dpath("test_issue912"))
def test_job_properties():
run(dpath("test_job_properties"), cluster="./qsub.py")
def test_issue916():
run(dpath("test_issue916"))
def test_issue930():
run(dpath("test_issue930"), cluster="./qsub")
def test_issue635():
run(dpath("test_issue635"), use_conda=True, check_md5=False)
# TODO remove skip
@pytest.mark.skip(
reason="Temporarily disable until the stable container image becomes available again."
)
def test_convert_to_cwl():
workdir = dpath("test_convert_to_cwl")
# run(workdir, export_cwl=os.path.join(workdir, "workflow.cwl"))
shell(
"cd {workdir}; PYTHONPATH={src} python -m snakemake --export-cwl workflow.cwl",
src=os.getcwd(),
)
shell("cd {workdir}; cwltool --singularity workflow.cwl")
assert os.path.exists(os.path.join(workdir, "test.out"))
def test_issue1037():
run(dpath("test_issue1037"), dryrun=True, cluster="qsub", targets=["Foo_A.done"])
def test_issue1046():
run(dpath("test_issue1046"))
def test_checkpoints():
run(dpath("test_checkpoints"))
def test_checkpoints_dir():
run(dpath("test_checkpoints_dir"))
def test_issue1092():
run(dpath("test_issue1092"))
def test_issue1093():
run(dpath("test_issue1093"), use_conda=True)
def test_issue958():
run(dpath("test_issue958"), cluster="dummy", dryrun=True)
def test_issue471():
run(dpath("test_issue471"))
def test_issue1085():
run(dpath("test_issue1085"), shouldfail=True)
def test_issue1083():
run(dpath("test_issue1083"), use_singularity=True)
def test_pipes2():
run(dpath("test_pipes2"))
def test_expand_flag():
run(dpath("test_expand_flag"), shouldfail=True)
def test_default_resources():
from snakemake.resources import DefaultResources
run(
dpath("test_default_resources"),
default_resources=DefaultResources(
["mem_mb=max(2*input.size, 1000)", "disk_mb=max(2*input.size, 1000)"]
),
)
def test_issue1284():
run(dpath("test_issue1284"))
def test_issue1281():
run(dpath("test_issue1281"))
def test_filegraph():
workdir = dpath("test_filegraph")
dot_path = os.path.abspath("fg.dot")
pdf_path = "fg.pdf"
# make sure the calls work
shell("cd {workdir}; python -m snakemake --filegraph > {dot_path}")
# make sure the output can be interpreted by dot
with open(dot_path, "rb") as dot_file, open(pdf_path, "wb") as pdf_file:
pdf_file.write(
subprocess.check_output(["dot", "-Tpdf"], stdin=dot_file, cwd=workdir)
)
# make sure the generated pdf file is not empty
assert os.stat(pdf_path).st_size > 0
def test_batch():
from snakemake.dag import Batch
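    # Batch("aggregate", 1, 2): process only the first of two batches of the input
    # files of rule "aggregate".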
run(dpath("test_batch"), batch=Batch("aggregate", 1, 2))
def test_batch_final():
from snakemake.dag import Batch
run(dpath("test_batch_final"), batch=Batch("aggregate", 1, 1))
def test_batch_fail():
from snakemake.dag import Batch
run(dpath("test_batch"), batch=Batch("aggregate", 2, 2), shouldfail=True)
def test_github_issue52():
run(dpath("test_github_issue52"), shouldfail=True)
run(dpath("test_github_issue52"), snakefile="other.smk", shouldfail=True)
def test_github_issue78():
run(dpath("test_github_issue78"), use_singularity=True)
def test_github_issue105():
run(dpath("test_github_issue105"))
def test_output_file_cache():
test_path = dpath("test_output_file_cache")
os.environ["SNAKEMAKE_OUTPUT_CACHE"] = os.path.join(test_path, "cache")
run(test_path, cache=["a", "b", "c"])
run(test_path, cache=["invalid_multi"], targets="invalid1.txt", shouldfail=True)
def test_output_file_cache_remote():
test_path = dpath("test_output_file_cache_remote")
os.environ["SNAKEMAKE_OUTPUT_CACHE"] = "cache"
run(
test_path,
cache=["a", "b", "c"],
default_remote_provider="S3Mocked",
default_remote_prefix="test-remote-bucket",
)
def test_multiext():
run(dpath("test_multiext"))
def test_core_dependent_threads():
run(dpath("test_core_dependent_threads"))
def test_env_modules():
run(dpath("test_env_modules"), use_env_modules=True)
| 22.217569 | 151 | 0.665625 |
7e10db9a7a41c201087b052a26c36dca7ff70dec | 5,421 | py | Python | VTK-7.1.1/build/Wrapping/Python/vtk/__init__.py | likewatchk/python-pcl | 2a66797719f1b5af7d6a0d0893f697b3786db461 | ["BSD-3-Clause"] | null | null | null | VTK-7.1.1/build/Wrapping/Python/vtk/__init__.py | likewatchk/python-pcl | 2a66797719f1b5af7d6a0d0893f697b3786db461 | ["BSD-3-Clause"] | null | null | null | VTK-7.1.1/build/Wrapping/Python/vtk/__init__.py | likewatchk/python-pcl | 2a66797719f1b5af7d6a0d0893f697b3786db461 | ["BSD-3-Clause"] | null | null | null |
""" This module loads the entire VTK library into its namespace. It
also allows one to use specific packages inside the vtk directory."""
from __future__ import absolute_import
import os
import sys
# The dl module is used to force the symbols in the loaded VTK modules to
# be global, that is, to force symbols to be shared between modules. This
# used to be necessary in VTK 4 but might not be with VTK 5 and later.
# The first "except" is because systems like AIX don't have the dl module.
# The second "except" is because the dl module raises a system error on
# ia64 and x86_64 systems because "int" and addresses are different sizes.
try:
import dl
except ImportError:
# do not give up too early:
# are we on AMD64 ?
try:
import DLFCN as dl
except ImportError:
dl = None
except SystemError:
dl = None
# set the dlopen flags so that VTK does not run into problems with
# shared symbols.
try:
# only Python >= 2.2 has this functionality
orig_dlopen_flags = sys.getdlopenflags()
except AttributeError:
orig_dlopen_flags = None
if dl and (os.name == 'posix'):
sys.setdlopenflags(dl.RTLD_NOW|dl.RTLD_GLOBAL)
# --------------------------------------
from .vtkCommonCore import *
from .vtkCommonMath import *
from .vtkCommonMisc import *
from .vtkCommonSystem import *
from .vtkCommonTransforms import *
from .vtkCommonDataModel import *
from .vtkCommonColor import *
from .vtkCommonExecutionModel import *
from .vtkCommonComputationalGeometry import *
from .vtkFiltersCore import *
from .vtkFiltersGeneral import *
from .vtkImagingCore import *
from .vtkImagingFourier import *
from .vtkFiltersStatistics import *
from .vtkFiltersExtraction import *
from .vtkInfovisCore import *
from .vtkFiltersGeometry import *
from .vtkFiltersSources import *
from .vtkRenderingCore import *
from .vtkRenderingFreeType import *
from .vtkRenderingContext2D import *
from .vtkChartsCore import *
from .vtkIOCore import *
from .vtkIOLegacy import *
from .vtkIOXMLParser import *
from .vtkDomainsChemistry import *
from .vtkIOImage import *
from .vtkRenderingOpenGL2 import *
from .vtkDomainsChemistryOpenGL2 import *
from .vtkIOXML import *
from .vtkParallelCore import *
from .vtkFiltersAMR import *
from .vtkFiltersFlowPaths import *
from .vtkFiltersGeneric import *
from .vtkImagingSources import *
from .vtkFiltersHybrid import *
from .vtkFiltersHyperTree import *
from .vtkImagingGeneral import *
from .vtkFiltersImaging import *
from .vtkFiltersModeling import *
from .vtkFiltersParallel import *
from .vtkFiltersParallelImaging import *
from .vtkFiltersPoints import *
from .vtkFiltersProgrammable import *
from .vtkFiltersPython import *
from .vtkFiltersSMP import *
from .vtkFiltersSelection import *
from .vtkFiltersTexture import *
from .vtkFiltersVerdict import *
from .vtkImagingHybrid import *
from .vtkInfovisLayout import *
from .vtkInteractionStyle import *
from .vtkImagingColor import *
from .vtkRenderingAnnotation import *
from .vtkRenderingVolume import *
from .vtkInteractionWidgets import *
from .vtkViewsCore import *
from .vtkGeovisCore import *
from .vtkIOAMR import *
from .vtkIOEnSight import *
from .vtkIOExodus import *
from .vtkRenderingGL2PSOpenGL2 import *
from .vtkIOExport import *
from .vtkIOGeometry import *
from .vtkIOImport import *
from .vtkIOInfovis import *
from .vtkIOLSDyna import *
from .vtkIOMINC import *
from .vtkIOMovie import *
from .vtkIONetCDF import *
from .vtkIOPLY import *
from .vtkIOParallel import *
from .vtkIOParallelXML import *
from .vtkIOSQL import *
from .vtkIOTecplotTable import *
from .vtkIOVideo import *
from .vtkImagingMath import *
from .vtkImagingMorphological import *
from .vtkImagingStatistics import *
from .vtkImagingStencil import *
from .vtkInteractionImage import *
from .vtkRenderingContextOpenGL2 import *
from .vtkRenderingImage import *
from .vtkRenderingLOD import *
from .vtkRenderingLabel import *
from .vtkRenderingVolumeOpenGL2 import *
from .vtkViewsContext2D import *
from .vtkViewsInfovis import *
# --------------------------------------
# useful macro for getting type names
__vtkTypeNameDict = {VTK_VOID:"void",
VTK_DOUBLE:"double",
VTK_FLOAT:"float",
VTK_LONG:"long",
VTK_UNSIGNED_LONG:"unsigned long",
VTK_INT:"int",
VTK_UNSIGNED_INT:"unsigned int",
VTK_SHORT:"short",
VTK_UNSIGNED_SHORT:"unsigned short",
VTK_CHAR:"char",
VTK_UNSIGNED_CHAR:"unsigned char",
VTK_SIGNED_CHAR:"signed char",
VTK_LONG_LONG:"long long",
VTK_UNSIGNED_LONG_LONG:"unsigned long long",
VTK___INT64:"__int64",
VTK_UNSIGNED___INT64:"unsigned __int64",
VTK_ID_TYPE:"vtkIdType",
VTK_BIT:"bit"}
def vtkImageScalarTypeNameMacro(type):
return __vtkTypeNameDict[type]
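# For example, vtkImageScalarTypeNameMacro(VTK_FLOAT) returns "float" and
# vtkImageScalarTypeNameMacro(VTK_ID_TYPE) returns "vtkIdType".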
# import convenience decorators
from .util.misc import calldata_type
# import the vtkVariant helpers
from .util.vtkVariant import *
# reset the dlopen flags to the original state if possible.
if dl and (os.name == 'posix') and orig_dlopen_flags:
sys.setdlopenflags(orig_dlopen_flags)
# removing things the user shouldn't have to see.
del orig_dlopen_flags
del sys, dl, os
| 32.461078 | 74 | 0.723852 |
2ce32883390a95999b43ce8ed04f27ccb21cfdb2 | 248 | py | Python | src/pretalx/cfp/__init__.py | hnzlmnn/pretalx | fcdf1a03c9428c1207ee4f4228694b2ed8e7495b | ["Apache-2.0"] | 1 | 2018-12-09T12:35:10.000Z | 2018-12-09T12:35:10.000Z | src/pretalx/cfp/__init__.py | hnzlmnn/pretalx | fcdf1a03c9428c1207ee4f4228694b2ed8e7495b | ["Apache-2.0"] | 1 | 2019-07-05T20:03:42.000Z | 2019-07-05T20:03:42.000Z | src/pretalx/cfp/__init__.py | hnzlmnn/pretalx | fcdf1a03c9428c1207ee4f4228694b2ed8e7495b | ["Apache-2.0"] | null | null | null |
from django.apps import AppConfig
class CfPConfig(AppConfig):
name = 'pretalx.cfp'
def ready(self):
from . import permissions # noqa
from .phrases import CfPPhrases # noqa
default_app_config = 'pretalx.cfp.CfPConfig'
| 19.076923 | 47 | 0.689516 |
e021d62d9f1804f8e5c43d1078ee225dea783b07 | 709 | py | Python | precommit.py | iafisher/oeuvre | 42ed81894b2836b081c284b4c9e25cec3a93d647 | ["MIT"] | null | null | null | precommit.py | iafisher/oeuvre | 42ed81894b2836b081c284b4c9e25cec3a93d647 | ["MIT"] | 31 | 2020-05-24T00:27:50.000Z | 2020-07-28T04:59:37.000Z | precommit.py | iafisher/oeuvre | 42ed81894b2836b081c284b4c9e25cec3a93d647 | ["MIT"] | null | null | null |
"""Pre-commit configuration for git.
This file was created by precommit (https://github.com/iafisher/precommit).
You are welcome to edit it yourself to customize your pre-commit hook.
"""
from precommitlib import checks
def init(precommit):
# Generic checks
precommit.check(checks.NoStagedAndUnstagedChanges())
precommit.check(checks.NoWhitespaceInFilePath())
precommit.check(checks.DoNotSubmit())
# Language-specific checks
precommit.check(checks.PythonFormat())
precommit.check(checks.PythonLint(args=["--extend-ignore=E731"]))
precommit.check(checks.PythonTypes(exclude=["precommit.py"]))
precommit.check(checks.Command("UnitTests", ["python3", "oeuvre_test.py"]))
| 33.761905 | 79 | 0.747532 |
0799da280014de5f941647d1bd02cf3a180bb2f6 | 3,501 | py | Python | hw3/code/plot_multiple_from_pkl.py | hsilva664/Reinforcement_Learning_Course | 6a250bc017965bec76b6fe909068e40127e62fa7 | ["MIT"] | null | null | null | hw3/code/plot_multiple_from_pkl.py | hsilva664/Reinforcement_Learning_Course | 6a250bc017965bec76b6fe909068e40127e62fa7 | ["MIT"] | null | null | null | hw3/code/plot_multiple_from_pkl.py | hsilva664/Reinforcement_Learning_Course | 6a250bc017965bec76b6fe909068e40127e62fa7 | ["MIT"] | null | null | null |
import pickle
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
import argparse
import re
out_folder = 'plots'
re_pattern = re.compile(r'.*\.pkl')
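# Example invocation (inferred from the argparse arguments and paths used below):
#   python plot_multiple_from_pkl.py combined exp_a exp_b
# reads every .pkl under plots/exp_a/ and plots/exp_b/ and writes
# plots/mean_100_combined.png and plots/best_mean_combined.png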
def filter_infs(np_arr):
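    # Replace any non-finite entries (NaN/inf) with 0 in place and return the array.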
np_arr[np.logical_not(np.isfinite(np_arr))] = 0
return np_arr
def main():
parser = argparse.ArgumentParser()
parser.add_argument('out_name', type=str)
parser.add_argument('exp_names', type=str, nargs='*')
args = parser.parse_args()
all_mean_100_rew_avg = []
all_mean_100_rew_std = []
all_best_mean_rew_avg = []
all_best_mean_rew_std = []
global_min_timesteps = None
global_min_timesteps_value = -1
for exp_name_i in args.exp_names:
pickle_filenames = [os.path.join(out_folder, exp_name_i, fn) for fn in os.listdir(os.path.join(out_folder, exp_name_i)) if re_pattern.search(fn) is not None]
mean_100_rew = []
best_mean_rew = []
min_timesteps = None
min_timesteps_value = -1
for filename in pickle_filenames:
with open(filename, 'rb') as f:
pk_obj = pickle.load(f)
new_mean_100_rew = pk_obj['mean_100_rew']
new_best_mean_rew = pk_obj['best_mean_rew']
new_min_timesteps_value = pk_obj['timesteps']
mean_100_rew.append(new_mean_100_rew)
best_mean_rew.append(new_best_mean_rew)
if (new_min_timesteps_value[-1] < global_min_timesteps_value) or (global_min_timesteps_value == -1):
global_min_timesteps_value = new_min_timesteps_value[-1]
global_min_timesteps = new_min_timesteps_value
if (new_min_timesteps_value[-1] < min_timesteps_value) or (min_timesteps_value == -1):
min_timesteps_value = new_min_timesteps_value[-1]
min_timesteps = new_min_timesteps_value
mean_100_rew = np.asarray([a[:(min_timesteps.shape[0])] for a in mean_100_rew])
best_mean_rew = np.asarray([a[:(min_timesteps.shape[0])] for a in best_mean_rew])
all_mean_100_rew_avg.append(np.mean(mean_100_rew, axis = 0))
all_mean_100_rew_std.append(filter_infs(np.std(mean_100_rew, axis = 0)))
all_best_mean_rew_avg.append(np.mean(best_mean_rew, axis = 0))
all_best_mean_rew_std.append(filter_infs(np.std(best_mean_rew, axis = 0)))
colors = ['r','g','b','c','m','y','k']
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
for i in range(len(args.exp_names)):
plt.plot(global_min_timesteps, all_mean_100_rew_avg[i][:(global_min_timesteps.shape[0])], \
color=colors[i % len(colors)], linewidth=2.0, label=args.exp_names[i])
plt.ylabel('Mean last 100 episode rew')
plt.xlabel('Timesteps')
plt.legend()
plt.savefig(os.path.join(out_folder, 'mean_100_%s.png'%(args.out_name)), bbox_inches='tight')
plt.clf()
plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
for i in range(len(args.exp_names)):
plt.plot(global_min_timesteps, all_best_mean_rew_avg[i][:(global_min_timesteps.shape[0])], \
color=colors[i % len(colors)], linewidth=2.0, label=args.exp_names[i])
plt.ylabel('Best mean episode rew')
plt.xlabel('Timesteps')
plt.legend()
plt.savefig(os.path.join(out_folder, 'best_mean_%s.png'%(args.out_name)), bbox_inches='tight')
plt.clf()
if __name__ == "__main__":
main()
| 35.363636 | 165 | 0.647815 |
4f7c84a7afa656bcfcc5c88120e16e5f35bdc70d | 664 | py | Python | web/mysite/app1/migrations/0001_initial.py | aaabbb200909/execjson | fb7fbb462f4ef135dc710e083068fb7c1f10496b | ["Apache-2.0"] | null | null | null | web/mysite/app1/migrations/0001_initial.py | aaabbb200909/execjson | fb7fbb462f4ef135dc710e083068fb7c1f10496b | ["Apache-2.0"] | 2 | 2016-10-09T13:35:43.000Z | 2016-10-09T13:37:42.000Z | web/mysite/app1/migrations/0001_initial.py | aaabbb200909/execjson | fb7fbb462f4ef135dc710e083068fb7c1f10496b | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-03-30 12:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='user_id_jsons',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.CharField(max_length=30)),
('saveid', models.CharField(max_length=30)),
('json', models.TextField()),
],
),
]
| 25.538462 | 114 | 0.578313 |
ea7f6eb16fe1fb9718677d5b462abb1e184547f3 | 30,012 | py | Python | tests/components/sensibo/test_climate.py | mcx/core | 55eca2e2b4ebcf11486749035fd3c7e77ea14b8f | ["Apache-2.0"] | null | null | null | tests/components/sensibo/test_climate.py | mcx/core | 55eca2e2b4ebcf11486749035fd3c7e77ea14b8f | ["Apache-2.0"] | null | null | null | tests/components/sensibo/test_climate.py | mcx/core | 55eca2e2b4ebcf11486749035fd3c7e77ea14b8f | ["Apache-2.0"] | null | null | null |
"""The test for the sensibo climate platform."""
from __future__ import annotations
from datetime import datetime, timedelta
from unittest.mock import AsyncMock, patch
from pysensibo.model import SensiboData
import pytest
from voluptuous import MultipleInvalid
from homeassistant.components.climate.const import (
ATTR_FAN_MODE,
ATTR_HVAC_MODE,
ATTR_SWING_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
DOMAIN as CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_SWING_MODE,
SERVICE_SET_TEMPERATURE,
)
from homeassistant.components.sensibo.climate import (
ATTR_AC_INTEGRATION,
ATTR_GEO_INTEGRATION,
ATTR_INDOOR_INTEGRATION,
ATTR_MINUTES,
ATTR_OUTDOOR_INTEGRATION,
ATTR_SENSITIVITY,
SERVICE_ASSUME_STATE,
SERVICE_DISABLE_PURE_BOOST,
SERVICE_ENABLE_PURE_BOOST,
SERVICE_ENABLE_TIMER,
_find_valid_target_temp,
)
from homeassistant.components.sensibo.const import DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
ATTR_TEMPERATURE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_UNKNOWN,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util import dt
from tests.common import async_fire_time_changed
async def test_climate_find_valid_targets():
"""Test function to return temperature from valid targets."""
valid_targets = [10, 16, 17, 18, 19, 20]
assert _find_valid_target_temp(7, valid_targets) == 10
assert _find_valid_target_temp(10, valid_targets) == 10
assert _find_valid_target_temp(11, valid_targets) == 16
assert _find_valid_target_temp(15, valid_targets) == 16
assert _find_valid_target_temp(16, valid_targets) == 16
assert _find_valid_target_temp(18.5, valid_targets) == 19
assert _find_valid_target_temp(20, valid_targets) == 20
assert _find_valid_target_temp(25, valid_targets) == 20
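# For reference, a minimal sketch of the behaviour the asserts above pin down
# (hypothetical -- the real helper is imported from
# homeassistant.components.sensibo.climate and its implementation may differ):
#
#     def _find_valid_target_temp(target, valid_targets):
#         if target <= valid_targets[0]:
#             return valid_targets[0]
#         if target >= valid_targets[-1]:
#             return valid_targets[-1]
#         return next(t for t in valid_targets if t >= target)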
async def test_climate(
hass: HomeAssistant, load_int: ConfigEntry, get_data: SensiboData
) -> None:
"""Test the Sensibo climate."""
state1 = hass.states.get("climate.hallway")
state2 = hass.states.get("climate.kitchen")
assert state1.state == "heat"
assert state1.attributes == {
"hvac_modes": [
"cool",
"heat",
"dry",
"heat_cool",
"fan_only",
"off",
],
"min_temp": 10,
"max_temp": 20,
"target_temp_step": 1,
"fan_modes": ["quiet", "low", "medium"],
"swing_modes": [
"stopped",
"fixedTop",
"fixedMiddleTop",
],
"current_temperature": 21.2,
"temperature": 25,
"current_humidity": 32.9,
"fan_mode": "high",
"swing_mode": "stopped",
"friendly_name": "Hallway",
"supported_features": 41,
}
assert state2.state == "off"
async def test_climate_fan(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate fan service."""
state1 = hass.states.get("climate.hallway")
assert state1.attributes["fan_mode"] == "high"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_FAN_MODE: "low"},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["fan_mode"] == "low"
monkeypatch.setattr(
get_data.parsed["ABC999111"],
"active_features",
[
"timestamp",
"on",
"mode",
"swing",
"targetTemperature",
"horizontalSwing",
"light",
],
)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
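        # Firing a time-changed event past the coordinator's update interval makes
        # the DataUpdateCoordinator poll the (patched) Sensibo client again, so the
        # data monkeypatched above is picked up by the entity.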
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
):
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_FAN_MODE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_FAN_MODE: "low"},
blocking=True,
)
await hass.async_block_till_done()
state3 = hass.states.get("climate.hallway")
assert state3.attributes["fan_mode"] == "low"
async def test_climate_swing(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate swing service."""
state1 = hass.states.get("climate.hallway")
assert state1.attributes["swing_mode"] == "stopped"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_SWING_MODE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_SWING_MODE: "fixedTop"},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["swing_mode"] == "fixedTop"
monkeypatch.setattr(
get_data.parsed["ABC999111"],
"active_features",
[
"timestamp",
"on",
"mode",
"targetTemperature",
"horizontalSwing",
"light",
],
)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
):
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_SWING_MODE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_SWING_MODE: "fixedTop"},
blocking=True,
)
await hass.async_block_till_done()
state3 = hass.states.get("climate.hallway")
assert state3.attributes["swing_mode"] == "fixedTop"
async def test_climate_temperatures(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate temperature service."""
state1 = hass.states.get("climate.hallway")
assert state1.attributes["temperature"] == 25
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_TEMPERATURE: 20},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["temperature"] == 20
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_TEMPERATURE: 15},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["temperature"] == 16
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_TEMPERATURE: 18.5},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["temperature"] == 19
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_TEMPERATURE: 24},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["temperature"] == 20
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_TEMPERATURE: 20},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["temperature"] == 20
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
with pytest.raises(MultipleInvalid):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: state1.entity_id},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["temperature"] == 20
monkeypatch.setattr(
get_data.parsed["ABC999111"],
"active_features",
[
"timestamp",
"on",
"mode",
"swing",
"horizontalSwing",
"light",
],
)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_TEMPERATURE: 20},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["temperature"] == 20
async def test_climate_temperature_is_none(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate temperature service no temperature provided."""
monkeypatch.setattr(
get_data.parsed["ABC999111"],
"active_features",
[
"timestamp",
"on",
"mode",
"fanLevel",
"targetTemperature",
"swing",
"horizontalSwing",
"light",
],
)
monkeypatch.setattr(
get_data.parsed["ABC999111"],
"target_temp",
25,
)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state1 = hass.states.get("climate.hallway")
assert state1.attributes["temperature"] == 25
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
):
with pytest.raises(ValueError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: state1.entity_id,
ATTR_TARGET_TEMP_HIGH: 30,
ATTR_TARGET_TEMP_LOW: 20,
},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.attributes["temperature"] == 25
async def test_climate_hvac_mode(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate hvac mode service."""
monkeypatch.setattr(
get_data.parsed["ABC999111"],
"active_features",
[
"timestamp",
"on",
"mode",
"fanLevel",
"targetTemperature",
"swing",
"horizontalSwing",
"light",
],
)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state1 = hass.states.get("climate.hallway")
assert state1.state == "heat"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data",
return_value=get_data,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_HVAC_MODE: "off"},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.state == "off"
monkeypatch.setattr(get_data.parsed["ABC999111"], "device_on", False)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data",
return_value=get_data,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_HVAC_MODE: "heat"},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.state == "heat"
async def test_climate_on_off(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate on/off service."""
monkeypatch.setattr(get_data.parsed["ABC999111"], "hvac_mode", "heat")
monkeypatch.setattr(get_data.parsed["ABC999111"], "device_on", True)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state1 = hass.states.get("climate.hallway")
assert state1.state == "heat"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: state1.entity_id},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.state == "off"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: state1.entity_id},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.state == "heat"
async def test_climate_service_failed(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate service failed."""
monkeypatch.setattr(get_data.parsed["ABC999111"], "hvac_mode", "heat")
monkeypatch.setattr(get_data.parsed["ABC999111"], "device_on", True)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state1 = hass.states.get("climate.hallway")
assert state1.state == "heat"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Error", "failureReason": "Did not work"}},
):
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: state1.entity_id},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.state == "heat"
async def test_climate_assumed_state(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate assumed state service."""
monkeypatch.setattr(get_data.parsed["ABC999111"], "hvac_mode", "heat")
monkeypatch.setattr(get_data.parsed["ABC999111"], "device_on", True)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state1 = hass.states.get("climate.hallway")
assert state1.state == "heat"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data",
return_value=get_data,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_ac_state_property",
return_value={"result": {"status": "Success"}},
):
await hass.services.async_call(
DOMAIN,
SERVICE_ASSUME_STATE,
{ATTR_ENTITY_ID: state1.entity_id, ATTR_STATE: "off"},
blocking=True,
)
await hass.async_block_till_done()
state2 = hass.states.get("climate.hallway")
assert state2.state == "off"
async def test_climate_no_fan_no_swing(
hass: HomeAssistant,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate fan service."""
state = hass.states.get("climate.hallway")
assert state.attributes["fan_mode"] == "high"
assert state.attributes["swing_mode"] == "stopped"
monkeypatch.setattr(get_data.parsed["ABC999111"], "fan_mode", None)
monkeypatch.setattr(get_data.parsed["ABC999111"], "swing_mode", None)
monkeypatch.setattr(get_data.parsed["ABC999111"], "fan_modes", None)
monkeypatch.setattr(get_data.parsed["ABC999111"], "swing_modes", None)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state = hass.states.get("climate.hallway")
assert state.attributes["fan_mode"] is None
assert state.attributes["swing_mode"] is None
assert state.attributes["fan_modes"] is None
assert state.attributes["swing_modes"] is None
async def test_climate_set_timer(
hass: HomeAssistant,
entity_registry_enabled_by_default: AsyncMock,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate Set Timer service."""
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state_climate = hass.states.get("climate.hallway")
assert hass.states.get("sensor.hallway_timer_end_time").state == STATE_UNKNOWN
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data",
return_value=get_data,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_timer",
return_value={"status": "failure"},
):
with pytest.raises(MultipleInvalid):
await hass.services.async_call(
DOMAIN,
SERVICE_ENABLE_TIMER,
{
ATTR_ENTITY_ID: state_climate.entity_id,
},
blocking=True,
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data",
return_value=get_data,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_timer",
return_value={"status": "failure"},
):
with pytest.raises(HomeAssistantError):
await hass.services.async_call(
DOMAIN,
SERVICE_ENABLE_TIMER,
{
ATTR_ENTITY_ID: state_climate.entity_id,
ATTR_MINUTES: 30,
},
blocking=True,
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data",
return_value=get_data,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_timer",
return_value={"status": "success", "result": {"id": "SzTGE4oZ4D"}},
):
await hass.services.async_call(
DOMAIN,
SERVICE_ENABLE_TIMER,
{
ATTR_ENTITY_ID: state_climate.entity_id,
ATTR_MINUTES: 30,
},
blocking=True,
)
await hass.async_block_till_done()
monkeypatch.setattr(get_data.parsed["ABC999111"], "timer_on", True)
monkeypatch.setattr(get_data.parsed["ABC999111"], "timer_id", "SzTGE4oZ4D")
monkeypatch.setattr(get_data.parsed["ABC999111"], "timer_state_on", False)
monkeypatch.setattr(
get_data.parsed["ABC999111"],
"timer_time",
datetime(2022, 6, 6, 12, 00, 00, tzinfo=dt.UTC),
)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
assert (
hass.states.get("sensor.hallway_timer_end_time").state
== "2022-06-06T12:00:00+00:00"
)
async def test_climate_pure_boost(
hass: HomeAssistant,
entity_registry_enabled_by_default: AsyncMock,
load_int: ConfigEntry,
monkeypatch: pytest.MonkeyPatch,
get_data: SensiboData,
) -> None:
"""Test the Sensibo climate assumed state service."""
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state_climate = hass.states.get("climate.kitchen")
state2 = hass.states.get("binary_sensor.kitchen_pure_boost_enabled")
assert state2.state == "off"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data",
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_pureboost",
):
with pytest.raises(MultipleInvalid):
await hass.services.async_call(
DOMAIN,
SERVICE_ENABLE_PURE_BOOST,
{
ATTR_ENTITY_ID: state_climate.entity_id,
ATTR_INDOOR_INTEGRATION: True,
ATTR_OUTDOOR_INTEGRATION: True,
ATTR_SENSITIVITY: "Sensitive",
},
blocking=True,
)
await hass.async_block_till_done()
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data",
return_value=get_data,
), patch(
"homeassistant.components.sensibo.util.SensiboClient.async_set_pureboost",
return_value={
"status": "success",
"result": {
"enabled": True,
"sensitivity": "S",
"measurements_integration": True,
"ac_integration": False,
"geo_integration": False,
"prime_integration": True,
},
},
):
await hass.services.async_call(
DOMAIN,
SERVICE_ENABLE_PURE_BOOST,
{
ATTR_ENTITY_ID: state_climate.entity_id,
ATTR_AC_INTEGRATION: False,
ATTR_GEO_INTEGRATION: False,
ATTR_INDOOR_INTEGRATION: True,
ATTR_OUTDOOR_INTEGRATION: True,
ATTR_SENSITIVITY: "Sensitive",
},
blocking=True,
)
await hass.async_block_till_done()
monkeypatch.setattr(get_data.parsed["AAZZAAZZ"], "pure_boost_enabled", True)
monkeypatch.setattr(get_data.parsed["AAZZAAZZ"], "pure_sensitivity", "s")
monkeypatch.setattr(get_data.parsed["AAZZAAZZ"], "pure_measure_integration", True)
monkeypatch.setattr(get_data.parsed["AAZZAAZZ"], "pure_prime_integration", True)
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state1 = hass.states.get("binary_sensor.kitchen_pure_boost_enabled")
state2 = hass.states.get(
"binary_sensor.kitchen_pure_boost_linked_with_indoor_air_quality"
)
state3 = hass.states.get(
"binary_sensor.kitchen_pure_boost_linked_with_outdoor_air_quality"
)
state4 = hass.states.get("sensor.kitchen_pure_sensitivity")
assert state1.state == "on"
assert state2.state == "on"
assert state3.state == "on"
assert state4.state == "s"
with patch(
"homeassistant.components.sensibo.util.SensiboClient.async_get_devices_data",
return_value=get_data,
), patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_set_pureboost",
return_value={
"status": "success",
"result": {
"enabled": False,
"sensitivity": "S",
"measurements_integration": True,
"ac_integration": False,
"geo_integration": False,
"prime_integration": True,
},
},
) as mock_set_pureboost:
await hass.services.async_call(
DOMAIN,
SERVICE_DISABLE_PURE_BOOST,
{
ATTR_ENTITY_ID: state_climate.entity_id,
},
blocking=True,
)
await hass.async_block_till_done()
mock_set_pureboost.assert_called_once()
monkeypatch.setattr(get_data.parsed["AAZZAAZZ"], "pure_boost_enabled", False)
monkeypatch.setattr(get_data.parsed["AAZZAAZZ"], "pure_sensitivity", "s")
with patch(
"homeassistant.components.sensibo.coordinator.SensiboClient.async_get_devices_data",
return_value=get_data,
):
async_fire_time_changed(
hass,
dt.utcnow() + timedelta(minutes=5),
)
await hass.async_block_till_done()
state1 = hass.states.get("binary_sensor.kitchen_pure_boost_enabled")
state4 = hass.states.get("sensor.kitchen_pure_sensitivity")
assert state1.state == "off"
assert state4.state == "s"
| 31.961661 | 92 | 0.629848 |
6b7d1ed9a32809a62b788a2be22c39f6a1602a43 | 170,868 | py | Python | src/azure-cli/azure/cli/command_modules/vm/custom.py | asksven/azure-cli | 0f864e9a9d861a7aa54fb62dc4b1b96d4a30cf21 | ["MIT"] | 1 | 2020-07-08T18:56:32.000Z | 2020-07-08T18:56:32.000Z | src/azure-cli/azure/cli/command_modules/vm/custom.py | cindywu/azure-cli | bd011cb91ac6e0ac89f53e1105d76ea30b6609a0 | ["MIT"] | null | null | null | src/azure-cli/azure/cli/command_modules/vm/custom.py | cindywu/azure-cli | bd011cb91ac6e0ac89f53e1105d76ea30b6609a0 | ["MIT"] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=no-self-use,too-many-lines
from __future__ import print_function
import json
import os
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
# urlopen is imported for automation purposes
from six.moves.urllib.request import urlopen # noqa, pylint: disable=import-error,unused-import,ungrouped-imports
from knack.log import get_logger
from knack.util import CLIError
from azure.cli.command_modules.vm._validators import _get_resource_group_from_vault_name
from azure.cli.core.commands.validators import validate_file_or_dict
from azure.cli.core.commands import LongRunningOperation, DeploymentOutputLongRunningOperation
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_data_service_client
from azure.cli.core.profiles import ResourceType
from azure.cli.core.util import sdk_no_wait
from ._vm_utils import read_content_if_is_file
from ._vm_diagnostics_templates import get_default_diag_config
from ._actions import (load_images_from_aliases_doc, load_extension_images_thru_services,
load_images_thru_services, _get_latest_image_version)
from ._client_factory import (_compute_client_factory, cf_public_ip_addresses, cf_vm_image_term,
_dev_test_labs_client_factory)
logger = get_logger(__name__)
# Use the same name as the portal, so people can update from both the CLI and the portal
# (VM doesn't allow multiple handlers for the same extension)
_ACCESS_EXT_HANDLER_NAME = 'enablevmaccess'
_LINUX_ACCESS_EXT = 'VMAccessForLinux'
_WINDOWS_ACCESS_EXT = 'VMAccessAgent'
_LINUX_DIAG_EXT = 'LinuxDiagnostic'
_WINDOWS_DIAG_EXT = 'IaaSDiagnostics'
_LINUX_OMS_AGENT_EXT = 'OmsAgentForLinux'
_WINDOWS_OMS_AGENT_EXT = 'MicrosoftMonitoringAgent'
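# Publisher and handler version for each of the access/diagnostics/OMS extensions
# above; used by _get_access_extension_upgrade_info below to pick the publisher and
# decide whether an already-installed handler needs upgrading.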
extension_mappings = {
_LINUX_ACCESS_EXT: {
'version': '1.5',
'publisher': 'Microsoft.OSTCExtensions'
},
_WINDOWS_ACCESS_EXT: {
'version': '2.4',
'publisher': 'Microsoft.Compute'
},
_LINUX_DIAG_EXT: {
'version': '3.0',
'publisher': 'Microsoft.Azure.Diagnostics'
},
_WINDOWS_DIAG_EXT: {
'version': '1.5',
'publisher': 'Microsoft.Azure.Diagnostics'
},
_LINUX_OMS_AGENT_EXT: {
'version': '1.0',
'publisher': 'Microsoft.EnterpriseCloud.Monitoring'
},
_WINDOWS_OMS_AGENT_EXT: {
'version': '1.0',
'publisher': 'Microsoft.EnterpriseCloud.Monitoring'
}
}
def _construct_identity_info(identity_scope, identity_role, implicit_identity, external_identities):
info = {}
if identity_scope:
info['scope'] = identity_scope
info['role'] = str(identity_role) # could be DefaultStr, so convert to string
info['userAssignedIdentities'] = external_identities or {}
info['systemAssignedIdentity'] = implicit_identity or ''
return info
# for injecting test seams to produce predictable role assignment ids for playback
def _gen_guid():
import uuid
return uuid.uuid4()
def _get_access_extension_upgrade_info(extensions, name):
version = extension_mappings[name]['version']
publisher = extension_mappings[name]['publisher']
auto_upgrade = None
if extensions:
extension = next((e for e in extensions if e.name == name), None)
from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error
if extension and LooseVersion(extension.type_handler_version) < LooseVersion(version):
auto_upgrade = True
elif extension and LooseVersion(extension.type_handler_version) > LooseVersion(version):
version = extension.type_handler_version
return publisher, version, auto_upgrade
def _get_extension_instance_name(instance_view, publisher, extension_type_name,
suggested_name=None):
extension_instance_name = suggested_name or extension_type_name
full_type_name = '.'.join([publisher, extension_type_name])
if instance_view.extensions:
ext = next((x for x in instance_view.extensions
if x.type and (x.type.lower() == full_type_name.lower())), None)
if ext:
extension_instance_name = ext.name
return extension_instance_name
def _get_storage_management_client(cli_ctx):
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_STORAGE)
def _get_disk_lun(data_disks):
# start from 0, search for unused int for lun
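    # e.g. existing LUNs [0, 1, 3] -> first free LUN is 2; [0, 1, 2] -> 3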
if not data_disks:
return 0
existing_luns = sorted([d.lun for d in data_disks])
for i, current in enumerate(existing_luns):
if current != i:
return i
return len(existing_luns)
def _get_private_config(cli_ctx, resource_group_name, storage_account):
storage_mgmt_client = _get_storage_management_client(cli_ctx)
# pylint: disable=no-member
keys = storage_mgmt_client.storage_accounts.list_keys(resource_group_name, storage_account).keys
private_config = {
'storageAccountName': storage_account,
'storageAccountKey': keys[0].value
}
return private_config
def _get_resource_group_location(cli_ctx, resource_group_name):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
# pylint: disable=no-member
return client.resource_groups.get(resource_group_name).location
def _get_sku_object(cmd, sku):
if cmd.supported_api_version(min_api='2017-03-30'):
DiskSku = cmd.get_models('DiskSku')
return DiskSku(name=sku)
return sku
def _grant_access(cmd, resource_group_name, name, duration_in_seconds, is_disk, access_level):
AccessLevel = cmd.get_models('AccessLevel')
client = _compute_client_factory(cmd.cli_ctx)
op = client.disks if is_disk else client.snapshots
return op.grant_access(resource_group_name, name, access_level or AccessLevel.read, duration_in_seconds)
def _is_linux_os(vm):
os_type = vm.storage_profile.os_disk.os_type.value if vm.storage_profile.os_disk.os_type else None
if os_type:
return os_type.lower() == 'linux'
    # os_type can be None for a VM scale set, so fall back to checking the OS configuration
if vm.os_profile.linux_configuration:
return bool(vm.os_profile.linux_configuration)
return False
def _merge_secrets(secrets):
"""
Merge a list of secrets. Each secret should be a dict fitting the following JSON structure:
[{ "sourceVault": { "id": "value" },
"vaultCertificates": [{ "certificateUrl": "value",
"certificateStore": "cert store name (only on windows)"}] }]
The array of secrets is merged on sourceVault.id.
:param secrets:
:return:
"""
merged = {}
vc_name = 'vaultCertificates'
for outer in secrets:
for secret in outer:
if secret['sourceVault']['id'] not in merged:
merged[secret['sourceVault']['id']] = []
merged[secret['sourceVault']['id']] = \
secret[vc_name] + merged[secret['sourceVault']['id']]
    # transform the merged map back into the VM secrets format
formatted = [{'sourceVault': {'id': source_id},
'vaultCertificates': value}
for source_id, value in list(merged.items())]
return formatted
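# Illustrative sketch (not part of the original module): two secret groups that
# reference the same key vault collapse into a single sourceVault entry whose
# certificate lists are concatenated. The IDs and URLs below are made up.
def _example_merge_secrets():
    vault_id = '/subscriptions/000/resourceGroups/rg/providers/Microsoft.KeyVault/vaults/kv'
    secrets = [
        [{'sourceVault': {'id': vault_id},
          'vaultCertificates': [{'certificateUrl': 'https://kv.vault.azure.net/secrets/a'}]}],
        [{'sourceVault': {'id': vault_id},
          'vaultCertificates': [{'certificateUrl': 'https://kv.vault.azure.net/secrets/b'}]}],
    ]
    merged = _merge_secrets(secrets)
    assert len(merged) == 1
    assert len(merged[0]['vaultCertificates']) == 2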
def _normalize_extension_version(cli_ctx, publisher, vm_extension_name, version, location):
def _trim_away_build_number(version):
# workaround a known issue: the version must only contain "major.minor", even though
# "extension image list" gives more detail
return '.'.join(version.split('.')[0:2])
if not version:
result = load_extension_images_thru_services(cli_ctx, publisher, vm_extension_name, None, location,
show_latest=True, partial_match=False)
if not result:
raise CLIError('Failed to find the latest version for the extension "{}"'.format(vm_extension_name))
# with 'show_latest' enabled, we will only get one result.
version = result[0]['version']
version = _trim_away_build_number(version)
return version
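# Illustrative sketch (not part of the original module): the nested
# _trim_away_build_number helper above keeps only "major.minor"; the local
# trim() below mirrors that logic for demonstration.
def _example_trim_away_build_number():
    def trim(version):
        return '.'.join(version.split('.')[0:2])
    assert trim('1.5.3.1') == '1.5'
    assert trim('2.0') == '2.0'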
def _parse_rg_name(strid):
'''From an ID, extract the contained (resource group, name) tuple.'''
from msrestazure.tools import parse_resource_id
parts = parse_resource_id(strid)
return (parts['resource_group'], parts['name'])
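# Illustrative sketch (not part of the original module): recovering the resource
# group and resource name from a full ARM resource ID. Assumes msrestazure is
# installed (it is a dependency of this module); the ID below is made up.
def _example_parse_rg_name():
    vm_id = ('/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg'
             '/providers/Microsoft.Compute/virtualMachines/my-vm')
    assert _parse_rg_name(vm_id) == ('my-rg', 'my-vm')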
def _set_sku(cmd, instance, sku):
if cmd.supported_api_version(min_api='2017-03-30'):
instance.sku = cmd.get_models('DiskSku')(name=sku)
else:
instance.account_type = sku
def _show_missing_access_warning(resource_group, name, command):
warn = ("No access was given yet to the '{1}', because '--scope' was not provided. "
"You should setup by creating a role assignment, e.g. "
"'az role assignment create --assignee <principal-id> --role contributor -g {0}' "
"would let it access the current resource group. To get the pricipal id, run "
"'az {2} show -g {0} -n {1} --query \"identity.principalId\" -otsv'".format(resource_group, name, command))
logger.warning(warn)
# Hide extension information from the output, as the info is not accurate and is unhelpful; also,
# the commands that use it are meant to hide the extension concept from users.
class ExtensionUpdateLongRunningOperation(LongRunningOperation): # pylint: disable=too-few-public-methods
pass
# region Disks (Managed)
def create_managed_disk(cmd, resource_group_name, disk_name, location=None, # pylint: disable=too-many-locals, too-many-branches, too-many-statements
size_gb=None, sku='Premium_LRS', os_type=None,
source=None, for_upload=None, upload_size_bytes=None, # pylint: disable=unused-argument
# below are generated internally from 'source'
source_blob_uri=None, source_disk=None, source_snapshot=None,
source_storage_account_id=None, no_wait=False, tags=None, zone=None,
disk_iops_read_write=None, disk_mbps_read_write=None, hyper_v_generation=None,
encryption_type=None, disk_encryption_set=None, max_shares=None,
disk_iops_read_only=None, disk_mbps_read_only=None,
image_reference=None, image_reference_lun=None,
gallery_image_reference=None, gallery_image_reference_lun=None):
from msrestazure.tools import resource_id, is_valid_resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
Disk, CreationData, DiskCreateOption, Encryption = cmd.get_models(
'Disk', 'CreationData', 'DiskCreateOption', 'Encryption')
location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
if source_blob_uri:
option = DiskCreateOption.import_enum
elif source_disk or source_snapshot:
option = DiskCreateOption.copy
elif for_upload:
option = DiskCreateOption.upload
elif image_reference or gallery_image_reference:
option = DiskCreateOption.from_image
else:
option = DiskCreateOption.empty
if source_storage_account_id is None and source_blob_uri is not None:
subscription_id = get_subscription_id(cmd.cli_ctx)
storage_account_name = source_blob_uri.split('.')[0].split('/')[-1]
source_storage_account_id = resource_id(
subscription=subscription_id, resource_group=resource_group_name,
namespace='Microsoft.Storage', type='storageAccounts', name=storage_account_name)
if upload_size_bytes is not None and for_upload is not True:
raise CLIError('usage error: --upload-size-bytes should be used together with --for-upload')
if image_reference is not None:
if not is_valid_resource_id(image_reference):
# URN or name
terms = image_reference.split(':')
if len(terms) == 4: # URN
disk_publisher, disk_offer, disk_sku, disk_version = terms[0], terms[1], terms[2], terms[3]
if disk_version.lower() == 'latest':
disk_version = _get_latest_image_version(cmd.cli_ctx, location, disk_publisher, disk_offer,
disk_sku)
client = _compute_client_factory(cmd.cli_ctx)
response = client.virtual_machine_images.get(location, disk_publisher, disk_offer, disk_sku,
disk_version)
image_reference = response.id
else: # error
raise CLIError('usage error: --image-reference should be ID or URN (publisher:offer:sku:version).')
# image_reference is an ID now
image_reference = {'id': image_reference}
if image_reference_lun is not None:
image_reference['lun'] = image_reference_lun
if gallery_image_reference is not None:
gallery_image_reference = {'id': gallery_image_reference}
if gallery_image_reference_lun is not None:
gallery_image_reference['lun'] = gallery_image_reference_lun
creation_data = CreationData(create_option=option, source_uri=source_blob_uri,
image_reference=image_reference, gallery_image_reference=gallery_image_reference,
source_resource_id=source_disk or source_snapshot,
storage_account_id=source_storage_account_id,
upload_size_bytes=upload_size_bytes)
if size_gb is None and upload_size_bytes is None and (option == DiskCreateOption.empty or for_upload):
raise CLIError('usage error: --size-gb or --upload-size-bytes required to create an empty disk')
if disk_encryption_set is not None and not is_valid_resource_id(disk_encryption_set):
disk_encryption_set = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set)
if disk_encryption_set is not None and encryption_type is None:
raise CLIError('usage error: Please specify --encryption-type.')
if encryption_type is not None:
encryption = Encryption(type=encryption_type, disk_encryption_set_id=disk_encryption_set)
else:
encryption = None
disk = Disk(location=location, creation_data=creation_data, tags=(tags or {}),
sku=_get_sku_object(cmd, sku), disk_size_gb=size_gb, os_type=os_type, encryption=encryption)
if hyper_v_generation:
disk.hyper_vgeneration = hyper_v_generation
if zone:
disk.zones = zone
if disk_iops_read_write is not None:
disk.disk_iops_read_write = disk_iops_read_write
if disk_mbps_read_write is not None:
disk.disk_mbps_read_write = disk_mbps_read_write
if max_shares is not None:
disk.max_shares = max_shares
if disk_iops_read_only is not None:
disk.disk_iops_read_only = disk_iops_read_only
if disk_mbps_read_only is not None:
disk.disk_mbps_read_only = disk_mbps_read_only
client = _compute_client_factory(cmd.cli_ctx)
return sdk_no_wait(no_wait, client.disks.create_or_update, resource_group_name, disk_name, disk)
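# Illustrative sketch (not part of the original module): how an --image-reference
# URN is decomposed before being resolved to a platform image ID inside
# create_managed_disk. The URN below is made up for illustration.
def _example_split_image_reference_urn():
    urn = 'Canonical:UbuntuServer:18.04-LTS:latest'
    publisher, offer, sku, version = urn.split(':')
    assert (publisher, offer, sku) == ('Canonical', 'UbuntuServer', '18.04-LTS')
    assert version.lower() == 'latest'  # 'latest' triggers a lookup of the newest version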
def grant_disk_access(cmd, resource_group_name, disk_name, duration_in_seconds, access_level=None):
return _grant_access(cmd, resource_group_name, disk_name, duration_in_seconds, is_disk=True,
access_level=access_level)
def list_managed_disks(cmd, resource_group_name=None):
client = _compute_client_factory(cmd.cli_ctx)
if resource_group_name:
return client.disks.list_by_resource_group(resource_group_name)
return client.disks.list()
def update_managed_disk(cmd, resource_group_name, instance, size_gb=None, sku=None, disk_iops_read_write=None,
disk_mbps_read_write=None, encryption_type=None, disk_encryption_set=None):
from msrestazure.tools import resource_id, is_valid_resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
if size_gb is not None:
instance.disk_size_gb = size_gb
if sku is not None:
_set_sku(cmd, instance, sku)
if disk_iops_read_write is not None:
instance.disk_iops_read_write = disk_iops_read_write
if disk_mbps_read_write is not None:
instance.disk_mbps_read_write = disk_mbps_read_write
if disk_encryption_set is not None:
if instance.encryption.type != 'EncryptionAtRestWithCustomerKey' and \
encryption_type != 'EncryptionAtRestWithCustomerKey':
raise CLIError('usage error: Please set --encryption-type to EncryptionAtRestWithCustomerKey')
if not is_valid_resource_id(disk_encryption_set):
disk_encryption_set = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set)
instance.encryption.disk_encryption_set_id = disk_encryption_set
if encryption_type is not None:
instance.encryption.type = encryption_type
return instance
# endregion
# region Images (Managed)
def create_image(cmd, resource_group_name, name, source, os_type=None, data_disk_sources=None, location=None, # pylint: disable=too-many-locals,unused-argument
# below are generated internally from 'source' and 'data_disk_sources'
source_virtual_machine=None, storage_sku=None, hyper_v_generation=None,
os_blob_uri=None, data_blob_uris=None,
os_snapshot=None, data_snapshots=None,
os_disk=None, os_disk_caching=None, data_disks=None, data_disk_caching=None,
tags=None, zone_resilient=None):
ImageOSDisk, ImageDataDisk, ImageStorageProfile, Image, SubResource, OperatingSystemStateTypes = cmd.get_models(
'ImageOSDisk', 'ImageDataDisk', 'ImageStorageProfile', 'Image', 'SubResource', 'OperatingSystemStateTypes')
if source_virtual_machine:
location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
image_storage_profile = None if zone_resilient is None else ImageStorageProfile(zone_resilient=zone_resilient)
image = Image(location=location, source_virtual_machine=SubResource(id=source_virtual_machine),
storage_profile=image_storage_profile, tags=(tags or {}))
else:
os_disk = ImageOSDisk(os_type=os_type,
os_state=OperatingSystemStateTypes.generalized,
caching=os_disk_caching,
snapshot=SubResource(id=os_snapshot) if os_snapshot else None,
managed_disk=SubResource(id=os_disk) if os_disk else None,
blob_uri=os_blob_uri,
storage_account_type=storage_sku)
all_data_disks = []
lun = 0
if data_blob_uris:
for d in data_blob_uris:
all_data_disks.append(ImageDataDisk(lun=lun, blob_uri=d, caching=data_disk_caching))
lun += 1
if data_snapshots:
for d in data_snapshots:
all_data_disks.append(ImageDataDisk(lun=lun, snapshot=SubResource(id=d), caching=data_disk_caching))
lun += 1
if data_disks:
for d in data_disks:
all_data_disks.append(ImageDataDisk(lun=lun, managed_disk=SubResource(id=d), caching=data_disk_caching))
lun += 1
image_storage_profile = ImageStorageProfile(os_disk=os_disk, data_disks=all_data_disks)
if zone_resilient is not None:
image_storage_profile.zone_resilient = zone_resilient
location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
        # pylint: disable=no-member
image = Image(location=location, storage_profile=image_storage_profile, tags=(tags or {}))
if hyper_v_generation:
image.hyper_vgeneration = hyper_v_generation
client = _compute_client_factory(cmd.cli_ctx)
return client.images.create_or_update(resource_group_name, name, image)
def update_image(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
def list_images(cmd, resource_group_name=None):
client = _compute_client_factory(cmd.cli_ctx)
if resource_group_name:
return client.images.list_by_resource_group(resource_group_name)
return client.images.list()
# endregion
# region Snapshots
# pylint: disable=unused-argument,too-many-locals
def create_snapshot(cmd, resource_group_name, snapshot_name, location=None, size_gb=None, sku='Standard_LRS',
source=None, for_upload=None, incremental=None,
# below are generated internally from 'source'
source_blob_uri=None, source_disk=None, source_snapshot=None, source_storage_account_id=None,
hyper_v_generation=None, tags=None, no_wait=False, disk_encryption_set=None,
encryption_type=None):
from msrestazure.tools import resource_id, is_valid_resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
Snapshot, CreationData, DiskCreateOption, Encryption = cmd.get_models(
'Snapshot', 'CreationData', 'DiskCreateOption', 'Encryption')
location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
if source_blob_uri:
option = DiskCreateOption.import_enum
elif source_disk or source_snapshot:
option = DiskCreateOption.copy
elif for_upload:
option = DiskCreateOption.upload
else:
option = DiskCreateOption.empty
creation_data = CreationData(create_option=option, source_uri=source_blob_uri,
image_reference=None,
source_resource_id=source_disk or source_snapshot,
storage_account_id=source_storage_account_id)
if size_gb is None and option == DiskCreateOption.empty:
        raise CLIError('Please supply a size for the snapshot')
if disk_encryption_set is not None and not is_valid_resource_id(disk_encryption_set):
disk_encryption_set = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set)
if disk_encryption_set is not None and encryption_type is None:
raise CLIError('usage error: Please specify --encryption-type.')
if encryption_type is not None:
encryption = Encryption(type=encryption_type, disk_encryption_set_id=disk_encryption_set)
else:
encryption = None
snapshot = Snapshot(location=location, creation_data=creation_data, tags=(tags or {}),
sku=_get_sku_object(cmd, sku), disk_size_gb=size_gb, incremental=incremental,
encryption=encryption)
if hyper_v_generation:
snapshot.hyper_vgeneration = hyper_v_generation
client = _compute_client_factory(cmd.cli_ctx)
return sdk_no_wait(no_wait, client.snapshots.create_or_update, resource_group_name, snapshot_name, snapshot)
def grant_snapshot_access(cmd, resource_group_name, snapshot_name, duration_in_seconds, access_level=None):
return _grant_access(cmd, resource_group_name, snapshot_name, duration_in_seconds, is_disk=False,
access_level=access_level)
def list_snapshots(cmd, resource_group_name=None):
client = _compute_client_factory(cmd.cli_ctx)
if resource_group_name:
return client.snapshots.list_by_resource_group(resource_group_name)
return client.snapshots.list()
def update_snapshot(cmd, resource_group_name, instance, sku=None, disk_encryption_set=None, encryption_type=None):
from msrestazure.tools import resource_id, is_valid_resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
if sku is not None:
_set_sku(cmd, instance, sku)
if disk_encryption_set is not None:
if instance.encryption.type != 'EncryptionAtRestWithCustomerKey' and \
encryption_type != 'EncryptionAtRestWithCustomerKey':
raise CLIError('usage error: Please set --encryption-type to EncryptionAtRestWithCustomerKey')
if not is_valid_resource_id(disk_encryption_set):
disk_encryption_set = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='diskEncryptionSets', name=disk_encryption_set)
instance.encryption.disk_encryption_set_id = disk_encryption_set
if encryption_type is not None:
instance.encryption.type = encryption_type
return instance
# endregion
# region VirtualMachines Identity
def show_vm_identity(cmd, resource_group_name, vm_name):
client = _compute_client_factory(cmd.cli_ctx)
return client.virtual_machines.get(resource_group_name, vm_name).identity
def show_vmss_identity(cmd, resource_group_name, vm_name):
client = _compute_client_factory(cmd.cli_ctx)
return client.virtual_machine_scale_sets.get(resource_group_name, vm_name).identity
def assign_vm_identity(cmd, resource_group_name, vm_name, assign_identity=None, identity_role='Contributor',
identity_role_id=None, identity_scope=None):
VirtualMachineIdentity, ResourceIdentityType, VirtualMachineUpdate = cmd.get_models('VirtualMachineIdentity',
'ResourceIdentityType',
'VirtualMachineUpdate')
VirtualMachineIdentityUserAssignedIdentitiesValue = cmd.get_models(
'VirtualMachineIdentityUserAssignedIdentitiesValue')
from azure.cli.core.commands.arm import assign_identity as assign_identity_helper
client = _compute_client_factory(cmd.cli_ctx)
_, _, external_identities, enable_local_identity = _build_identities_info(assign_identity)
def getter():
return client.virtual_machines.get(resource_group_name, vm_name)
def setter(vm, external_identities=external_identities):
if vm.identity and vm.identity.type == ResourceIdentityType.system_assigned_user_assigned:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif vm.identity and vm.identity.type == ResourceIdentityType.system_assigned and external_identities:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif vm.identity and vm.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities:
identity_types = ResourceIdentityType.user_assigned
else:
identity_types = ResourceIdentityType.system_assigned
vm.identity = VirtualMachineIdentity(type=identity_types)
if external_identities:
vm.identity.user_assigned_identities = {}
for identity in external_identities:
vm.identity.user_assigned_identities[identity] = VirtualMachineIdentityUserAssignedIdentitiesValue()
vm_patch = VirtualMachineUpdate()
vm_patch.identity = vm.identity
return patch_vm(cmd, resource_group_name, vm_name, vm_patch)
assign_identity_helper(cmd.cli_ctx, getter, setter, identity_role=identity_role_id, identity_scope=identity_scope)
vm = client.virtual_machines.get(resource_group_name, vm_name)
return _construct_identity_info(identity_scope, identity_role, vm.identity.principal_id,
vm.identity.user_assigned_identities)
# endregion
# region VirtualMachines
def capture_vm(cmd, resource_group_name, vm_name, vhd_name_prefix,
storage_container='vhds', overwrite=True):
VirtualMachineCaptureParameters = cmd.get_models('VirtualMachineCaptureParameters')
client = _compute_client_factory(cmd.cli_ctx)
parameter = VirtualMachineCaptureParameters(vhd_prefix=vhd_name_prefix,
destination_container_name=storage_container,
overwrite_vhds=overwrite)
poller = client.virtual_machines.capture(resource_group_name, vm_name, parameter)
result = LongRunningOperation(cmd.cli_ctx)(poller)
output = getattr(result, 'output', None) or result.resources[0]
print(json.dumps(output, indent=2)) # pylint: disable=no-member
# pylint: disable=too-many-locals, unused-argument, too-many-statements, too-many-branches
def create_vm(cmd, vm_name, resource_group_name, image=None, size='Standard_DS1_v2', location=None, tags=None,
no_wait=False, authentication_type=None, admin_password=None, computer_name=None,
admin_username=None, ssh_dest_key_path=None, ssh_key_value=None, generate_ssh_keys=False,
availability_set=None, nics=None, nsg=None, nsg_rule=None, accelerated_networking=None,
private_ip_address=None, public_ip_address=None, public_ip_address_allocation='dynamic',
public_ip_address_dns_name=None, public_ip_sku=None, os_disk_name=None, os_type=None,
storage_account=None, os_caching=None, data_caching=None, storage_container_name=None, storage_sku=None,
use_unmanaged_disk=False, attach_os_disk=None, os_disk_size_gb=None, attach_data_disks=None,
data_disk_sizes_gb=None, disk_info=None,
vnet_name=None, vnet_address_prefix='10.0.0.0/16', subnet=None, subnet_address_prefix='10.0.0.0/24',
storage_profile=None, os_publisher=None, os_offer=None, os_sku=None, os_version=None,
storage_account_type=None, vnet_type=None, nsg_type=None, public_ip_address_type=None, nic_type=None,
validate=False, custom_data=None, secrets=None, plan_name=None, plan_product=None, plan_publisher=None,
plan_promotion_code=None, license_type=None, assign_identity=None, identity_scope=None,
identity_role='Contributor', identity_role_id=None, application_security_groups=None, zone=None,
boot_diagnostics_storage=None, ultra_ssd_enabled=None, ephemeral_os_disk=None,
proximity_placement_group=None, dedicated_host=None, dedicated_host_group=None, aux_subscriptions=None,
priority=None, max_price=None, eviction_policy=None, enable_agent=None, workspace=None, vmss=None,
os_disk_encryption_set=None, data_disk_encryption_sets=None, specialized=None):
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.util import random_string, hash_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.vm._template_builder import (build_vm_resource,
build_storage_account_resource, build_nic_resource,
build_vnet_resource, build_nsg_resource,
build_public_ip_resource, StorageProfile,
build_msi_role_assignment,
build_vm_linux_log_analytics_workspace_agent,
build_vm_windows_log_analytics_workspace_agent)
from msrestazure.tools import resource_id, is_valid_resource_id, parse_resource_id
subscription_id = get_subscription_id(cmd.cli_ctx)
if os_disk_encryption_set is not None and not is_valid_resource_id(os_disk_encryption_set):
os_disk_encryption_set = resource_id(
subscription=subscription_id, resource_group=resource_group_name,
namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_encryption_set)
if data_disk_encryption_sets is None:
data_disk_encryption_sets = []
for i, des in enumerate(data_disk_encryption_sets):
if des is not None and not is_valid_resource_id(des):
data_disk_encryption_sets[i] = resource_id(
subscription=subscription_id, resource_group=resource_group_name,
namespace='Microsoft.Compute', type='diskEncryptionSets', name=des)
storage_sku = disk_info['os'].get('storageAccountType')
network_id_template = resource_id(
subscription=subscription_id, resource_group=resource_group_name,
namespace='Microsoft.Network')
vm_id = resource_id(
subscription=subscription_id, resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm_name)
# determine final defaults and calculated values
tags = tags or {}
os_disk_name = os_disk_name or ('osdisk_{}'.format(hash_string(vm_id, length=10)) if use_unmanaged_disk else None)
storage_container_name = storage_container_name or 'vhds'
# Build up the ARM template
master_template = ArmTemplateBuilder()
vm_dependencies = []
if storage_account_type == 'new':
storage_account = storage_account or 'vhdstorage{}'.format(
hash_string(vm_id, length=14, force_lower=True))
vm_dependencies.append('Microsoft.Storage/storageAccounts/{}'.format(storage_account))
master_template.add_resource(build_storage_account_resource(cmd, storage_account, location,
tags, storage_sku))
nic_name = None
if nic_type == 'new':
nic_name = '{}VMNic'.format(vm_name)
vm_dependencies.append('Microsoft.Network/networkInterfaces/{}'.format(nic_name))
nic_dependencies = []
if vnet_type == 'new':
subnet = subnet or '{}Subnet'.format(vm_name)
vnet_exists = False
if vnet_name:
from azure.cli.command_modules.vm._vm_utils import check_existence
vnet_exists = \
check_existence(cmd.cli_ctx, vnet_name, resource_group_name, 'Microsoft.Network', 'virtualNetworks')
if vnet_exists:
from azure.cli.core.commands import cached_get, cached_put, upsert_to_collection
from azure.cli.command_modules.vm._validators import get_network_client
client = get_network_client(cmd.cli_ctx).virtual_networks
vnet = cached_get(cmd, client.get, resource_group_name, vnet_name)
Subnet = cmd.get_models('Subnet', resource_type=ResourceType.MGMT_NETWORK)
subnet_obj = Subnet(
name=subnet,
address_prefixes=[subnet_address_prefix],
address_prefix=subnet_address_prefix
)
upsert_to_collection(vnet, 'subnets', subnet_obj, 'name')
try:
cached_put(cmd, client.create_or_update, vnet, resource_group_name, vnet_name).result()
except Exception:
raise CLIError('Subnet({}) does not exist, but failed to create a new subnet with address '
'prefix {}. It may be caused by name or address prefix conflict. Please specify '
'an appropriate subnet name with --subnet or a valid address prefix value with '
'--subnet-address-prefix.'.format(subnet, subnet_address_prefix))
if not vnet_exists:
vnet_name = vnet_name or '{}VNET'.format(vm_name)
nic_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name))
master_template.add_resource(build_vnet_resource(
cmd, vnet_name, location, tags, vnet_address_prefix, subnet, subnet_address_prefix))
if nsg_type == 'new':
if nsg_rule is None:
nsg_rule = 'RDP' if os_type.lower() == 'windows' else 'SSH'
nsg = nsg or '{}NSG'.format(vm_name)
nic_dependencies.append('Microsoft.Network/networkSecurityGroups/{}'.format(nsg))
master_template.add_resource(build_nsg_resource(cmd, nsg, location, tags, nsg_rule))
if public_ip_address_type == 'new':
public_ip_address = public_ip_address or '{}PublicIP'.format(vm_name)
nic_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(
public_ip_address))
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location, tags,
public_ip_address_allocation,
public_ip_address_dns_name,
public_ip_sku, zone))
subnet_id = subnet if is_valid_resource_id(subnet) else \
'{}/virtualNetworks/{}/subnets/{}'.format(network_id_template, vnet_name, subnet)
nsg_id = None
if nsg:
nsg_id = nsg if is_valid_resource_id(nsg) else \
'{}/networkSecurityGroups/{}'.format(network_id_template, nsg)
public_ip_address_id = None
if public_ip_address:
public_ip_address_id = public_ip_address if is_valid_resource_id(public_ip_address) \
else '{}/publicIPAddresses/{}'.format(network_id_template, public_ip_address)
nics = [
{'id': '{}/networkInterfaces/{}'.format(network_id_template, nic_name)}
]
nic_resource = build_nic_resource(
cmd, nic_name, location, tags, vm_name, subnet_id, private_ip_address, nsg_id,
public_ip_address_id, application_security_groups, accelerated_networking=accelerated_networking)
nic_resource['dependsOn'] = nic_dependencies
master_template.add_resource(nic_resource)
else:
# Using an existing NIC
invalid_parameters = [nsg, public_ip_address, subnet, vnet_name, application_security_groups]
if any(invalid_parameters):
raise CLIError('When specifying an existing NIC, do not specify NSG, '
'public IP, ASGs, VNet or subnet.')
if accelerated_networking is not None:
logger.warning('When specifying an existing NIC, do not specify accelerated networking. '
'Ignore --accelerated-networking now. '
'This will trigger an error instead of a warning in future releases.')
os_vhd_uri = None
if storage_profile in [StorageProfile.SACustomImage, StorageProfile.SAPirImage]:
storage_account_name = storage_account.rsplit('/', 1)
storage_account_name = storage_account_name[1] if \
len(storage_account_name) > 1 else storage_account_name[0]
os_vhd_uri = 'https://{}.blob.{}/{}/{}.vhd'.format(
storage_account_name, cmd.cli_ctx.cloud.suffixes.storage_endpoint, storage_container_name, os_disk_name)
elif storage_profile == StorageProfile.SASpecializedOSDisk:
os_vhd_uri = attach_os_disk
os_disk_name = attach_os_disk.rsplit('/', 1)[1][:-4]
if custom_data:
custom_data = read_content_if_is_file(custom_data)
if secrets:
secrets = _merge_secrets([validate_file_or_dict(secret) for secret in secrets])
vm_resource = build_vm_resource(
cmd=cmd, name=vm_name, location=location, tags=tags, size=size, storage_profile=storage_profile, nics=nics,
admin_username=admin_username, availability_set_id=availability_set, admin_password=admin_password,
ssh_key_values=ssh_key_value, ssh_key_path=ssh_dest_key_path, image_reference=image,
os_disk_name=os_disk_name, custom_image_os_type=os_type, authentication_type=authentication_type,
os_publisher=os_publisher, os_offer=os_offer, os_sku=os_sku, os_version=os_version, os_vhd_uri=os_vhd_uri,
attach_os_disk=attach_os_disk, os_disk_size_gb=os_disk_size_gb, custom_data=custom_data, secrets=secrets,
license_type=license_type, zone=zone, disk_info=disk_info,
boot_diagnostics_storage_uri=boot_diagnostics_storage, ultra_ssd_enabled=ultra_ssd_enabled,
proximity_placement_group=proximity_placement_group, computer_name=computer_name,
dedicated_host=dedicated_host, priority=priority, max_price=max_price, eviction_policy=eviction_policy,
enable_agent=enable_agent, vmss=vmss, os_disk_encryption_set=os_disk_encryption_set,
data_disk_encryption_sets=data_disk_encryption_sets, specialized=specialized)
vm_resource['dependsOn'] = vm_dependencies
if plan_name:
vm_resource['plan'] = {
'name': plan_name,
'publisher': plan_publisher,
'product': plan_product,
'promotionCode': plan_promotion_code
}
enable_local_identity = None
if assign_identity is not None:
vm_resource['identity'], _, _, enable_local_identity = _build_identities_info(assign_identity)
role_assignment_guid = None
if identity_scope:
role_assignment_guid = str(_gen_guid())
master_template.add_resource(build_msi_role_assignment(vm_name, vm_id, identity_role_id,
role_assignment_guid, identity_scope))
if workspace is not None:
workspace_id = _prepare_workspace(cmd, resource_group_name, workspace)
master_template.add_secure_parameter('workspaceId', workspace_id)
if os_type.lower() == 'linux':
vm_mmaExtension_resource = build_vm_linux_log_analytics_workspace_agent(cmd, vm_name, location)
master_template.add_resource(vm_mmaExtension_resource)
elif os_type.lower() == 'windows':
vm_mmaExtension_resource = build_vm_windows_log_analytics_workspace_agent(cmd, vm_name, location)
master_template.add_resource(vm_mmaExtension_resource)
else:
logger.warning("Unsupported OS type. Skip the connection step for log analytics workspace.")
master_template.add_resource(vm_resource)
if admin_password:
master_template.add_secure_parameter('adminPassword', admin_password)
template = master_template.build()
parameters = master_template.build_parameters()
# deploy ARM template
deployment_name = 'vm_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
aux_subscriptions=aux_subscriptions).deployments
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
if validate:
from azure.cli.command_modules.vm._vm_utils import log_pprint_template
log_pprint_template(template)
log_pprint_template(parameters)
return client.validate(resource_group_name, deployment_name, properties)
# creates the VM deployment
if no_wait:
return sdk_no_wait(no_wait, client.create_or_update,
resource_group_name, deployment_name, properties)
LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, deployment_name, properties))
vm = get_vm_details(cmd, resource_group_name, vm_name)
if assign_identity is not None:
if enable_local_identity and not identity_scope:
_show_missing_access_warning(resource_group_name, vm_name, 'vm')
setattr(vm, 'identity', _construct_identity_info(identity_scope, identity_role, vm.identity.principal_id,
vm.identity.user_assigned_identities))
if workspace is not None:
workspace_name = parse_resource_id(workspace_id)['name']
_set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name)
return vm
def auto_shutdown_vm(cmd, resource_group_name, vm_name, off=None, email=None, webhook=None, time=None,
location=None):
from msrestazure.tools import resource_id
from azure.mgmt.devtestlabs.models import Schedule
from azure.cli.core.commands.client_factory import get_subscription_id
subscription_id = get_subscription_id(cmd.cli_ctx)
client = _dev_test_labs_client_factory(cmd.cli_ctx, subscription_id)
name = 'shutdown-computevm-' + vm_name
vm_id = resource_id(subscription=client.config.subscription_id, resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm_name)
if off:
if email is not None or webhook is not None or time is not None:
            # Warn instead of raising an error so users are not disrupted.
            logger.warning('When --off is specified, other parameters are ignored.')
return client.global_schedules.delete(resource_group_name, name)
if time is None:
raise CLIError('usage error: --time is a required parameter')
daily_recurrence = {'time': time}
notification_settings = None
if webhook:
notification_settings = {
'emailRecipient': email,
'webhookUrl': webhook,
'timeInMinutes': 30,
'status': 'Enabled'
}
schedule = Schedule(status='Enabled',
target_resource_id=vm_id,
daily_recurrence=daily_recurrence,
notification_settings=notification_settings,
time_zone_id='UTC',
task_type='ComputeVmShutdownTask',
location=location)
return client.global_schedules.create_or_update(resource_group_name, name, schedule)
def get_instance_view(cmd, resource_group_name, vm_name):
return get_vm(cmd, resource_group_name, vm_name, 'instanceView')
def get_vm(cmd, resource_group_name, vm_name, expand=None):
client = _compute_client_factory(cmd.cli_ctx)
return client.virtual_machines.get(resource_group_name, vm_name, expand=expand)
def get_vm_details(cmd, resource_group_name, vm_name):
from msrestazure.tools import parse_resource_id
from azure.cli.command_modules.vm._vm_utils import get_target_network_api
result = get_instance_view(cmd, resource_group_name, vm_name)
network_client = get_mgmt_service_client(
cmd.cli_ctx, ResourceType.MGMT_NETWORK, api_version=get_target_network_api(cmd.cli_ctx))
public_ips = []
fqdns = []
private_ips = []
mac_addresses = []
# pylint: disable=line-too-long,no-member
for nic_ref in result.network_profile.network_interfaces:
nic_parts = parse_resource_id(nic_ref.id)
nic = network_client.network_interfaces.get(nic_parts['resource_group'], nic_parts['name'])
if nic.mac_address:
mac_addresses.append(nic.mac_address)
for ip_configuration in nic.ip_configurations:
if ip_configuration.private_ip_address:
private_ips.append(ip_configuration.private_ip_address)
if ip_configuration.public_ip_address:
res = parse_resource_id(ip_configuration.public_ip_address.id)
public_ip_info = network_client.public_ip_addresses.get(res['resource_group'],
res['name'])
if public_ip_info.ip_address:
public_ips.append(public_ip_info.ip_address)
if public_ip_info.dns_settings:
fqdns.append(public_ip_info.dns_settings.fqdn)
setattr(result, 'power_state',
','.join([s.display_status for s in result.instance_view.statuses if s.code.startswith('PowerState/')]))
setattr(result, 'public_ips', ','.join(public_ips))
setattr(result, 'fqdns', ','.join(fqdns))
setattr(result, 'private_ips', ','.join(private_ips))
setattr(result, 'mac_addresses', ','.join(mac_addresses))
    del result.instance_view  # the rest of the instance_view info is not needed here
return result
def list_skus(cmd, location=None, size=None, zone=None, show_all=None, resource_type=None):
from ._vm_utils import list_sku_info
result = list_sku_info(cmd.cli_ctx, location)
if not show_all:
result = [x for x in result if not [y for y in (x.restrictions or [])
if y.reason_code == 'NotAvailableForSubscription']]
if resource_type:
result = [x for x in result if x.resource_type.lower() == resource_type.lower()]
if size:
result = [x for x in result if x.resource_type == 'virtualMachines' and size.lower() in x.name.lower()]
if zone:
result = [x for x in result if x.location_info and x.location_info[0].zones]
return result
def list_vm(cmd, resource_group_name=None, show_details=False):
ccf = _compute_client_factory(cmd.cli_ctx)
vm_list = ccf.virtual_machines.list(resource_group_name=resource_group_name) \
if resource_group_name else ccf.virtual_machines.list_all()
if show_details:
return [get_vm_details(cmd, _parse_rg_name(v.id)[0], v.name) for v in vm_list]
return list(vm_list)
def list_vm_ip_addresses(cmd, resource_group_name=None, vm_name=None):
    # We start by getting NICs, as they sit at the center of all the data we want to
    # collect for a VM (as long as we need no VM info beyond what is available in the
    # Id, we don't need to make any calls to the compute RP).
    #
    # Since there is no guarantee that a NIC is in the same resource group as a given
    # virtual machine, we can't constrain the lookup to a single group.
network_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK)
nics = network_client.network_interfaces.list_all()
public_ip_addresses = network_client.public_ip_addresses.list_all()
ip_address_lookup = {pip.id: pip for pip in list(public_ip_addresses)}
result = []
for nic in [n for n in list(nics) if n.virtual_machine]:
nic_resource_group, nic_vm_name = _parse_rg_name(nic.virtual_machine.id)
# If provided, make sure that resource group name and vm name match the NIC we are
# looking at before adding it to the result...
same_resource_group_name = (resource_group_name is None or
resource_group_name.lower() == nic_resource_group.lower())
same_vm_name = (vm_name is None or
vm_name.lower() == nic_vm_name.lower())
if same_resource_group_name and same_vm_name:
network_info = {
'privateIpAddresses': [],
'publicIpAddresses': []
}
for ip_configuration in nic.ip_configurations:
network_info['privateIpAddresses'].append(ip_configuration.private_ip_address)
if ip_configuration.public_ip_address and ip_configuration.public_ip_address.id in ip_address_lookup:
public_ip_address = ip_address_lookup[ip_configuration.public_ip_address.id]
public_ip_addr_info = {
'id': public_ip_address.id,
'name': public_ip_address.name,
'ipAddress': public_ip_address.ip_address,
'ipAllocationMethod': public_ip_address.public_ip_allocation_method
}
try:
public_ip_addr_info['zone'] = public_ip_address.zones[0]
except (AttributeError, IndexError, TypeError):
pass
network_info['publicIpAddresses'].append(public_ip_addr_info)
result.append({
'virtualMachine': {
'resourceGroup': nic_resource_group,
'name': nic_vm_name,
'network': network_info
}
})
return result
def open_vm_port(cmd, resource_group_name, vm_name, port, priority=900, network_security_group_name=None,
apply_to_subnet=False):
from msrestazure.tools import parse_resource_id
network = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK)
vm = get_vm(cmd, resource_group_name, vm_name)
location = vm.location
nic_ids = list(vm.network_profile.network_interfaces)
if len(nic_ids) > 1:
        raise CLIError('Multiple NICs are not supported for this command. Create rules on the NSG '
                       'directly.')
if not nic_ids:
raise CLIError("No NIC associated with VM '{}'".format(vm_name))
# get existing NSG or create a new one
created_nsg = False
nic = network.network_interfaces.get(resource_group_name, os.path.split(nic_ids[0].id)[1])
if not apply_to_subnet:
nsg = nic.network_security_group
else:
subnet_id = parse_resource_id(nic.ip_configurations[0].subnet.id)
subnet = network.subnets.get(resource_group_name, subnet_id['name'], subnet_id['child_name_1'])
nsg = subnet.network_security_group
if not nsg:
NetworkSecurityGroup = \
cmd.get_models('NetworkSecurityGroup', resource_type=ResourceType.MGMT_NETWORK)
nsg = LongRunningOperation(cmd.cli_ctx, 'Creating network security group')(
network.network_security_groups.create_or_update(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
parameters=NetworkSecurityGroup(location=location)
)
)
created_nsg = True
# update the NSG with the new rule to allow inbound traffic
SecurityRule = cmd.get_models('SecurityRule', resource_type=ResourceType.MGMT_NETWORK)
rule_name = 'open-port-all' if port == '*' else 'open-port-{}'.format(port)
rule = SecurityRule(protocol='*', access='allow', direction='inbound', name=rule_name,
source_port_range='*', destination_port_range=port, priority=priority,
source_address_prefix='*', destination_address_prefix='*')
nsg_name = nsg.name or os.path.split(nsg.id)[1]
LongRunningOperation(cmd.cli_ctx, 'Adding security rule')(
network.security_rules.create_or_update(
resource_group_name, nsg_name, rule_name, rule)
)
# update the NIC or subnet if a new NSG was created
if created_nsg and not apply_to_subnet:
nic.network_security_group = nsg
LongRunningOperation(cmd.cli_ctx, 'Updating NIC')(network.network_interfaces.create_or_update(
resource_group_name, nic.name, nic))
elif created_nsg and apply_to_subnet:
subnet.network_security_group = nsg
LongRunningOperation(cmd.cli_ctx, 'Updating subnet')(network.subnets.create_or_update(
resource_group_name=resource_group_name,
virtual_network_name=subnet_id['name'],
subnet_name=subnet_id['child_name_1'],
subnet_parameters=subnet
))
return network.network_security_groups.get(resource_group_name, nsg_name)
def resize_vm(cmd, resource_group_name, vm_name, size, no_wait=False):
vm = get_vm(cmd, resource_group_name, vm_name)
if vm.hardware_profile.vm_size == size:
logger.warning("VM is already %s", size)
return None
vm.hardware_profile.vm_size = size # pylint: disable=no-member
return set_vm(cmd, vm, no_wait=no_wait)
def restart_vm(cmd, resource_group_name, vm_name, no_wait=False, force=False):
client = _compute_client_factory(cmd.cli_ctx)
if force:
return sdk_no_wait(no_wait, client.virtual_machines.redeploy, resource_group_name, vm_name)
return sdk_no_wait(no_wait, client.virtual_machines.restart, resource_group_name, vm_name)
def set_vm(cmd, instance, lro_operation=None, no_wait=False):
instance.resources = None # Issue: https://github.com/Azure/autorest/issues/934
client = _compute_client_factory(cmd.cli_ctx)
parsed_id = _parse_rg_name(instance.id)
poller = sdk_no_wait(no_wait, client.virtual_machines.create_or_update,
resource_group_name=parsed_id[0],
vm_name=parsed_id[1],
parameters=instance)
if lro_operation:
return lro_operation(poller)
return LongRunningOperation(cmd.cli_ctx)(poller)
def patch_vm(cmd, resource_group_name, vm_name, vm):
client = _compute_client_factory(cmd.cli_ctx)
poller = client.virtual_machines.update(resource_group_name, vm_name, vm)
return LongRunningOperation(cmd.cli_ctx)(poller)
def show_vm(cmd, resource_group_name, vm_name, show_details=False):
return get_vm_details(cmd, resource_group_name, vm_name) if show_details \
else get_vm(cmd, resource_group_name, vm_name)
def update_vm(cmd, resource_group_name, vm_name, os_disk=None, disk_caching=None,
write_accelerator=None, license_type=None, no_wait=False, ultra_ssd_enabled=None,
priority=None, max_price=None, proximity_placement_group=None, workspace=None, **kwargs):
from msrestazure.tools import parse_resource_id, resource_id, is_valid_resource_id
from ._vm_utils import update_write_accelerator_settings, update_disk_caching
vm = kwargs['parameters']
if os_disk is not None:
if is_valid_resource_id(os_disk):
disk_id, disk_name = os_disk, parse_resource_id(os_disk)['name']
else:
res = parse_resource_id(vm.id)
disk_id = resource_id(subscription=res['subscription'], resource_group=res['resource_group'],
namespace='Microsoft.Compute', type='disks', name=os_disk)
disk_name = os_disk
vm.storage_profile.os_disk.managed_disk.id = disk_id
vm.storage_profile.os_disk.name = disk_name
if write_accelerator is not None:
update_write_accelerator_settings(vm.storage_profile, write_accelerator)
if disk_caching is not None:
update_disk_caching(vm.storage_profile, disk_caching)
if license_type is not None:
vm.license_type = license_type
if ultra_ssd_enabled is not None:
if vm.additional_capabilities is None:
AdditionalCapabilities = cmd.get_models('AdditionalCapabilities')
vm.additional_capabilities = AdditionalCapabilities(ultra_ssd_enabled=ultra_ssd_enabled)
else:
vm.additional_capabilities.ultra_ssd_enabled = ultra_ssd_enabled
if priority is not None:
vm.priority = priority
if max_price is not None:
if vm.billing_profile is None:
BillingProfile = cmd.get_models('BillingProfile')
vm.billing_profile = BillingProfile(max_price=max_price)
else:
vm.billing_profile.max_price = max_price
if proximity_placement_group is not None:
vm.proximity_placement_group = {'id': proximity_placement_group}
if workspace is not None:
workspace_id = _prepare_workspace(cmd, resource_group_name, workspace)
workspace_name = parse_resource_id(workspace_id)['name']
_set_log_analytics_workspace_extension(cmd=cmd,
resource_group_name=resource_group_name,
vm=vm,
vm_name=vm_name,
workspace_name=workspace_name)
os_type = vm.storage_profile.os_disk.os_type.value if vm.storage_profile.os_disk.os_type else None
_set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name)
return sdk_no_wait(no_wait, _compute_client_factory(cmd.cli_ctx).virtual_machines.create_or_update,
resource_group_name, vm_name, **kwargs)
# endregion
# region VirtualMachines AvailabilitySets
def _get_availset(cmd, resource_group_name, name):
return _compute_client_factory(cmd.cli_ctx).availability_sets.get(resource_group_name, name)
def _set_availset(cmd, resource_group_name, name, **kwargs):
return _compute_client_factory(cmd.cli_ctx).availability_sets.create_or_update(resource_group_name, name, **kwargs)
# pylint: disable=inconsistent-return-statements
def convert_av_set_to_managed_disk(cmd, resource_group_name, availability_set_name):
av_set = _get_availset(cmd, resource_group_name, availability_set_name)
if av_set.sku.name != 'Aligned':
av_set.sku.name = 'Aligned'
        # double-check whether the existing fault domain count is supported
skus = list_skus(cmd, av_set.location)
av_sku = next((s for s in skus if s.resource_type == 'availabilitySets' and s.name == 'Aligned'), None)
if av_sku and av_sku.capabilities:
max_fd = int(next((c.value for c in av_sku.capabilities if c.name == 'MaximumPlatformFaultDomainCount'),
'0'))
if max_fd and max_fd < av_set.platform_fault_domain_count:
logger.warning("The fault domain count will be adjusted from %s to %s so to stay within region's "
"limitation", av_set.platform_fault_domain_count, max_fd)
av_set.platform_fault_domain_count = max_fd
return _set_availset(cmd, resource_group_name=resource_group_name, name=availability_set_name,
parameters=av_set)
logger.warning('Availability set %s is already configured for managed disks.', availability_set_name)
def create_av_set(cmd, availability_set_name, resource_group_name, platform_fault_domain_count=2,
platform_update_domain_count=None, location=None, proximity_placement_group=None, unmanaged=False,
no_wait=False, tags=None, validate=False):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.vm._template_builder import build_av_set_resource
tags = tags or {}
# Build up the ARM template
master_template = ArmTemplateBuilder()
av_set_resource = build_av_set_resource(cmd, availability_set_name, location, tags,
platform_update_domain_count,
platform_fault_domain_count, unmanaged,
proximity_placement_group=proximity_placement_group)
master_template.add_resource(av_set_resource)
template = master_template.build()
# deploy ARM template
deployment_name = 'av_set_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(template=template, parameters={}, mode='incremental')
if validate:
return client.validate(resource_group_name, deployment_name, properties)
if no_wait:
return sdk_no_wait(no_wait, client.create_or_update,
resource_group_name, deployment_name, properties)
LongRunningOperation(cmd.cli_ctx)(sdk_no_wait(no_wait, client.create_or_update,
resource_group_name, deployment_name, properties))
compute_client = _compute_client_factory(cmd.cli_ctx)
return compute_client.availability_sets.get(resource_group_name, availability_set_name)
def update_av_set(instance, resource_group_name, proximity_placement_group=None):
if proximity_placement_group is not None:
instance.proximity_placement_group = {'id': proximity_placement_group}
return instance
def list_av_sets(cmd, resource_group_name=None):
op_group = _compute_client_factory(cmd.cli_ctx).availability_sets
if resource_group_name:
return op_group.list(resource_group_name)
return op_group.list_by_subscription(expand='virtualMachines/$ref')
# endregion
# region VirtualMachines BootDiagnostics
def disable_boot_diagnostics(cmd, resource_group_name, vm_name):
vm = get_vm(cmd, resource_group_name, vm_name)
diag_profile = vm.diagnostics_profile
if not (diag_profile and diag_profile.boot_diagnostics and diag_profile.boot_diagnostics.enabled):
return
diag_profile.boot_diagnostics.enabled = False
diag_profile.boot_diagnostics.storage_uri = None
set_vm(cmd, vm, ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'disabling boot diagnostics', 'done'))
def enable_boot_diagnostics(cmd, resource_group_name, vm_name, storage):
from azure.cli.command_modules.vm._vm_utils import get_storage_blob_uri
vm = get_vm(cmd, resource_group_name, vm_name)
storage_uri = get_storage_blob_uri(cmd.cli_ctx, storage)
if (vm.diagnostics_profile and
vm.diagnostics_profile.boot_diagnostics and
vm.diagnostics_profile.boot_diagnostics.enabled and
vm.diagnostics_profile.boot_diagnostics.storage_uri and
vm.diagnostics_profile.boot_diagnostics.storage_uri.lower() == storage_uri.lower()):
return
DiagnosticsProfile, BootDiagnostics = cmd.get_models('DiagnosticsProfile', 'BootDiagnostics')
boot_diag = BootDiagnostics(enabled=True, storage_uri=storage_uri)
if vm.diagnostics_profile is None:
vm.diagnostics_profile = DiagnosticsProfile(boot_diagnostics=boot_diag)
else:
vm.diagnostics_profile.boot_diagnostics = boot_diag
set_vm(cmd, vm, ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'enabling boot diagnostics', 'done'))
class BootLogStreamWriter(object): # pylint: disable=too-few-public-methods
def __init__(self, out):
self.out = out
def write(self, str_or_bytes):
content = str_or_bytes
if isinstance(str_or_bytes, bytes):
content = str_or_bytes.decode('utf8')
try:
self.out.write(content)
except UnicodeEncodeError:
# e.g. 'charmap' codec can't encode characters in position 258829-258830: character maps to <undefined>
import unicodedata
ascii_content = unicodedata.normalize('NFKD', content).encode('ascii', 'ignore')
self.out.write(ascii_content.decode())
logger.warning("A few unicode characters have been ignored because the shell is not able to display. "
"To see the full log, use a shell with unicode capacity")
def get_boot_log(cmd, resource_group_name, vm_name):
import re
import sys
from azure.cli.core.profiles import get_sdk
BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob.blockblobservice#BlockBlobService')
client = _compute_client_factory(cmd.cli_ctx)
virtual_machine = client.virtual_machines.get(resource_group_name, vm_name, expand='instanceView')
# pylint: disable=no-member
if (not virtual_machine.instance_view.boot_diagnostics or
not virtual_machine.instance_view.boot_diagnostics.serial_console_log_blob_uri):
raise CLIError('Please enable boot diagnostics.')
blob_uri = virtual_machine.instance_view.boot_diagnostics.serial_console_log_blob_uri
# Find storage account for diagnostics
storage_mgmt_client = _get_storage_management_client(cmd.cli_ctx)
if not blob_uri:
raise CLIError('No console log available')
try:
storage_accounts = storage_mgmt_client.storage_accounts.list()
matching_storage_account = (a for a in list(storage_accounts)
if blob_uri.startswith(a.primary_endpoints.blob))
storage_account = next(matching_storage_account)
except StopIteration:
        raise CLIError('Failed to find the storage account for the console log file')
regex = r'/subscriptions/[^/]+/resourceGroups/(?P<rg>[^/]+)/.+'
match = re.search(regex, storage_account.id, re.I)
rg = match.group('rg')
# Get account key
keys = storage_mgmt_client.storage_accounts.list_keys(rg, storage_account.name)
# Extract container and blob name from url...
container, blob = urlparse(blob_uri).path.split('/')[-2:]
storage_client = get_data_service_client(
cmd.cli_ctx,
BlockBlobService,
storage_account.name,
keys.keys[0].value,
endpoint_suffix=cmd.cli_ctx.cloud.suffixes.storage_endpoint) # pylint: disable=no-member
    # our stream writer is not seekable, so disable parallel download
storage_client.get_blob_to_stream(container, blob, BootLogStreamWriter(sys.stdout), max_connections=1)
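# Illustrative sketch (not part of the original module): how the container and
# blob names are recovered from the serial console log blob URI above. The URI
# below is made up for illustration.
def _example_split_boot_log_blob_uri():
    from urllib.parse import urlparse
    blob_uri = ('https://mystorageacct.blob.core.windows.net/'
                'bootdiagnostics-myvm-0000/myvm.serialconsole.log')
    container, blob = urlparse(blob_uri).path.split('/')[-2:]
    assert container == 'bootdiagnostics-myvm-0000'
    assert blob == 'myvm.serialconsole.log'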
# endregion
# region VirtualMachines Diagnostics
def set_diagnostics_extension(
cmd, resource_group_name, vm_name, settings, protected_settings=None, version=None,
no_auto_upgrade=False):
client = _compute_client_factory(cmd.cli_ctx)
vm = client.virtual_machines.get(resource_group_name, vm_name, 'instanceView')
# pylint: disable=no-member
is_linux_os = _is_linux_os(vm)
vm_extension_name = _LINUX_DIAG_EXT if is_linux_os else _WINDOWS_DIAG_EXT
if is_linux_os: # check incompatible version
exts = vm.instance_view.extensions or []
major_ver = extension_mappings[_LINUX_DIAG_EXT]['version'].split('.')[0]
if next((e for e in exts if e.name == vm_extension_name and
not e.type_handler_version.startswith(major_ver + '.')), None):
            logger.warning('An incompatible version of the diagnostics extension is installed. '
                           'It will be updated to a new version')
poller = client.virtual_machine_extensions.delete(resource_group_name, vm_name,
vm_extension_name)
LongRunningOperation(cmd.cli_ctx)(poller)
return set_extension(cmd, resource_group_name, vm_name, vm_extension_name,
extension_mappings[vm_extension_name]['publisher'],
version or extension_mappings[vm_extension_name]['version'],
settings,
protected_settings,
no_auto_upgrade)
def show_default_diagnostics_configuration(is_windows_os=False):
public_settings = get_default_diag_config(is_windows_os)
# pylint: disable=line-too-long
protected_settings_info = json.dumps({
'storageAccountName': "__STORAGE_ACCOUNT_NAME__",
        # LAD and WAD are not consistent about the SAS token format; call that out here
"storageAccountSasToken": "__SAS_TOKEN_{}__".format("WITH_LEADING_QUESTION_MARK" if is_windows_os else "WITHOUT_LEADING_QUESTION_MARK")
}, indent=2)
    logger.warning('Protected settings with storage account info are required to work with the default configuration, e.g. \n%s', protected_settings_info)
return public_settings
# endregion
# region VirtualMachines Disks (Managed)
def attach_managed_data_disk(cmd, resource_group_name, vm_name, disk, new=False, sku=None,
size_gb=1023, lun=None, caching=None, enable_write_accelerator=False):
'''attach a managed disk'''
from msrestazure.tools import parse_resource_id
vm = get_vm(cmd, resource_group_name, vm_name)
DataDisk, ManagedDiskParameters, DiskCreateOption = cmd.get_models(
'DataDisk', 'ManagedDiskParameters', 'DiskCreateOptionTypes')
# pylint: disable=no-member
if lun is None:
lun = _get_disk_lun(vm.storage_profile.data_disks)
if new:
data_disk = DataDisk(lun=lun, create_option=DiskCreateOption.empty,
name=parse_resource_id(disk)['name'],
disk_size_gb=size_gb, caching=caching,
managed_disk=ManagedDiskParameters(storage_account_type=sku))
else:
params = ManagedDiskParameters(id=disk, storage_account_type=sku)
data_disk = DataDisk(lun=lun, create_option=DiskCreateOption.attach, managed_disk=params, caching=caching)
if enable_write_accelerator:
data_disk.write_accelerator_enabled = enable_write_accelerator
vm.storage_profile.data_disks.append(data_disk)
set_vm(cmd, vm)
def detach_data_disk(cmd, resource_group_name, vm_name, disk_name):
    # handle both unmanaged and managed disks here
vm = get_vm(cmd, resource_group_name, vm_name)
# pylint: disable=no-member
leftovers = [d for d in vm.storage_profile.data_disks if d.name.lower() != disk_name.lower()]
if len(vm.storage_profile.data_disks) == len(leftovers):
raise CLIError("No disk with the name '{}' was found".format(disk_name))
vm.storage_profile.data_disks = leftovers
set_vm(cmd, vm)
# endregion
# region VirtualMachines Extensions
def list_extensions(cmd, resource_group_name, vm_name):
vm = get_vm(cmd, resource_group_name, vm_name)
extension_type = 'Microsoft.Compute/virtualMachines/extensions'
result = [r for r in (vm.resources or []) if r.type == extension_type]
return result
def set_extension(cmd, resource_group_name, vm_name, vm_extension_name, publisher, version=None, settings=None,
protected_settings=None, no_auto_upgrade=False, force_update=False, no_wait=False,
extension_instance_name=None):
vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView')
client = _compute_client_factory(cmd.cli_ctx)
if not extension_instance_name:
extension_instance_name = vm_extension_name
VirtualMachineExtension = cmd.get_models('VirtualMachineExtension')
instance_name = _get_extension_instance_name(vm.instance_view, publisher, vm_extension_name,
suggested_name=extension_instance_name)
if instance_name != extension_instance_name:
msg = "A %s extension with name %s already exists. Updating it with your settings..."
logger.warning(msg, vm_extension_name, instance_name)
version = _normalize_extension_version(cmd.cli_ctx, publisher, vm_extension_name, version, vm.location)
ext = VirtualMachineExtension(location=vm.location,
publisher=publisher,
virtual_machine_extension_type=vm_extension_name,
protected_settings=protected_settings,
type_handler_version=version,
settings=settings,
auto_upgrade_minor_version=(not no_auto_upgrade))
if force_update:
ext.force_update_tag = str(_gen_guid())
return sdk_no_wait(no_wait, client.virtual_machine_extensions.create_or_update,
resource_group_name, vm_name, instance_name, ext)
# endregion
# region VirtualMachines Extension Images
def list_vm_extension_images(
cmd, image_location=None, publisher_name=None, name=None, version=None, latest=False):
return load_extension_images_thru_services(
cmd.cli_ctx, publisher_name, name, version, image_location, latest)
# endregion
# region VirtualMachines Identity
def _remove_identities(cmd, resource_group_name, name, identities, getter, setter):
from ._vm_utils import MSI_LOCAL_ID
ResourceIdentityType = cmd.get_models('ResourceIdentityType', operation_group='virtual_machines')
remove_system_assigned_identity = False
if MSI_LOCAL_ID in identities:
remove_system_assigned_identity = True
identities.remove(MSI_LOCAL_ID)
resource = getter(cmd, resource_group_name, name)
if resource.identity is None:
return None
emsis_to_remove = []
if identities:
existing_emsis = {x.lower() for x in list((resource.identity.user_assigned_identities or {}).keys())}
emsis_to_remove = {x.lower() for x in identities}
non_existing = emsis_to_remove.difference(existing_emsis)
if non_existing:
raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
if not list(existing_emsis - emsis_to_remove): # if all emsis are gone, we need to update the type
if resource.identity.type == ResourceIdentityType.user_assigned:
resource.identity.type = ResourceIdentityType.none
elif resource.identity.type == ResourceIdentityType.system_assigned_user_assigned:
resource.identity.type = ResourceIdentityType.system_assigned
resource.identity.user_assigned_identities = None
if remove_system_assigned_identity:
resource.identity.type = (ResourceIdentityType.none
if resource.identity.type == ResourceIdentityType.system_assigned
else ResourceIdentityType.user_assigned)
if emsis_to_remove:
if resource.identity.type not in [ResourceIdentityType.none, ResourceIdentityType.system_assigned]:
resource.identity.user_assigned_identities = {}
for identity in emsis_to_remove:
resource.identity.user_assigned_identities[identity] = None
result = LongRunningOperation(cmd.cli_ctx)(setter(resource_group_name, name, resource))
return result.identity
def remove_vm_identity(cmd, resource_group_name, vm_name, identities=None):
def setter(resource_group_name, vm_name, vm):
client = _compute_client_factory(cmd.cli_ctx)
VirtualMachineUpdate = cmd.get_models('VirtualMachineUpdate', operation_group='virtual_machines')
vm_update = VirtualMachineUpdate(identity=vm.identity)
return client.virtual_machines.update(resource_group_name, vm_name, vm_update)
if identities is None:
from ._vm_utils import MSI_LOCAL_ID
identities = [MSI_LOCAL_ID]
return _remove_identities(cmd, resource_group_name, vm_name, identities, get_vm, setter)
# endregion
# region VirtualMachines Images
def list_vm_images(cmd, image_location=None, publisher_name=None, offer=None, sku=None,
all=False): # pylint: disable=redefined-builtin
load_thru_services = all
if load_thru_services:
if not publisher_name and not offer and not sku:
            logger.warning("You are retrieving all the images from the server, which could take more than a minute. "
"To shorten the wait, provide '--publisher', '--offer' or '--sku'. Partial name search "
"is supported.")
all_images = load_images_thru_services(cmd.cli_ctx, publisher_name, offer, sku, image_location)
else:
all_images = load_images_from_aliases_doc(cmd.cli_ctx, publisher_name, offer, sku)
logger.warning(
            'You are viewing an offline list of images; use --all to retrieve an up-to-date list')
for i in all_images:
i['urn'] = ':'.join([i['publisher'], i['offer'], i['sku'], i['version']])
return all_images
def show_vm_image(cmd, urn=None, publisher=None, offer=None, sku=None, version=None, location=None):
from azure.cli.core.commands.parameters import get_one_of_subscription_locations
    usage_err = 'usage error: --publisher STRING --offer STRING --sku STRING --version STRING | --urn STRING'
location = location or get_one_of_subscription_locations(cmd.cli_ctx)
if urn:
if any([publisher, offer, sku, version]):
raise CLIError(usage_err)
publisher, offer, sku, version = urn.split(":")
if version.lower() == 'latest':
version = _get_latest_image_version(cmd.cli_ctx, location, publisher, offer, sku)
elif not publisher or not offer or not sku or not version:
raise CLIError(usage_err)
client = _compute_client_factory(cmd.cli_ctx)
return client.virtual_machine_images.get(location, publisher, offer, sku, version)
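# Illustrative sketch: the URN accepted above is simply the four colon-separated parts that the
# function splits, resolving 'latest' to a concrete version. The example values are assumptions,
# not taken from the original source:
#
#   show_vm_image(cmd, urn='Canonical:UbuntuServer:18.04-LTS:latest')
#   # is equivalent to passing publisher='Canonical', offer='UbuntuServer', sku='18.04-LTS'
#   # and the resolved latest version explicitly.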
def accept_market_ordering_terms(cmd, urn=None, publisher=None, offer=None, plan=None):
from azure.mgmt.marketplaceordering import MarketplaceOrderingAgreements
    usage_err = 'usage error: --plan STRING --offer STRING --publisher STRING | --urn STRING'
if urn:
if any([publisher, offer, plan]):
raise CLIError(usage_err)
publisher, offer, _, _ = urn.split(':')
image = show_vm_image(cmd, urn)
if not image.plan:
logger.warning("Image '%s' has no terms to accept.", urn)
return
plan = image.plan.name
else:
if not publisher or not offer or not plan:
raise CLIError(usage_err)
market_place_client = get_mgmt_service_client(cmd.cli_ctx, MarketplaceOrderingAgreements)
term = market_place_client.marketplace_agreements.get(publisher, offer, plan)
term.accepted = True
return market_place_client.marketplace_agreements.create(publisher, offer, plan, term)
# endregion
def _terms_prepare(cmd, urn, publisher, offer, plan):
if urn:
if any([publisher, offer, plan]):
raise CLIError('usage error: If using --urn, do not use any of --plan, --offer, --publisher.')
terms = urn.split(':')
if len(terms) != 4:
raise CLIError('usage error: urn should be in the format of publisher:offer:sku:version.')
publisher, offer = terms[0], terms[1]
image = show_vm_image(cmd, urn)
if not image.plan:
raise CLIError("Image '%s' has no terms to accept." % urn)
plan = image.plan.name
else:
if not all([publisher, offer, plan]):
raise CLIError(
'usage error: If not using --urn, all of --plan, --offer and --publisher should be provided.')
return publisher, offer, plan
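# Hedged example of the URN handling above (values are illustrative): a URN such as
# 'publisher1:offer1:sku1:1.0.0' is split into four parts; only the publisher and offer are taken
# from the URN, while the billing plan is read from the image's plan information:
#
#   publisher, offer, plan = _terms_prepare(cmd, 'publisher1:offer1:sku1:1.0.0', None, None, None)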
def _accept_cancel_terms(cmd, urn, publisher, offer, plan, accept):
publisher, offer, plan = _terms_prepare(cmd, urn, publisher, offer, plan)
op = cf_vm_image_term(cmd.cli_ctx, '')
terms = op.get(publisher, offer, plan)
terms.accepted = accept
return op.create(publisher, offer, plan, terms)
def accept_terms(cmd, urn=None, publisher=None, offer=None, plan=None):
"""
Accept Azure Marketplace image terms so that the image can be used to create VMs.
    :param cmd: The CLI command context
    :param urn: URN, in the format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted
    :param publisher: Image publisher
    :param offer: Image offer
    :param plan: Image billing plan
    :return: The accepted marketplace image terms
"""
return _accept_cancel_terms(cmd, urn, publisher, offer, plan, True)
def cancel_terms(cmd, urn=None, publisher=None, offer=None, plan=None):
"""
Cancel Azure Marketplace image terms.
    :param cmd: The CLI command context
    :param urn: URN, in the format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted
    :param publisher: Image publisher
    :param offer: Image offer
    :param plan: Image billing plan
    :return: The canceled marketplace image terms
"""
return _accept_cancel_terms(cmd, urn, publisher, offer, plan, False)
def get_terms(cmd, urn=None, publisher=None, offer=None, plan=None):
"""
Get the details of Azure Marketplace image terms.
    :param cmd: The CLI command context
    :param urn: URN, in the format of 'publisher:offer:sku:version'. If specified, other argument values can be omitted
    :param publisher: Image publisher
    :param offer: Image offer
    :param plan: Image billing plan
    :return: The current marketplace image terms
"""
publisher, offer, plan = _terms_prepare(cmd, urn, publisher, offer, plan)
op = cf_vm_image_term(cmd.cli_ctx, '')
terms = op.get(publisher, offer, plan)
return terms
# region VirtualMachines NetworkInterfaces (NICs)
def show_vm_nic(cmd, resource_group_name, vm_name, nic):
from msrestazure.tools import parse_resource_id
vm = get_vm(cmd, resource_group_name, vm_name)
found = next(
(n for n in vm.network_profile.network_interfaces if nic.lower() == n.id.lower()), None
# pylint: disable=no-member
)
if found:
network_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK)
nic_name = parse_resource_id(found.id)['name']
return network_client.network_interfaces.get(resource_group_name, nic_name)
raise CLIError("NIC '{}' not found on VM '{}'".format(nic, vm_name))
def list_vm_nics(cmd, resource_group_name, vm_name):
vm = get_vm(cmd, resource_group_name, vm_name)
return vm.network_profile.network_interfaces # pylint: disable=no-member
def add_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None):
vm = get_vm(cmd, resource_group_name, vm_name)
new_nics = _build_nic_list(cmd, nics)
existing_nics = _get_existing_nics(vm)
return _update_vm_nics(cmd, vm, existing_nics + new_nics, primary_nic)
def remove_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None):
def to_delete(nic_id):
return [n for n in nics_to_delete if n.id.lower() == nic_id.lower()]
vm = get_vm(cmd, resource_group_name, vm_name)
nics_to_delete = _build_nic_list(cmd, nics)
existing_nics = _get_existing_nics(vm)
survived = [x for x in existing_nics if not to_delete(x.id)]
return _update_vm_nics(cmd, vm, survived, primary_nic)
def set_vm_nic(cmd, resource_group_name, vm_name, nics, primary_nic=None):
vm = get_vm(cmd, resource_group_name, vm_name)
nics = _build_nic_list(cmd, nics)
return _update_vm_nics(cmd, vm, nics, primary_nic)
def _build_nic_list(cmd, nic_ids):
NetworkInterfaceReference = cmd.get_models('NetworkInterfaceReference')
nic_list = []
if nic_ids:
# pylint: disable=no-member
network_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK)
for nic_id in nic_ids:
rg, name = _parse_rg_name(nic_id)
nic = network_client.network_interfaces.get(rg, name)
nic_list.append(NetworkInterfaceReference(id=nic.id, primary=False))
return nic_list
def _get_existing_nics(vm):
network_profile = getattr(vm, 'network_profile', None)
nics = []
if network_profile is not None:
nics = network_profile.network_interfaces or []
return nics
def _update_vm_nics(cmd, vm, nics, primary_nic):
NetworkProfile = cmd.get_models('NetworkProfile')
if primary_nic:
try:
_, primary_nic_name = _parse_rg_name(primary_nic)
except IndexError:
primary_nic_name = primary_nic
matched = [n for n in nics if _parse_rg_name(n.id)[1].lower() == primary_nic_name.lower()]
if not matched:
raise CLIError('Primary Nic {} is not found'.format(primary_nic))
if len(matched) > 1:
raise CLIError('Duplicate Nic entries with name {}'.format(primary_nic))
for n in nics:
n.primary = False
matched[0].primary = True
elif nics:
if not [n for n in nics if n.primary]:
nics[0].primary = True
network_profile = getattr(vm, 'network_profile', None)
if network_profile is None:
vm.network_profile = NetworkProfile(network_interfaces=nics)
else:
network_profile.network_interfaces = nics
return set_vm(cmd, vm).network_profile.network_interfaces
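# Minimal sketch of the primary-NIC selection above (resource ids are hypothetical): when
# --primary-nic is given, the matching NIC is flagged primary and all others are cleared;
# otherwise the first NIC is promoted if none is currently marked primary.
#
#   nics = _build_nic_list(cmd, ['/subscriptions/<sub>/resourceGroups/rg/providers/'
#                                'Microsoft.Network/networkInterfaces/nic1'])
#   _update_vm_nics(cmd, vm, nics, primary_nic='nic1')   # 'nic1' becomes the primary NIC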
# endregion
# region VirtualMachines RunCommand
def run_command_invoke(cmd, resource_group_name, vm_vmss_name, command_id, scripts=None, parameters=None, instance_id=None): # pylint: disable=line-too-long
RunCommandInput, RunCommandInputParameter = cmd.get_models('RunCommandInput', 'RunCommandInputParameter')
parameters = parameters or []
run_command_input_parameters = []
auto_arg_name_num = 0
for p in parameters:
if '=' in p:
n, v = p.split('=', 1)
else:
            # The RunCommand API requires named arguments, which doesn't fit bash scripts that take
            # positional arguments, so we generate placeholder names here just to keep the API happy
            # (see the sketch after this function). Note: mixing named and positional styles is not
            # handled; this will be consolidated by GA once the API is settled.
auto_arg_name_num += 1
n = 'arg{}'.format(auto_arg_name_num)
v = p
run_command_input_parameters.append(RunCommandInputParameter(name=n, value=v))
client = _compute_client_factory(cmd.cli_ctx)
# if instance_id, this is a vmss instance
if instance_id:
return client.virtual_machine_scale_set_vms.run_command(resource_group_name, vm_vmss_name, instance_id,
RunCommandInput(command_id=command_id, script=scripts,
parameters=run_command_input_parameters)) # pylint: disable=line-too-long
# otherwise this is a regular vm instance
return client.virtual_machines.run_command(resource_group_name, vm_vmss_name,
RunCommandInput(command_id=command_id, script=scripts,
parameters=run_command_input_parameters))
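# Hedged example of the parameter handling above (values are made up): named parameters keep
# their names, while bare values receive auto-generated names (arg1, arg2, ...) because the
# RunCommand API requires every parameter to be named.
#
#   run_command_invoke(cmd, 'myRG', 'myVM', 'RunShellScript',
#                      scripts=['echo $1 $2'],
#                      parameters=['greeting=hello', 'world'])
#   # -> sent as [{'name': 'greeting', 'value': 'hello'}, {'name': 'arg1', 'value': 'world'}]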
def vm_run_command_invoke(cmd, resource_group_name, vm_name, command_id, scripts=None, parameters=None):
return run_command_invoke(cmd, resource_group_name, vm_name, command_id, scripts, parameters)
# endregion
# region VirtualMachines Secrets
def _get_vault_id_from_name(cli_ctx, client, vault_name):
group_name = _get_resource_group_from_vault_name(cli_ctx, vault_name)
if not group_name:
        raise CLIError("unable to find vault '{}' in the current subscription.".format(vault_name))
vault = client.get(group_name, vault_name)
return vault.id
def get_vm_format_secret(cmd, secrets, certificate_store=None, keyvault=None, resource_group_name=None):
from azure.keyvault import KeyVaultId
import re
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT).vaults
grouped_secrets = {}
merged_secrets = []
for s in secrets:
merged_secrets += s.splitlines()
# group secrets by source vault
for secret in merged_secrets:
parsed = KeyVaultId.parse_secret_id(secret)
match = re.search('://(.+?)\\.', parsed.vault)
vault_name = match.group(1)
if vault_name not in grouped_secrets:
grouped_secrets[vault_name] = {
'vaultCertificates': [],
'id': keyvault or _get_vault_id_from_name(cmd.cli_ctx, client, vault_name)
}
vault_cert = {'certificateUrl': secret}
if certificate_store:
vault_cert['certificateStore'] = certificate_store
grouped_secrets[vault_name]['vaultCertificates'].append(vault_cert)
# transform the reduced map to vm format
formatted = [{'sourceVault': {'id': value['id']},
'vaultCertificates': value['vaultCertificates']}
for _, value in list(grouped_secrets.items())]
return formatted
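# Illustrative sketch of the transformation above (URLs are placeholders): secret URLs are
# grouped by the key vault they come from and reshaped into the structure expected by the VM's
# os_profile.secrets property, e.g.
#
#   get_vm_format_secret(cmd, ['https://myvault.vault.azure.net/secrets/cert1/<version>'])
#   # -> [{'sourceVault': {'id': '<resource id of myvault>'},
#   #      'vaultCertificates': [{'certificateUrl': 'https://myvault.vault.azure.net/...'}]}]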
def add_vm_secret(cmd, resource_group_name, vm_name, keyvault, certificate, certificate_store=None):
from msrestazure.tools import parse_resource_id
from ._vm_utils import create_keyvault_data_plane_client, get_key_vault_base_url
VaultSecretGroup, SubResource, VaultCertificate = cmd.get_models(
'VaultSecretGroup', 'SubResource', 'VaultCertificate')
vm = get_vm(cmd, resource_group_name, vm_name)
    if '://' not in certificate:  # has a cert name rather than a full URL?
keyvault_client = create_keyvault_data_plane_client(cmd.cli_ctx)
cert_info = keyvault_client.get_certificate(
get_key_vault_base_url(cmd.cli_ctx, parse_resource_id(keyvault)['name']), certificate, '')
certificate = cert_info.sid
if not _is_linux_os(vm):
certificate_store = certificate_store or 'My'
elif certificate_store:
raise CLIError('Usage error: --certificate-store is only applicable on Windows VM')
vault_cert = VaultCertificate(certificate_url=certificate, certificate_store=certificate_store)
vault_secret_group = next((x for x in vm.os_profile.secrets
if x.source_vault and x.source_vault.id.lower() == keyvault.lower()), None)
if vault_secret_group:
vault_secret_group.vault_certificates.append(vault_cert)
else:
vault_secret_group = VaultSecretGroup(source_vault=SubResource(id=keyvault), vault_certificates=[vault_cert])
vm.os_profile.secrets.append(vault_secret_group)
vm = set_vm(cmd, vm)
return vm.os_profile.secrets
def list_vm_secrets(cmd, resource_group_name, vm_name):
vm = get_vm(cmd, resource_group_name, vm_name)
if vm.os_profile:
return vm.os_profile.secrets
return []
def remove_vm_secret(cmd, resource_group_name, vm_name, keyvault, certificate=None):
vm = get_vm(cmd, resource_group_name, vm_name)
# support 2 kinds of filter:
# a. if only keyvault is supplied, we delete its whole vault group.
# b. if both keyvault and certificate are supplied, we only delete the specific cert entry.
to_keep = vm.os_profile.secrets
keyvault_matched = []
if keyvault:
keyvault = keyvault.lower()
keyvault_matched = [x for x in to_keep if x.source_vault and x.source_vault.id.lower() == keyvault]
if keyvault and not certificate:
to_keep = [x for x in to_keep if x not in keyvault_matched]
elif certificate:
temp = keyvault_matched if keyvault else to_keep
cert_url_pattern = certificate.lower()
if '://' not in cert_url_pattern: # just a cert name?
cert_url_pattern = '/' + cert_url_pattern + '/'
for x in temp:
x.vault_certificates = ([v for v in x.vault_certificates
if not(v.certificate_url and cert_url_pattern in v.certificate_url.lower())])
to_keep = [x for x in to_keep if x.vault_certificates] # purge all groups w/o any cert entries
vm.os_profile.secrets = to_keep
vm = set_vm(cmd, vm)
return vm.os_profile.secrets
# endregion
# region VirtualMachines UnmanagedDisks
def attach_unmanaged_data_disk(cmd, resource_group_name, vm_name, new=False, vhd_uri=None, lun=None,
disk_name=None, size_gb=1023, caching=None):
DataDisk, DiskCreateOptionTypes, VirtualHardDisk = cmd.get_models(
'DataDisk', 'DiskCreateOptionTypes', 'VirtualHardDisk')
if not new and not disk_name:
raise CLIError('Please provide the name of the existing disk to attach')
create_option = DiskCreateOptionTypes.empty if new else DiskCreateOptionTypes.attach
vm = get_vm(cmd, resource_group_name, vm_name)
if disk_name is None:
import datetime
disk_name = vm_name + '-' + datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
# pylint: disable=no-member
if vhd_uri is None:
if not hasattr(vm.storage_profile.os_disk, 'vhd') or not vm.storage_profile.os_disk.vhd:
raise CLIError('Adding unmanaged disks to a VM with managed disks is not supported')
blob_uri = vm.storage_profile.os_disk.vhd.uri
vhd_uri = blob_uri[0:blob_uri.rindex('/') + 1] + disk_name + '.vhd'
if lun is None:
lun = _get_disk_lun(vm.storage_profile.data_disks)
disk = DataDisk(lun=lun, vhd=VirtualHardDisk(uri=vhd_uri), name=disk_name,
create_option=create_option,
caching=caching, disk_size_gb=size_gb if new else None)
if vm.storage_profile.data_disks is None:
vm.storage_profile.data_disks = []
vm.storage_profile.data_disks.append(disk)
return set_vm(cmd, vm)
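# Hedged example of the VHD URI derivation above (the URI is hypothetical): when no --vhd-uri is
# given, the new data disk blob is placed next to the OS disk blob, e.g. an OS disk at
#   https://mystorage.blob.core.windows.net/vhds/myVM-osdisk.vhd
# yields a data disk URI of
#   https://mystorage.blob.core.windows.net/vhds/<disk_name>.vhd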
def list_unmanaged_disks(cmd, resource_group_name, vm_name):
vm = get_vm(cmd, resource_group_name, vm_name)
return vm.storage_profile.data_disks # pylint: disable=no-member
# endregion
# region VirtualMachines Users
def _update_linux_access_extension(cmd, vm_instance, resource_group_name, protected_settings,
no_wait=False):
client = _compute_client_factory(cmd.cli_ctx)
VirtualMachineExtension = cmd.get_models('VirtualMachineExtension')
# pylint: disable=no-member
instance_name = _get_extension_instance_name(vm_instance.instance_view,
extension_mappings[_LINUX_ACCESS_EXT]['publisher'],
_LINUX_ACCESS_EXT,
_ACCESS_EXT_HANDLER_NAME)
publisher, version, auto_upgrade = _get_access_extension_upgrade_info(
vm_instance.resources, _LINUX_ACCESS_EXT)
ext = VirtualMachineExtension(location=vm_instance.location, # pylint: disable=no-member
publisher=publisher,
virtual_machine_extension_type=_LINUX_ACCESS_EXT,
protected_settings=protected_settings,
type_handler_version=version,
settings={},
auto_upgrade_minor_version=auto_upgrade)
return sdk_no_wait(no_wait, client.virtual_machine_extensions.create_or_update,
resource_group_name, vm_instance.name, instance_name, ext)
def _set_linux_user(cmd, vm_instance, resource_group_name, username,
password=None, ssh_key_value=None, no_wait=False):
protected_settings = {}
protected_settings['username'] = username
if password:
protected_settings['password'] = password
elif not ssh_key_value and not password: # default to ssh
ssh_key_value = os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub')
if ssh_key_value:
protected_settings['ssh_key'] = read_content_if_is_file(ssh_key_value)
if no_wait:
return _update_linux_access_extension(cmd, vm_instance, resource_group_name,
protected_settings, no_wait)
poller = _update_linux_access_extension(cmd, vm_instance, resource_group_name,
protected_settings)
return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'setting user', 'done')(poller)
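# Minimal sketch of the defaulting above ('azureuser' is a hypothetical username): when neither a
# password nor an SSH key value is supplied, the access extension is configured with the current
# user's public key, i.e. roughly:
#
#   ssh_key_value = os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub')
#   protected_settings = {'username': 'azureuser',
#                         'ssh_key': read_content_if_is_file(ssh_key_value)}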
def _reset_windows_admin(cmd, vm_instance, resource_group_name, username, password, no_wait=False):
    '''Update the password of an existing user. Adding a new user is not supported.'''
client = _compute_client_factory(cmd.cli_ctx)
VirtualMachineExtension = cmd.get_models('VirtualMachineExtension')
publisher, version, auto_upgrade = _get_access_extension_upgrade_info(
vm_instance.resources, _WINDOWS_ACCESS_EXT)
# pylint: disable=no-member
instance_name = _get_extension_instance_name(vm_instance.instance_view,
publisher,
_WINDOWS_ACCESS_EXT,
_ACCESS_EXT_HANDLER_NAME)
ext = VirtualMachineExtension(location=vm_instance.location, # pylint: disable=no-member
publisher=publisher,
virtual_machine_extension_type=_WINDOWS_ACCESS_EXT,
protected_settings={'Password': password},
type_handler_version=version,
settings={'UserName': username},
auto_upgrade_minor_version=auto_upgrade)
if no_wait:
return sdk_no_wait(no_wait, client.virtual_machine_extensions.create_or_update,
resource_group_name, vm_instance.name, instance_name, ext)
poller = client.virtual_machine_extensions.create_or_update(resource_group_name,
vm_instance.name,
instance_name, ext)
return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'resetting admin', 'done')(poller)
def set_user(cmd, resource_group_name, vm_name, username, password=None, ssh_key_value=None,
no_wait=False):
vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView')
if _is_linux_os(vm):
return _set_linux_user(cmd, vm, resource_group_name, username, password, ssh_key_value, no_wait)
if ssh_key_value:
        raise CLIError('SSH key is not applicable on a Windows VM')
return _reset_windows_admin(cmd, vm, resource_group_name, username, password, no_wait)
def delete_user(cmd, resource_group_name, vm_name, username, no_wait=False):
vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView')
if not _is_linux_os(vm):
        raise CLIError('Deleting a user is not supported on a Windows VM')
if no_wait:
return _update_linux_access_extension(cmd, vm, resource_group_name,
{'remove_user': username}, no_wait)
poller = _update_linux_access_extension(cmd, vm, resource_group_name,
{'remove_user': username})
return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'deleting user', 'done')(poller)
def reset_linux_ssh(cmd, resource_group_name, vm_name, no_wait=False):
vm = get_vm(cmd, resource_group_name, vm_name, 'instanceView')
if not _is_linux_os(vm):
        raise CLIError('Resetting SSH is not supported on a Windows VM')
if no_wait:
return _update_linux_access_extension(cmd, vm, resource_group_name,
{'reset_ssh': True}, no_wait)
poller = _update_linux_access_extension(cmd, vm, resource_group_name,
{'reset_ssh': True})
return ExtensionUpdateLongRunningOperation(cmd.cli_ctx, 'resetting SSH', 'done')(poller)
# endregion
# region VirtualMachineScaleSets
def assign_vmss_identity(cmd, resource_group_name, vmss_name, assign_identity=None, identity_role='Contributor',
identity_role_id=None, identity_scope=None):
VirtualMachineScaleSetIdentity, UpgradeMode, ResourceIdentityType, VirtualMachineScaleSetUpdate = cmd.get_models(
'VirtualMachineScaleSetIdentity', 'UpgradeMode', 'ResourceIdentityType', 'VirtualMachineScaleSetUpdate')
IdentityUserAssignedIdentitiesValue = cmd.get_models('VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue')
from azure.cli.core.commands.arm import assign_identity as assign_identity_helper
client = _compute_client_factory(cmd.cli_ctx)
_, _, external_identities, enable_local_identity = _build_identities_info(assign_identity)
def getter():
return client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
def setter(vmss, external_identities=external_identities):
if vmss.identity and vmss.identity.type == ResourceIdentityType.system_assigned_user_assigned:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif vmss.identity and vmss.identity.type == ResourceIdentityType.system_assigned and external_identities:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif vmss.identity and vmss.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities:
identity_types = ResourceIdentityType.user_assigned
else:
identity_types = ResourceIdentityType.system_assigned
vmss.identity = VirtualMachineScaleSetIdentity(type=identity_types)
if external_identities:
vmss.identity.user_assigned_identities = {}
for identity in external_identities:
vmss.identity.user_assigned_identities[identity] = IdentityUserAssignedIdentitiesValue()
vmss_patch = VirtualMachineScaleSetUpdate()
vmss_patch.identity = vmss.identity
poller = client.virtual_machine_scale_sets.update(resource_group_name, vmss_name, vmss_patch)
return LongRunningOperation(cmd.cli_ctx)(poller)
assign_identity_helper(cmd.cli_ctx, getter, setter, identity_role=identity_role_id, identity_scope=identity_scope)
vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
if vmss.upgrade_policy.mode == UpgradeMode.manual:
logger.warning("With manual upgrade mode, you will need to run 'az vmss update-instances -g %s -n %s "
"--instance-ids *' to propagate the change", resource_group_name, vmss_name)
return _construct_identity_info(identity_scope, identity_role, vmss.identity.principal_id,
vmss.identity.user_assigned_identities)
# pylint: disable=too-many-locals, too-many-statements
def create_vmss(cmd, vmss_name, resource_group_name, image=None,
disable_overprovision=False, instance_count=2,
location=None, tags=None, upgrade_policy_mode='manual', validate=False,
admin_username=None, admin_password=None, authentication_type=None,
vm_sku=None, no_wait=False,
ssh_dest_key_path=None, ssh_key_value=None, generate_ssh_keys=False,
load_balancer=None, load_balancer_sku=None, application_gateway=None,
app_gateway_subnet_address_prefix=None,
app_gateway_sku='Standard_Large', app_gateway_capacity=10,
backend_pool_name=None, nat_pool_name=None, backend_port=None, health_probe=None,
public_ip_address=None, public_ip_address_allocation=None,
public_ip_address_dns_name=None, accelerated_networking=None,
public_ip_per_vm=False, vm_domain_name=None, dns_servers=None, nsg=None,
os_caching=None, data_caching=None,
storage_container_name='vhds', storage_sku=None,
os_type=None, os_disk_name=None,
use_unmanaged_disk=False, data_disk_sizes_gb=None, disk_info=None,
vnet_name=None, vnet_address_prefix='10.0.0.0/16',
subnet=None, subnet_address_prefix=None,
os_offer=None, os_publisher=None, os_sku=None, os_version=None,
load_balancer_type=None, app_gateway_type=None, vnet_type=None,
public_ip_address_type=None, storage_profile=None,
single_placement_group=None, custom_data=None, secrets=None, platform_fault_domain_count=None,
plan_name=None, plan_product=None, plan_publisher=None, plan_promotion_code=None, license_type=None,
assign_identity=None, identity_scope=None, identity_role='Contributor',
identity_role_id=None, zones=None, priority=None, eviction_policy=None,
application_security_groups=None, ultra_ssd_enabled=None, ephemeral_os_disk=None,
proximity_placement_group=None, aux_subscriptions=None, terminate_notification_time=None,
max_price=None, computer_name_prefix=None, orchestration_mode='ScaleSetVM', scale_in_policy=None,
os_disk_encryption_set=None, data_disk_encryption_sets=None, data_disk_iops=None, data_disk_mbps=None,
automatic_repairs_grace_period=None, specialized=None, os_disk_size_gb=None):
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.util import random_string, hash_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.vm._template_builder import (StorageProfile, build_vmss_resource,
build_vnet_resource, build_public_ip_resource,
build_load_balancer_resource,
build_vmss_storage_account_pool_resource,
build_application_gateway_resource,
build_msi_role_assignment, build_nsg_resource)
# Build up the ARM template
master_template = ArmTemplateBuilder()
scale_set_vm_str = 'ScaleSetVM'
vm_str = 'VM'
if orchestration_mode.lower() == scale_set_vm_str.lower():
from msrestazure.tools import resource_id, is_valid_resource_id
storage_sku = disk_info['os'].get('storageAccountType')
subscription_id = get_subscription_id(cmd.cli_ctx)
if os_disk_encryption_set is not None and not is_valid_resource_id(os_disk_encryption_set):
os_disk_encryption_set = resource_id(
subscription=subscription_id, resource_group=resource_group_name,
namespace='Microsoft.Compute', type='diskEncryptionSets', name=os_disk_encryption_set)
if data_disk_encryption_sets is None:
data_disk_encryption_sets = []
for i, des in enumerate(data_disk_encryption_sets):
if des is not None and not is_valid_resource_id(des):
data_disk_encryption_sets[i] = resource_id(
subscription=subscription_id, resource_group=resource_group_name,
namespace='Microsoft.Compute', type='diskEncryptionSets', name=des)
network_id_template = resource_id(
subscription=subscription_id, resource_group=resource_group_name,
namespace='Microsoft.Network')
vmss_id = resource_id(
subscription=subscription_id, resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachineScaleSets', name=vmss_name)
scrubbed_name = vmss_name.replace('-', '').lower()[:5]
naming_prefix = '{}{}'.format(scrubbed_name,
hash_string(vmss_id,
length=(9 - len(scrubbed_name)),
force_lower=True))
# determine final defaults and calculated values
tags = tags or {}
os_disk_name = os_disk_name or ('osdisk_{}'.format(hash_string(vmss_id, length=10))
if use_unmanaged_disk else None)
load_balancer = load_balancer or '{}LB'.format(vmss_name)
app_gateway = application_gateway or '{}AG'.format(vmss_name)
backend_pool_name = backend_pool_name or '{}BEPool'.format(load_balancer or application_gateway)
vmss_dependencies = []
# VNET will always be a dependency
if vnet_type == 'new':
vnet_name = vnet_name or '{}VNET'.format(vmss_name)
subnet = subnet or '{}Subnet'.format(vmss_name)
vmss_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name))
vnet = build_vnet_resource(
cmd, vnet_name, location, tags, vnet_address_prefix, subnet, subnet_address_prefix)
if app_gateway_type:
vnet['properties']['subnets'].append({
'name': 'appGwSubnet',
'properties': {
'addressPrefix': app_gateway_subnet_address_prefix
}
})
master_template.add_resource(vnet)
subnet_id = subnet if is_valid_resource_id(subnet) else \
'{}/virtualNetworks/{}/subnets/{}'.format(network_id_template, vnet_name, subnet)
gateway_subnet_id = ('{}/virtualNetworks/{}/subnets/appGwSubnet'.format(network_id_template, vnet_name)
if app_gateway_type == 'new' else None)
# public IP is used by either load balancer/application gateway
public_ip_address_id = None
if public_ip_address:
public_ip_address_id = (public_ip_address if is_valid_resource_id(public_ip_address)
else '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address))
def _get_public_ip_address_allocation(value, sku):
IPAllocationMethod = cmd.get_models('IPAllocationMethod', resource_type=ResourceType.MGMT_NETWORK)
if not value:
value = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
return value
# Handle load balancer creation
if load_balancer_type == 'new':
vmss_dependencies.append('Microsoft.Network/loadBalancers/{}'.format(load_balancer))
lb_dependencies = []
if vnet_type == 'new':
lb_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name))
if public_ip_address_type == 'new':
public_ip_address = public_ip_address or '{}PublicIP'.format(load_balancer)
lb_dependencies.append(
'Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
master_template.add_resource(build_public_ip_resource(
cmd, public_ip_address, location, tags,
_get_public_ip_address_allocation(public_ip_address_allocation, load_balancer_sku),
public_ip_address_dns_name, load_balancer_sku, zones))
public_ip_address_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
# calculate default names if not provided
nat_pool_name = nat_pool_name or '{}NatPool'.format(load_balancer)
if not backend_port:
backend_port = 3389 if os_type == 'windows' else 22
lb_resource = build_load_balancer_resource(
cmd, load_balancer, location, tags, backend_pool_name, nat_pool_name, backend_port,
'loadBalancerFrontEnd', public_ip_address_id, subnet_id, private_ip_address='',
private_ip_allocation='Dynamic', sku=load_balancer_sku, instance_count=instance_count,
disable_overprovision=disable_overprovision)
lb_resource['dependsOn'] = lb_dependencies
master_template.add_resource(lb_resource)
# Per https://docs.microsoft.com/azure/load-balancer/load-balancer-standard-overview#nsg
if load_balancer_sku and load_balancer_sku.lower() == 'standard' and nsg is None:
nsg_name = '{}NSG'.format(vmss_name)
master_template.add_resource(build_nsg_resource(
None, nsg_name, location, tags, 'rdp' if os_type.lower() == 'windows' else 'ssh'))
nsg = "[resourceId('Microsoft.Network/networkSecurityGroups', '{}')]".format(nsg_name)
vmss_dependencies.append('Microsoft.Network/networkSecurityGroups/{}'.format(nsg_name))
# Or handle application gateway creation
if app_gateway_type == 'new':
vmss_dependencies.append('Microsoft.Network/applicationGateways/{}'.format(app_gateway))
ag_dependencies = []
if vnet_type == 'new':
ag_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(vnet_name))
if public_ip_address_type == 'new':
public_ip_address = public_ip_address or '{}PublicIP'.format(app_gateway)
ag_dependencies.append(
'Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
master_template.add_resource(build_public_ip_resource(
cmd, public_ip_address, location, tags,
_get_public_ip_address_allocation(public_ip_address_allocation, None), public_ip_address_dns_name,
None, zones))
public_ip_address_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
# calculate default names if not provided
backend_port = backend_port or 80
ag_resource = build_application_gateway_resource(
cmd, app_gateway, location, tags, backend_pool_name, backend_port, 'appGwFrontendIP',
public_ip_address_id, subnet_id, gateway_subnet_id, private_ip_address='',
private_ip_allocation='Dynamic', sku=app_gateway_sku, capacity=app_gateway_capacity)
ag_resource['dependsOn'] = ag_dependencies
master_template.add_variable(
'appGwID',
"[resourceId('Microsoft.Network/applicationGateways', '{}')]".format(app_gateway))
master_template.add_resource(ag_resource)
# create storage accounts if needed for unmanaged disk storage
if storage_profile == StorageProfile.SAPirImage:
master_template.add_resource(build_vmss_storage_account_pool_resource(
cmd, 'storageLoop', location, tags, storage_sku))
master_template.add_variable('storageAccountNames', [
'{}{}'.format(naming_prefix, x) for x in range(5)
])
master_template.add_variable('vhdContainers', [
"[concat('https://', variables('storageAccountNames')[{}], '.blob.{}/{}')]".format(
x, cmd.cli_ctx.cloud.suffixes.storage_endpoint, storage_container_name) for x in range(5)
])
vmss_dependencies.append('storageLoop')
backend_address_pool_id = None
inbound_nat_pool_id = None
if load_balancer_type or app_gateway_type:
network_balancer = load_balancer if load_balancer_type else app_gateway
balancer_type = 'loadBalancers' if load_balancer_type else 'applicationGateways'
if is_valid_resource_id(network_balancer):
# backend address pool needed by load balancer or app gateway
backend_address_pool_id = '{}/backendAddressPools/{}'.format(network_balancer, backend_pool_name)
if nat_pool_name:
inbound_nat_pool_id = '{}/inboundNatPools/{}'.format(network_balancer, nat_pool_name)
else:
# backend address pool needed by load balancer or app gateway
backend_address_pool_id = '{}/{}/{}/backendAddressPools/{}'.format(
network_id_template, balancer_type, network_balancer, backend_pool_name)
if nat_pool_name:
inbound_nat_pool_id = '{}/{}/{}/inboundNatPools/{}'.format(
network_id_template, balancer_type, network_balancer, nat_pool_name)
if health_probe and not is_valid_resource_id(health_probe):
health_probe = '{}/loadBalancers/{}/probes/{}'.format(network_id_template, load_balancer, health_probe)
ip_config_name = '{}IPConfig'.format(naming_prefix)
nic_name = '{}Nic'.format(naming_prefix)
if custom_data:
custom_data = read_content_if_is_file(custom_data)
if secrets:
secrets = _merge_secrets([validate_file_or_dict(secret) for secret in secrets])
if computer_name_prefix is not None and isinstance(computer_name_prefix, str):
naming_prefix = computer_name_prefix
if os_version and os_version != 'latest':
            logger.warning('You are deploying a VMSS pinned to a specific image version from Azure Marketplace. '
'Consider using "latest" as the image version.')
vmss_resource = build_vmss_resource(
cmd=cmd, name=vmss_name, naming_prefix=naming_prefix, location=location, tags=tags,
overprovision=not disable_overprovision, upgrade_policy_mode=upgrade_policy_mode, vm_sku=vm_sku,
instance_count=instance_count, ip_config_name=ip_config_name, nic_name=nic_name, subnet_id=subnet_id,
public_ip_per_vm=public_ip_per_vm, vm_domain_name=vm_domain_name, dns_servers=dns_servers, nsg=nsg,
accelerated_networking=accelerated_networking, admin_username=admin_username,
authentication_type=authentication_type, storage_profile=storage_profile, os_disk_name=os_disk_name,
disk_info=disk_info, os_type=os_type, image=image, admin_password=admin_password,
ssh_key_values=ssh_key_value, ssh_key_path=ssh_dest_key_path, os_publisher=os_publisher, os_offer=os_offer,
os_sku=os_sku, os_version=os_version, backend_address_pool_id=backend_address_pool_id,
inbound_nat_pool_id=inbound_nat_pool_id, health_probe=health_probe,
single_placement_group=single_placement_group, platform_fault_domain_count=platform_fault_domain_count,
custom_data=custom_data, secrets=secrets, license_type=license_type, zones=zones, priority=priority,
eviction_policy=eviction_policy, application_security_groups=application_security_groups,
ultra_ssd_enabled=ultra_ssd_enabled, proximity_placement_group=proximity_placement_group,
terminate_notification_time=terminate_notification_time, max_price=max_price,
scale_in_policy=scale_in_policy, os_disk_encryption_set=os_disk_encryption_set,
data_disk_encryption_sets=data_disk_encryption_sets, data_disk_iops=data_disk_iops,
data_disk_mbps=data_disk_mbps, automatic_repairs_grace_period=automatic_repairs_grace_period,
specialized=specialized, os_disk_size_gb=os_disk_size_gb)
vmss_resource['dependsOn'] = vmss_dependencies
if plan_name:
vmss_resource['plan'] = {
'name': plan_name,
'publisher': plan_publisher,
'product': plan_product,
'promotionCode': plan_promotion_code
}
enable_local_identity = None
if assign_identity is not None:
vmss_resource['identity'], _, _, enable_local_identity = _build_identities_info(
assign_identity)
if identity_scope:
role_assignment_guid = str(_gen_guid())
master_template.add_resource(build_msi_role_assignment(vmss_name, vmss_id, identity_role_id,
role_assignment_guid, identity_scope, False))
elif orchestration_mode.lower() == vm_str.lower():
if platform_fault_domain_count is None:
raise CLIError("usage error: --platform-fault-domain-count is required in VM mode")
vmss_resource = {
'type': 'Microsoft.Compute/virtualMachineScaleSets',
'name': vmss_name,
'location': location,
'tags': tags,
'apiVersion': cmd.get_api_version(ResourceType.MGMT_COMPUTE, operation_group='virtual_machine_scale_sets'),
'properties': {
'singlePlacementGroup': True,
'provisioningState': 0,
'platformFaultDomainCount': platform_fault_domain_count
}
}
if zones is not None:
vmss_resource['zones'] = zones
else:
        raise CLIError('usage error: --orchestration-mode (ScaleSetVM | VM)')
master_template.add_resource(vmss_resource)
master_template.add_output('VMSS', vmss_name, 'Microsoft.Compute', 'virtualMachineScaleSets',
output_type='object')
if orchestration_mode.lower() == scale_set_vm_str.lower() and admin_password:
master_template.add_secure_parameter('adminPassword', admin_password)
template = master_template.build()
parameters = master_template.build_parameters()
# deploy ARM template
deployment_name = 'vmss_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
aux_subscriptions=aux_subscriptions).deployments
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
if validate:
from azure.cli.command_modules.vm._vm_utils import log_pprint_template
log_pprint_template(template)
log_pprint_template(parameters)
return sdk_no_wait(no_wait, client.validate, resource_group_name, deployment_name, properties)
# creates the VMSS deployment
deployment_result = DeploymentOutputLongRunningOperation(cmd.cli_ctx)(
sdk_no_wait(no_wait, client.create_or_update, resource_group_name, deployment_name, properties))
if orchestration_mode.lower() == scale_set_vm_str.lower() and assign_identity is not None:
vmss_info = get_vmss(cmd, resource_group_name, vmss_name)
if enable_local_identity and not identity_scope:
_show_missing_access_warning(resource_group_name, vmss_name, 'vmss')
deployment_result['vmss']['identity'] = _construct_identity_info(identity_scope, identity_role,
vmss_info.identity.principal_id,
vmss_info.identity.user_assigned_identities)
return deployment_result
def _build_identities_info(identities):
from ._vm_utils import MSI_LOCAL_ID
identities = identities or []
identity_types = []
if not identities or MSI_LOCAL_ID in identities:
identity_types.append('SystemAssigned')
external_identities = [x for x in identities if x != MSI_LOCAL_ID]
if external_identities:
identity_types.append('UserAssigned')
identity_types = ','.join(identity_types)
info = {'type': identity_types}
if external_identities:
info['userAssignedIdentities'] = {e: {} for e in external_identities}
return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
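# Hedged example of the identity classification above (the user-assigned identity id is made up;
# MSI_LOCAL_ID is the CLI's marker for the system-assigned identity):
#
#   _build_identities_info([MSI_LOCAL_ID,
#                           '/subscriptions/<sub>/resourceGroups/rg/providers/'
#                           'Microsoft.ManagedIdentity/userAssignedIdentities/myIdentity'])
#   # -> ({'type': 'SystemAssigned,UserAssigned',
#   #      'userAssignedIdentities': {'<identity id>': {}}},
#   #     'SystemAssigned,UserAssigned', ['<identity id>'], True)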
def deallocate_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False):
client = _compute_client_factory(cmd.cli_ctx)
if instance_ids and len(instance_ids) == 1:
return sdk_no_wait(no_wait, client.virtual_machine_scale_set_vms.deallocate,
resource_group_name, vm_scale_set_name, instance_ids[0])
return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.deallocate,
resource_group_name, vm_scale_set_name, instance_ids=instance_ids)
def delete_vmss_instances(cmd, resource_group_name, vm_scale_set_name, instance_ids, no_wait=False):
client = _compute_client_factory(cmd.cli_ctx)
if len(instance_ids) == 1:
return sdk_no_wait(no_wait, client.virtual_machine_scale_set_vms.delete,
resource_group_name, vm_scale_set_name, instance_ids[0])
return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.delete_instances,
resource_group_name, vm_scale_set_name, instance_ids)
def get_vmss(cmd, resource_group_name, name, instance_id=None):
client = _compute_client_factory(cmd.cli_ctx)
if instance_id is not None:
return client.virtual_machine_scale_set_vms.get(resource_group_name, name, instance_id)
return client.virtual_machine_scale_sets.get(resource_group_name, name)
def get_vmss_instance_view(cmd, resource_group_name, vm_scale_set_name, instance_id=None):
client = _compute_client_factory(cmd.cli_ctx)
if instance_id:
if instance_id == '*':
return [x.instance_view for x in (client.virtual_machine_scale_set_vms.list(
resource_group_name, vm_scale_set_name, select='instanceView', expand='instanceView'))]
return client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name, vm_scale_set_name,
instance_id)
return client.virtual_machine_scale_sets.get_instance_view(resource_group_name, vm_scale_set_name)
def list_vmss(cmd, resource_group_name=None):
client = _compute_client_factory(cmd.cli_ctx)
if resource_group_name:
return client.virtual_machine_scale_sets.list(resource_group_name)
return client.virtual_machine_scale_sets.list_all()
def list_vmss_instance_connection_info(cmd, resource_group_name, vm_scale_set_name):
from msrestazure.tools import parse_resource_id
client = _compute_client_factory(cmd.cli_ctx)
vmss = client.virtual_machine_scale_sets.get(resource_group_name, vm_scale_set_name)
# find the load balancer
nic_configs = vmss.virtual_machine_profile.network_profile.network_interface_configurations
primary_nic_config = next((n for n in nic_configs if n.primary), None)
if primary_nic_config is None:
        raise CLIError('could not find a primary NIC, which is needed to look up the load balancer')
ip_configs = primary_nic_config.ip_configurations
ip_config = next((ip for ip in ip_configs if ip.load_balancer_inbound_nat_pools), None)
if not ip_config:
raise CLIError('No load balancer exists to retrieve public IP address')
res_id = ip_config.load_balancer_inbound_nat_pools[0].id
lb_info = parse_resource_id(res_id)
lb_name = lb_info['name']
lb_rg = lb_info['resource_group']
# get public ip
network_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK)
lb = network_client.load_balancers.get(lb_rg, lb_name)
if getattr(lb.frontend_ip_configurations[0], 'public_ip_address', None):
res_id = lb.frontend_ip_configurations[0].public_ip_address.id
public_ip_info = parse_resource_id(res_id)
public_ip_name = public_ip_info['name']
public_ip_rg = public_ip_info['resource_group']
public_ip = network_client.public_ip_addresses.get(public_ip_rg, public_ip_name)
public_ip_address = public_ip.ip_address
# loop around inboundnatrule
instance_addresses = {}
for rule in lb.inbound_nat_rules:
instance_id = parse_resource_id(rule.backend_ip_configuration.id)['child_name_1']
instance_addresses['instance ' + instance_id] = '{}:{}'.format(public_ip_address,
rule.frontend_port)
return instance_addresses
    raise CLIError('The VM scale set uses an internal load balancer, hence no connection information is available')
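# Illustrative sketch of the result shape above (address and ports are placeholders): for a scale
# set fronted by a public load balancer with inbound NAT rules, the function returns a map of
# instance ids to 'publicIP:frontendPort' strings, e.g.
#
#   {'instance 1': '40.80.120.10:50001', 'instance 3': '40.80.120.10:50003'}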
def list_vmss_instance_public_ips(cmd, resource_group_name, vm_scale_set_name):
result = cf_public_ip_addresses(cmd.cli_ctx).list_virtual_machine_scale_set_public_ip_addresses(
resource_group_name, vm_scale_set_name)
# filter away over-provisioned instances which are deleted after 'create/update' returns
return [r for r in result if r.ip_address]
def reimage_vmss(cmd, resource_group_name, vm_scale_set_name, instance_id=None, no_wait=False):
client = _compute_client_factory(cmd.cli_ctx)
if instance_id:
return sdk_no_wait(no_wait, client.virtual_machine_scale_set_vms.reimage,
resource_group_name, vm_scale_set_name, instance_id)
return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.reimage, resource_group_name, vm_scale_set_name)
def restart_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False):
client = _compute_client_factory(cmd.cli_ctx)
if instance_ids and len(instance_ids) == 1:
return sdk_no_wait(no_wait, client.virtual_machine_scale_set_vms.restart,
resource_group_name, vm_scale_set_name, instance_ids[0])
return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.restart, resource_group_name, vm_scale_set_name,
instance_ids=instance_ids)
# pylint: disable=inconsistent-return-statements
def scale_vmss(cmd, resource_group_name, vm_scale_set_name, new_capacity, no_wait=False):
VirtualMachineScaleSet = cmd.get_models('VirtualMachineScaleSet')
client = _compute_client_factory(cmd.cli_ctx)
vmss = client.virtual_machine_scale_sets.get(resource_group_name, vm_scale_set_name)
# pylint: disable=no-member
if vmss.sku.capacity == new_capacity:
return
vmss.sku.capacity = new_capacity
vmss_new = VirtualMachineScaleSet(location=vmss.location, sku=vmss.sku)
return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.create_or_update,
resource_group_name, vm_scale_set_name, vmss_new)
def start_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False):
client = _compute_client_factory(cmd.cli_ctx)
if instance_ids and len(instance_ids) == 1:
return sdk_no_wait(no_wait, client.virtual_machine_scale_set_vms.start,
resource_group_name, vm_scale_set_name, instance_ids[0])
return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.start,
resource_group_name, vm_scale_set_name, instance_ids=instance_ids)
def stop_vmss(cmd, resource_group_name, vm_scale_set_name, instance_ids=None, no_wait=False, skip_shutdown=False):
client = _compute_client_factory(cmd.cli_ctx)
if instance_ids and len(instance_ids) == 1:
return sdk_no_wait(no_wait, client.virtual_machine_scale_set_vms.power_off, resource_group_name,
vm_scale_set_name, instance_id=instance_ids[0], skip_shutdown=skip_shutdown)
return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.power_off, resource_group_name, vm_scale_set_name,
instance_ids=instance_ids, skip_shutdown=skip_shutdown)
def update_vmss_instances(cmd, resource_group_name, vm_scale_set_name, instance_ids, no_wait=False):
client = _compute_client_factory(cmd.cli_ctx)
return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.update_instances,
resource_group_name, vm_scale_set_name, instance_ids)
def update_vmss(cmd, resource_group_name, name, license_type=None, no_wait=False, instance_id=None,
protect_from_scale_in=None, protect_from_scale_set_actions=None,
enable_terminate_notification=None, terminate_notification_time=None, ultra_ssd_enabled=None,
scale_in_policy=None, priority=None, max_price=None, proximity_placement_group=None,
enable_automatic_repairs=None, automatic_repairs_grace_period=None, **kwargs):
vmss = kwargs['parameters']
client = _compute_client_factory(cmd.cli_ctx)
VMProtectionPolicy = cmd.get_models('VirtualMachineScaleSetVMProtectionPolicy')
# handle vmss instance update
if instance_id is not None:
if license_type is not None:
vmss.license_type = license_type
if not vmss.protection_policy:
vmss.protection_policy = VMProtectionPolicy()
if protect_from_scale_in is not None:
vmss.protection_policy.protect_from_scale_in = protect_from_scale_in
if protect_from_scale_set_actions is not None:
vmss.protection_policy.protect_from_scale_set_actions = protect_from_scale_set_actions
return sdk_no_wait(no_wait, client.virtual_machine_scale_set_vms.update,
resource_group_name, name, instance_id, **kwargs)
# else handle vmss update
if license_type is not None:
vmss.virtual_machine_profile.license_type = license_type
if enable_terminate_notification is not None or terminate_notification_time is not None:
if vmss.virtual_machine_profile.scheduled_events_profile is None:
ScheduledEventsProfile = cmd.get_models('ScheduledEventsProfile')
vmss.virtual_machine_profile.scheduled_events_profile = ScheduledEventsProfile()
TerminateNotificationProfile = cmd.get_models('TerminateNotificationProfile')
vmss.virtual_machine_profile.scheduled_events_profile.terminate_notification_profile =\
TerminateNotificationProfile(not_before_timeout=terminate_notification_time,
enable=enable_terminate_notification)
if enable_automatic_repairs is not None or automatic_repairs_grace_period is not None:
AutomaticRepairsPolicy = cmd.get_models('AutomaticRepairsPolicy')
vmss.automatic_repairs_policy = \
AutomaticRepairsPolicy(enabled="true", grace_period=automatic_repairs_grace_period)
if ultra_ssd_enabled is not None:
if cmd.supported_api_version(min_api='2019-03-01', operation_group='virtual_machine_scale_sets'):
if vmss.additional_capabilities is None:
AdditionalCapabilities = cmd.get_models('AdditionalCapabilities')
vmss.additional_capabilities = AdditionalCapabilities(ultra_ssd_enabled=ultra_ssd_enabled)
else:
vmss.additional_capabilities.ultra_ssd_enabled = ultra_ssd_enabled
else:
if vmss.virtual_machine_profile.additional_capabilities is None:
AdditionalCapabilities = cmd.get_models('AdditionalCapabilities')
vmss.virtual_machine_profile.additional_capabilities = AdditionalCapabilities(
ultra_ssd_enabled=ultra_ssd_enabled)
else:
vmss.virtual_machine_profile.additional_capabilities.ultra_ssd_enabled = ultra_ssd_enabled
if scale_in_policy is not None:
ScaleInPolicy = cmd.get_models('ScaleInPolicy')
vmss.scale_in_policy = ScaleInPolicy(rules=scale_in_policy)
if priority is not None:
vmss.virtual_machine_profile.priority = priority
if max_price is not None:
if vmss.virtual_machine_profile.billing_profile is None:
BillingProfile = cmd.get_models('BillingProfile')
vmss.virtual_machine_profile.billing_profile = BillingProfile(max_price=max_price)
else:
vmss.virtual_machine_profile.billing_profile.max_price = max_price
if proximity_placement_group is not None:
vmss.proximity_placement_group = {'id': proximity_placement_group}
return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.create_or_update,
resource_group_name, name, **kwargs)
# endregion
# region VirtualMachineScaleSets Diagnostics
def set_vmss_diagnostics_extension(
cmd, resource_group_name, vmss_name, settings, protected_settings=None, version=None,
no_auto_upgrade=False):
client = _compute_client_factory(cmd.cli_ctx)
vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
# pylint: disable=no-member
is_linux_os = _is_linux_os(vmss.virtual_machine_profile)
vm_extension_name = _LINUX_DIAG_EXT if is_linux_os else _WINDOWS_DIAG_EXT
if is_linux_os and vmss.virtual_machine_profile.extension_profile: # check incompatibles
exts = vmss.virtual_machine_profile.extension_profile.extensions or []
major_ver = extension_mappings[_LINUX_DIAG_EXT]['version'].split('.')[0]
        # For VMSS, we don't do auto-removal like we do for VMs, because there is no reliable API
        # to wait for the removal to complete before we can install the newer one
if next((e for e in exts if e.name == _LINUX_DIAG_EXT and
not e.type_handler_version.startswith(major_ver + '.')), None):
delete_cmd = 'az vmss extension delete -g {} --vmss-name {} -n {}'.format(
resource_group_name, vmss_name, vm_extension_name)
            raise CLIError("There is an incompatible version of the diagnostics extension installed. "
                           "Please remove it by running '{}' and retry. 'az vmss update-instances' may also "
                           "be needed if the scale set uses a manual upgrade policy".format(delete_cmd))
poller = set_vmss_extension(cmd, resource_group_name, vmss_name, vm_extension_name,
extension_mappings[vm_extension_name]['publisher'],
version or extension_mappings[vm_extension_name]['version'],
settings,
protected_settings,
no_auto_upgrade)
result = LongRunningOperation(cmd.cli_ctx)(poller)
UpgradeMode = cmd.get_models('UpgradeMode')
if vmss.upgrade_policy.mode == UpgradeMode.manual:
poller2 = update_vmss_instances(cmd, resource_group_name, vmss_name, ['*'])
LongRunningOperation(cmd.cli_ctx)(poller2)
return result
# endregion
# region VirtualMachineScaleSets Disks (Managed)
def attach_managed_data_disk_to_vmss(cmd, resource_group_name, vmss_name, size_gb=None, instance_id=None, lun=None,
caching=None, disk=None, sku=None):
def _init_data_disk(storage_profile, lun, existing_disk=None):
data_disks = storage_profile.data_disks or []
if lun is None:
lun = _get_disk_lun(data_disks)
if existing_disk is None:
data_disk = DataDisk(lun=lun, create_option=DiskCreateOptionTypes.empty, disk_size_gb=size_gb,
caching=caching, managed_disk=ManagedDiskParameters(storage_account_type=sku))
else:
data_disk = DataDisk(lun=lun, create_option=DiskCreateOptionTypes.attach, caching=caching,
managed_disk=ManagedDiskParameters(id=existing_disk, storage_account_type=sku))
data_disks.append(data_disk)
storage_profile.data_disks = data_disks
DiskCreateOptionTypes, ManagedDiskParameters = cmd.get_models(
'DiskCreateOptionTypes', 'ManagedDiskParameters')
if disk is None:
DataDisk = cmd.get_models('VirtualMachineScaleSetDataDisk')
else:
DataDisk = cmd.get_models('DataDisk')
client = _compute_client_factory(cmd.cli_ctx)
if instance_id is None:
vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
# pylint: disable=no-member
_init_data_disk(vmss.virtual_machine_profile.storage_profile, lun)
return client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, vmss)
vmss_vm = client.virtual_machine_scale_set_vms.get(resource_group_name, vmss_name, instance_id)
_init_data_disk(vmss_vm.storage_profile, lun, disk)
return client.virtual_machine_scale_set_vms.update(resource_group_name, vmss_name, instance_id, vmss_vm)
def detach_disk_from_vmss(cmd, resource_group_name, vmss_name, lun, instance_id=None):
client = _compute_client_factory(cmd.cli_ctx)
if instance_id is None:
vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
# pylint: disable=no-member
data_disks = vmss.virtual_machine_profile.storage_profile.data_disks
else:
vmss_vm = client.virtual_machine_scale_set_vms.get(resource_group_name, vmss_name, instance_id)
data_disks = vmss_vm.storage_profile.data_disks
if not data_disks:
raise CLIError("Data disk doesn't exist")
leftovers = [d for d in data_disks if d.lun != lun]
if len(data_disks) == len(leftovers):
raise CLIError("Could not find the data disk with lun '{}'".format(lun))
if instance_id is None:
vmss.virtual_machine_profile.storage_profile.data_disks = leftovers
return client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, vmss)
vmss_vm.storage_profile.data_disks = leftovers
return client.virtual_machine_scale_set_vms.update(resource_group_name, vmss_name, instance_id, vmss_vm)
# endregion
# region VirtualMachineScaleSets Extensions
def delete_vmss_extension(cmd, resource_group_name, vmss_name, extension_name):
client = _compute_client_factory(cmd.cli_ctx)
vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
# pylint: disable=no-member
if not vmss.virtual_machine_profile.extension_profile:
raise CLIError('Scale set has no extensions to delete')
keep_list = [e for e in vmss.virtual_machine_profile.extension_profile.extensions
if e.name != extension_name]
if len(keep_list) == len(vmss.virtual_machine_profile.extension_profile.extensions):
raise CLIError('Extension {} not found'.format(extension_name))
vmss.virtual_machine_profile.extension_profile.extensions = keep_list
return client.virtual_machine_scale_sets.create_or_update(resource_group_name, vmss_name, vmss)
# pylint: disable=inconsistent-return-statements
def get_vmss_extension(cmd, resource_group_name, vmss_name, extension_name):
client = _compute_client_factory(cmd.cli_ctx)
vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
# pylint: disable=no-member
if not vmss.virtual_machine_profile.extension_profile:
return
return next((e for e in vmss.virtual_machine_profile.extension_profile.extensions
if e.name == extension_name), None)
def list_vmss_extensions(cmd, resource_group_name, vmss_name):
client = _compute_client_factory(cmd.cli_ctx)
vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
# pylint: disable=no-member
if vmss.virtual_machine_profile and vmss.virtual_machine_profile.extension_profile:
return vmss.virtual_machine_profile.extension_profile.extensions
return None
def set_vmss_extension(cmd, resource_group_name, vmss_name, extension_name, publisher, version=None,
settings=None, protected_settings=None, no_auto_upgrade=False, force_update=False,
no_wait=False, extension_instance_name=None, provision_after_extensions=None):
if not extension_instance_name:
extension_instance_name = extension_name
client = _compute_client_factory(cmd.cli_ctx)
vmss = client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
VirtualMachineScaleSetExtension, VirtualMachineScaleSetExtensionProfile = cmd.get_models(
'VirtualMachineScaleSetExtension', 'VirtualMachineScaleSetExtensionProfile')
# pylint: disable=no-member
version = _normalize_extension_version(cmd.cli_ctx, publisher, extension_name, version, vmss.location)
extension_profile = vmss.virtual_machine_profile.extension_profile
if extension_profile:
extensions = extension_profile.extensions
if extensions:
extension_profile.extensions = [x for x in extensions if
x.type1.lower() != extension_name.lower() or x.publisher.lower() != publisher.lower()] # pylint: disable=line-too-long
ext = VirtualMachineScaleSetExtension(name=extension_instance_name,
publisher=publisher,
type1=extension_name,
protected_settings=protected_settings,
type_handler_version=version,
settings=settings,
auto_upgrade_minor_version=(not no_auto_upgrade),
provision_after_extensions=provision_after_extensions)
if force_update:
ext.force_update_tag = str(_gen_guid())
if not vmss.virtual_machine_profile.extension_profile:
vmss.virtual_machine_profile.extension_profile = VirtualMachineScaleSetExtensionProfile(extensions=[])
vmss.virtual_machine_profile.extension_profile.extensions.append(ext)
return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.create_or_update,
resource_group_name, vmss_name, vmss)
def set_orchestration_service_state(cmd, resource_group_name, vm_scale_set_name, service_name, action, no_wait=False):
    # Currently service_name has only one available value, "AutomaticRepairs". The SDK does not accept
    # service_name; instead it assigns "AutomaticRepairs" in its own logic. As more service names may be
    # supported in the future, we define service_name as a required parameter here to avoid introducing
    # a breaking change later.
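    # Illustrative (hypothetical) invocation of this handler from the CLI, assuming it is wired up
    # as `az vmss set-orchestration-service-state`; the exact command name is an assumption here:
    #   az vmss set-orchestration-service-state -g MyGroup --vm-scale-set-name MyVmss \
    #       --service-name AutomaticRepairs --action Suspend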
client = _compute_client_factory(cmd.cli_ctx)
return sdk_no_wait(no_wait, client.virtual_machine_scale_sets.set_orchestration_service_state,
resource_group_name, vm_scale_set_name, action)
# endregion
# region VirtualMachineScaleSets RunCommand
def vmss_run_command_invoke(cmd, resource_group_name, vmss_name, command_id, instance_id, scripts=None, parameters=None): # pylint: disable=line-too-long
return run_command_invoke(cmd, resource_group_name, vmss_name, command_id, scripts, parameters, instance_id)
# endregion
# region VirtualMachineScaleSets Identity
def remove_vmss_identity(cmd, resource_group_name, vmss_name, identities=None):
client = _compute_client_factory(cmd.cli_ctx)
def _get_vmss(_, resource_group_name, vmss_name):
return client.virtual_machine_scale_sets.get(resource_group_name, vmss_name)
def _set_vmss(resource_group_name, name, vmss_instance):
VirtualMachineScaleSetUpdate = cmd.get_models('VirtualMachineScaleSetUpdate',
operation_group='virtual_machine_scale_sets')
vmss_update = VirtualMachineScaleSetUpdate(identity=vmss_instance.identity)
return client.virtual_machine_scale_sets.update(resource_group_name, vmss_name, vmss_update)
if identities is None:
from ._vm_utils import MSI_LOCAL_ID
identities = [MSI_LOCAL_ID]
return _remove_identities(cmd, resource_group_name, vmss_name, identities,
_get_vmss,
_set_vmss)
# endregion
# region image galleries
def list_image_galleries(cmd, resource_group_name=None):
client = _compute_client_factory(cmd.cli_ctx)
if resource_group_name:
return client.galleries.list_by_resource_group(resource_group_name)
return client.galleries.list()
def create_image_gallery(cmd, resource_group_name, gallery_name, description=None,
location=None, no_wait=False, tags=None):
client = _compute_client_factory(cmd.cli_ctx)
Gallery = cmd.get_models('Gallery')
location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
gallery = Gallery(description=description, location=location, tags=(tags or {}))
client = _compute_client_factory(cmd.cli_ctx)
return sdk_no_wait(no_wait, client.galleries.create_or_update, resource_group_name, gallery_name, gallery)
def create_gallery_image(cmd, resource_group_name, gallery_name, gallery_image_name, os_type, publisher, offer, sku,
os_state='Generalized', end_of_life_date=None, privacy_statement_uri=None,
release_note_uri=None, eula=None, description=None, location=None,
minimum_cpu_core=None, maximum_cpu_core=None, minimum_memory=None, maximum_memory=None,
disallowed_disk_types=None, plan_name=None, plan_publisher=None, plan_product=None, tags=None,
hyper_v_generation='V1'):
# pylint: disable=line-too-long
GalleryImage, GalleryImageIdentifier, RecommendedMachineConfiguration, ResourceRange, Disallowed, ImagePurchasePlan = cmd.get_models(
'GalleryImage', 'GalleryImageIdentifier', 'RecommendedMachineConfiguration', 'ResourceRange', 'Disallowed', 'ImagePurchasePlan')
client = _compute_client_factory(cmd.cli_ctx)
location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
end_of_life_date = fix_gallery_image_date_info(end_of_life_date)
recommendation = None
if any([minimum_cpu_core, maximum_cpu_core, minimum_memory, maximum_memory]):
cpu_recommendation, memory_recommendation = None, None
if any([minimum_cpu_core, maximum_cpu_core]):
cpu_recommendation = ResourceRange(min=minimum_cpu_core, max=maximum_cpu_core)
if any([minimum_memory, maximum_memory]):
memory_recommendation = ResourceRange(min=minimum_memory, max=maximum_memory)
recommendation = RecommendedMachineConfiguration(v_cp_us=cpu_recommendation, memory=memory_recommendation)
purchase_plan = None
if any([plan_name, plan_publisher, plan_product]):
purchase_plan = ImagePurchasePlan(name=plan_name, publisher=plan_publisher, product=plan_product)
image = GalleryImage(identifier=GalleryImageIdentifier(publisher=publisher, offer=offer, sku=sku),
os_type=os_type, os_state=os_state, end_of_life_date=end_of_life_date,
recommended=recommendation, disallowed=Disallowed(disk_types=disallowed_disk_types),
purchase_plan=purchase_plan, location=location, eula=eula, tags=(tags or {}),
hyper_vgeneration=hyper_v_generation)
return client.gallery_images.create_or_update(resource_group_name, gallery_name, gallery_image_name, image)
def create_image_version(cmd, resource_group_name, gallery_name, gallery_image_name, gallery_image_version,
location=None, target_regions=None, storage_account_type=None,
end_of_life_date=None, exclude_from_latest=None, replica_count=None, tags=None,
os_snapshot=None, data_snapshots=None, managed_image=None, data_snapshot_luns=None,
target_region_encryption=None):
from msrestazure.tools import resource_id, is_valid_resource_id
ImageVersionPublishingProfile, GalleryArtifactSource, ManagedArtifact, ImageVersion, TargetRegion = cmd.get_models(
'GalleryImageVersionPublishingProfile', 'GalleryArtifactSource', 'ManagedArtifact', 'GalleryImageVersion',
'TargetRegion')
client = _compute_client_factory(cmd.cli_ctx)
location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
end_of_life_date = fix_gallery_image_date_info(end_of_life_date)
if managed_image and not is_valid_resource_id(managed_image):
managed_image = resource_id(subscription=client.config.subscription_id, resource_group=resource_group_name,
namespace='Microsoft.Compute', type='images', name=managed_image)
if os_snapshot and not is_valid_resource_id(os_snapshot):
os_snapshot = resource_id(subscription=client.config.subscription_id, resource_group=resource_group_name,
namespace='Microsoft.Compute', type='snapshots', name=os_snapshot)
if data_snapshots:
for i, s in enumerate(data_snapshots):
if not is_valid_resource_id(data_snapshots[i]):
data_snapshots[i] = resource_id(
subscription=client.config.subscription_id, resource_group=resource_group_name,
namespace='Microsoft.Compute', type='snapshots', name=s)
source = GalleryArtifactSource(managed_image=ManagedArtifact(id=managed_image))
profile = ImageVersionPublishingProfile(exclude_from_latest=exclude_from_latest, end_of_life_date=end_of_life_date,
target_regions=target_regions or [TargetRegion(name=location)],
source=source, replica_count=replica_count,
storage_account_type=storage_account_type)
if cmd.supported_api_version(min_api='2019-07-01', operation_group='gallery_image_versions'):
if managed_image is None and os_snapshot is None:
raise CLIError('usage error: Please provide --managed-image or --os-snapshot')
GalleryImageVersionStorageProfile = cmd.get_models('GalleryImageVersionStorageProfile')
GalleryArtifactVersionSource = cmd.get_models('GalleryArtifactVersionSource')
GalleryOSDiskImage = cmd.get_models('GalleryOSDiskImage')
GalleryDataDiskImage = cmd.get_models('GalleryDataDiskImage')
source = os_disk_image = data_disk_images = None
if managed_image is not None:
source = GalleryArtifactVersionSource(id=managed_image)
if os_snapshot is not None:
os_disk_image = GalleryOSDiskImage(source=GalleryArtifactVersionSource(id=os_snapshot))
if data_snapshot_luns and not data_snapshots:
raise CLIError('usage error: --data-snapshot-luns must be used together with --data-snapshots')
if data_snapshots:
if data_snapshot_luns and len(data_snapshots) != len(data_snapshot_luns):
raise CLIError('usage error: Length of --data-snapshots and --data-snapshot-luns should be equal.')
if not data_snapshot_luns:
data_snapshot_luns = [i for i in range(len(data_snapshots))]
data_disk_images = []
for i, s in enumerate(data_snapshots):
data_disk_images.append(GalleryDataDiskImage(source=GalleryArtifactVersionSource(id=s),
lun=data_snapshot_luns[i]))
storage_profile = GalleryImageVersionStorageProfile(source=source, os_disk_image=os_disk_image,
data_disk_images=data_disk_images)
image_version = ImageVersion(publishing_profile=profile, location=location, tags=(tags or {}),
storage_profile=storage_profile)
else:
if managed_image is None:
raise CLIError('usage error: Please provide --managed-image')
image_version = ImageVersion(publishing_profile=profile, location=location, tags=(tags or {}))
return client.gallery_image_versions.create_or_update(resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
gallery_image_version_name=gallery_image_version,
gallery_image_version=image_version)
def fix_gallery_image_date_info(date_info):
    # here we add the needed time, if only a date is provided, so the setting can be accepted by the service end
if date_info and 't' not in date_info.lower():
date_info += 'T12:59:59Z'
return date_info
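# Example of the date fix-up above (illustrative values only):
#   fix_gallery_image_date_info('2020-01-01')           -> '2020-01-01T12:59:59Z'
#   fix_gallery_image_date_info('2020-01-01T10:00:00Z') is returned unchanged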
def update_image_version(instance, target_regions=None, replica_count=None):
if target_regions:
instance.publishing_profile.target_regions = target_regions
if replica_count:
instance.publishing_profile.replica_count = replica_count
if instance.storage_profile.source is not None:
instance.storage_profile.os_disk_image = instance.storage_profile.data_disk_images = None
return instance
# endregion
# region proximity placement groups
def create_proximity_placement_group(cmd, client, proximity_placement_group_name, resource_group_name,
ppg_type=None, location=None, tags=None):
from knack.arguments import CaseInsensitiveList
location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
ProximityPlacementGroup, PPGType = cmd.get_models('ProximityPlacementGroup', 'ProximityPlacementGroupType')
choices = CaseInsensitiveList([x.value for x in PPGType])
if ppg_type and ppg_type not in choices:
logger.info("Valid choices: %s", str(choices))
raise CLIError("Usage error: invalid value for --type/-t")
ppg_params = ProximityPlacementGroup(name=proximity_placement_group_name, proximity_placement_group_type=ppg_type,
location=location, tags=(tags or {}))
return client.create_or_update(resource_group_name=resource_group_name,
proximity_placement_group_name=proximity_placement_group_name, parameters=ppg_params)
def list_proximity_placement_groups(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list_by_subscription()
# endregion
# region dedicated host
def create_dedicated_host_group(cmd, client, host_group_name, resource_group_name, platform_fault_domain_count=None,
location=None, zones=None, tags=None):
DedicatedHostGroup = cmd.get_models('DedicatedHostGroup')
location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
host_group_params = DedicatedHostGroup(location=location, platform_fault_domain_count=platform_fault_domain_count,
zones=zones, tags=tags)
return client.create_or_update(resource_group_name, host_group_name, parameters=host_group_params)
def list_dedicated_host_groups(cmd, client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
def create_dedicated_host(cmd, client, host_group_name, host_name, resource_group_name, sku, platform_fault_domain=None,
auto_replace_on_failure=None, license_type=None, location=None, tags=None):
DedicatedHostType = cmd.get_models('DedicatedHost')
SkuType = cmd.get_models('Sku')
location = location or _get_resource_group_location(cmd.cli_ctx, resource_group_name)
sku = SkuType(name=sku)
host_params = DedicatedHostType(location=location, platform_fault_domain=platform_fault_domain,
auto_replace_on_failure=auto_replace_on_failure, license_type=license_type,
sku=sku, tags=tags)
return client.create_or_update(resource_group_name, host_group_name, host_name, parameters=host_params)
def get_dedicated_host_instance_view(client, host_group_name, host_name, resource_group_name):
return client.get(resource_group_name, host_group_name, host_name, expand="instanceView")
# endregion
# region VMMonitor
def _get_log_analytics_client(cmd):
from ._client_factory import cf_log_analytics
from azure.cli.core.commands.client_factory import get_subscription_id
subscription_id = get_subscription_id(cmd.cli_ctx)
return cf_log_analytics(cmd.cli_ctx, subscription_id)
def _prepare_workspace(cmd, resource_group_name, workspace):
from msrestazure.tools import is_valid_resource_id
from msrestazure.azure_exceptions import CloudError
workspace_id = None
if not is_valid_resource_id(workspace):
workspace_name = workspace
log_client = _get_log_analytics_client(cmd)
workspace_result = None
try:
workspace_result = log_client.workspaces.get(resource_group_name, workspace_name)
except CloudError:
from azure.mgmt.loganalytics.models import Workspace, WorkspaceSku, WorkspaceSkuNameEnum
sku = WorkspaceSku(name=WorkspaceSkuNameEnum.per_gb2018.value)
retention_time = 30 # default value
location = _get_resource_group_location(cmd.cli_ctx, resource_group_name)
workspace_instance = Workspace(location=location,
sku=sku,
retention_in_days=retention_time)
workspace_result = LongRunningOperation(cmd.cli_ctx)(log_client.workspaces.create_or_update(
resource_group_name,
workspace_name,
workspace_instance))
workspace_id = workspace_result.id
else:
workspace_id = workspace
return workspace_id
def _set_data_source_for_workspace(cmd, os_type, resource_group_name, workspace_name):
from ._client_factory import cf_log_analytics_data_sources
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.mgmt.loganalytics.models import DataSource
from msrestazure.azure_exceptions import CloudError
subscription_id = get_subscription_id(cmd.cli_ctx)
data_sources_client = cf_log_analytics_data_sources(cmd.cli_ctx, subscription_id)
data_source_name_template = "DataSource_{}_{}"
default_data_sources = None
if os_type.lower() == 'linux':
from ._workspace_data_source_settings import default_linux_data_sources
default_data_sources = default_linux_data_sources
elif os_type.lower() == 'windows':
from ._workspace_data_source_settings import default_windows_data_sources
default_data_sources = default_windows_data_sources
if default_data_sources is not None:
for data_source_kind, data_source_settings in default_data_sources.items():
for data_source_setting in data_source_settings:
data_source = DataSource(kind=data_source_kind,
properties=data_source_setting)
data_source_name = data_source_name_template.format(data_source_kind, _gen_guid())
try:
data_sources_client.create_or_update(resource_group_name,
workspace_name,
data_source_name,
data_source)
except CloudError as ex:
logger.warning("Failed to set data source due to %s. "
"Skip this step and need manual work later.", ex.message)
else:
logger.warning("Unsupported OS type. Skip the default settings for log analytics workspace.")
def execute_query_for_vm(cmd, client, resource_group_name, vm_name, analytics_query, timespan=None):
"""Executes a query against the Log Analytics workspace linked with a vm."""
from azure.loganalytics.models import QueryBody
vm = get_vm(cmd, resource_group_name, vm_name)
workspace = None
extension_resources = vm.resources or []
for resource in extension_resources:
if resource.name == "MicrosoftMonitoringAgent" or resource.name == "OmsAgentForLinux":
workspace = resource.settings.get('workspaceId', None)
if workspace is None:
raise CLIError('Cannot find the corresponding log analytics workspace. '
                       'Please check the status of the log analytics workspace.')
return client.query(workspace, QueryBody(query=analytics_query, timespan=timespan))
def _set_log_analytics_workspace_extension(cmd, resource_group_name, vm, vm_name, workspace_name):
is_linux_os = _is_linux_os(vm)
vm_extension_name = _LINUX_OMS_AGENT_EXT if is_linux_os else _WINDOWS_OMS_AGENT_EXT
log_client = _get_log_analytics_client(cmd)
customer_id = log_client.workspaces.get(resource_group_name, workspace_name).customer_id
settings = {
'workspaceId': customer_id,
'stopOnMultipleConnections': 'true'
}
primary_shared_key = log_client.shared_keys.get_shared_keys(resource_group_name, workspace_name).primary_shared_key
protected_settings = {
'workspaceKey': primary_shared_key,
}
return set_extension(cmd, resource_group_name, vm_name, vm_extension_name,
extension_mappings[vm_extension_name]['publisher'],
extension_mappings[vm_extension_name]['version'],
settings,
protected_settings)
# endregion
# disk encryption set
def create_disk_encryption_set(cmd, client, resource_group_name, disk_encryption_set_name,
key_url, source_vault, location=None, tags=None, no_wait=False):
from msrestazure.tools import resource_id, is_valid_resource_id
DiskEncryptionSet, EncryptionSetIdentity, KeyVaultAndKeyReference, SourceVault = cmd.get_models(
'DiskEncryptionSet', 'EncryptionSetIdentity', 'KeyVaultAndKeyReference', 'SourceVault')
encryption_set_identity = EncryptionSetIdentity(type='SystemAssigned')
if not is_valid_resource_id(source_vault):
source_vault = resource_id(subscription=client.config.subscription_id, resource_group=resource_group_name,
namespace='Microsoft.KeyVault', type='vaults', name=source_vault)
source_vault = SourceVault(id=source_vault)
keyVault_and_key_reference = KeyVaultAndKeyReference(source_vault=source_vault, key_url=key_url)
disk_encryption_set = DiskEncryptionSet(location=location, tags=tags, identity=encryption_set_identity,
active_key=keyVault_and_key_reference)
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, disk_encryption_set_name,
disk_encryption_set)
def list_disk_encryption_sets(cmd, client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def update_disk_encryption_set(instance, client, resource_group_name, key_url=None, source_vault=None):
from msrestazure.tools import resource_id, is_valid_resource_id
if not is_valid_resource_id(source_vault):
source_vault = resource_id(subscription=client.config.subscription_id, resource_group=resource_group_name,
namespace='Microsoft.KeyVault', type='vaults', name=source_vault)
if key_url:
instance.active_key.key_url = key_url
if source_vault:
instance.active_key.source_vault.id = source_vault
return instance
# endregion
| 51.204076 | 163 | 0.69343 |
2733c18d71d1e9aecc9b71be42ee98848e07bffe | 6,228 | py | Python | garnett/ciffilewriter.py | glotzerlab/garne | f9cb7bad391299e28feb4010eb77447fdc4512cb | ["BSD-3-Clause"] | 4 | 2019-07-30T00:12:44.000Z | 2020-03-03T19:58:34.000Z | garnett/ciffilewriter.py | glotzerlab/garne | f9cb7bad391299e28feb4010eb77447fdc4512cb | ["BSD-3-Clause"] | 62 | 2019-07-29T20:05:46.000Z | 2022-02-16T15:22:01.000Z | garnett/ciffilewriter.py | glotzerlab/garne | f9cb7bad391299e28feb4010eb77447fdc4512cb | ["BSD-3-Clause"] | 2 | 2020-03-03T19:59:09.000Z | 2021-03-22T14:48:56.000Z |
# Copyright (c) 2020 The Regents of the University of Michigan
# All rights reserved.
# This software is licensed under the BSD 3-Clause License.
"""cif-file writer for the Glotzer Group, University of Michigan.
Authors: Julia Dshemuchadse, Carl Simon Adorf
.. code::
writer = CifFileWriter()
# write to screen:
write.write(trajectory)
# write to file:
with open('a_ciffile.pos', 'w') as ciffile:
writer.write(trajectory, ciffile)
"""
import io
import sys
import logging
import math
import datetime
from collections import defaultdict
import numpy as np
logger = logging.getLogger(__name__)
def _determine_unitcell(box):
lengths = np.sqrt(np.sum(np.array(box.get_box_matrix())**2, axis=0)) # a, b, c
gamma = math.degrees(
np.arccos(
box.xy / math.sqrt(1 + box.xy ** 2)))
beta = math.degrees(
np.arccos(
box.xz / math.sqrt(1 + box.xz ** 2 + box.yz ** 2)))
alpha = math.degrees(
np.arccos(
(box.xy * box.xz + box.yz) /
(math.sqrt(1 + box.xy ** 2)
* math.sqrt(1 + box.xz ** 2 + box.yz ** 2))))
angles = np.array([alpha, beta, gamma])
return lengths, angles
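# For an untilted (orthorhombic) box the tilt factors xy, xz and yz are all zero, so every arccos
# term above evaluates to arccos(0) and the returned angles are (90.0, 90.0, 90.0) degrees, while
# the lengths reduce to the box edge lengths.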
class CifFileWriter(object):
"""cif-file writer for the Glotzer Group, University of Michigan.
Authors: Julia Dshemuchadse, Carl Simon Adorf
.. code::
writer = CifFileWriter()
# write to screen:
write.write(trajectory)
# write to file:
with open('a_ciffile.pos', 'w') as ciffile:
writer.write(trajectory, ciffile)
"""
def _write_frame(self, frame, file, data, occupancies, fractional, raw):
from . import __version__
if occupancies is None:
occupancies = np.ones(frame.position.shape[0])
def _write(msg='', end='\n'):
file.write(msg + end)
unitcell_lengths, unitcell_angles = _determine_unitcell(frame.box)
# write title
_write("data_" + data)
# _write("data_" + os.path.splitext(ciffilename)[0])
_write("_audit_creation_method "
"'generated by garnett {}'".format(__version__))
_write("_audit_creation_date {}".format(str(datetime.date.today())))
# write unit cell parameters
_write("_cell_length_a {}".format(unitcell_lengths[0]))
_write("_cell_length_b {}".format(unitcell_lengths[1]))
_write("_cell_length_c {}".format(unitcell_lengths[2]))
_write("_cell_angle_alpha {}".format(unitcell_angles[0]))
_write("_cell_angle_beta {}".format(unitcell_angles[1]))
_write("_cell_angle_gamma {}".format(unitcell_angles[2]))
_write()
# write symmetry - P1
_write("_symmetry_space_group_name_H-M " + "'P 1'")
_write("_symmetry_Int_Tables_number " + str(1))
_write()
# write header for particle positions
_write("loop_")
_write("_atom_site_label")
_write("_atom_site_type_symbol")
_write("_atom_site_occupancy")
_write("_atom_site_fract_x")
_write("_atom_site_fract_y")
_write("_atom_site_fract_z")
# write header particle positions
if raw:
fractions = frame.cif_coordinates.copy()
else:
if fractional:
fractions = frame.position.copy()
else:
invbox = np.linalg.inv(frame.box.get_box_matrix())
fractions = np.dot(invbox, frame.position.T).T
fractions += 0.5
type_counter = defaultdict(int)
n_digits = len(str(len(frame.position)))
particle_str = "{ptype}{pnum:0" + str(n_digits) + "d} {ptype} {occ:3.2f} {position}"
for i, (position, typeid, occupancy) in enumerate(zip(fractions, frame.typeid, occupancies)):
particle_type = frame.types[typeid]
_write(particle_str.format(
pnum=type_counter[particle_type],
ptype=particle_type,
occ=occupancy,
position=' '.join(map(str, position))))
type_counter[particle_type] += 1
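    # Illustrative rows produced by the loop above for a frame with 100 particles of type "A"
    # (n_digits == 3); the coordinate values are made up:
    #   A000 A 1.00 0.25 0.5 0.75
    #   A001 A 1.00 0.5 0.5 0.5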
def write(self, trajectory, file=sys.stdout,
data='simulation', occupancy=None, fractional=False, raw=False):
"""Serialize a trajectory into cif-format and write it to file.
:param trajectory: The trajectory to serialize
:type trajectory: :class:`~garnett.trajectory.Trajectory`
:param file: The file to write the trajectory to.
:type file: A file-like object.
:param data: Identifier which be will written to the file,
signifying the origin of the data.
:type data: str
:param occupancy: The default occupancy of individual particles.
:type occupancy: int
:param fractional: Whether or not the input coordinates are fractional
:type fractional: bool
:param raw: Whether or not to write the raw CIF coordinates (with no transformations)
:type raw: bool
"""
for i, frame in enumerate(trajectory):
self._write_frame(
frame=frame,
file=file,
data='{}_frame_{}'.format(data, i),
occupancies=occupancy,
fractional=fractional,
raw=raw)
logger.debug("Wrote frame {}.".format(i + 1))
logger.info("Wrote {} frames.".format(i + 1))
def dump(self, trajectory, data='simulation', occupancy=None,
fractional=False, raw=False):
"""Serialize trajectory into cif-format.
:param trajectory: The trajectory to serialize.
:type trajectory: :class:`~garnett.trajectory.Trajectory`
:param data: Identifier which be will written to the file,
signifying the origin of the data.
:type data: str
:param occupancy: The default occupancy of individual particles.
:type occupancy: numpy.array
:rtype: str
"""
f = io.StringIO()
self.write(trajectory, f, occupancy=occupancy, fractional=fractional, raw=raw)
return f.getvalue()
| 35.793103 | 101 | 0.599229 |
9645b8b1a1be6bc1dce74ed03c4d4350c5759b91 | 3,990 | py | Python | win32_definitions.py | jacoblusk/lwatch | c30b326da92cb90847be108c626b486a5707bb0f | ["MIT"] | null | null | null | win32_definitions.py | jacoblusk/lwatch | c30b326da92cb90847be108c626b486a5707bb0f | ["MIT"] | null | null | null | win32_definitions.py | jacoblusk/lwatch | c30b326da92cb90847be108c626b486a5707bb0f | ["MIT"] | null | null | null |
from ctypes import windll, CDLL
from ctypes import POINTER, Structure, sizeof, byref, cast, Union
from ctypes import c_ulong, c_int, c_char, c_ubyte, c_ushort, c_char_p, \
c_void_p, c_wchar, c_wchar_p, c_long
from ctypes import create_string_buffer, string_at, memset, \
create_unicode_buffer, wstring_at
TRUE = 1
FALSE = 0
MAX_PATH = 260
TH32CS_SNAPPROCESS = 0x00000002
ERROR_INSUFFICIENT_BUFFER = 122
ERROR_INVALID_PARAMETER = 87
ERROR_NOT_SUPPORTED = 50
INVALID_HANDLE_VALUE = -1
NO_ERROR = 0
UDP_TABLE_OWNER_PID = 1
AF_INET = 2
AF_INET6 = 10
class S_un_b(Structure):
_fields_ = [
("s_b1", c_ubyte),
("s_b2", c_ubyte),
("s_b3", c_ubyte),
("s_b4", c_ubyte)
]
class S_un_w(Structure):
_fields_ = [
("s_w1", c_ushort),
("s_w2", c_ushort)
]
class S_un(Union):
_fields_ = [
("S_un_b", S_un_b),
("S_un_w", S_un_w),
("S_addr", c_ulong)
]
class in_addr(Structure):
_fields_ = [
("S_un", S_un)
]
class MIB_TCPROW2(Structure):
_fields_ = [
("dwState", c_ulong),
("dwLocalAddr", c_ulong),
("dwLocalPort", c_ulong),
("dwRemoteAddr", c_ulong),
("dwRemotePort", c_ulong),
("dwOwningPid", c_ulong),
("dwOffloadState", c_int)
]
class MIB_UDPROW_OWNER_PID(Structure):
_fields_ = [
("dwLocalAddr", c_ulong),
("dwLocalPort", c_ulong),
("dwOwningPid", c_ulong)
]
def MIB_UDPTABLE_OWNER_PID_FACTORY(size: int):
class MIB_UDPTABLE_OWNER_PID(Structure):
_fields_ = [
("dwNumEntries", c_ulong),
("table", MIB_UDPROW_OWNER_PID * size)
]
return MIB_UDPTABLE_OWNER_PID
def MIB_TCPTABLE2_FACTORY(size: int):
class MIB_TCPTABLE2(Structure):
_fields_ = [
("dwNumEntries", c_ulong),
("table", MIB_TCPROW2 * size)
]
return MIB_TCPTABLE2
class PROCESSENTRY32W(Structure):
_fields_ = [
("dwSize", c_ulong),
("cntUsage", c_ulong),
("th32ProcessID", c_ulong),
("th32DefaultHeapID", POINTER(c_ulong)),
("th32ModuleId", c_ulong),
("cntThreads", c_ulong),
("th32ParentProcessID", c_ulong),
("pcPriClassBase" , c_long),
("dwFlags", c_ulong),
("szExeFile", c_wchar * MAX_PATH)
]
libc = CDLL("msvcrt")
libc.wcslen.argtypes = [c_wchar_p]
inet_nota = windll.ws2_32.inet_ntoa
inet_nota.argtypes = [in_addr]
inet_nota.restype = c_char_p
MIB_TCPTABLE2_1 = MIB_TCPTABLE2_FACTORY(1)
MIB_UDPTABLE_OWNER_PID_1 = MIB_UDPTABLE_OWNER_PID_FACTORY(1)
GetTcpTable2 = windll.iphlpapi.GetTcpTable2
GetTcpTable2.argtypes = [c_void_p, POINTER(c_ulong), c_int]
GetTcpTable2.restype = c_ulong
GetExtendedUdpTable = windll.iphlpapi.GetExtendedUdpTable
GetExtendedUdpTable.argtypes = [c_void_p, POINTER(c_ulong), c_int, c_ulong, c_int, c_ulong]
GetExtendedUdpTable.restype = c_ulong
CreateToolhelp32Snapshot = windll.kernel32.CreateToolhelp32Snapshot
CreateToolhelp32Snapshot.argtypes = [c_ulong, POINTER(c_ulong)]
CreateToolhelp32Snapshot.restype = c_ulong
Process32First = windll.kernel32.Process32First
Process32First.argtypes = [c_ulong, POINTER(PROCESSENTRY32W)]
Process32First.restype = c_int
Process32Next = windll.kernel32.Process32Next
Process32Next.argtypes = [c_ulong, POINTER(PROCESSENTRY32W)]
Process32Next.restype = c_int
OpenProcess = windll.kernel32.OpenProcess
OpenProcess.argtypes = [c_ulong, c_ubyte, c_ulong]
OpenProcess.restype = c_ulong
GetModuleBaseName = windll.psapi.GetModuleBaseNameW
GetModuleBaseName.argtypes = [c_ulong, c_ulong, c_wchar_p, c_ulong]
GetModuleBaseName.restype = c_ulong
GetProcessImageFileName = windll.psapi.GetProcessImageFileNameW
GetProcessImageFileName.argtypes = [c_ulong, c_wchar_p, c_ulong]
GetProcessImageFileName.restype = c_ulong
CloseHandle = windll.kernel32.CloseHandle
CloseHandle.argtypes = [c_ulong]
CloseHandle.restype = c_ubyte
| 26.778523 | 91 | 0.693985 |
6c1adcbc9c93b894fc6132358d2abd337a8f9e0b | 25,970 | py | Python | publications_bibtex/parser.py | lukacu/django-publications | 663ace605925f53835f441c7761a6f4b0d2d4143 | ["BSD-3-Clause"] | null | null | null | publications_bibtex/parser.py | lukacu/django-publications | 663ace605925f53835f441c7761a6f4b0d2d4143 | ["BSD-3-Clause"] | 3 | 2020-02-12T03:15:47.000Z | 2021-06-10T22:05:24.000Z | publications_bibtex/parser.py | lukacu/django-publications | 663ace605925f53835f441c7761a6f4b0d2d4143 | ["BSD-3-Clause"] | 1 | 2018-07-23T11:46:37.000Z | 2018-07-23T11:46:37.000Z |
# -*- Mode: python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import re
import sys
## {{{ http://code.activestate.com/recipes/81611/ (r2)
def int_to_roman(input):
"""
Convert an integer to Roman numerals.
Examples:
>>> int_to_roman(0)
Traceback (most recent call last):
ValueError: Argument must be between 1 and 3999
>>> int_to_roman(-1)
Traceback (most recent call last):
ValueError: Argument must be between 1 and 3999
>>> int_to_roman(1.5)
Traceback (most recent call last):
TypeError: expected integer, got <type 'float'>
>>> for i in range(1, 21): print int_to_roman(i)
...
I
II
III
IV
V
VI
VII
VIII
IX
X
XI
XII
XIII
XIV
XV
XVI
XVII
XVIII
XIX
XX
>>> print int_to_roman(2000)
MM
>>> print int_to_roman(1999)
MCMXCIX
"""
if type(input) != type(1):
raise TypeError, "expected integer, got %s" % type(input)
if not 0 < input < 4000:
raise ValueError, "Argument must be between 1 and 3999"
ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I')
result = ""
for i in range(len(ints)):
count = int(input / ints[i])
result += nums[i] * count
input -= ints[i] * count
return result
def roman_to_int(input):
"""
Convert a roman numeral to an integer.
>>> r = range(1, 4000)
>>> nums = [int_to_roman(i) for i in r]
>>> ints = [roman_to_int(n) for n in nums]
>>> print r == ints
1
>>> roman_to_int('VVVIV')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: VVVIV
>>> roman_to_int(1)
Traceback (most recent call last):
...
TypeError: expected string, got <type 'int'>
>>> roman_to_int('a')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: A
>>> roman_to_int('IL')
Traceback (most recent call last):
...
ValueError: input is not a valid roman numeral: IL
"""
if type(input) != type(""):
raise TypeError, "expected string, got %s" % type(input)
input = input.upper()
nums = ['M', 'D', 'C', 'L', 'X', 'V', 'I']
ints = [1000, 500, 100, 50, 10, 5, 1]
places = []
for c in input:
if not c in nums:
raise ValueError, "input is not a valid roman numeral: %s" % input
for i in range(len(input)):
c = input[i]
value = ints[nums.index(c)]
# If the next place holds a larger number, this value is negative.
try:
nextvalue = ints[nums.index(input[i +1])]
if nextvalue > value:
value *= -1
except IndexError:
# there is no next place.
pass
places.append(value)
sum = 0
for n in places: sum += n
# Easiest test for validity...
if int_to_roman(sum) == input:
return sum
else:
raise ValueError, 'input is not a valid roman numeral: %s' % input
## end of http://code.activestate.com/recipes/81611/ }}}
class BibTeXParser:
def parse(self, raw):
self.raw = raw
self.position = 0
self.state = 0
self.line = 1
self.column = 1
self.errors = list()
result = list()
exit = False
while True:
# waiting for new segment
if self.state == 0:
first = True
entry = {"type" : None, "fields" : {} }
expect = [" ", "\t", "\n", "\r"]
if len(result) > 0:
expect.append(",")
if not self._advance(expect):
break
if not self._requireNext(["@", "%"]):
self._error("Expected '@' or '%'")
return None
c = self._peek(-1)
if c == "%":
pre_comment = 0
self.state = 11
continue
entry["line"] = self.line
entry["column"] = self.column
self.state = 1
continue
# figuring out what kind of segment it is
elif self.state == 1:
start = self.position
if not self._advanceTo([" ", "\t", "\n", "\r", "{"]):
self._error("Expected '{'")
return None
entry_type = self.raw[start : self.position]
if (entry_type == ""):
self._error("Expected segment type")
return None
entry['type'] = entry_type
self.state = 2
continue
# moving to start of segment
elif self.state == 2:
if not self._advance() or not self._validateNext("{"):
self._error("Expected '{'")
return None
self.state = 3
continue
# moving to field key
elif self.state == 3:
if not self._advance():
self._error("Expected '}'")
return None
c = self._peek()
if c == None:
self._error("Expected '}'")
return None
if (c == "}"):
self._pop()
self.state = 0
result.append(entry)
continue
self.state = 4
continue
# parsing field key
elif self.state == 4:
start = self.position
expect = [" ", "\t", "\n", "\r", "="]
if first:
expect.append(",") # in case this can also be the key
if not self._advanceTo(expect):
if first:
self._error("Expected '=' or ','")
else:
self._error("Expected '='")
return None;
key = self.raw[start : self.position]
if entry_type == "":
self._error("Expected field key")
return None
if not self._advance():
if first:
self._error("Expected '=' or ','")
else:
self._error("Expected '='")
return None
c = self._peek()
if c == ",":
if not first:
self._error("Entry key not expected here")
return None
first = False
self._pop()
entry['key'] = key
self.state = 3
continue
self.state = 5
continue
# move to field value
elif self.state == 5:
if not self._advance():
self._error("Expected '='")
return None
if not self._validateNext("="):
self._error("Expected '='")
return None
if not self._advance():
self._error("Expected field value")
return None
self.state = 6;
continue
# start processing field value
elif self.state == 6:
c = self._peek()
if (c == "{"):
brackets = 1
self.state = 7
self._pop()
start = self.position
continue
elif (c == "\""):
self.state = 8;
self._pop();
start = self.position;
continue
self.state = 9;
start = self.position;
continue
# find matching }
elif self.state == 7:
if not self._advanceTo(["{", "}"]):
self._error("Expected '}'")
return None
c = self._peek(-1);
if c == "\\":
continue
c = self._pop()
if c == "{":
brackets = brackets + 1
continue
if c == "}":
brackets = brackets - 1;
if brackets == 0:
value = self.raw[start : self.position - 1];
entry["fields"][key] = self._cleanValue(value)
self.state = 10
continue
continue
# find matching "
elif self.state == 8:
if not self._advanceTo(["\""]):
self._error("Expected '\"'")
return None
c = self._peek(-1);
if c == "\\":
continue
else:
value = self.raw[start : self.position];
          entry["fields"][key] = self._cleanValue(value)
self._pop()
self.state = 10
continue
# find whole word
elif self.state == 9:
if not self._advanceTo([" ", "\t", "\n", "\r", ",", "}"]):
self._error("Expected a closure")
return None
c = self._peek(-1)
if c == "\\":
continue
else:
value = self.raw[start : self.position];
entry["fields"][key] = value;
self.state = 10;
continue
# finish processing field
elif self.state == 10:
if not self._advance():
self._error("Expected '}' or ','")
return None
if not self._peekNext(["}", ","]):
self._error("Expected '}' or ','")
return None
c = self._pop()
if c == "}":
self.state = 0
else:
self.state = 3
if self.state == 0:
result.append(entry)
continue
# comments
elif self.state == 11:
if (self._advanceTo(["\n", "\r"])):
self._pop();
self.state = pre_comment
continue
return result;
def _pop(self):
if self._eos():
return None
c = self.raw[self.position]
self.position = self.position + 1;
if c == "\n":
self.line = self.line + 1;
self.column = 1;
else:
self.column = self.column + 1;
return c
def _peek(self, offset = 0):
if self._eos(offset):
return None
c = self.raw[self.position + offset]
return c
def _advance(self, allowed = [" ", "\t", "\n", "\r"]):
if self._eos():
return False
while True:
c = self._peek()
if (c == None):
return False
if c in allowed:
self._pop()
continue
return True
def _advanceTo(self, allowed = [" ", "\t", "\n", "\r"]):
if self._eos():
return False
while True:
c = self._peek()
if c == None:
return False
if c in allowed:
return True
self._pop()
def _validateNext(self, allowed = " "):
if self._eos(1):
return False
c = self._pop()
if type(allowed) == list:
for a in allowed:
if c == a:
return True
else:
if c == allowed:
return True
return False
def _requireNext(self, allowed = " "):
if self._eos(1):
if type(allowed) == list:
expected = "', '".join(allowed)
else:
expected = allowed
self._error("Expected 'expected' but end of input found")
c = self._pop()
if type(allowed) == list:
for a in allowed:
if (c == a):
return True;
else:
if (c == allowed):
return True;
if type(allowed) == list:
expected = "', '".join(allowed)
else:
expected = allowed
self._error("Expected 'expected' but 'c' found")
return False
def _peekNext(self, allowed = " "):
if self._eos():
return False
c = self._peek()
if type(allowed) == list:
for a in allowed:
if c == a:
return True
else:
if (c == allowed):
return True
return False
def _eos(self, advance = 0):
return len(self.raw) <= self.position + advance;
def _error(self, message, line = None, column = None):
if (line == None):
line = self.line
if (column == None):
column = self.column
self.errors.append({"message" : message, "line" : line, "column" : column, "state" : self.state})
def getErrors(self) :
return self.errors
def _cleanValue(self, value):
    value = re.sub(r'[\t\n\r]', " ", value).strip()
    return re.sub(r" +([^ ])", r" \1", value)
def parsePeople(self, raw):
    auth = raw.split(" and ")
    result = list();
    for value in auth:
      r = parsePerson(value.strip())
      if r is None:
        continue
result.append(r)
return result;
# Parses a single name. Tries to figure out what is name and what surname.
# Returns an array with two elements: surname and name.
def parsePerson(raw):
matches = re.match("/^(?:([^ ,]+) *, *([^ ].*))$/", raw)
if (matches != None):
return (matches[1], matches[2])
matches = re.match("/^(?:([^ ]+) *([^ ].*))$/", raw)
if (matches != None):
return (matches[2], matches[1])
return None
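# Illustrative results of parsePerson (names are made up):
#   parsePerson("Doe, John")    -> ("Doe", "John")
#   parsePerson("John Doe")     -> ("Doe", "John")
#   parsePerson("John van Doe") -> ("van Doe", "John")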
BIBTEX_FIELDS = {
"address" : {"description" : "Publisher's address (usually just the city, but can be the full address for lesser-known publishers)", "type" : "string"},
"annote" : {"description" : "An annotation for annotated bibliography styles (not typical)", "type" : "string"},
"author" : {"description" : "The name(s) of the author(s) (in the case of more than one author, separated by and)", "type" : "people"},
"booktitle" : {"description" : "The title of the book, if only part of it is being cited", "type" : "string"},
"chapter" : {"description" : "The chapter number", "type" : "string"},
"crossref" : {"description" : "The key of the cross-referenced entry", "type" : "string"},
"edition" : {"description" : "The edition of a book, long form (such as first or second)", "type" : "string"},
"editor" : {"description" : "The name(s) of the editor(s)", "type" : "people"},
"eprint" : {"description" : "A specification of an electronic publication, often a preprint or a technical report", "type" : "string"},
"howpublished" : {"description" : "How it was published, if the publishing method is nonstandard", "type" : "string"},
"institution" : {"description" : "The institution that was involved in the publishing, but not necessarily the publisher", "type" : "string"},
"journal" : {"description" : "The journal or magazine the work was published in", "type" : "string"},
"key" : {"description" : "A hidden field used for specifying or overriding the alphabetical order of entries (when the author and editor fields are missing). Note that this is very different from the key (mentioned just after this list) that is used to cite or cross-reference the entry.", "type" : "string"},
"month" : {"description" : "The month of publication (or, if unpublished, the month of creation)", "type" : "string"},
"note" : {"description" : "Miscellaneous extra information", "type" : "text"},
"number" : {"description" : "The number of a journal, magazine, or tech-report, if applicable. (Most publications have a volume, but no number field.)", "type" : "number"},
"organization" : {"description" : "The conference sponsor", "type" : "string"},
"pages" : {"description" : "Page numbers, separated either by commas or double-hyphens", "type" : "range"},
"publisher" : {"description" : "The publisher's name", "type" : "string"},
"school" : {"description" : "The school where the thesis was written", "type" : "string"},
"series" : {"description" : "The series of books the book was published in", "type" : "string"},
"title" : {"description" : "The title of the work", "type" : "string"},
"type" : {"description" : "The type of tech-report, for example, Research Note", "type" : "string"},
"url" : {"description" : "The WWW address to the electronic version of document", "type" : "url"},
"volume" : {"description" : "The volume of a journal or multi-volume book", "type" : "range"},
"year" : {"description" : "The year of publication (or, if unpublished, the year of creation)", "type" : "number"},
# the fields that are not part of the original BibTeX standard
"abstract" : {"description" : "An abstract of the work", "type" : "text"},
"doi" : {"description" : "Digital Object Identifier", "type" : "string"},
"isbn" : {"description" : "The International Standard Book Number", "type" : "string"},
"issn" : {"description" : "The International Standard Serial Number. Used to identify a journal.", "type" : "string"},
"keywords" : {"description" : "Keywords associated with this entry.", "type" : "terms"},
"owner" : {"description" : "Owner of the entry.", "type" : "string"},
"timestamp" : {"description" : "Timestamp of the entry.", "type" : "date"},
"groups" : {"description" : "Comma-separated list of groups that the entry belongs to.", "type" : "terms"}
}
BIBTEX_TYPES = {
"article" : {"required" : {"author", "title", "journal", "year"},
"optional" : {"volume", "number", "pages", "month", "note", "url", "abstract", "ISSN"},
"description" : "An article from a journal or magazine", "name" : "Article"
},
"book" : {"required" : {"author", "title", "publisher", "year"},
"optional" : {"editor", "volume", "series", "address", "edition", "month", "note", "url", "abstract", "ISBN"},
"description" : "A book with an explicit publisher", "name" : "Book"
},
"booklet" : {"required" : {"title"},
"optional" : {"author", "howpublished", "address", "month", "year", "note", "url"},
"description" : "A work that is printed and bound, but without a named publisher or sponsoring institution.",
"name" : "Booklet"
},
"conference" : {"required" : {"author", "title", "booktitle", "year"},
"optional" : {"editor", "pages", "organization", "publisher", "address", "month", "note", "url"},
"description" : "The same as inproceedings, included for Scribe (markup language) compatibility.", "name" : "Conference"
},
"inbook" : {"required" : {"author", "title", "chapter", "pages", "year"},
"optional" : {"editor", "volume", "series", "address", "edition", "month", "note", "url", "abstract", "ISBN"},
"description" : "A part of a book, which may be a chapter (or section or whatever) and/or a range of pages.",
"name" : "In book"
},
"incollection" : {"required" : {"author", "title", "booktitle", "year"},
"optional" : {"editor", "pages", "organization", "address", "publisher", "month", "note", "url", "abstract"},
"description" : "A part of a book having its own title.",
"name" : "In collection"
},
"inproceedings" : {"required" : {"author", "title", "booktitle", "year"},
"optional" : {"editor", "pages", "organization", "address", "publisher", "month", "note", "url", "abstract"},
"description" : "An article in a conference proceedings.",
"name" : "In proceedings"
},
"manual" : {"required" : {"title"},
"optional" : {"author", "organization", "address", "edition", "month", "year", "note", "url"},
"description" : "Technical documentation",
"name" : "Manual"
},
"mastersthesis" : {"required" : {"author", "title", "school", "year"},
"optional" : {"address", "month", "note", "url", "abstract"},
"description" : "A Masters thesis.",
"name" : "Master thesis"
},
"misc" : {"required" : {},
"optional" : {"author", "title", "howpublished", "month", "year", "note", "url"},
"description" : "For use when nothing else fits.",
"name" : "Misc"
},
"phdthesis" : {"required" : {"author", "title", "school", "year"},
"optional" : {"address", "month", "note", "url", "abstract"},
"description" : "A Ph.D. Thesis",
"name" : "PhD Thesis"
},
"proceedings" : {"required" : {"title", "year"},
"optional" : {"editor", "publisher", "organization", "address", "month", "note", "url"},
"description" : " The proceedings of a conference.",
"name" : "Proceedings"
},
"techreport" : {"required" : {"author", "title", "institution", "year"},
"optional" : {"type", "number", "address", "month", "note", "url", "abstract"},
"description" : "A report published by a school or other institution, usually numbered within a series.",
"name" : "Tech report"
},
"unpublished" : {"required" : {"author", "title", "note"},
"optional" : {"month", "year", "url"},
"description" : "A document having an author and title, but not formally published.",
"name" : "Unpublished"
}
}
class BibTeXProcessor:
def __init__(self, strict = True, require = []):
self.errors = [];
self._replace = {};
self.required = require;
self.strict = strict;
def registerReplacement(self, key, value):
self._replace[' ' + key + ' '] = ' ' + value + ' '
def process(self, entry):
self.errors = list()
bibtex_type = entry["type"].lower()
bibtex_key = entry["key"]
self.line = entry["line"]
self.column = entry["column"]
if not BIBTEX_TYPES.has_key(bibtex_type):
self._error("Unsupported entry type '%s'" % bibtex_type)
return None
fields = BIBTEX_TYPES[bibtex_type]
required = fields['required'].copy()
required.update(self.required)
result = {}
error = False
for key, value in entry["fields"].items():
new_key = key.lower()
errormsg = self.validate(key, value)
if not errormsg is None:
self._error("Incorrect format for field '%s': %s" % (bibtex_type, errormsg))
error = True
continue
result[new_key] = self.decode(key, value)
keys = result.keys();
missing = set(required) ^ (set(keys) & set(required));
for key in missing:
self._error("Missing required field '%s'" % key)
error = True
# second processing stage
entry = result
result = {}
for key, value in entry.items():
new_key = self.renameField(key);
if self.strict and not BIBTEX_FIELDS.has_key(new_key):
self._error("Unknown field '%s'" % key)
error = True
elif not self.strict and not BIBTEX_FIELDS.has_key(new_key):
continue
result[new_key] = self.parseField(new_key, value)
result["type"] = bibtex_type
result["key"] = bibtex_key
if error:
return None
return result
def _error(self, message, line = None, column = None):
if line is None:
line = self.line
if column is None:
column = self.column
self.errors.append({"message" : message, "line" : line, "column" : column})
def getErrors(self):
return self.errors
def validate(self, field, value):
return None
def decode(self, field, value):
if BIBTEX_FIELDS.has_key(field):
t = BIBTEX_FIELDS[field]["type"]
else:
t = "string"
if t == "string" or t == "text":
return self._substitute(self._unicode(value)).strip()
if t == "number":
value = value.strip()
try:
return str(int(value))
except:
if value == "":
return value
else:
try:
return str(roman_to_int(value))
except:
return ""
if t == "range":
value = value.strip()
m = re.match(r'([0-9]+) *-+ *([0-9]+)', value)
if m:
return "%s-%s" % (m.group(1), m.group(2))
try:
return str(int(value))
except:
if value == "":
return value
else:
try:
return str(roman_to_int(value))
except:
return ""
elif t == "people":
value = self._unicode(value).strip()
if " and " in value.lower():
people_raw = [e.strip() for e in re.split(' and ', value, flags=re.IGNORECASE)]
elif ";" in value:
people_raw = [e.strip() for e in value.split(";")]
else:
people_raw = [e.strip() for e in value.split(",")]
# it is possible that there is only one author with reverse name notation
# in this case there are only two elements
if len(people_raw) == 2:
people_raw = [", ".join(people_raw)]
people = []
for person_raw in people_raw:
if "," in person_raw:
parts = [e.strip() for e in person_raw.split(",")]
name = parts[1]
surname = parts[0]
else:
parts = [e.strip() for e in person_raw.split(" ")]
if len(parts) > 2:
name = " ".join(parts[0:-1])
else:
name = parts[0]
surname = parts[-1]
people.append((surname, name))
return " and ".join([ "%s, %s" % e for e in people ])
elif t == "terms":
value = self._unicode(value).strip()
if " and " in value.lower():
terms_raw = [e.strip() for e in re.split(' and ', value, flags=re.IGNORECASE)]
elif ";" in value:
terms_raw = [e.strip() for e in value.split(";")]
elif "," in value:
terms_raw = [e.strip() for e in value.split(",")]
else:
terms_raw = [e.strip() for e in value.split(" ")]
return ', '.join([s.strip() for s in terms_raw])
return value.strip()
def _substitute(self, value):
tmp = value
for key, value in self._replace.items():
tmp = tmp.replace(key, value)
return tmp
def _unicode(self, text):
from publications_bibtex.transcode import tex_to_unicode
text = tex_to_unicode(text)
return re.sub(r'([^\\\\]?)([{}])', "\\1", text)
def renameField(self, key):
return key
def parseField(self, key, value):
return value
class BibTeXFormatter:
def format(self, entry):
from publications_bibtex.transcode import unicode_to_tex
bibtex_type = entry["type"]
bibtex_key = entry["key"]
o = list()
for key, value in entry.items():
if key == "type" or key == "key":
continue
o.append("\t" + key + " = {" + unicode_to_tex(value) + "}")
return "@" + bibtex_type + " {" + bibtex_key + ",\n" + ",\n".join(o) + "\n}\n"
def formatPeople(self, people, nice = False):
if not type(people) == list:
people = parsePeople(people)
if nice:
      last = people.pop()
temp = list()
for person in people:
        temp.append(person[1] + " " + person[0])
if len(temp) < 1:
return last[1] + " " + last[0]
return ", ".join(temp) + " and " + last[1] + " " + last[0]
else:
processed = list();
for a in people:
processed.append(", ".join(a))
return " and ".join(processed)
if __name__ == "__main__":
f = open(sys.argv[1], 'r')
content = f.read()
f.close()
parser = BibTeXParser()
entries = parser.parse(content)
errors = parser.getErrors()
if len(errors) > 0:
print errors
processor = BibTeXProcessor(strict=False)
formatter = BibTeXFormatter()
for entry in entries:
e = processor.process(entry)
if e is None:
print processor.getErrors()
else:
print formatter.format(e)
| 29.11435 | 311 | 0.555988 |
e0ba835f9957849eaa18a9bf856b4314ae8864a0 | 19,694 | py | Python | test/functional/feature_backwards_compatibility.py | crptec/sinovate | 345a81f99ec7e624e0ec244a7dbe1ebb3698c347 | ["MIT"] | 159 | 2016-07-09T13:02:19.000Z | 2022-03-11T08:15:56.000Z | test/functional/feature_backwards_compatibility.py | crptec/sinovate | 345a81f99ec7e624e0ec244a7dbe1ebb3698c347 | ["MIT"] | 40 | 2016-07-22T17:26:37.000Z | 2022-03-22T19:37:32.000Z | test/functional/feature_backwards_compatibility.py | crptec/sinovate | 345a81f99ec7e624e0ec244a7dbe1ebb3698c347 | ["MIT"] | 57 | 2016-10-21T23:57:47.000Z | 2022-03-26T20:51:23.000Z |
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Backwards compatibility functional test
Test various backwards compatibility scenarios. Requires previous releases binaries,
see test/README.md.
v0.15.2 is not required by this test, but it is used in wallet_upgradewallet.py.
Due to a hardfork in regtest, it can't be used to sync nodes.
Due to RPC changes introduced in various versions the below tests
won't work for older versions without some patches or workarounds.
Use only the latest patch version of each release, unless a test specifically
needs an older patch version.
"""
import os
import shutil
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import BitcoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class BackwardsCompatibilityTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 6
# Add new version after each release:
self.extra_args = [
["-addresstype=bech32"], # Pre-release: use to mine blocks
["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # Pre-release: use to receive coins, swap wallets, etc
["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # v0.19.1
["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # v0.18.1
["-nowallet", "-walletrbf=1", "-addresstype=bech32"], # v0.17.2
["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-wallet=wallet.dat"], # v0.16.3
]
self.wallet_names = [self.default_wallet_name]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_previous_releases()
def setup_nodes(self):
self.add_nodes(self.num_nodes, extra_args=self.extra_args, versions=[
None,
None,
190100,
180100,
170200,
160300,
])
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
def run_test(self):
self.nodes[0].generatetoaddress(COINBASE_MATURITY + 1, self.nodes[0].getnewaddress())
self.sync_blocks()
# Sanity check the test framework:
res = self.nodes[self.num_nodes - 1].getblockchaininfo()
assert_equal(res['blocks'], COINBASE_MATURITY + 1)
node_master = self.nodes[self.num_nodes - 5]
node_v19 = self.nodes[self.num_nodes - 4]
node_v18 = self.nodes[self.num_nodes - 3]
node_v17 = self.nodes[self.num_nodes - 2]
node_v16 = self.nodes[self.num_nodes - 1]
self.log.info("Test wallet backwards compatibility...")
# Create a number of wallets and open them in older versions:
# w1: regular wallet, created on master: update this test when default
# wallets can no longer be opened by older versions.
node_master.createwallet(wallet_name="w1")
wallet = node_master.get_wallet_rpc("w1")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] > 0
# Create a confirmed transaction, receiving coins
address = wallet.getnewaddress()
self.nodes[0].sendtoaddress(address, 10)
self.sync_mempools()
self.nodes[0].generate(1)
self.sync_blocks()
# Create a conflicting transaction using RBF
return_address = self.nodes[0].getnewaddress()
tx1_id = self.nodes[1].sendtoaddress(return_address, 1)
tx2_id = self.nodes[1].bumpfee(tx1_id)["txid"]
# Confirm the transaction
self.sync_mempools()
self.nodes[0].generate(1)
self.sync_blocks()
# Create another conflicting transaction using RBF
tx3_id = self.nodes[1].sendtoaddress(return_address, 1)
tx4_id = self.nodes[1].bumpfee(tx3_id)["txid"]
# Abandon transaction, but don't confirm
self.nodes[1].abandontransaction(tx3_id)
# w1_v19: regular wallet, created with v0.19
node_v19.rpc.createwallet(wallet_name="w1_v19")
wallet = node_v19.get_wallet_rpc("w1_v19")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] > 0
# Use addmultisigaddress (see #18075)
address_18075 = wallet.rpc.addmultisigaddress(1, ["0296b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52", "037211a824f55b505228e4c3d5194c1fcfaa15a456abdf37f9b9d97a4040afc073"], "", "legacy")["address"]
assert wallet.getaddressinfo(address_18075)["solvable"]
# w1_v18: regular wallet, created with v0.18
node_v18.rpc.createwallet(wallet_name="w1_v18")
wallet = node_v18.get_wallet_rpc("w1_v18")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] > 0
# w2: wallet with private keys disabled, created on master: update this
# test when default wallets private keys disabled can no longer be
# opened by older versions.
node_master.createwallet(wallet_name="w2", disable_private_keys=True)
wallet = node_master.get_wallet_rpc("w2")
info = wallet.getwalletinfo()
assert info['private_keys_enabled'] == False
assert info['keypoolsize'] == 0
# w2_v19: wallet with private keys disabled, created with v0.19
node_v19.rpc.createwallet(wallet_name="w2_v19", disable_private_keys=True)
wallet = node_v19.get_wallet_rpc("w2_v19")
info = wallet.getwalletinfo()
assert info['private_keys_enabled'] == False
assert info['keypoolsize'] == 0
# w2_v18: wallet with private keys disabled, created with v0.18
node_v18.rpc.createwallet(wallet_name="w2_v18", disable_private_keys=True)
wallet = node_v18.get_wallet_rpc("w2_v18")
info = wallet.getwalletinfo()
assert info['private_keys_enabled'] == False
assert info['keypoolsize'] == 0
# w3: blank wallet, created on master: update this
# test when default blank wallets can no longer be opened by older versions.
node_master.createwallet(wallet_name="w3", blank=True)
wallet = node_master.get_wallet_rpc("w3")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] == 0
# w3_v19: blank wallet, created with v0.19
node_v19.rpc.createwallet(wallet_name="w3_v19", blank=True)
wallet = node_v19.get_wallet_rpc("w3_v19")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] == 0
# w3_v18: blank wallet, created with v0.18
node_v18.rpc.createwallet(wallet_name="w3_v18", blank=True)
wallet = node_v18.get_wallet_rpc("w3_v18")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] == 0
# Copy the wallets to older nodes:
node_master_wallets_dir = os.path.join(node_master.datadir, "regtest/wallets")
node_v19_wallets_dir = os.path.join(node_v19.datadir, "regtest/wallets")
node_v18_wallets_dir = os.path.join(node_v18.datadir, "regtest/wallets")
node_v17_wallets_dir = os.path.join(node_v17.datadir, "regtest/wallets")
node_v16_wallets_dir = os.path.join(node_v16.datadir, "regtest")
node_master.unloadwallet("w1")
node_master.unloadwallet("w2")
node_v19.unloadwallet("w1_v19")
node_v19.unloadwallet("w2_v19")
node_v18.unloadwallet("w1_v18")
node_v18.unloadwallet("w2_v18")
# Copy wallets to v0.16
for wallet in os.listdir(node_master_wallets_dir):
shutil.copytree(
os.path.join(node_master_wallets_dir, wallet),
os.path.join(node_v16_wallets_dir, wallet)
)
# Copy wallets to v0.17
for wallet in os.listdir(node_master_wallets_dir):
shutil.copytree(
os.path.join(node_master_wallets_dir, wallet),
os.path.join(node_v17_wallets_dir, wallet)
)
for wallet in os.listdir(node_v18_wallets_dir):
shutil.copytree(
os.path.join(node_v18_wallets_dir, wallet),
os.path.join(node_v17_wallets_dir, wallet)
)
# Copy wallets to v0.18
for wallet in os.listdir(node_master_wallets_dir):
shutil.copytree(
os.path.join(node_master_wallets_dir, wallet),
os.path.join(node_v18_wallets_dir, wallet)
)
# Copy wallets to v0.19
for wallet in os.listdir(node_master_wallets_dir):
shutil.copytree(
os.path.join(node_master_wallets_dir, wallet),
os.path.join(node_v19_wallets_dir, wallet)
)
if not self.options.descriptors:
# Descriptor wallets break compatibility, only run this test for legacy wallet
# Open the wallets in v0.19
node_v19.loadwallet("w1")
wallet = node_v19.get_wallet_rpc("w1")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] > 0
txs = wallet.listtransactions()
assert_equal(len(txs), 5)
assert_equal(txs[1]["txid"], tx1_id)
assert_equal(txs[2]["walletconflicts"], [tx1_id])
assert_equal(txs[1]["replaced_by_txid"], tx2_id)
assert not(txs[1]["abandoned"])
assert_equal(txs[1]["confirmations"], -1)
assert_equal(txs[2]["blockindex"], 1)
assert txs[3]["abandoned"]
assert_equal(txs[4]["walletconflicts"], [tx3_id])
assert_equal(txs[3]["replaced_by_txid"], tx4_id)
assert not(hasattr(txs[3], "blockindex"))
node_v19.loadwallet("w2")
wallet = node_v19.get_wallet_rpc("w2")
info = wallet.getwalletinfo()
assert info['private_keys_enabled'] == False
assert info['keypoolsize'] == 0
node_v19.loadwallet("w3")
wallet = node_v19.get_wallet_rpc("w3")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] == 0
# Open the wallets in v0.18
node_v18.loadwallet("w1")
wallet = node_v18.get_wallet_rpc("w1")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] > 0
txs = wallet.listtransactions()
assert_equal(len(txs), 5)
assert_equal(txs[1]["txid"], tx1_id)
assert_equal(txs[2]["walletconflicts"], [tx1_id])
assert_equal(txs[1]["replaced_by_txid"], tx2_id)
assert not(txs[1]["abandoned"])
assert_equal(txs[1]["confirmations"], -1)
assert_equal(txs[2]["blockindex"], 1)
assert txs[3]["abandoned"]
assert_equal(txs[4]["walletconflicts"], [tx3_id])
assert_equal(txs[3]["replaced_by_txid"], tx4_id)
assert not(hasattr(txs[3], "blockindex"))
node_v18.loadwallet("w2")
wallet = node_v18.get_wallet_rpc("w2")
info = wallet.getwalletinfo()
assert info['private_keys_enabled'] == False
assert info['keypoolsize'] == 0
node_v18.loadwallet("w3")
wallet = node_v18.get_wallet_rpc("w3")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] == 0
node_v17.loadwallet("w1")
wallet = node_v17.get_wallet_rpc("w1")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] > 0
node_v17.loadwallet("w2")
wallet = node_v17.get_wallet_rpc("w2")
info = wallet.getwalletinfo()
assert info['private_keys_enabled'] == False
assert info['keypoolsize'] == 0
else:
# Descriptor wallets appear to be corrupted wallets to old software
assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v19.loadwallet, "w1")
assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v19.loadwallet, "w2")
assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v19.loadwallet, "w3")
assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v18.loadwallet, "w1")
assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v18.loadwallet, "w2")
assert_raises_rpc_error(-4, "Wallet file verification failed: wallet.dat corrupt, salvage failed", node_v18.loadwallet, "w3")
# Open the wallets in v0.17
node_v17.loadwallet("w1_v18")
wallet = node_v17.get_wallet_rpc("w1_v18")
info = wallet.getwalletinfo()
assert info['private_keys_enabled']
assert info['keypoolsize'] > 0
node_v17.loadwallet("w2_v18")
wallet = node_v17.get_wallet_rpc("w2_v18")
info = wallet.getwalletinfo()
assert info['private_keys_enabled'] == False
assert info['keypoolsize'] == 0
# RPC loadwallet failure causes bitcoind to exit, in addition to the RPC
# call failure, so the following test won't work:
# assert_raises_rpc_error(-4, "Wallet loading failed.", node_v17.loadwallet, 'w3_v18')
# Instead, we stop node and try to launch it with the wallet:
self.stop_node(4)
node_v17.assert_start_raises_init_error(["-wallet=w3_v18"], "Error: Error loading w3_v18: Wallet requires newer version of Bitcoin Core")
if self.options.descriptors:
# Descriptor wallets appear to be corrupted wallets to old software
node_v17.assert_start_raises_init_error(["-wallet=w1"], "Error: wallet.dat corrupt, salvage failed")
node_v17.assert_start_raises_init_error(["-wallet=w2"], "Error: wallet.dat corrupt, salvage failed")
node_v17.assert_start_raises_init_error(["-wallet=w3"], "Error: wallet.dat corrupt, salvage failed")
else:
node_v17.assert_start_raises_init_error(["-wallet=w3"], "Error: Error loading w3: Wallet requires newer version of Bitcoin Core")
self.start_node(4)
if not self.options.descriptors:
# Descriptor wallets break compatibility, only run this test for legacy wallets
# Open most recent wallet in v0.16 (no loadwallet RPC)
self.restart_node(5, extra_args=["-wallet=w2"])
wallet = node_v16.get_wallet_rpc("w2")
info = wallet.getwalletinfo()
assert info['keypoolsize'] == 1
# Create upgrade wallet in v0.16
self.restart_node(-1, extra_args=["-wallet=u1_v16"])
wallet = node_v16.get_wallet_rpc("u1_v16")
v16_addr = wallet.getnewaddress('', "bech32")
v16_info = wallet.validateaddress(v16_addr)
v16_pubkey = v16_info['pubkey']
self.stop_node(-1)
self.log.info("Test wallet upgrade path...")
# u1: regular wallet, created with v0.17
node_v17.rpc.createwallet(wallet_name="u1_v17")
wallet = node_v17.get_wallet_rpc("u1_v17")
address = wallet.getnewaddress("bech32")
v17_info = wallet.getaddressinfo(address)
hdkeypath = v17_info["hdkeypath"]
pubkey = v17_info["pubkey"]
if self.is_bdb_compiled():
# Old wallets are BDB and will only work if BDB is compiled
# Copy the 0.16 wallet to the last Bitcoin Core version and open it:
shutil.copyfile(
os.path.join(node_v16_wallets_dir, "wallets/u1_v16"),
os.path.join(node_master_wallets_dir, "u1_v16")
)
load_res = node_master.loadwallet("u1_v16")
# Make sure this wallet opens without warnings. See https://github.com/bitcoin/bitcoin/pull/19054
assert_equal(load_res['warning'], '')
wallet = node_master.get_wallet_rpc("u1_v16")
info = wallet.getaddressinfo(v16_addr)
descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + v16_pubkey + ")"
assert_equal(info["desc"], descsum_create(descriptor))
# Now copy that same wallet back to 0.16 to make sure no automatic upgrade breaks it
os.remove(os.path.join(node_v16_wallets_dir, "wallets/u1_v16"))
shutil.copyfile(
os.path.join(node_master_wallets_dir, "u1_v16"),
os.path.join(node_v16_wallets_dir, "wallets/u1_v16")
)
self.start_node(-1, extra_args=["-wallet=u1_v16"])
wallet = node_v16.get_wallet_rpc("u1_v16")
info = wallet.validateaddress(v16_addr)
assert_equal(info, v16_info)
# Copy the 0.17 wallet to the last Bitcoin Core version and open it:
node_v17.unloadwallet("u1_v17")
shutil.copytree(
os.path.join(node_v17_wallets_dir, "u1_v17"),
os.path.join(node_master_wallets_dir, "u1_v17")
)
node_master.loadwallet("u1_v17")
wallet = node_master.get_wallet_rpc("u1_v17")
info = wallet.getaddressinfo(address)
descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + pubkey + ")"
assert_equal(info["desc"], descsum_create(descriptor))
# Now copy that same wallet back to 0.17 to make sure no automatic upgrade breaks it
node_master.unloadwallet("u1_v17")
shutil.rmtree(os.path.join(node_v17_wallets_dir, "u1_v17"))
shutil.copytree(
os.path.join(node_master_wallets_dir, "u1_v17"),
os.path.join(node_v17_wallets_dir, "u1_v17")
)
node_v17.loadwallet("u1_v17")
wallet = node_v17.get_wallet_rpc("u1_v17")
info = wallet.getaddressinfo(address)
assert_equal(info, v17_info)
# Copy the 0.19 wallet to the last Bitcoin Core version and open it:
shutil.copytree(
os.path.join(node_v19_wallets_dir, "w1_v19"),
os.path.join(node_master_wallets_dir, "w1_v19")
)
node_master.loadwallet("w1_v19")
wallet = node_master.get_wallet_rpc("w1_v19")
assert wallet.getaddressinfo(address_18075)["solvable"]
# Now copy that same wallet back to 0.19 to make sure no automatic upgrade breaks it
node_master.unloadwallet("w1_v19")
shutil.rmtree(os.path.join(node_v19_wallets_dir, "w1_v19"))
shutil.copytree(
os.path.join(node_master_wallets_dir, "w1_v19"),
os.path.join(node_v19_wallets_dir, "w1_v19")
)
node_v19.loadwallet("w1_v19")
wallet = node_v19.get_wallet_rpc("w1_v19")
assert wallet.getaddressinfo(address_18075)["solvable"]
if __name__ == '__main__':
BackwardsCompatibilityTest().main()
| 45.90676 | 223 | 0.636387 |
75b3bdee35d0357933a73cae0df317855d835228 | 2,313 | py | Python | scripts/automation/trex_control_plane/interactive/trex/emu/emu_plugins/emu_plugin_cdp.py | GabrielGanne/trex-core | 688a0fe0adb890964691473723d70ffa98e00dd3 | [
"Apache-2.0"
] | 956 | 2015-06-24T15:04:55.000Z | 2022-03-30T06:25:04.000Z | scripts/automation/trex_control_plane/interactive/trex/emu/emu_plugins/emu_plugin_cdp.py | hjat2005/trex-core | 400f03c86c844a0096dff3f6b13e58a808aaefff | [
"Apache-2.0"
] | 782 | 2015-09-20T15:19:00.000Z | 2022-03-31T23:52:05.000Z | scripts/automation/trex_control_plane/interactive/trex/emu/emu_plugins/emu_plugin_cdp.py | hjat2005/trex-core | 400f03c86c844a0096dff3f6b13e58a808aaefff | [
"Apache-2.0"
] | 429 | 2015-06-27T19:34:21.000Z | 2022-03-23T11:02:51.000Z | from trex.emu.api import *
from trex.emu.emu_plugins.emu_plugin_base import *
import trex.utils.parsing_opts as parsing_opts
import json
class CDPPlugin(EMUPluginBase):
'''Defines CDP plugin '''
plugin_name = 'CDP'
# init jsons example for SDK
INIT_JSON_NS = {'cdp': {}}
"""
:parameters:
Empty.
"""
INIT_JSON_CLIENT = {'cdp': {}}
"""
:parameters:
timer: uint32
            time in seconds between packets
ver : uint32
1 or 2
cs : uint16
            replaces the checksum (e.g. to deliberately generate a bad checksum); when zero, the correct checksum is calculated
options: dict
the options to add to cdp
:options:
raw: array of byte
generic options array for cdp
[60,8,77,83,70,84,32,53,46,48]
"""
def __init__(self, emu_client):
super(CDPPlugin, self).__init__(emu_client, client_cnt_rpc_cmd='cdp_client_cnt')
# API methods
@client_api('getter', True)
@update_docstring(EMUPluginBase._get_client_counters.__doc__.replace("$PLUGIN_NAME", plugin_name))
def get_counters(self, c_key, cnt_filter=None, zero=True, verbose=True):
return self._get_client_counters(c_key, cnt_filter, zero, verbose)
@client_api('command', True)
@update_docstring(EMUPluginBase._clear_client_counters.__doc__.replace("$PLUGIN_NAME", plugin_name))
def clear_counters(self, c_key):
return self._clear_client_counters(c_key)
# Plugins methods
@plugin_api('cdp_show_counters', 'emu')
def cdp_show_counters_line(self, line):
'''Show cdp counters (per client).\n'''
parser = parsing_opts.gen_parser(self,
"show_counters_cdp",
self.cdp_show_counters_line.__doc__,
parsing_opts.EMU_SHOW_CNT_GROUP,
parsing_opts.EMU_NS_GROUP,
parsing_opts.EMU_CLIENT_GROUP,
parsing_opts.EMU_DUMPS_OPT
)
opts = parser.parse_args(line.split())
self.emu_c._base_show_counters(self.client_data_cnt, opts, req_ns = True)
return True
| 31.684932 | 104 | 0.588413 |
4ef47e6a9333fbaf13ac074dc96bf892919d1d8c | 542 | py | Python | 0155.min_stack/solution.py | WZMJ/Algorithms | 07f648541d38e24df38bda469665c12df6a50637 | [
"MIT"
] | 5 | 2020-05-23T02:18:26.000Z | 2021-07-05T05:36:01.000Z | 0155.min_stack/solution.py | WZMJ/Algorithms | 07f648541d38e24df38bda469665c12df6a50637 | [
"MIT"
] | 1 | 2020-06-10T07:17:24.000Z | 2020-07-20T02:21:24.000Z | 0155.min_stack/solution.py | WZMJ/Algorithms | 07f648541d38e24df38bda469665c12df6a50637 | [
"MIT"
] | 1 | 2019-04-23T13:01:50.000Z | 2019-04-23T13:01:50.000Z | class MinStack:
def __init__(self):
self.stack = []
def push(self, x: int) -> None:
""" add a min value and x """
if self.stack:
self.stack.append(min(self.stack[-2], x))
else:
self.stack.append(x)
self.stack.append(x)
def pop(self) -> None:
self.stack.pop()
self.stack.pop()
def top(self) -> int:
if self.stack:
return self.stack[-1]
def get_min(self) -> int:
if self.stack:
return self.stack[-2]
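if __name__ == "__main__":
    # Small self-check (not part of the original solution). The stack stores
    # (running minimum, value) pairs, so top() reads [-1] and get_min() reads [-2].
    s = MinStack()
    s.push(-2)
    s.push(0)
    s.push(-3)
    assert s.get_min() == -3
    s.pop()
    assert s.top() == 0
    assert s.get_min() == -2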
| 22.583333 | 53 | 0.498155 |
809a7b38fc4f2445899579223bd4667e107e0e30 | 972 | py | Python | src/djanban/apps/repositories/migrations/0008_githubpublicrepository.py | diegojromerolopez/djanban | 6451688d49cf235d03c604b19a6a8480b33eed87 | [
"MIT"
] | 33 | 2017-06-14T18:04:25.000Z | 2021-06-15T07:07:56.000Z | src/djanban/apps/repositories/migrations/0008_githubpublicrepository.py | diegojromerolopez/djanban | 6451688d49cf235d03c604b19a6a8480b33eed87 | [
"MIT"
] | 1 | 2017-05-10T08:45:55.000Z | 2017-05-10T08:45:55.000Z | src/djanban/apps/repositories/migrations/0008_githubpublicrepository.py | diegojromerolopez/djanban | 6451688d49cf235d03c604b19a6a8480b33eed87 | [
"MIT"
] | 8 | 2017-08-27T11:14:25.000Z | 2021-03-03T12:11:16.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-23 18:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('repositories', '0007_commit_assessment_datetime'),
]
operations = [
migrations.CreateModel(
name='GitHubPublicRepository',
fields=[
('repository_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='repositories.Repository')),
('username', models.CharField(max_length=128, verbose_name='Username')),
],
options={
'verbose_name': 'GitHub public repository',
'verbose_name_plural': 'GitHub public repositories',
},
bases=('repositories.repository',),
),
]
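# For reference (hand-written sketch, not generated by Django): the model this
# migration creates corresponds roughly to a multi-table-inheritance
# declaration such as
#
#   class GitHubPublicRepository(Repository):
#       username = models.CharField(max_length=128, verbose_name='Username')
#
# where the explicit repository_ptr OneToOneField above acts as the parent link.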
| 33.517241 | 204 | 0.632716 |
1d6a85c72d7fda34bec0b32b48e4e16a4222d246 | 2,182 | py | Python | Lib/site-packages/fixtures/testcase.py | inging44/python3 | fcd8d9d2ee54b46b757ecf34f284b4e60a43097a | [
"bzip2-1.0.6"
] | 33 | 2015-06-22T09:27:08.000Z | 2022-01-29T11:03:03.000Z | Lib/site-packages/fixtures/testcase.py | inging44/python3 | fcd8d9d2ee54b46b757ecf34f284b4e60a43097a | [
"bzip2-1.0.6"
] | 46 | 2015-03-11T23:18:46.000Z | 2022-02-22T16:42:53.000Z | Lib/site-packages/fixtures/testcase.py | inging44/python3 | fcd8d9d2ee54b46b757ecf34f284b4e60a43097a | [
"bzip2-1.0.6"
] | 27 | 2015-02-09T15:04:38.000Z | 2022-02-05T23:29:38.000Z | # fixtures: Fixtures with cleanups for testing and convenience.
#
# Copyright (c) 2010, Robert Collins <robertc@robertcollins.net>
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
__all__ = [
'TestWithFixtures',
]
import unittest
from fixtures.fixture import gather_details
class TestWithFixtures(unittest.TestCase):
"""A TestCase with a helper function to use fixtures.
Normally used as a mix-in class to add useFixture.
Note that test classes such as testtools.TestCase which already have a
``useFixture`` method do not need this mixed in.
"""
def useFixture(self, fixture):
"""Use fixture in a test case.
The fixture will be setUp, and self.addCleanup(fixture.cleanUp) called.
:param fixture: The fixture to use.
:return: The fixture, after setting it up and scheduling a cleanup for
it.
"""
use_details = (
gather_details is not None and
getattr(self, "addDetail", None) is not None)
try:
fixture.setUp()
except:
if use_details:
# Capture the details now, in case the fixture goes away.
gather_details(fixture.getDetails(), self.getDetails())
raise
else:
self.addCleanup(fixture.cleanUp)
if use_details:
# Capture the details from the fixture during test teardown;
# this will evaluate the details before tearing down the
# fixture.
self.addCleanup(gather_details, fixture, self)
return fixture
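# Illustrative usage (not part of this module). Assuming the stock
# fixtures.TempDir fixture is available:
#
#   import os
#   import fixtures
#
#   class TempDirTest(TestWithFixtures):
#       def test_creates_dir(self):
#           tempdir = self.useFixture(fixtures.TempDir())
#           self.assertTrue(os.path.isdir(tempdir.path))
#
# The fixture is set up immediately and its cleanUp is scheduled through
# addCleanup, so the temporary directory is removed once the test completes.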
| 35.770492 | 79 | 0.664528 |
3c79d3398e96edd5c49b9dfb2ca080487bdc28d3 | 58,786 | py | Python | vspk/v4_0/nusubnet.py | cldelcourt/vspk-python | cdea810cd220e6ddc131407735941b9a26b2edda | [
"BSD-3-Clause"
] | null | null | null | vspk/v4_0/nusubnet.py | cldelcourt/vspk-python | cdea810cd220e6ddc131407735941b9a26b2edda | [
"BSD-3-Clause"
] | null | null | null | vspk/v4_0/nusubnet.py | cldelcourt/vspk-python | cdea810cd220e6ddc131407735941b9a26b2edda | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUAddressRangesFetcher
from .fetchers import NUDHCPOptionsFetcher
from .fetchers import NUEventLogsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUIKEGatewayConnectionsFetcher
from .fetchers import NUIPReservationsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUQOSsFetcher
from .fetchers import NUVMResyncsFetcher
from .fetchers import NUStatisticsFetcher
from .fetchers import NUStatisticsPoliciesFetcher
from .fetchers import NUTCAsFetcher
from .fetchers import NUVirtualIPsFetcher
from .fetchers import NUVMsFetcher
from .fetchers import NUVMInterfacesFetcher
from .fetchers import NUVPortsFetcher
from bambou import NURESTObject
class NUSubnet(NURESTObject):
""" Represents a Subnet in the VSD
Notes:
This is the definition of a subnet associated with a Zone.
"""
__rest_name__ = "subnet"
__resource_name__ = "subnets"
## Constants
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_APPD_FLOW_FORWARDING_POLICY = "APPD_FLOW_FORWARDING_POLICY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_BGP_NEIGHBOR_MED_RESPONSE = "BGP_NEIGHBOR_MED_RESPONSE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_KEYSERVER_MEMBER = "KEYSERVER_MEMBER"
CONST_PAT_ENABLED_INHERITED = "INHERITED"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GATEWAY_SERVICE_CONFIG = "GATEWAY_SERVICE_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VSD_COMPONENT = "VSD_COMPONENT"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_INGRESS_ACL = "INGRESS_ACL"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_EVPN_BGP_COMMUNITY_TAG_SEQ_NO = "EVPN_BGP_COMMUNITY_TAG_SEQ_NO"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SYSTEM_CONFIG_RESP = "SYSTEM_CONFIG_RESP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_INFRASTRUCTURE_PORT_PROFILE = "INFRASTRUCTURE_PORT_PROFILE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_APPLICATION = "APPLICATION"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ENTERPRISE_CONFIG = "ENTERPRISE_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VIRTUAL_MACHINE = "VIRTUAL_MACHINE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_EGRESS_QOS_PRIMITIVE = "EGRESS_QOS_PRIMITIVE"
CONST_UNDERLAY_ENABLED_ENABLED = "ENABLED"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SYSTEM_MONITORING = "SYSTEM_MONITORING"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_POLICY_GROUP = "POLICY_GROUP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_DC_CONFIG = "DC_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NSPORT_STATIC_CONFIG = "NSPORT_STATIC_CONFIG"
CONST_UNDERLAY_ENABLED_INHERITED = "INHERITED"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ENTERPRISE_PROFILE = "ENTERPRISE_PROFILE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_FLOATING_IP_ACL_TEMPLATE_ENTRY = "FLOATING_IP_ACL_TEMPLATE_ENTRY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GATEWAY_SECURITY_PROFILE_RESPONSE = "GATEWAY_SECURITY_PROFILE_RESPONSE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VMWARE_VCENTER_CLUSTER = "VMWARE_VCENTER_CLUSTER"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_CERTIFICATE = "CERTIFICATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ROUTING_POL_MED_RESPONSE = "ROUTING_POL_MED_RESPONSE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_IKE_GATEWAY_PROFILE = "IKE_GATEWAY_PROFILE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NETWORK_POLICY_GROUP = "NETWORK_POLICY_GROUP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_STATS_COLLECTOR = "STATS_COLLECTOR"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ENTERPRISE_NETWORK = "ENTERPRISE_NETWORK"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_IKE_PSK = "IKE_PSK"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VPORT_GATEWAY_RESPONSE = "VPORT_GATEWAY_RESPONSE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GATEWAY_SECURED_DATA = "GATEWAY_SECURED_DATA"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_INGRESS_ACL_TEMPLATE_ENTRY = "INGRESS_ACL_TEMPLATE_ENTRY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ACLENTRY_LOCATION = "ACLENTRY_LOCATION"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_RTRD_ENTITY = "RTRD_ENTITY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ZONE = "ZONE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_DSCP_FORWARDING_CLASS_MAPPING = "DSCP_FORWARDING_CLASS_MAPPING"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VPORT_MIRROR = "VPORT_MIRROR"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_DOMAIN_FLOATING_IP_ACL_TEMPLATE = "DOMAIN_FLOATING_IP_ACL_TEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_MC_CHANNEL_MAP = "MC_CHANNEL_MAP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ENTERPRISE_SECURED_DATA = "ENTERPRISE_SECURED_DATA"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_PORT_TEMPLATE = "PORT_TEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SERVICE_VRF_SEQUENCENO = "SERVICE_VRF_SEQUENCENO"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_FLOATINGIP_ACL = "FLOATINGIP_ACL"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_BRIDGEINTERFACE = "BRIDGEINTERFACE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_POLICING_POLICY = "POLICING_POLICY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GATEWAY_SECURITY_RESPONSE = "GATEWAY_SECURITY_RESPONSE"
CONST_PAT_ENABLED_ENABLED = "ENABLED"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_IKE_GATEWAY = "IKE_GATEWAY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_INGRESS_ADV_FWD_TEMPLATE = "INGRESS_ADV_FWD_TEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GROUP = "GROUP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_BGP_DAMPENING_MED_RESPONSE = "BGP_DAMPENING_MED_RESPONSE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_KEYSERVER_MONITOR = "KEYSERVER_MONITOR"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NSGATEWAY_TEMPLATE = "NSGATEWAY_TEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GATEWAY_CONFIG_RESP = "GATEWAY_CONFIG_RESP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_MC_RANGE = "MC_RANGE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_BACK_HAUL_SERVICE_RESP = "BACK_HAUL_SERVICE_RESP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GATEWAY_SECURITY_PROFILE_REQUEST = "GATEWAY_SECURITY_PROFILE_REQUEST"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SUBNET_POOL_ENTRY = "SUBNET_POOL_ENTRY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_IKE_GATEWAY_CONNECTION = "IKE_GATEWAY_CONNECTION"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VM_DESCRIPTION = "VM_DESCRIPTION"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_APPD_TIER = "APPD_TIER"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VM_INTERFACE = "VM_INTERFACE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_EGRESS_QOS_QUEUE_MR = "EGRESS_QOS_QUEUE_MR"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VLAN = "VLAN"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ADDRESS_RANGE = "ADDRESS_RANGE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_EGRESS_ACL_TEMPLATE = "EGRESS_ACL_TEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_PORT_PUSH = "PORT_PUSH"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_EGRESS_QOS_MR = "EGRESS_QOS_MR"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GEO_VM_RES = "GEO_VM_RES"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_DISKSTATS = "DISKSTATS"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VSP = "VSP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NEXT_HOP_RESP = "NEXT_HOP_RESP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_DOMAIN = "DOMAIN"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GATEWAY_TEMPLATE = "GATEWAY_TEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_INGRESS_ADV_FWD = "INGRESS_ADV_FWD"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VMWARE_VCENTER_HYPERVISOR = "VMWARE_VCENTER_HYPERVISOR"
CONST_IP_TYPE_IPV6 = "IPV6"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ENTERPRISE_PERMISSION = "ENTERPRISE_PERMISSION"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VSC = "VSC"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VMWARE_VCENTER = "VMWARE_VCENTER"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GATEWAY_SECURITY = "GATEWAY_SECURITY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SYSTEM_CONFIG_REQ = "SYSTEM_CONFIG_REQ"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_MULTI_NIC_VPORT = "MULTI_NIC_VPORT"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VMWARE_VRS_ADDRESS_RANGE = "VMWARE_VRS_ADDRESS_RANGE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VSG_REDUNDANT_PORT = "VSG_REDUNDANT_PORT"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_INFRASTRUCTURE_CONFIG = "INFRASTRUCTURE_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_LIBVIRT_INTERFACE = "LIBVIRT_INTERFACE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ENTITY_METADATA_BINDING = "ENTITY_METADATA_BINDING"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_REDUNDANT_GW_GRP = "REDUNDANT_GW_GRP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_L2DOMAIN_TEMPLATE = "L2DOMAIN_TEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_METADATA = "METADATA"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_POLICY_GROUP_TEMPLATE = "POLICY_GROUP_TEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_DOMAIN_CONFIG_RESP = "DOMAIN_CONFIG_RESP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VMWARE_VCENTER_VRS_BASE_CONFIG = "VMWARE_VCENTER_VRS_BASE_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_INGRESS_EXT_SERVICE_TEMPLATE_ENTRY = "INGRESS_EXT_SERVICE_TEMPLATE_ENTRY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_USER = "USER"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_CUSTOMER_VRF_SEQUENCENO = "CUSTOMER_VRF_SEQUENCENO"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VLAN_CONFIG_RESPONSE = "VLAN_CONFIG_RESPONSE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_BGPPEER = "BGPPEER"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_RTRD_SEQUENCENO = "RTRD_SEQUENCENO"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_IP_BINDING = "IP_BINDING"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_HSC = "HSC"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_IKE_CERTIFICATE = "IKE_CERTIFICATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_APPD_SERVICE = "APPD_SERVICE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VIRTUAL_MACHINE_REPORT = "VIRTUAL_MACHINE_REPORT"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_RATE_LIMITER = "RATE_LIMITER"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_APPD_EXTERNAL_APP_SERVICE = "APPD_EXTERNAL_APP_SERVICE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ENTERPRISE = "ENTERPRISE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_BGP_PROFILE_MED_RESPONSE = "BGP_PROFILE_MED_RESPONSE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_INGRESS_ACL_TEMPLATE = "INGRESS_ACL_TEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SITE_RES = "SITE_RES"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_KEYSERVER_NOTIFICATION = "KEYSERVER_NOTIFICATION"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_DOMAIN_FLOATING_IP_ACL_TEMPLATE_ENTRY = "DOMAIN_FLOATING_IP_ACL_TEMPLATE_ENTRY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ENTERPRISE_CONFIG_RESP = "ENTERPRISE_CONFIG_RESP"
CONST_MULTICAST_DISABLED = "DISABLED"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_EVPN_BGP_COMMUNITY_TAG_ENTRY = "EVPN_BGP_COMMUNITY_TAG_ENTRY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_FLOATING_IP_ACL_TEMPLATE = "FLOATING_IP_ACL_TEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_AUTO_DISC_GATEWAY = "AUTO_DISC_GATEWAY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_PUBLIC_NETWORK = "PUBLIC_NETWORK"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_LDAP_CONFIG = "LDAP_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_EXPORTIMPORT = "EXPORTIMPORT"
CONST_ENCRYPTION_ENABLED = "ENABLED"
CONST_UNDERLAY_ENABLED_DISABLED = "DISABLED"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_MIRROR_DESTINATION = "MIRROR_DESTINATION"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VPORT = "VPORT"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_INGRESS_ADV_FWD_TEMPLATE_ENTRY = "INGRESS_ADV_FWD_TEMPLATE_ENTRY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VMWARE_VCENTER_VRS_CONFIG = "VMWARE_VCENTER_VRS_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SUBNET_TEMPLATE = "SUBNET_TEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_INFRASTRUCTURE_VSC_PROFILE = "INFRASTRUCTURE_VSC_PROFILE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VMWARE_VCENTER_DATACENTER = "VMWARE_VCENTER_DATACENTER"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NSPORT_TEMPLATE = "NSPORT_TEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GEO_VM_REQ = "GEO_VM_REQ"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VPORT_MEDIATION_REQUEST = "VPORT_MEDIATION_REQUEST"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_UPLINK_RD = "UPLINK_RD"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NSG_NOTIFICATION = "NSG_NOTIFICATION"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_DHCP_OPTION = "DHCP_OPTION"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_INGRESS_EXT_SERVICE_TEMPLATE = "INGRESS_EXT_SERVICE_TEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VPRN_LABEL_SEQUENCENO = "VPRN_LABEL_SEQUENCENO"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_IKE_GATEWAY_CONFIG = "IKE_GATEWAY_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NSGATEWAY = "NSGATEWAY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_DHCP_ALLOC_MESSAGE = "DHCP_ALLOC_MESSAGE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SUBNET_ENTRY = "SUBNET_ENTRY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_DSCP_FORWARDING_CLASS_TABLE = "DSCP_FORWARDING_CLASS_TABLE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_KEYSERVER_MONITOR_SEK = "KEYSERVER_MONITOR_SEK"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_PORT_VLAN_CONFIG_RESPONSE = "PORT_VLAN_CONFIG_RESPONSE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_EVENT_LOG = "EVENT_LOG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NETWORK_MACRO_GROUP = "NETWORK_MACRO_GROUP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_EXTERNAL_SERVICE = "EXTERNAL_SERVICE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VMWARE_VCENTER_EAM_CONFIG = "VMWARE_VCENTER_EAM_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ZONE_TEMPLATE = "ZONE_TEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VIRTUAL_IP = "VIRTUAL_IP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SYSTEM_CONFIG = "SYSTEM_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_STATIC_ROUTE_RESP = "STATIC_ROUTE_RESP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_HEALTH_REQ = "HEALTH_REQ"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SITE_REQ = "SITE_REQ"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_EGRESS_ACL_ENTRY = "EGRESS_ACL_ENTRY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_LICENSE = "LICENSE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SHARED_RESOURCE = "SHARED_RESOURCE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VRS = "VRS"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_FLOATINGIP_ACL_ENTRY = "FLOATINGIP_ACL_ENTRY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_MC_LIST = "MC_LIST"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GATEWAY_SECURITY_REQUEST = "GATEWAY_SECURITY_REQUEST"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NSPORT = "NSPORT"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_FLOATINGIP = "FLOATINGIP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_INGRESS_ADV_FWD_ENTRY = "INGRESS_ADV_FWD_ENTRY"
CONST_MULTICAST_INHERITED = "INHERITED"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VSD = "VSD"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_DOMAIN_TEMPLATE = "DOMAIN_TEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_APPD_APPLICATION = "APPD_APPLICATION"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_L2DOMAIN_SHARED = "L2DOMAIN_SHARED"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_MONITORING_PORT = "MONITORING_PORT"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GEO_VM_EVENT = "GEO_VM_EVENT"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_INGRESS_EXT_SERVICE_ENTRY = "INGRESS_EXT_SERVICE_ENTRY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_CLOUD_MGMT_SYSTEM = "CLOUD_MGMT_SYSTEM"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_PATNATPOOL = "PATNATPOOL"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GATEWAY_VPORT_CONFIG = "GATEWAY_VPORT_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NS_REDUNDANT_PORT = "NS_REDUNDANT_PORT"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_STATSSERVER = "STATSSERVER"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NODE_EXECUTION_ERROR = "NODE_EXECUTION_ERROR"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_INFRASTRUCTURE_GATEWAY_PROFILE = "INFRASTRUCTURE_GATEWAY_PROFILE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SUBNET = "SUBNET"
CONST_IP_TYPE_IPV4 = "IPV4"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_HOSTINTERFACE = "HOSTINTERFACE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_KEYSERVER_MONITOR_SEED = "KEYSERVER_MONITOR_SEED"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_PORT = "PORT"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_EGRESS_ACL = "EGRESS_ACL"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_BOOTSTRAP = "BOOTSTRAP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SERVICE_GATEWAY_RESPONSE = "SERVICE_GATEWAY_RESPONSE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NSREDUNDANT_GW_GRP = "NSREDUNDANT_GW_GRP"
CONST_ENCRYPTION_INHERITED = "INHERITED"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VPORTTAG = "VPORTTAG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ROUTING_POLICY = "ROUTING_POLICY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_RD_SEQUENCENO = "RD_SEQUENCENO"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GROUPKEY_ENCRYPTION_PROFILE = "GROUPKEY_ENCRYPTION_PROFILE"
CONST_PAT_ENABLED_DISABLED = "DISABLED"
CONST_ENCRYPTION_DISABLED = "DISABLED"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SUBNET_MAC_ENTRY = "SUBNET_MAC_ENTRY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_DHCP_CONFIG_RESP = "DHCP_CONFIG_RESP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_IKE_SUBNET = "IKE_SUBNET"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_L2DOMAIN = "L2DOMAIN"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_APPD_FLOW_SECURITY_POLICY = "APPD_FLOW_SECURITY_POLICY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_JOB = "JOB"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GATEWAY_SERVICE_CONFIG_RESP = "GATEWAY_SERVICE_CONFIG_RESP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_CHILD_ENTITY_POLICY_CHANGE = "CHILD_ENTITY_POLICY_CHANGE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SITE = "SITE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_BGP_NEIGHBOR = "BGP_NEIGHBOR"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NETWORK_LAYOUT = "NETWORK_LAYOUT"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ADDRESS_RANGE_STATE = "ADDRESS_RANGE_STATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NETWORK_ELEMENT = "NETWORK_ELEMENT"
CONST_MAINTENANCE_MODE_DISABLED = "DISABLED"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_PERMITTED_ACTION = "PERMITTED_ACTION"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GATEWAY_CONFIG = "GATEWAY_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_STATS_POLICY = "STATS_POLICY"
CONST_MAINTENANCE_MODE_ENABLED_INHERITED = "ENABLED_INHERITED"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SHAPING_POLICY = "SHAPING_POLICY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_SERVICES_GATEWAY_RESPONSE = "SERVICES_GATEWAY_RESPONSE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_INGRESS_ACL_ENTRY = "INGRESS_ACL_ENTRY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_BOOTSTRAP_ACTIVATION = "BOOTSTRAP_ACTIVATION"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_STATS_TCA = "STATS_TCA"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_IKE_ENCRYPTION_PROFILE = "IKE_ENCRYPTION_PROFILE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_APPD_FLOW = "APPD_FLOW"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VPORT_TAG_BASE = "VPORT_TAG_BASE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_WAN_SERVICE = "WAN_SERVICE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ALARM = "ALARM"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NSGATEWAY_CONFIG = "NSGATEWAY_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_PERMISSION = "PERMISSION"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VMWARE_RELOAD_CONFIG = "VMWARE_RELOAD_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GATEWAY_VPORT_CONFIG_RESP = "GATEWAY_VPORT_CONFIG_RESP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NATMAPENTRY = "NATMAPENTRY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_INGRESS_EXT_SERVICE = "INGRESS_EXT_SERVICE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_RESYNC = "RESYNC"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_DOMAIN_CONFIG = "DOMAIN_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_NSPORT_VLAN_CONFIG = "NSPORT_VLAN_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ENDPOINT = "ENDPOINT"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VPN_CONNECT = "VPN_CONNECT"
CONST_MULTICAST_ENABLED = "ENABLED"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_LOCATION = "LOCATION"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_BGP_PROFILE = "BGP_PROFILE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_PORT_MR = "PORT_MR"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_PORT_VLAN_CONFIG = "PORT_VLAN_CONFIG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_EGRESS_ACL_TEMPLATE_ENTRY = "EGRESS_ACL_TEMPLATE_ENTRY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_KEYSERVER_MONITOR_ENCRYPTED_SEED = "KEYSERVER_MONITOR_ENCRYPTED_SEED"
CONST_MAINTENANCE_MODE_ENABLED = "ENABLED"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_STATISTICS = "STATISTICS"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_GATEWAY = "GATEWAY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_PATCONFIG_CONFIG_RESP = "PATCONFIG_CONFIG_RESP"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VNID_SEQUENCENO = "VNID_SEQUENCENO"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ENTERPRISE_SECURITY = "ENTERPRISE_SECURITY"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_ESI_SEQUENCENO = "ESI_SEQUENCENO"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VLAN_TEMPLATE = "VLAN_TEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_METADATA_TAG = "METADATA_TAG"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_UNSUPPORTED = "UNSUPPORTED"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_QOS_PRIMITIVE = "QOS_PRIMITIVE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_POLICY_DECISION = "POLICY_DECISION"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_VPORTTAGTEMPLATE = "VPORTTAGTEMPLATE"
CONST_ASSOCIATED_APPLICATION_OBJECT_TYPE_STATIC_ROUTE = "STATIC_ROUTE"
def __init__(self, **kwargs):
""" Initializes a Subnet instance
Notes:
                You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> subnet = NUSubnet(id=u'xxxx-xxx-xxx-xxx', name=u'Subnet')
>>> subnet = NUSubnet(data=my_dict)
"""
super(NUSubnet, self).__init__()
# Read/Write Attributes
self._ip_type = None
self._pat_enabled = None
self._address = None
self._associated_application_id = None
self._associated_application_object_id = None
self._associated_application_object_type = None
self._associated_multicast_channel_map_id = None
self._associated_shared_network_resource_id = None
self._description = None
self._encryption = None
self._entity_scope = None
self._external_id = None
self._gateway = None
self._gateway_mac_address = None
self._last_updated_by = None
self._maintenance_mode = None
self._multicast = None
self._name = None
self._netmask = None
self._policy_group_id = None
self._proxy_arp = None
self._public = None
self._route_distinguisher = None
self._route_target = None
self._service_id = None
self._split_subnet = None
self._template_id = None
self._underlay = None
self._underlay_enabled = None
self._vn_id = None
self.expose_attribute(local_name="ip_type", remote_name="IPType", attribute_type=str, is_required=False, is_unique=False, choices=[u'IPV4', u'IPV6'])
self.expose_attribute(local_name="pat_enabled", remote_name="PATEnabled", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="address", remote_name="address", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_application_id", remote_name="associatedApplicationID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_application_object_id", remote_name="associatedApplicationObjectID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_application_object_type", remote_name="associatedApplicationObjectType", attribute_type=str, is_required=False, is_unique=False, choices=[u'ACLENTRY_LOCATION', u'ADDRESS_RANGE', u'ADDRESS_RANGE_STATE', u'ALARM', u'APPD_APPLICATION', u'APPD_EXTERNAL_APP_SERVICE', u'APPD_FLOW', u'APPD_FLOW_FORWARDING_POLICY', u'APPD_FLOW_SECURITY_POLICY', u'APPD_SERVICE', u'APPD_TIER', u'APPLICATION', u'AUTO_DISC_GATEWAY', u'BACK_HAUL_SERVICE_RESP', u'BGP_DAMPENING_MED_RESPONSE', u'BGP_NEIGHBOR', u'BGP_NEIGHBOR_MED_RESPONSE', u'BGP_PROFILE', u'BGP_PROFILE_MED_RESPONSE', u'BGPPEER', u'BOOTSTRAP', u'BOOTSTRAP_ACTIVATION', u'BRIDGEINTERFACE', u'CERTIFICATE', u'CHILD_ENTITY_POLICY_CHANGE', u'CLOUD_MGMT_SYSTEM', u'CUSTOMER_VRF_SEQUENCENO', u'DC_CONFIG', u'DHCP_ALLOC_MESSAGE', u'DHCP_CONFIG_RESP', u'DHCP_OPTION', u'DISKSTATS', u'DOMAIN', u'DOMAIN_CONFIG', u'DOMAIN_CONFIG_RESP', u'DOMAIN_FLOATING_IP_ACL_TEMPLATE', u'DOMAIN_FLOATING_IP_ACL_TEMPLATE_ENTRY', u'DOMAIN_TEMPLATE', u'DSCP_FORWARDING_CLASS_MAPPING', u'DSCP_FORWARDING_CLASS_TABLE', u'EGRESS_ACL', u'EGRESS_ACL_ENTRY', u'EGRESS_ACL_TEMPLATE', u'EGRESS_ACL_TEMPLATE_ENTRY', u'EGRESS_QOS_MR', u'EGRESS_QOS_PRIMITIVE', u'EGRESS_QOS_QUEUE_MR', u'ENDPOINT', u'ENTERPRISE', u'ENTERPRISE_CONFIG', u'ENTERPRISE_CONFIG_RESP', u'ENTERPRISE_NETWORK', u'ENTERPRISE_PERMISSION', u'ENTERPRISE_PROFILE', u'ENTERPRISE_SECURED_DATA', u'ENTERPRISE_SECURITY', u'ENTITY_METADATA_BINDING', u'ESI_SEQUENCENO', u'EVENT_LOG', u'EVPN_BGP_COMMUNITY_TAG_ENTRY', u'EVPN_BGP_COMMUNITY_TAG_SEQ_NO', u'EXPORTIMPORT', u'EXTERNAL_SERVICE', u'FLOATING_IP_ACL_TEMPLATE', u'FLOATING_IP_ACL_TEMPLATE_ENTRY', u'FLOATINGIP', u'FLOATINGIP_ACL', u'FLOATINGIP_ACL_ENTRY', u'GATEWAY', u'GATEWAY_CONFIG', u'GATEWAY_CONFIG_RESP', u'GATEWAY_SECURED_DATA', u'GATEWAY_SECURITY', u'GATEWAY_SECURITY_PROFILE_REQUEST', u'GATEWAY_SECURITY_PROFILE_RESPONSE', u'GATEWAY_SECURITY_REQUEST', u'GATEWAY_SECURITY_RESPONSE', u'GATEWAY_SERVICE_CONFIG', u'GATEWAY_SERVICE_CONFIG_RESP', u'GATEWAY_TEMPLATE', u'GATEWAY_VPORT_CONFIG', u'GATEWAY_VPORT_CONFIG_RESP', u'GEO_VM_EVENT', u'GEO_VM_REQ', u'GEO_VM_RES', u'GROUP', u'GROUPKEY_ENCRYPTION_PROFILE', u'HEALTH_REQ', u'HOSTINTERFACE', u'HSC', u'IKE_CERTIFICATE', u'IKE_ENCRYPTION_PROFILE', u'IKE_GATEWAY', u'IKE_GATEWAY_CONFIG', u'IKE_GATEWAY_CONNECTION', u'IKE_GATEWAY_PROFILE', u'IKE_PSK', u'IKE_SUBNET', u'INFRASTRUCTURE_CONFIG', u'INFRASTRUCTURE_GATEWAY_PROFILE', u'INFRASTRUCTURE_PORT_PROFILE', u'INFRASTRUCTURE_VSC_PROFILE', u'INGRESS_ACL', u'INGRESS_ACL_ENTRY', u'INGRESS_ACL_TEMPLATE', u'INGRESS_ACL_TEMPLATE_ENTRY', u'INGRESS_ADV_FWD', u'INGRESS_ADV_FWD_ENTRY', u'INGRESS_ADV_FWD_TEMPLATE', u'INGRESS_ADV_FWD_TEMPLATE_ENTRY', u'INGRESS_EXT_SERVICE', u'INGRESS_EXT_SERVICE_ENTRY', u'INGRESS_EXT_SERVICE_TEMPLATE', u'INGRESS_EXT_SERVICE_TEMPLATE_ENTRY', u'IP_BINDING', u'JOB', u'KEYSERVER_MEMBER', u'KEYSERVER_MONITOR', u'KEYSERVER_MONITOR_ENCRYPTED_SEED', u'KEYSERVER_MONITOR_SEED', u'KEYSERVER_MONITOR_SEK', u'KEYSERVER_NOTIFICATION', u'L2DOMAIN', u'L2DOMAIN_SHARED', u'L2DOMAIN_TEMPLATE', u'LDAP_CONFIG', u'LIBVIRT_INTERFACE', u'LICENSE', u'LOCATION', u'MC_CHANNEL_MAP', u'MC_LIST', u'MC_RANGE', u'METADATA', u'METADATA_TAG', u'MIRROR_DESTINATION', u'MONITORING_PORT', u'MULTI_NIC_VPORT', u'NATMAPENTRY', u'NETWORK_ELEMENT', u'NETWORK_LAYOUT', u'NETWORK_MACRO_GROUP', u'NETWORK_POLICY_GROUP', u'NEXT_HOP_RESP', u'NODE_EXECUTION_ERROR', u'NS_REDUNDANT_PORT', u'NSG_NOTIFICATION', u'NSGATEWAY', u'NSGATEWAY_CONFIG', u'NSGATEWAY_TEMPLATE', u'NSPORT', 
u'NSPORT_STATIC_CONFIG', u'NSPORT_TEMPLATE', u'NSPORT_VLAN_CONFIG', u'NSREDUNDANT_GW_GRP', u'PATCONFIG_CONFIG_RESP', u'PATNATPOOL', u'PERMISSION', u'PERMITTED_ACTION', u'POLICING_POLICY', u'POLICY_DECISION', u'POLICY_GROUP', u'POLICY_GROUP_TEMPLATE', u'PORT', u'PORT_MR', u'PORT_PUSH', u'PORT_TEMPLATE', u'PORT_VLAN_CONFIG', u'PORT_VLAN_CONFIG_RESPONSE', u'PUBLIC_NETWORK', u'QOS_PRIMITIVE', u'RATE_LIMITER', u'RD_SEQUENCENO', u'REDUNDANT_GW_GRP', u'RESYNC', u'ROUTING_POL_MED_RESPONSE', u'ROUTING_POLICY', u'RTRD_ENTITY', u'RTRD_SEQUENCENO', u'SERVICE_GATEWAY_RESPONSE', u'SERVICE_VRF_SEQUENCENO', u'SERVICES_GATEWAY_RESPONSE', u'SHAPING_POLICY', u'SHARED_RESOURCE', u'SITE', u'SITE_REQ', u'SITE_RES', u'STATIC_ROUTE', u'STATIC_ROUTE_RESP', u'STATISTICS', u'STATS_COLLECTOR', u'STATS_POLICY', u'STATS_TCA', u'STATSSERVER', u'SUBNET', u'SUBNET_ENTRY', u'SUBNET_MAC_ENTRY', u'SUBNET_POOL_ENTRY', u'SUBNET_TEMPLATE', u'SYSTEM_CONFIG', u'SYSTEM_CONFIG_REQ', u'SYSTEM_CONFIG_RESP', u'SYSTEM_MONITORING', u'UNSUPPORTED', u'UPLINK_RD', u'USER', u'VIRTUAL_IP', u'VIRTUAL_MACHINE', u'VIRTUAL_MACHINE_REPORT', u'VLAN', u'VLAN_CONFIG_RESPONSE', u'VLAN_TEMPLATE', u'VM_DESCRIPTION', u'VM_INTERFACE', u'VMWARE_RELOAD_CONFIG', u'VMWARE_VCENTER', u'VMWARE_VCENTER_CLUSTER', u'VMWARE_VCENTER_DATACENTER', u'VMWARE_VCENTER_EAM_CONFIG', u'VMWARE_VCENTER_HYPERVISOR', u'VMWARE_VCENTER_VRS_BASE_CONFIG', u'VMWARE_VCENTER_VRS_CONFIG', u'VMWARE_VRS_ADDRESS_RANGE', u'VNID_SEQUENCENO', u'VPN_CONNECT', u'VPORT', u'VPORT_GATEWAY_RESPONSE', u'VPORT_MEDIATION_REQUEST', u'VPORT_MIRROR', u'VPORT_TAG_BASE', u'VPORTTAG', u'VPORTTAGTEMPLATE', u'VPRN_LABEL_SEQUENCENO', u'VRS', u'VSC', u'VSD', u'VSD_COMPONENT', u'VSG_REDUNDANT_PORT', u'VSP', u'WAN_SERVICE', u'ZONE', u'ZONE_TEMPLATE'])
self.expose_attribute(local_name="associated_multicast_channel_map_id", remote_name="associatedMulticastChannelMapID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_shared_network_resource_id", remote_name="associatedSharedNetworkResourceID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="encryption", remote_name="encryption", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self.expose_attribute(local_name="gateway", remote_name="gateway", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_mac_address", remote_name="gatewayMACAddress", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="maintenance_mode", remote_name="maintenanceMode", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'ENABLED_INHERITED'])
self.expose_attribute(local_name="multicast", remote_name="multicast", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="netmask", remote_name="netmask", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="policy_group_id", remote_name="policyGroupID", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="proxy_arp", remote_name="proxyARP", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="public", remote_name="public", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="route_distinguisher", remote_name="routeDistinguisher", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="route_target", remote_name="routeTarget", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="service_id", remote_name="serviceID", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="split_subnet", remote_name="splitSubnet", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="template_id", remote_name="templateID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="underlay", remote_name="underlay", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="underlay_enabled", remote_name="underlayEnabled", attribute_type=str, is_required=False, is_unique=False, choices=[u'DISABLED', u'ENABLED', u'INHERITED'])
self.expose_attribute(local_name="vn_id", remote_name="vnId", attribute_type=int, is_required=False, is_unique=False)
# Fetchers
self.address_ranges = NUAddressRangesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.dhcp_options = NUDHCPOptionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.ike_gateway_connections = NUIKEGatewayConnectionsFetcher.fetcher_with_object(parent_object=self, relationship="member")
self.ip_reservations = NUIPReservationsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.qoss = NUQOSsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vm_resyncs = NUVMResyncsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.statistics = NUStatisticsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.statistics_policies = NUStatisticsPoliciesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.tcas = NUTCAsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.virtual_ips = NUVirtualIPsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vms = NUVMsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vm_interfaces = NUVMInterfacesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.vports = NUVPortsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def ip_type(self):
""" Get ip_type value.
Notes:
IPv4 or IPv6
This attribute is named `IPType` in VSD API.
"""
return self._ip_type
@ip_type.setter
def ip_type(self, value):
""" Set ip_type value.
Notes:
IPv4 or IPv6
This attribute is named `IPType` in VSD API.
"""
self._ip_type = value
@property
def pat_enabled(self):
""" Get pat_enabled value.
Notes:
This attribute is named `PATEnabled` in VSD API.
"""
return self._pat_enabled
@pat_enabled.setter
def pat_enabled(self, value):
""" Set pat_enabled value.
Notes:
This attribute is named `PATEnabled` in VSD API.
"""
self._pat_enabled = value
@property
def address(self):
""" Get address value.
Notes:
IP address of the subnet defined. In the case of a zone, this is an optional field that allows users to allocate an IP address range to the zone. The VSD will auto-assign IP addresses to subnets from this range if a specific IP address is not defined for the subnet
"""
return self._address
@address.setter
def address(self, value):
""" Set address value.
Notes:
IP address of the subnet defined. In the case of a zone, this is an optional field that allows users to allocate an IP address range to the zone. The VSD will auto-assign IP addresses to subnets from this range if a specific IP address is not defined for the subnet
"""
self._address = value
@property
def associated_application_id(self):
""" Get associated_application_id value.
Notes:
The associated application ID.
This attribute is named `associatedApplicationID` in VSD API.
"""
return self._associated_application_id
@associated_application_id.setter
def associated_application_id(self, value):
""" Set associated_application_id value.
Notes:
The associated application ID.
This attribute is named `associatedApplicationID` in VSD API.
"""
self._associated_application_id = value
@property
def associated_application_object_id(self):
""" Get associated_application_object_id value.
Notes:
The associated application object ID.
This attribute is named `associatedApplicationObjectID` in VSD API.
"""
return self._associated_application_object_id
@associated_application_object_id.setter
def associated_application_object_id(self, value):
""" Set associated_application_object_id value.
Notes:
The associated application object ID.
This attribute is named `associatedApplicationObjectID` in VSD API.
"""
self._associated_application_object_id = value
@property
def associated_application_object_type(self):
""" Get associated_application_object_type value.
Notes:
The associated application object type. Refer to API section for supported types.
This attribute is named `associatedApplicationObjectType` in VSD API.
"""
return self._associated_application_object_type
@associated_application_object_type.setter
def associated_application_object_type(self, value):
""" Set associated_application_object_type value.
Notes:
The associated application object type. Refer to API section for supported types.
This attribute is named `associatedApplicationObjectType` in VSD API.
"""
self._associated_application_object_type = value
@property
def associated_multicast_channel_map_id(self):
""" Get associated_multicast_channel_map_id value.
Notes:
The ID of the Multi Cast Channel Map this Subnet/Subnet Template is associated with. This has to be set when enableMultiCast is set to ENABLED
This attribute is named `associatedMulticastChannelMapID` in VSD API.
"""
return self._associated_multicast_channel_map_id
@associated_multicast_channel_map_id.setter
def associated_multicast_channel_map_id(self, value):
""" Set associated_multicast_channel_map_id value.
Notes:
The ID of the Multi Cast Channel Map this Subnet/Subnet Template is associated with. This has to be set when enableMultiCast is set to ENABLED
This attribute is named `associatedMulticastChannelMapID` in VSD API.
"""
self._associated_multicast_channel_map_id = value
@property
def associated_shared_network_resource_id(self):
""" Get associated_shared_network_resource_id value.
Notes:
The ID of public subnet that is associated with this subnet
This attribute is named `associatedSharedNetworkResourceID` in VSD API.
"""
return self._associated_shared_network_resource_id
@associated_shared_network_resource_id.setter
def associated_shared_network_resource_id(self, value):
""" Set associated_shared_network_resource_id value.
Notes:
The ID of public subnet that is associated with this subnet
This attribute is named `associatedSharedNetworkResourceID` in VSD API.
"""
self._associated_shared_network_resource_id = value
@property
def description(self):
""" Get description value.
Notes:
A description field provided by the user that identifies the subnet
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A description field provided by the user that identifies the subnet
"""
self._description = value
@property
def encryption(self):
""" Get encryption value.
Notes:
Determines whether or not IPSEC is enabled.
"""
return self._encryption
@encryption.setter
def encryption(self, value):
""" Set encryption value.
Notes:
Determines whether or not IPSEC is enabled.
"""
self._encryption = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
@property
def gateway(self):
""" Get gateway value.
Notes:
The IP address of the gateway of this subnet
"""
return self._gateway
@gateway.setter
def gateway(self, value):
""" Set gateway value.
Notes:
The IP address of the gateway of this subnet
"""
self._gateway = value
@property
def gateway_mac_address(self):
""" Get gateway_mac_address value.
Notes:
This attribute is named `gatewayMACAddress` in VSD API.
"""
return self._gateway_mac_address
@gateway_mac_address.setter
def gateway_mac_address(self, value):
""" Set gateway_mac_address value.
Notes:
This attribute is named `gatewayMACAddress` in VSD API.
"""
self._gateway_mac_address = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def maintenance_mode(self):
""" Get maintenance_mode value.
Notes:
maintenanceMode is an enum that indicates if the SubNetwork is accepting VM activation requests.
This attribute is named `maintenanceMode` in VSD API.
"""
return self._maintenance_mode
@maintenance_mode.setter
def maintenance_mode(self, value):
""" Set maintenance_mode value.
Notes:
maintenanceMode is an enum that indicates if the SubNetwork is accepting VM activation requests.
This attribute is named `maintenanceMode` in VSD API.
"""
self._maintenance_mode = value
@property
def multicast(self):
""" Get multicast value.
Notes:
multicast is enum that indicates multicast policy on Subnet/Subnet Template.
"""
return self._multicast
@multicast.setter
def multicast(self, value):
""" Set multicast value.
Notes:
multicast is enum that indicates multicast policy on Subnet/Subnet Template.
"""
self._multicast = value
@property
def name(self):
""" Get name value.
Notes:
Name of the current entity (zone, zone template, subnet, etc.). Valid characters are letters, numbers, space and hyphen (-).
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the current entity (zone, zone template, subnet, etc.). Valid characters are letters, numbers, space and hyphen (-).
"""
self._name = value
@property
def netmask(self):
""" Get netmask value.
Notes:
Netmask of the subnet defined
"""
return self._netmask
@netmask.setter
def netmask(self, value):
""" Set netmask value.
Notes:
Netmask of the subnet defined
"""
self._netmask = value
@property
def policy_group_id(self):
""" Get policy_group_id value.
Notes:
PG ID for the subnet. This is unique per domain and will be in the range 1-4095
This attribute is named `policyGroupID` in VSD API.
"""
return self._policy_group_id
@policy_group_id.setter
def policy_group_id(self, value):
""" Set policy_group_id value.
Notes:
PG ID for the subnet. This is unique per domain and will be in the range 1-4095
This attribute is named `policyGroupID` in VSD API.
"""
self._policy_group_id = value
@property
def proxy_arp(self):
""" Get proxy_arp value.
Notes:
When set, the VRS will act as an ARP proxy.
This attribute is named `proxyARP` in VSD API.
"""
return self._proxy_arp
@proxy_arp.setter
def proxy_arp(self, value):
""" Set proxy_arp value.
Notes:
When set, the VRS will act as an ARP proxy.
This attribute is named `proxyARP` in VSD API.
"""
self._proxy_arp = value
@property
def public(self):
""" Get public value.
Notes:
When set to true, this is a public subnet under a public zone.
"""
return self._public
@public.setter
def public(self, value):
""" Set public value.
Notes:
When set to true, this is a public subnet under a public zone.
"""
self._public = value
@property
def route_distinguisher(self):
""" Get route_distinguisher value.
Notes:
The Route Distinguisher value assigned by VSD for this subnet that is used by the BGP-EVPN protocol in VSC
This attribute is named `routeDistinguisher` in VSD API.
"""
return self._route_distinguisher
@route_distinguisher.setter
def route_distinguisher(self, value):
""" Set route_distinguisher value.
Notes:
The Route Distinguisher value assigned by VSD for this subnet that is used by the BGP-EVPN protocol in VSC
This attribute is named `routeDistinguisher` in VSD API.
"""
self._route_distinguisher = value
@property
def route_target(self):
""" Get route_target value.
Notes:
The Route Target value assigned by VSD for this subnet that is used by the BGP-EVPN protocol in VSC
This attribute is named `routeTarget` in VSD API.
"""
return self._route_target
@route_target.setter
def route_target(self, value):
""" Set route_target value.
Notes:
The Route Target value assigned by VSD for this subnet that is used by the BGP-EVPN protocol in VSC
This attribute is named `routeTarget` in VSD API.
"""
self._route_target = value
@property
def service_id(self):
""" Get service_id value.
Notes:
The service ID used by the VSCs to identify this subnet
This attribute is named `serviceID` in VSD API.
"""
return self._service_id
@service_id.setter
def service_id(self, value):
""" Set service_id value.
Notes:
The service ID used by the VSCs to identify this subnet
This attribute is named `serviceID` in VSD API.
"""
self._service_id = value
@property
def split_subnet(self):
""" Get split_subnet value.
Notes:
Need to add correct description
This attribute is named `splitSubnet` in VSD API.
"""
return self._split_subnet
@split_subnet.setter
def split_subnet(self, value):
""" Set split_subnet value.
Notes:
Need to add correct description
This attribute is named `splitSubnet` in VSD API.
"""
self._split_subnet = value
@property
def template_id(self):
""" Get template_id value.
Notes:
The ID of the subnet template that this subnet object was derived from
This attribute is named `templateID` in VSD API.
"""
return self._template_id
@template_id.setter
def template_id(self, value):
""" Set template_id value.
Notes:
The ID of the subnet template that this subnet object was derived from
This attribute is named `templateID` in VSD API.
"""
self._template_id = value
@property
def underlay(self):
""" Get underlay value.
Notes:
Boolean flag to indicate whether underlay is enabled directly or indirectly
"""
return self._underlay
@underlay.setter
def underlay(self, value):
""" Set underlay value.
Notes:
Boolean flag to indicate whether underlay is enabled directly or indirectly
"""
self._underlay = value
@property
def underlay_enabled(self):
""" Get underlay_enabled value.
Notes:
Indicates whether UNDERLAY is enabled for the subnets in this domain
This attribute is named `underlayEnabled` in VSD API.
"""
return self._underlay_enabled
@underlay_enabled.setter
def underlay_enabled(self, value):
""" Set underlay_enabled value.
Notes:
Indicates whether UNDERLAY is enabled for the subnets in this domain
This attribute is named `underlayEnabled` in VSD API.
"""
self._underlay_enabled = value
@property
def vn_id(self):
""" Get vn_id value.
Notes:
Current Network's globally unique VXLAN network identifier generated by VSD
This attribute is named `vnId` in VSD API.
"""
return self._vn_id
@vn_id.setter
def vn_id(self, value):
""" Set vn_id value.
Notes:
Current Network's globally unique VXLAN network identifier generated by VSD
This attribute is named `vnId` in VSD API.
"""
self._vn_id = value
## Custom methods
def is_template(self):
""" Verify that the object is a template
Returns:
(bool): True if the object is a template
"""
return False
def is_from_template(self):
""" Verify if the object has been instantiated from a template
Note:
The object has to be fetched. Otherwise, it does not
have information from its parent
Returns:
(bool): True if the object is a template
"""
return self.template_id
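    # Illustrative note (added; not part of the generated SDK): the property pairs
    # above simply map pythonic attribute names onto the VSD API spelling given in
    # each docstring, so a hypothetical caller might write, e.g.:
    #
    #     entity.ip_type = "IPV4"        # serialized as `IPType`
    #     entity.address = "10.0.0.0"    # plain attribute, same name in the API
    #     entity.netmask = "255.255.255.0"
    #
    # where `entity` stands for an instance of the subnet class defined above.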
| 38.02458 | 5,302 | 0.70675 |
a138b7e7930e8f6d049045d2731de939b653bb33 | 10,849 | py | Python | src/gifmaze/gifmaze/algorithms.py | wroldwiedbwe/pywonderland | f9eceba86cc79f476fe99d3dcbe40906a3273713 | ["MIT"] | 4,390 | 2016-11-15T17:41:45.000Z | 2022-03-30T09:40:31.000Z | src/gifmaze/gifmaze/algorithms.py | wroldwiedbwe/pywonderland | f9eceba86cc79f476fe99d3dcbe40906a3273713 | ["MIT"] | 19 | 2016-11-28T11:03:10.000Z | 2021-12-08T16:55:23.000Z | src/gifmaze/gifmaze/algorithms.py | wroldwiedbwe/pywonderland | f9eceba86cc79f476fe99d3dcbe40906a3273713 | ["MIT"] | 468 | 2016-11-20T22:03:21.000Z | 2022-01-28T02:26:34.000Z |
# -*- coding: utf-8 -*-
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Maze generation and maze solving algorithms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the maze generation and maze solving algorithms.
Each algorithm is implemented as a generator function which runs on
a `maze` instance and calls an `encode_func` function to yield the data.
All algorithms have a similar interface:
algo(maze, encode_func, speed, **kwargs)
Here `maze` is the Maze object that the algorithm runs on,
`encode_func` is the function that encodes the animation into frames,
`speed` controls how often `encode_func` is called.
"""
import heapq
import random
from collections import deque
from operator import itemgetter
from tqdm import tqdm
from .gifmaze import Maze
def wilson(maze, encode_func, speed=50, root=(0, 0)):
"""
Maze generation using Wilson's uniform spanning tree algorithm.
"""
bar = tqdm(total=len(maze.cells) - 1, desc="Running Wilson's algorithm")
def add_to_path(path, cell):
"""
Add a cell to the path of current random walk.
Note `path` is modified inside this function.
"""
maze.mark_cell(cell, Maze.PATH)
maze.mark_space(path[-1], cell, Maze.PATH)
path.append(cell)
def erase_loop(path, cell):
"""
When a cell is visited twice then a loop is created, erase it.
Note this function returns a new version of the path.
"""
index = path.index(cell)
# erase the loop
maze.mark_path(path[index:], Maze.WALL)
maze.mark_cell(path[index], Maze.PATH)
return path[: index + 1]
# initially the tree contains only the root.
maze.mark_cell(root, Maze.TREE)
# for each cell that is not in the tree,
# start a loop erased random walk from this cell until the walk hits the tree.
for cell in maze.cells:
if not maze.in_tree(cell):
# a list that holds the path of the loop erased random walk.
lerw = [cell]
maze.mark_cell(cell, Maze.PATH)
current_cell = cell
while not maze.in_tree(current_cell):
next_cell = random.choice(maze.get_neighbors(current_cell))
# if it's already in the path then a loop is found.
if maze.in_path(next_cell):
lerw = erase_loop(lerw, next_cell)
# if the walk hits the tree then finish the walk.
elif maze.in_tree(next_cell):
add_to_path(lerw, next_cell)
# `add_to_path` will change the cell to `PATH` so we need to reset it.
maze.mark_cell(next_cell, Maze.TREE)
# continue the walk from this new cell.
else:
add_to_path(lerw, next_cell)
current_cell = next_cell
if maze.num_changes >= speed:
yield encode_func(maze)
# once the walk hits the tree then add its path to the tree.
maze.mark_path(lerw, Maze.TREE)
bar.update(len(lerw) - 1)
if maze.num_changes > 0:
yield encode_func(maze)
bar.close()
def bfs(maze, encode_func, speed=20, start=None, end=None):
"""
Solve a maze using breadth-first search.
The cells are marked by their distances to the starting cell plus three.
This is because we must distinguish a 'flooded' cell from walls and tree.
"""
if start is None:
start = (0, 0)
if end is None:
end = (maze.width - 1, maze.height - 1)
bar = tqdm(total=len(maze.cells) - 1, desc="Solving maze by bfs")
init_dist = 3
came_from = {start: start}
queue = deque([(start, init_dist)])
maze.mark_cell(start, init_dist)
visited = set([start])
while len(queue) > 0:
child, dist = queue.popleft()
parent = came_from[child]
maze.mark_cell(child, dist)
maze.mark_space(parent, child, dist)
bar.update(1)
for next_cell in maze.get_neighbors(child):
if (next_cell not in visited) and (not maze.barrier(child, next_cell)):
came_from[next_cell] = child
queue.append((next_cell, dist + 1))
visited.add(next_cell)
if maze.num_changes >= speed:
yield encode_func(maze)
if maze.num_changes > 0:
yield encode_func(maze)
# retrieve the path
path = [end]
v = end
while v != start:
v = came_from[v]
path.append(v)
maze.mark_path(path, Maze.PATH)
# show the path
yield encode_func(maze)
bar.close()
def random_dfs(maze, encode_func, speed=10, start=(0, 0)):
"""
Maze generation using random depth-first search.
"""
bar = tqdm(total=len(maze.cells) - 1, desc="Running random depth first search")
stack = [(start, v) for v in maze.get_neighbors(start)]
maze.mark_cell(start, Maze.TREE)
while len(stack) > 0:
parent, child = stack.pop()
if maze.in_tree(child):
continue
maze.mark_cell(child, Maze.TREE)
maze.mark_space(parent, child, Maze.TREE)
bar.update(1)
neighbors = maze.get_neighbors(child)
random.shuffle(neighbors)
for v in neighbors:
stack.append((child, v))
if maze.num_changes >= speed:
yield encode_func(maze)
if maze.num_changes > 0:
yield encode_func(maze)
bar.close()
def dfs(maze, encode_func, speed=20, start=None, end=None):
"""
Solve a maze using depth-first search.
"""
if start is None:
start = (0, 0)
if end is None:
end = (maze.width - 1, maze.height - 1)
bar = tqdm(total=len(maze.cells) - 1, desc="Running dfs search.")
came_from = {start: start} # a dict to remember each step.
stack = [start]
maze.mark_cell(start, Maze.FILL)
visited = set([start])
while len(stack) > 0:
child = stack.pop()
if child == end:
break
parent = came_from[child]
maze.mark_cell(child, Maze.FILL)
maze.mark_space(parent, child, Maze.FILL)
bar.update(1)
for next_cell in maze.get_neighbors(child):
if (next_cell not in visited) and (not maze.barrier(child, next_cell)):
came_from[next_cell] = child
stack.append(next_cell)
visited.add(next_cell)
if maze.num_changes >= speed:
yield encode_func(maze)
if maze.num_changes > 0:
yield encode_func(maze)
# retrieve the path
path = [end]
v = end
while v != start:
v = came_from[v]
path.append(v)
maze.mark_path(path, Maze.PATH)
yield encode_func(maze)
bar.close()
def prim(maze, encode_func, speed=30, start=(0, 0)):
"""
Maze generation using Prim's algorithm.
"""
bar = tqdm(total=len(maze.cells) - 1, desc="Running Prim's algorithm")
queue = [(random.random(), start, v) for v in maze.get_neighbors(start)]
maze.mark_cell(start, Maze.TREE)
while len(queue) > 0:
_, parent, child = heapq.heappop(queue)
if maze.in_tree(child):
continue
maze.mark_cell(child, Maze.TREE)
maze.mark_space(parent, child, Maze.TREE)
bar.update(1)
for v in maze.get_neighbors(child):
# assign a weight to this edge only when it's needed.
weight = random.random()
heapq.heappush(queue, (weight, child, v))
if maze.num_changes >= speed:
yield encode_func(maze)
if maze.num_changes > 0:
yield encode_func(maze)
bar.close()
def kruskal(maze, encode_func, speed=30):
"""
Maze generation using Kruskal's algorithm.
"""
bar = tqdm(total=len(maze.cells) - 1, desc="Running Kruskal's algorithm")
parent = {v: v for v in maze.cells}
rank = {v: 0 for v in maze.cells}
edges = [
(random.random(), u, v)
for u in maze.cells
for v in maze.get_neighbors(u)
if u < v
]
def find(v):
"""find the root of the subtree that v belongs to."""
while parent[v] != v:
v = parent[v]
return v
def union(u, v):
root1 = find(u)
root2 = find(v)
if root1 != root2:
if rank[root1] > rank[root2]:
parent[root2] = root1
elif rank[root1] < rank[root2]:
parent[root1] = root2
else:
parent[root1] = root2
rank[root2] += 1
for _, u, v in sorted(edges, key=itemgetter(0)):
if find(u) != find(v):
union(u, v)
maze.mark_cell(u, Maze.TREE)
maze.mark_cell(v, Maze.TREE)
maze.mark_space(u, v, Maze.TREE)
bar.update(1)
if maze.num_changes >= speed:
yield encode_func(maze)
if maze.num_changes > 0:
yield encode_func(maze)
bar.close()
def astar(maze, encode_func, speed, start=None, end=None):
"""
Solving a maze by A* search.
"""
if start is None:
start = (0, 0)
if end is None:
end = (maze.width - 1, maze.height - 1)
def manhattan(cellA, cellB):
"""
The Manhattan distance between two cells.
"""
xA, yA = cellA
xB, yB = cellB
return abs(xA - xB) + abs(yA - yB)
def euclidean(cellA, cellB):
"""
The Euclidean distance between two cells.
"""
xA, yA = cellA
xB, yB = cellB
return ((xA - xB) * (xA - xB) + (yA - yB) * (yA - yB)) ** 0.5
bar = tqdm(total=len(maze.cells) - 1, desc="Solving maze by A*")
came_from = {start: start}
visited = {start}
queue = [(0, start)]
heapq.heapify(queue)
maze.mark_cell(start, Maze.FILL)
while len(queue) > 0:
dist, current = heapq.heappop(queue)
if current == end:
break
parent = came_from[current]
maze.mark_cell(current, Maze.FILL)
maze.mark_space(parent, current, Maze.FILL)
bar.update(1)
for child in maze.get_neighbors(current):
if (child not in visited) and (not maze.barrier(current, child)):
came_from[child] = current
dist = 0.2 * euclidean(child, start) + 0.8 * manhattan(child, end)
heapq.heappush(queue, (dist, child))
visited.add(child)
if maze.num_changes >= speed:
yield encode_func(maze)
if maze.num_changes > 0:
yield encode_func(maze)
# retrieve the path
path = [end]
v = end
while v != start:
v = came_from[v]
path.append(v)
maze.mark_path(path, Maze.PATH)
yield encode_func(maze)
bar.close()
| 28.701058 | 90 | 0.574154 |
39c8f27d855a6bbfc2f36896de9217e9b29f551b | 392 | py | Python | new_pro/shorten/migrations/0004_bitly_user.py | Lavkushmani/URL_SHORTNER | dd97a6b1c63010690b0d06a8c55607a4e1d7865f | ["MIT"] | 1 | 2020-05-24T03:40:10.000Z | 2020-05-24T03:40:10.000Z | new_pro/shorten/migrations/0004_bitly_user.py | Lavkushmani/URL_SHORTNER | dd97a6b1c63010690b0d06a8c55607a4e1d7865f | ["MIT"] | null | null | null | new_pro/shorten/migrations/0004_bitly_user.py | Lavkushmani/URL_SHORTNER | dd97a6b1c63010690b0d06a8c55607a4e1d7865f | ["MIT"] | null | null | null |
# Generated by Django 2.2.2 on 2019-06-30 08:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shorten', '0003_auto_20190626_1627'),
]
operations = [
migrations.AddField(
model_name='bitly',
name='user',
field=models.CharField(max_length=15, null=True),
),
]
| 20.631579 | 61 | 0.596939 |
2a003ef00da350111713f7cdc82be3c853d954af | 50 | py | Python | pyAssignment/Readers/__init__.py | CD3/pyAssignment | bf618457ff10542b1c1f334c89f48f1de72da32b | ["MIT"] | 1 | 2020-03-21T15:50:54.000Z | 2020-03-21T15:50:54.000Z | pyAssignment/Readers/__init__.py | CD3/pyAssignment | bf618457ff10542b1c1f334c89f48f1de72da32b | ["MIT"] | 22 | 2018-03-24T15:04:35.000Z | 2022-01-14T20:55:09.000Z | pyAssignment/Readers/__init__.py | CD3/pyAssignment | bf618457ff10542b1c1f334c89f48f1de72da32b | ["MIT"] | null | null | null |
from .ReaderBase import *
from .Markdown import *
| 16.666667 | 25 | 0.76 |
0349eba703c514146234be496d5cdcb7e912a302 | 4,522 | py | Python | core/models/CornerNet_Squeeze.py | ybai62868/CornerNet-Lite | cad0fb248be1da38451042ff6c5b9979e67a0729 | ["BSD-3-Clause"] | 2 | 2019-12-10T02:11:32.000Z | 2019-12-13T14:26:14.000Z | core/models/CornerNet_Squeeze.py | ybai62868/CornerNet-Lite | cad0fb248be1da38451042ff6c5b9979e67a0729 | ["BSD-3-Clause"] | null | null | null | core/models/CornerNet_Squeeze.py | ybai62868/CornerNet-Lite | cad0fb248be1da38451042ff6c5b9979e67a0729 | ["BSD-3-Clause"] | null | null | null |
import torch
import torch.nn as nn
from .py_utils import TopPool, BottomPool, LeftPool, RightPool # corner pooling
from .py_utils.utils import convolution, corner_pool, residual
from .py_utils.losses import CornerNet_Loss
from .py_utils.modules import hg_module, hg, hg_net
class fire_module(nn.Module):
def __init__(self, inp_dim, out_dim, sr=2, stride=1):
super(fire_module, self).__init__()
self.conv1 = nn.Conv2d(inp_dim, out_dim // sr, kernel_size=1, stride=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_dim // sr)
self.conv_1x1 = nn.Conv2d(out_dim // sr, out_dim // 2, kernel_size=1, stride=stride, bias=False)
self.conv_3x3 = nn.Conv2d(out_dim // sr, out_dim // 2, kernel_size=3, padding=1,
stride=stride, groups=out_dim // sr, bias=False)
self.bn2 = nn.BatchNorm2d(out_dim)
self.skip = (stride == 1 and inp_dim == out_dim)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
conv1 = self.conv1(x)
bn1 = self.bn1(conv1)
conv2 = torch.cat((self.conv_1x1(bn1), self.conv_3x3(bn1)), 1)
bn2 = self.bn2(conv2)
if self.skip:
return self.relu(bn2 + x)
else:
return self.relu(bn2)
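# Added note: for an input of shape (N, inp_dim, H, W), `fire_module` squeezes
# to out_dim // sr channels with a 1x1 conv, then expands back to out_dim
# channels by concatenating a 1x1 branch and a grouped 3x3 branch (out_dim // 2
# channels each). With stride=1 and inp_dim == out_dim the input is added back
# as a residual; with stride=2 the spatial dims are halved and the skip is
# dropped.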
def make_pool_layer(dim):
return nn.Sequential()
def make_unpool_layer(dim):
return nn.ConvTranspose2d(dim, dim, kernel_size=4, stride=2, padding=1)
def make_layer(inp_dim, out_dim, modules):
layers = [fire_module(inp_dim, out_dim)]
layers += [fire_module(out_dim, out_dim) for _ in range(1, modules)]
return nn.Sequential(*layers)
def make_layer_revr(inp_dim, out_dim, modules):
layers = [fire_module(inp_dim, inp_dim) for _ in range(modules - 1)]
layers += [fire_module(inp_dim, out_dim)]
return nn.Sequential(*layers)
def make_hg_layer(inp_dim, out_dim, modules):
layers = [fire_module(inp_dim, out_dim, stride=2)]
layers += [fire_module(out_dim, out_dim) for _ in range(1, modules)]
return nn.Sequential(*layers)
class model(hg_net):
def _pred_mod(self, dim):
return nn.Sequential(
convolution(1, 256, 256, with_bn=False),
nn.Conv2d(256, dim, (1, 1))
)
def _merge_mod(self):
return nn.Sequential(
nn.Conv2d(256, 256, (1, 1), bias=False),
nn.BatchNorm2d(256)
)
def __init__(self):
stacks = 2
pre = nn.Sequential(
convolution(7, 3, 128, stride=2),
residual(128, 256, stride=2),
residual(256, 256, stride=2)
)
hg_mods = nn.ModuleList([
hg_module(
4, [256, 256, 384, 384, 512], [2, 2, 2, 2, 4],
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_up_layer=make_layer,
make_low_layer=make_layer,
make_hg_layer_revr=make_layer_revr,
make_hg_layer=make_hg_layer
) for _ in range(stacks)
])
cnvs = nn.ModuleList([convolution(3, 256, 256) for _ in range(stacks)])
inters = nn.ModuleList([residual(256, 256) for _ in range(stacks - 1)])
cnvs_ = nn.ModuleList([self._merge_mod() for _ in range(stacks - 1)])
inters_ = nn.ModuleList([self._merge_mod() for _ in range(stacks - 1)])
hgs = hg(pre, hg_mods, cnvs, inters, cnvs_, inters_)
tl_modules = nn.ModuleList([corner_pool(256, TopPool, LeftPool) for _ in range(stacks)])
br_modules = nn.ModuleList([corner_pool(256, BottomPool, RightPool) for _ in range(stacks)])
tl_heats = nn.ModuleList([self._pred_mod(80) for _ in range(stacks)])
br_heats = nn.ModuleList([self._pred_mod(80) for _ in range(stacks)])
for tl_heat, br_heat in zip(tl_heats, br_heats):
torch.nn.init.constant_(tl_heat[-1].bias, -2.19)
torch.nn.init.constant_(br_heat[-1].bias, -2.19)
tl_tags = nn.ModuleList([self._pred_mod(1) for _ in range(stacks)])
br_tags = nn.ModuleList([self._pred_mod(1) for _ in range(stacks)])
tl_offs = nn.ModuleList([self._pred_mod(2) for _ in range(stacks)])
br_offs = nn.ModuleList([self._pred_mod(2) for _ in range(stacks)])
super(model, self).__init__(
hgs, tl_modules, br_modules, tl_heats, br_heats,
tl_tags, br_tags, tl_offs, br_offs
)
self.loss = CornerNet_Loss(pull_weight=1e-1, push_weight=1e-1)
| 40.375 | 104 | 0.618974 |
860d57e4aafc5464f26a2c202f689ffb07f12c30 | 13,171 | py | Python | training/main.py | ZTao-z/ProxyNAS | c879afdadd7f71fafc2db383a85fc00c94bb6576 | ["Apache-2.0"] | 1 | 2021-03-08T07:58:18.000Z | 2021-03-08T07:58:18.000Z | training/main.py | ZTao-z/ProxylessNAS | c879afdadd7f71fafc2db383a85fc00c94bb6576 | ["Apache-2.0"] | null | null | null | training/main.py | ZTao-z/ProxylessNAS | c879afdadd7f71fafc2db383a85fc00c94bb6576 | ["Apache-2.0"] | null | null | null |
from __future__ import print_function
import os, os.path as osp
import math
import argparse
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data.distributed
from torchvision import datasets, transforms
import horovod.torch as hvd
import tensorboardX
from tqdm import tqdm
import net224x224 as models
from utils.bags_of_tricks import cross_encropy_with_label_smoothing
import subprocess
subprocess.call("ulimit -n 65536", shell=True)
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
# Training settings
parser = argparse.ArgumentParser(description='PyTorch ImageNet Example',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet50)')
parser.add_argument('--num-classes', type=int, default=1000,
help='The number of classes in the dataset.')
parser.add_argument('--train-dir', default=os.path.expanduser('/ssd/dataset/imagenet/train'),
help='path to training data')
parser.add_argument('--val-dir', default=os.path.expanduser('/ssd/dataset/imagenet/val'),
help='path to validation data')
parser.add_argument('--log-dir', default='./logs',
help='tensorboard log directory')
parser.add_argument('--format', default='./checkpoint-{epoch}.pth.tar',
help='checkpoint file format')
parser.add_argument('--fp16-allreduce', action='store_true', default=False,
help='use fp16 compression during allreduce')
# Default settings from https://arxiv.org/abs/1706.02677.
parser.add_argument('--batch-size', type=int, default=64,
help='input batch size for training')
parser.add_argument('--val-batch-size', type=int, default=64,
help='input batch size for validation')
parser.add_argument('--epochs', type=int, default=150,
help='number of epochs to train')
parser.add_argument('--base-lr', type=float, default=0.0125,
help='learning rate for a single GPU')
parser.add_argument('--warmup-epochs', type=float, default=5,
help='number of warmup epochs')
parser.add_argument('--momentum', type=float, default=0.9,
help='SGD momentum')
parser.add_argument('--wd', type=float, default=0.00005,
help='weight decay')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42,
help='random seed')
# tricks to boost accuracy
parser.add_argument('--lr-scheduler', type=str, default="cosine", choices=["linear", "cosine"],
help='how to schedule learning rate')
parser.add_argument("--color-jitter", action='store_true', default=False,
help="To apply color augmentation or not.")
parser.add_argument("--label-smoothing", action='store_true', default=False,
help="To use label smoothing or not.")
parser.add_argument("--no-wd-bn", action='store_true', default=False,
help="Whether to remove the weight decay on BN")
args = parser.parse_args()
name_componenets = [args.arch, str(args.epochs), args.lr_scheduler]
if args.color_jitter:
name_componenets.append("color_jitter")
if args.label_smoothing:
name_componenets.append("label_smoothing")
args.log_dir = osp.join(args.log_dir, "-".join(name_componenets))
args.checkpoint_format = osp.join(args.log_dir, args.format)
# linearly scale the learning rate.
args.base_lr = args.base_lr * (args.batch_size / 64)
args.cuda = not args.no_cuda and torch.cuda.is_available()
hvd.init()
torch.manual_seed(args.seed)
if args.cuda:
# Horovod: pin GPU to local rank.
# torch.cuda.set_device(hvd.local_rank())
torch.cuda.set_device(4)
torch.cuda.manual_seed(args.seed)
cudnn.benchmark = True
# If set > 0, will resume training from a given checkpoint.
resume_from_epoch = 0
for try_epoch in range(args.epochs, 0, -1):
if os.path.exists(args.checkpoint_format.format(epoch=try_epoch)):
resume_from_epoch = try_epoch
break
# Horovod: broadcast resume_from_epoch from rank 0 (which will have
# checkpoints) to other ranks.
resume_from_epoch = hvd.broadcast(torch.tensor(resume_from_epoch), root_rank=0,
name='resume_from_epoch').item()
# Horovod: print logs on the first worker.
verbose = 1 if hvd.rank() == 0 else 0
# Horovod: write TensorBoard logs on first worker.
log_writer = tensorboardX.SummaryWriter(args.log_dir) if hvd.rank() == 0 else None
best_val_acc = 0.0
kwargs = {'num_workers': 5, 'pin_memory': True} if args.cuda else {}
# Training transform
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
pre_process = [
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
]
if args.color_jitter:
pre_process += [transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)]
pre_process += [
transforms.ToTensor(),
normalize
]
train_dataset = datasets.ImageFolder(args.train_dir,
transform=transforms.Compose(pre_process))
# Horovod: use DistributedSampler to partition data among workers. Manually specify
# `num_replicas=hvd.size()` and `rank=hvd.rank()`.
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, num_replicas=hvd.size(), rank=hvd.rank())
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, sampler=train_sampler, **kwargs)
val_dataset = datasets.ImageFolder(args.val_dir,
transform=transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize
]))
val_sampler = torch.utils.data.distributed.DistributedSampler(
val_dataset, num_replicas=hvd.size(), rank=hvd.rank())
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.val_batch_size,
sampler=val_sampler, **kwargs)
# Set up standard ResNet-50 model.
# model = models.resnet50()
model = models.__dict__[args.arch](net_config='proxyless_gpu.config')
if args.cuda:
# Move model to GPU.
model.cuda()
# Horovod: scale learning rate by the number of GPUs.
optimizer = optim.SGD(model.parameters(), lr=args.base_lr * hvd.size(),
momentum=args.momentum, weight_decay=args.wd)
# Horovod: (optional) compression algorithm.
compression = hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(optimizer,
named_parameters=model.named_parameters(),
compression=compression)
# Restore from a previous checkpoint, if initial_epoch is specified.
# Horovod: restore on the first worker which will broadcast weights to other workers.
if resume_from_epoch > 0 and hvd.rank() == 0:
filepath = args.checkpoint_format.format(epoch=resume_from_epoch)
checkpoint = torch.load(filepath)
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
if args.label_smoothing:
criterion = cross_encropy_with_label_smoothing
else:
criterion = nn.CrossEntropyLoss()
# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
def train(epoch):
model.train()
train_sampler.set_epoch(epoch)
train_loss = Metric('train_loss')
train_accuracy = Metric('train_accuracy')
with tqdm(total=len(train_loader),
desc='Train Epoch #{}'.format(epoch + 1),
disable=not verbose) as t:
for batch_idx, (data, target) in enumerate(train_loader):
lr_cur = adjust_learning_rate(epoch, batch_idx, type=args.lr_scheduler)
if args.cuda:
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
train_loss.update(loss)
train_accuracy.update(accuracy(output, target))
t.set_postfix({'loss': train_loss.avg.item(),
'accuracy': 100. * train_accuracy.avg.item(),
'lr': lr_cur})
t.update(1)
if log_writer:
log_writer.add_scalar('train/loss', train_loss.avg, epoch)
log_writer.add_scalar('train/accuracy', train_accuracy.avg, epoch)
def validate(epoch):
global best_val_acc
model.eval()
val_loss = Metric('val_loss')
val_accuracy = Metric('val_accuracy')
with tqdm(total=len(val_loader),
desc='Validate Epoch #{}'.format(epoch + 1),
disable=not verbose) as t:
with torch.no_grad():
for data, target in val_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
output = model(data)
val_loss.update(criterion(output, target))
val_accuracy.update(accuracy(output, target))
t.set_postfix({'loss': val_loss.avg.item(),
'accuracy': 100. * val_accuracy.avg.item()})
t.update(1)
if log_writer:
log_writer.add_scalar('val/loss', val_loss.avg, epoch)
log_writer.add_scalar('val/accuracy', val_accuracy.avg, epoch)
best_val_acc = max(best_val_acc, val_accuracy.avg)
log_writer.add_scalar('val/best_acc', best_val_acc, epoch)
return val_accuracy.avg
import torch.optim.lr_scheduler as lr_scheduler
# Horovod: using `lr = base_lr * hvd.size()` from the very beginning leads to worse final
# accuracy. Scale the learning rate `lr = base_lr` ---> `lr = base_lr * hvd.size()` during
# the first five epochs. See https://arxiv.org/abs/1706.02677 for details.
# After the warmup reduce learning rate by 10 on the 30th, 60th and 80th epochs.
def adjust_learning_rate(epoch, batch_idx, type="cosine"):
if epoch < args.warmup_epochs:
epoch += float(batch_idx + 1) / len(train_loader)
lr_adj = 1. / hvd.size() * (epoch * (hvd.size() - 1) / args.warmup_epochs + 1)
elif type == "linear":
if epoch < 30:
lr_adj = 1.
elif epoch < 60:
lr_adj = 1e-1
elif epoch < 90:
lr_adj = 1e-2
else:
lr_adj = 1e-3
elif type == "cosine":
# self.init_lr * 0.5 * (1 + math.cos(math.pi * T_cur / T_total))
run_epochs = epoch - args.warmup_epochs
total_epochs = args.epochs - args.warmup_epochs
T_cur = float(run_epochs * len(train_loader)) + batch_idx
T_total = float(total_epochs * len(train_loader))
lr_adj = 0.5 * (1 + math.cos(math.pi * T_cur / T_total))
for param_group in optimizer.param_groups:
param_group['lr'] = args.base_lr * hvd.size() * lr_adj
return args.base_lr * hvd.size() * lr_adj
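# Added worked example (assuming, say, hvd.size() == 8): during the warmup the
# multiplier above ramps linearly from 1/8 to 1, so the effective rate grows
# from ~base_lr to base_lr * 8; after warmup the cosine branch decays it
# smoothly towards 0 by the final epoch.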
def accuracy(output, target):
# get the index of the max log-probability
pred = output.max(1, keepdim=True)[1]
return pred.eq(target.view_as(pred)).cpu().float().mean()
def save_checkpoint(epoch):
if hvd.rank() == 0:
os.remove(args.checkpoint_format.format(epoch=epoch))
filepath = args.checkpoint_format.format(epoch=epoch + 1)
state = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(state, filepath)
# Horovod: average metrics from distributed training.
class Metric(object):
def __init__(self, name):
self.name = name
self.sum = torch.tensor(0.)
self.n = torch.tensor(0.)
def update(self, val):
self.sum += hvd.allreduce(val.detach().cpu(), name=self.name)
self.n += 1
@property
def avg(self):
return self.sum / self.n
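# Usage sketch (added): a Metric keeps the allreduce-averaged value of every
# update, e.g.
#     acc = Metric('val_accuracy')
#     acc.update(accuracy(output, target))   # inside a validation loop
#     mean_acc = acc.avg                     # average over recorded batches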
best_acc = 0.0
last_saved_epoch = None
for epoch in range(resume_from_epoch, args.epochs):
train(epoch)
val_acc = validate(epoch)
# save checkpoint for the master
if hvd.rank() == 0:
if last_saved_epoch is not None:
os.remove(args.checkpoint_format.format(epoch=last_saved_epoch))
filepath = args.checkpoint_format.format(epoch=epoch)
state = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(state, filepath)
last_saved_epoch = epoch
| 38.176812 | 98 | 0.650292 |
19bfc894ac7278d17b7a23a3658d5d893afbc7cf | 9,709 | py | Python | pn.py | nelsonag/Pn_data | 59da11f13efbfd5cdf74cf6ac343888d9f9f28e2 | ["MIT"] | null | null | null | pn.py | nelsonag/Pn_data | 59da11f13efbfd5cdf74cf6ac343888d9f9f28e2 | ["MIT"] | null | null | null | pn.py | nelsonag/Pn_data | 59da11f13efbfd5cdf74cf6ac343888d9f9f28e2 | ["MIT"] | null | null | null |
#!/usr/bin/env ipython2
import numpy as np
import scipy.special as ss
import scipy.interpolate as sint
from statepoint import StatePoint
from matplotlib import pyplot as plt
from uncertainties import ufloat
from gen_mgxs import mgxs
import pickle
from bisect import bisect
import os
import sys
def Pn_solve(sigtn, sigsn, Qn, deriv_term):
# d/dx[(n/(2n+1))*psinm1+(n+1)/(2n+1)*psip1]+sigtn*psin=sigsn*psin+Qn
# deriv_term+sigtn*psin=sigsn*psin+Qn
# psin = (Qn-deriv_term)/(sigtn-sigsn)
# psin = (Qn - deriv_term) / (sigtn - np.sum(sigsn,axis=0))
psin = (Qn - deriv_term) / (sigtn)
return psin[:]
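# Added note: the scattering source is folded into Qn by the caller (see
# fixedsrc below), so e.g. Qn = 1.0, deriv_term = 0.0 and sigtn = 2.0 gives
# psin = (1.0 - 0.0) / 2.0 = 0.5 at that point.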
def solve1g(N, sigtns1g, sigsns1g, Qnsg, psinsg, x, invdx, n_ratios):
# Loops through each of the n orders, sets up the derivative term,
# and calls Pn_solve on it.
# n_ratios is [(n/(2n+1),(n+1)/(2n+1)) for n in range(N+1)]
for n in xrange(N+1):
# N+1 so we get an n==N in this loop
# Set up the deriv_term.
# Due to assumed reflective BC, deriv_term will always be 0 for
# ix ==0 and ix == last one, so we can skip those
if n > 0:
nm1_interp = sint.KroghInterpolator(x, psinsg[n - 1])
else:
nm1_interp = sint.KroghInterpolator([x[0],x[-1]],[0.0, 0.0])
if n < N:
np1_interp = sint.KroghInterpolator(x, psinsg[n + 1])
else:
np1_interp = sint.KroghInterpolator([x[0],x[-1]],[0.0, 0.0])
deriv_term = n_ratios[n][0] * nm1_interp.derivative(x) + \
n_ratios[n][1] * np1_interp.derivative(x)
# Now adjust for BC
deriv_term[0] = 0.0
deriv_term[-1] = 0.0
# Now we can pass this to Pn_solve to get our new psin values
psinsg[n,:] = Pn_solve(sigtns1g[n], sigsns1g[n], Qnsg[n], deriv_term)
return psinsg[:,:]
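# Added note: the coupling weights in `n_ratios` are (n/(2n+1), (n+1)/(2n+1));
# e.g. n = 1 gives (1/3, 2/3), the factors applied to the derivatives of the
# neighbouring moments psi_{n-1} and psi_{n+1} above.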
def fixedsrc(N, G, sigtns, sigsns, Qns, psins, x, invdx, n_ratios, eps_psi, max_inner):
# Not fully implemented yet. This will be the MG (multigroup) solver.
eps = 1.0E4
iter = 0
newQns = np.zeros_like(Qns[:,0,:])
# import pdb; pdb.set_trace()
while ((eps > eps_psi) and (iter <= max_inner)):
# Develop scattering source
for g in range(G):
for n in range(N):
for ix in range(len(invdx) + 1):
newQns[n,ix] = Qns[g,n,ix] + \
np.dot(sigsns[:,n,g,ix], psins[:,n,ix])
# Run fixed src solver
psins[g,:,:] = solve1g(N, sigtns[g,:,:], sigsns[g,:,:,:], newQns,
psins[g,:,:], x, invdx, n_ratios)
# eps =
iter += 1
for g in xrange(G):
plt.plot(x,psins[g,0,:],label='Pn')
plt.plot(x,omcflux[g,0,:],label='OMC')
plt.legend(loc='best')
plt.show()
plt.close()
print "Inner Iterations = " + str(iter)
def init(x, G, N, flux_guess):
invdx = np.zeros(len(x) - 1)
for ix in xrange(len(invdx)):
invdx[ix] = 1.0 / (x[ix + 1] - x[ix])
n_ratios = [(float(n)/float(2 * n + 1), float(n + 1)/float(2 * n + 1))
for n in range(N + 1)]
psins = np.ones(shape=(G, N + 1, len(x)))
for g in xrange(G):
for n in xrange(N + 1):
psins[g,n,:] = flux_guess[g,n,:] / np.sum(flux_guess[g,n,:])
return invdx, n_ratios, psins
def get_openmc_mesh(spFile, tid, sid, G, N, extent):
sp = StatePoint(spFile)
sp.read_results()
sp.generate_stdev()
keff = ufloat(sp.k_combined[0], sp.k_combined[1])
GN = [[0.0 for n in xrange(N)] for g in xrange(G)]
data = np.array(GN[:][:])
dx = extent / float(N)
x = [(float(i) + 0.5) * dx for i in xrange(N)]
for g in xrange(G):
myg = G - g - 1
for n in xrange(N):
m, u = sp.get_value(tid, [('mesh',(1,1,n+1)),('energyin',g)], sid)
data[myg,n] = m
return x, data[:,:], keff
def get_openmc_mesh_matrix(spFile, tid, sid, G, N, extent):
sp = StatePoint(spFile)
sp.read_results()
sp.generate_stdev()
keff = ufloat(sp.k_combined[0], sp.k_combined[1])
GGN = [[[0.0 for n in xrange(N)] for go in xrange(G)] for g in xrange(G)]
data = np.array(GGN[:][:][:])
dx = extent / float(N)
x = [(float(i) + 0.5) * dx for i in xrange(N)]
for g in xrange(G):
myg = G - g - 1
for go in xrange(G):
mygo = G - go - 1
for n in xrange(N):
m, u = sp.get_value(tid, [('mesh',(1,1,n+1)),('energyin',g),
('energyout',go)], sid)
data[myg,mygo,n] = m
return x, data[:,:,:], keff
def get_omc_mgxs(sp, mesh_tids, mesh_sids, order, G, Nmesh, extent, xstype):
# Get flux-yN
fluxyn = np.zeros(shape=(order, G, Nmesh))
for l in range(order):
tid = mesh_tids[0]
sid = mesh_sids[0][l]
x, fluxyn[l,:,:], omck = get_openmc_mesh(sp,tid,sid,G,Nmesh,extent)
# Get scatt-pN
scattpn = np.zeros(shape=(order, G, G, Nmesh))
for l in range(order):
tid = mesh_tids[1]
sid = mesh_sids[1][l]
x, scattpn[l,:, :, :], omck = get_openmc_mesh_matrix(sp,tid,sid,G,Nmesh,extent)
# Get scatt-yN
scattyn = np.zeros(shape=(order, G, G, Nmesh))
for l in range(order):
tid = mesh_tids[2]
sid = mesh_sids[2][l]
x, scattyn[l,:,:,:], omck = get_openmc_mesh_matrix(sp,tid,sid,G,Nmesh,extent)
# Get total-yN
totalyn = np.zeros(shape=(order, G, Nmesh))
for l in range(order):
tid = mesh_tids[3]
sid = mesh_sids[3][l]
x, totalyn[l,:,:], omck = get_openmc_mesh(sp,tid,sid,G,Nmesh,extent)
# Get nu-fission (right now only doing iso weighting)
nusigfns = np.zeros(shape=(order, G, G, Nmesh))
tid = mesh_tids[4]
sid = mesh_sids[4][0]
# Now only doing iso weighting so l=0
x, nusigfns[0,:,:,:], omck = get_openmc_mesh_matrix(sp,tid,sid,G,Nmesh,extent)
Qns = np.zeros(shape=(order, G, Nmesh))
# put Q in nusigfns, leave as isotropic now
l = 0
Qsum = 0.0
for go in range(G):
for n in range(Nmesh):
Qns[l,go,n] = 0.0
for g in range(G):
Qns[l,go,n] += nusigfns[0,g,go,n]
Qsum += Qns[l,go,n]
Qns[l,:,:] /= Qsum
for l in range(1,order):
for g in range(G):
for n in range(Nmesh):
Qns[l,g,n] = 0.0
totaliso = totalyn[0,:,:]
for l in range(order):
for g in range(G):
for n in range(Nmesh):
# Normalize by flux
flux = fluxyn[l,g,n]
flux0 = fluxyn[0,g,n]
if flux0 != 0.0:
for go in range(G):
scattpn[l,g,go,n] /= flux0
if l == 0:
totaliso[g,n] /= flux0
if flux != 0.0:
for go in range(G):
scattyn[l,g,go,n] /= flux
nusigfns[l,g,go,n] /= flux
totalyn[l,g,n] /= flux
# Apply correction
if xstype == 'consP':
corr = totaliso[g,n] - totalyn[l,g,n]
for go in range(G):
scattyn[l,g,go,n] += corr
if xstype == 'iso':
sigtns = [totaliso for l in range(order)]
sigsns = scattpn[:]
elif xstype == 'consP':
sigtns = [totaliso for l in range(order)]
sigsns = scattyn[:]
elif xstype == 'yN':
sigtns = totalyn[:]
sigsns = scattyn[:]
return omck, np.swapaxes(fluxyn,0,1), x, np.swapaxes(sigtns,0,1), \
np.swapaxes(sigsns,0,1), np.swapaxes(nusigfns,0,1), np.swapaxes(Qns,0,1)
if __name__ == "__main__":
rcdef = plt.rcParams.copy
newparams = {'savefig.dpi': 100, 'figure.figsize': (24, 13.5),
'font.size': 16}
plt.rcParams.update(newparams)
if len(sys.argv) != 3:
raise ValueError("Must Provide Cross-Section Type [consP, iso, yN] & " +
"Run Type [FS, k]!")
else:
xstype = sys.argv[1]
if xstype not in ["consP", "iso", "yN"]:
raise ValueError("Invalid Cross-Section Type!")
runtype = sys.argv[2]
if runtype not in ["FS", "k"]:
raise ValueError("Invalid Run Type!")
show = False
save = True
G = 4
N = 1
Nmesh = 16
extent = 0.64
sp = './statepoint.08000.binary'
eps_psi = 1.0E-6
max_inner = 2
# First get the mgxs data and create x/s
if xstype == 'iso':
momWgt = False
trcorr = None
elif xstype == 'consP':
momWgt = True
trcorr = 'consP'
elif xstype == 'yN':
momWgt = True
trcorr = None
mesh_tids = [0, 1, 1, 0, 2]
mesh_sids = [[0,2,6,12], [0,1,2,3], [4,6,10,16], [16,18,22,27], [0]]
omck, omcflux, x, sigtns, sigsns, nusigfns, Qns = \
get_omc_mgxs(sp, mesh_tids, mesh_sids, N+1, G, Nmesh, extent, xstype)
print 'OpenMC k_eff=' + "{:12.5E}".format(omck)
# Set up some of our data we will use during the sweep
invdx, n_ratios, psins = init(x, G, N, omcflux)
if runtype == 'FS':
fixedsrc(N, G, sigtns, sigsns, Qns, psins, x, invdx, n_ratios, eps_psi, max_inner)
# Estimate k to compare with the openMC k
pnk = 0.0
for g in xrange(G):
for ix in xrange(Nmesh):
if Qns[g,0,ix] > 0.0:
pnk += np.sum(nusigfns[g,0,:,ix])*psins[g,0,ix] / Qns[g,0,ix]
else:
print "k-eigenvalue solver not yet implemented!"
pcm = 1.0E5*(pnk-omck)/omck
print "Pn k_eff = " + "{:12.5E}".format(pnk)
print "pcm = " + "{:12.5E}".format(pcm)
| 32.580537 | 90 | 0.527552 |
5c65a9e47f8fa0e664aa98db54c1b525c38a79f5 | 5,883 | py | Python | __init__.py | cdoebler1/winston-chatbot | 8bd625b767a5def313961a2fd5ed8223a3b2b6c7 | ["MIT"] | null | null | null | __init__.py | cdoebler1/winston-chatbot | 8bd625b767a5def313961a2fd5ed8223a3b2b6c7 | ["MIT"] | null | null | null | __init__.py | cdoebler1/winston-chatbot | 8bd625b767a5def313961a2fd5ed8223a3b2b6c7 | ["MIT"] | null | null | null |
# fallback-aiml
# Copyright (C) 2017 Mycroft AI
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import aiml
import os
from os import listdir, remove as remove_file
from os.path import dirname, isfile
from mycroft.api import DeviceApi
from mycroft.skills.core import MycroftSkill
from mycroft.skills.core import intent_handler, intent_file_handler
from adapt.intent import IntentBuilder
class Chatbot(MycroftSkill):
chatting = False
def __init__(self):
super(Chatbot, self).__init__(name='Winston_Chatbot')
self.kernel = aiml.Kernel()
# chatbot_brain = self.settings.get('chatbot_brain')
chatbot_brain = "AnnaL"
self.aiml_path = os.path.join(dirname(__file__), chatbot_brain)
self.brain_path = os.path.join(self.file_system.path, 'bot_brain.brn')
# reloading skills will also reset this 'timer', so ideally it should
# not be too high
self.line_count = 1
self.save_loop_threshold = int(self.settings.get('save_loop_threshold',
4))
self.brain_loaded = False
def load_brain(self):
"""Set up the aiml engine using available device information."""
self.log.info('Loading Brain')
if isfile(self.brain_path):
self.kernel.bootstrap(brainFile=self.brain_path)
else:
aimls = listdir(self.aiml_path)
for aiml_file in aimls:
self.kernel.learn(os.path.join(self.aiml_path, aiml_file))
self.kernel.saveBrain(self.brain_path)
try:
device = DeviceApi().get()
except Exception:
device = {
"name": "Mycroft",
"platform": "AI"
}
self.kernel.setBotPredicate("name", device["name"])
self.kernel.setBotPredicate("species", device["platform"])
self.kernel.setBotPredicate("genus", "Mycroft")
self.kernel.setBotPredicate("family", "virtual personal assistant")
self.kernel.setBotPredicate("order", "artificial intelligence")
self.kernel.setBotPredicate("class", "computer program")
self.kernel.setBotPredicate("kingdom", "machine")
self.kernel.setBotPredicate("hometown", "127.0.0.1")
self.kernel.setBotPredicate("botmaster", "master")
self.kernel.setBotPredicate("master", "the community")
# IDEA: extract age from
# https://api.github.com/repos/MycroftAI/mycroft-core created_at date
self.kernel.setBotPredicate("age", "20")
self.brain_loaded = True
return
@intent_handler(IntentBuilder("ResetMemoryIntent").require("Reset")
.require("Memory"))
def handle_reset_brain(self, message):
"""Delete the stored memory, effectively resetting the brain state."""
self.log.debug('Deleting brain file')
# delete the brain file and reset memory
self.speak_dialog("reset.memory")
remove_file(self.brain_path)
self.soft_reset_brain()
return
def soft_reset_brain(self):
# Only reset the active kernel memory
self.kernel.resetBrain()
self.brain_loaded = False
return
def shutdown(self):
"""Shut down any loaded brain."""
if self.brain_loaded:
self.kernel.saveBrain(self.brain_path)
self.kernel.resetBrain() # Manual remove
self.remove_fallback(self.handle_fallback)
super(Chatbot, self).shutdown()
@intent_file_handler("start_parrot.intent")
def handle_start_parrot_intent(self, message):
self.chatting = True
self.speak_dialog("chat_start", expect_response=True)
@intent_file_handler("stop_parrot.intent")
def handle_stop_parrot_intent(self, message):
if self.chatting:
self.chatting = False
self.speak_dialog("chat_stop")
else:
self.speak_dialog("not_chatting")
def stop(self):
if self.chatting:
self.chatting = False
self.speak_dialog("chat_stop")
return True
return False
def ask_brain(self, utterance):
"""Send a query to the AIML brain.
Saves the state to disk once in a while.
"""
response = self.kernel.respond(utterance)
# make a security copy once in a while
if (self.line_count % self.save_loop_threshold) == 0:
self.kernel.saveBrain(self.brain_path)
self.line_count += 1
return response
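    # Added note: with the default save_loop_threshold of 4, the brain file is
    # rewritten after every 4th utterance handled here (line_count 4, 8, 12, ...).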
def converse(self, utterances, lang="en-us"):
if self.chatting:
if self.voc_match(utterances[0], "StopKeyword") and self.voc_match(utterances[0], "ChatKeyword"):
return False
if not self.brain_loaded:
self.load_brain()
utterance = utterances[0]  # converse() receives a list of candidates; respond to the first
answer = self.ask_brain(utterance)
if answer != "":
asked_question = False
if answer.endswith("?"):
asked_question = True
self.speak(answer, expect_response=asked_question)
return True
return True
else:
return False
def create_skill():
return Chatbot()
| 36.540373 | 109 | 0.631141 |