# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Dynamics.TurbineGovernors.TurbineGovernor import TurbineGovernor
class GovSteam0(TurbineGovernor):
"""A simplified steam turbine-governor model.
"""
def __init__(self, t2=0.0, t1=0.0, vmin=0.0, dt=0.0, vmax=0.0, mwbase=0.0, r=0.0, t3=0.0, *args, **kw_args):
"""Initialises a new 'GovSteam0' instance.
@param t2: Numerator time constant of T2/T3 block
@param t1: Steam bowl time constant
@param vmin: Minimum valve position, p.u. of mwcap
@param dt: Turbine damping coefficient
@param vmax: Maximum valve position, p.u. of mwcap
@param mwbase: Base for power values (> 0.)
@param r: Permanent droop
@param t3: Reheater time constant
"""
#: Numerator time constant of T2/T3 block
self.t2 = t2
#: Steam bowl time constant
self.t1 = t1
#: Minimum valve position, p.u. of mwcap
self.vmin = vmin
#: Turbine damping coefficient
self.dt = dt
#: Maximum valve position, p.u. of mwcap
self.vmax = vmax
#: Base for power values (> 0.)
self.mwbase = mwbase
#: Permanent droop
self.r = r
#: Reheater time constant
self.t3 = t3
super(GovSteam0, self).__init__(*args, **kw_args)
_attrs = ["t2", "t1", "vmin", "dt", "vmax", "mwbase", "r", "t3"]
_attr_types = {"t2": float, "t1": float, "vmin": float, "dt": float, "vmax": float, "mwbase": float, "r": float, "t3": float}
_defaults = {"t2": 0.0, "t1": 0.0, "vmin": 0.0, "dt": 0.0, "vmax": 0.0, "mwbase": 0.0, "r": 0.0, "t3": 0.0}
_enums = {}
_refs = []
_many_refs = []
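# A minimal usage sketch (illustrative only): the parameter values below are
# assumptions picked for demonstration, not from any reference case, and this
# assumes the CIM14 package is importable so TurbineGovernor initialises with
# its defaults.
if __name__ == "__main__":
    gov = GovSteam0(t1=0.5, t2=3.0, t3=10.0, r=0.05, dt=0.0,
                    vmin=0.0, vmax=1.0, mwbase=100.0)
    for attr in GovSteam0._attrs:
        print(attr, "=", getattr(gov, attr))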
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2016 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
import re
from core.common import retrieve_content
__url__ = "http://rules.emergingthreats.net/open/suricata/rules/botcc.rules"
__check__ = "CnC Server"
__info__ = "potential malware site"
__reference__ = "emergingthreats.net"
def fetch():
retval = {}
content = retrieve_content(__url__)
if __check__ in content:
for match in re.finditer(r"\d+\.\d+\.\d+\.\d+", content):
retval[match.group(0)] = (__info__, __reference__)
return retval
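# Illustrative usage sketch (assumes the surrounding project's core.common
# module is importable): fetch the feed and print each flagged IP address
# with its (info, reference) tuple.
if __name__ == "__main__":
    for ip, (info, reference) in fetch().items():
        print(ip, info, reference)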
|
"""Contains show related classes."""
import re
from collections import namedtuple
from typing import List, Dict, Any, Optional
from mpf.core.assets import AssetPool
from mpf.core.config_validator import RuntimeToken
from mpf.core.utility_functions import Util
from mpf.exceptions.config_file_error import ConfigFileError
__api__ = ['Show', 'RunningShow', 'ShowPool']
ShowConfig = namedtuple("ShowConfig", ["name", "priority", "speed", "loops", "sync_ms", "manual_advance", "show_tokens",
"events_when_played", "events_when_stopped", "events_when_looped",
"events_when_paused", "events_when_resumed", "events_when_advanced",
"events_when_stepped_back", "events_when_updated", "events_when_completed"])
class ShowPool(AssetPool):
"""A pool of shows."""
__slots__ = []
def __repr__(self):
"""Return str representation."""
return '<ShowPool: {}>'.format(self.name)
# pylint: disable-msg=too-many-arguments
def play_with_config(self, show_config: ShowConfig, start_time=None, start_callback=None, stop_callback=None,
start_step=None) -> "RunningShow":
"""Play asset from pool with config."""
return self.asset.play_with_config(show_config, start_time, start_callback, stop_callback, start_step)
# pylint: disable-msg=too-many-arguments
# pylint: disable-msg=too-many-locals
def play(self, priority=0, speed=1.0, start_step=1, callback=None,
loops=-1, sync_ms=None, manual_advance=False, show_tokens=None,
events_when_played=None, events_when_stopped=None,
events_when_looped=None, events_when_paused=None,
events_when_resumed=None, events_when_advanced=None,
events_when_stepped_back=None, events_when_updated=None,
events_when_completed=None, start_time=None, start_callback=None) -> "RunningShow":
"""Play asset from pool."""
return self.asset.play(priority, speed, start_step, callback,
loops, sync_ms, manual_advance, show_tokens,
events_when_played, events_when_stopped,
events_when_looped, events_when_paused,
events_when_resumed, events_when_advanced,
events_when_stepped_back, events_when_updated,
events_when_completed, start_time, start_callback)
# pylint: disable-msg=too-many-instance-attributes
class Show:
"""A show which can be instantiated."""
attribute = 'shows'
path_string = 'shows'
config_section = 'shows'
disk_asset_section = 'file_shows'
extensions = ('yaml',)
class_priority = 100
pool_config_section = 'show_pools'
asset_group_class = ShowPool
__slots__ = ["_autoplay_settings", "tokens", "token_values", "token_keys", "name", "total_steps", "show_steps",
"_step_cache", "machine"]
def __init__(self, machine, name):
"""Initialise show."""
self.machine = machine
self._autoplay_settings = dict()
self.tokens = set()
self.token_values = dict()
self.token_keys = dict()
self.name = name
self.total_steps = None
self.show_steps = None # type: List[Dict[str, Any]]
self._step_cache = {}
def __lt__(self, other):
"""Compare two instances."""
return id(self) < id(other)
def _get_duration(self, data, step_num, total_step_time):
total_steps_num = len(data)
step = data[step_num]
if 'duration' not in step:
if step_num == total_steps_num - 1:
# special case with an empty last step (but longer than 1 step)
if 'time' in step and len(step) == 1 and step_num != 0:
return False
return 1
if 'time' in data[step_num + 1]:
next_step_time = data[step_num + 1]['time']
if str(next_step_time)[0] == "+":
return Util.string_to_secs(next_step_time)
if total_step_time < 0: # pragma: no cover
self._show_validation_error("Absolute timing in step {} not possible because "
"there was a duration of -1 before".format(step_num), 5)
return Util.string_to_secs(next_step_time) - total_step_time
return 1
if step_num < total_steps_num - 1 and 'time' in data[step_num + 1]: # pragma: no cover
self._show_validation_error("Found invalid 'time' entry in step after {} which contains a duration. "
"Remove either of them!".format(step_num), 2)
return Util.string_to_secs(step['duration'])
def load(self, data: Optional[Dict]):
"""Load show configuration."""
self.show_steps = list()
if not isinstance(data, list): # pragma: no cover
self._show_validation_error("Show {} does not appear to be a valid show "
"config. It should be a list of steps. Did you forget the hyphen at the start "
"of your step?".format(self.name), 1)
if not data: # pragma: no cover
self._show_validation_error("Cannot load empty show", 6)
total_step_time = 0
# add empty first step if show does not start right away
if 'time' in data[0] and data[0]['time'] != 0:
self.show_steps.append({'duration': Util.string_to_secs(data[0]['time'])})
total_step_time = Util.string_to_secs(data[0]['time'])
# Loop over all steps in the show file
for step_num, step in enumerate(data):
actions = dict()
# Note: all times are stored/calculated in seconds.
# Step time can be specified as either an absolute time elapsed
# (from the beginning of the show) or a relative time (time elapsed
# since the previous step). Time strings starting with a plus sign
# (+) are treated as relative times.
# Step times are all converted to relative times internally (time
# since the previous step).
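# Worked example (illustrative step list): given steps with
#   time: 0, time: 1s, time: +2s
# step 0 gets duration 1s (next absolute time minus elapsed time) and
# step 1 gets duration 2s (the leading '+' marks a relative time).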
# Make sure there is a time entry for each step in the show file.
duration = self._get_duration(data, step_num, total_step_time)
# special case: empty last step
if duration is False:
break
elif duration == 0: # pragma: no cover
self._show_validation_error("Step {} has 0 duration".format(step_num), 7)
# Calculate the time since previous step
actions['duration'] = duration
if duration > 0 and total_step_time >= 0:
total_step_time += duration
else:
total_step_time = -1
# Now process show step actions
self._process_step_actions(step, actions)
self.show_steps.append(actions)
# Count how many total steps are in the show. We need this later
# so we can know when we're at the end of a show
self.total_steps = len(self.show_steps)
if self.total_steps == 0: # pragma: no cover
self._show_validation_error("Show is empty", 2)
self._get_tokens()
def _show_validation_error(self, msg, error_code): # pragma: no cover
raise ConfigFileError("Show {}: {}".format(self.name, msg), error_code, "show", self.name)
def _process_step_actions(self, step, actions):
if not isinstance(step, dict):
raise AssertionError("Steps in show {} need to be dicts.".format(self.name))
for key, value in step.items():
# key: the section of the show, like 'leds'
# value: dict of express settings or dict of dicts with full settings
# check to see if we know how to process this kind of entry
if key in self.machine.show_controller.show_players.keys():
actions[key] = self.machine.show_controller.show_players[key].validate_config_entry(value, self.name)
elif key not in ('duration', 'time'): # pragma: no cover
for player in self.machine.show_controller.show_players.values():
if key == player.config_file_section or key == player.machine_collection_name or \
key + "s" == player.show_section:
self._show_validation_error('Invalid section "{}:" found in show {}. '
'Did you mean "{}:" instead?'.format(key, self.name,
player.show_section), 3)
self._show_validation_error('Invalid section "{}:" found in show {}'.format(key, self.name), 4)
def _get_tokens(self):
self._walk_show(self.show_steps)
def _walk_show(self, data, path=None, list_index=None):
# walks a list of dicts, checking tokens
if not path:
path = list()
if isinstance(data, dict):
for k, v in data.items():
self._check_token(path, k, 'key')
self._walk_show(v, path + [k])
elif isinstance(data, list):
for i in data:
self._check_token(path, i, 'key')
if list_index is None:
list_index = 0
else:
list_index += 1
self._walk_show(i, path + [list_index], list_index)
else:
self._check_token(path, data, 'value')
@classmethod
def _copy_recursive(cls, data):
if isinstance(data, dict):
new_dict = dict()
for k, v in data.items():
new_dict[k] = cls._copy_recursive(v)
return new_dict
if isinstance(data, list):
new_list = list()
for i in data:
new_list.append(cls._copy_recursive(i))
return new_list
return data
def get_show_steps(self):
"""Return a copy of the show steps."""
copied_steps = []
for step in self.show_steps:
copied_steps.append(self._copy_recursive(step))
return copied_steps
def _check_token(self, path, data, token_type):
if isinstance(data, RuntimeToken):
self._add_token(data, data.token, path, token_type)
return
if not isinstance(data, str):
return
results = re.findall(r"\(([^)]+)\)", data)
if results:
for result in results:
self._add_token(data, result, path, token_type)
def _add_token(self, placeholder, token, path, token_type):
if token not in self.tokens:
self.tokens.add(token)
if token_type == 'key':
if token not in self.token_keys:
self.token_keys[token] = list()
self.token_keys[token].append(path + [placeholder])
elif token_type == 'value':
if token not in self.token_values:
self.token_values[token] = list()
self.token_values[token].append(path)
# pylint: disable-msg=too-many-arguments
def play_with_config(self, show_config: ShowConfig, start_time=None, start_running=True,
start_callback=None, stop_callback=None, start_step=None) -> "RunningShow":
"""Play this show with config."""
if not start_time:
start_time = self.machine.clock.get_time()
running_show = RunningShow(machine=self.machine,
show=self,
start_time=start_time,
start_step=int(start_step),
start_running=start_running,
callback=stop_callback,
start_callback=start_callback,
show_config=show_config)
return running_show
# pylint: disable-msg=too-many-arguments
# pylint: disable-msg=too-many-locals
def play(self, priority=0, speed=1.0, start_step=1, callback=None,
loops=-1, sync_ms=None, manual_advance=False, show_tokens=None,
events_when_played=None, events_when_stopped=None,
events_when_looped=None, events_when_paused=None,
events_when_resumed=None, events_when_advanced=None,
events_when_stepped_back=None, events_when_updated=None,
events_when_completed=None, start_time=None, start_callback=None,
start_running=True) -> "RunningShow":
"""Play a Show.
There are many parameters you can use here which
affect how the show is played. This includes things like the playback
speed, priority, etc. These are
all set when the show plays. (For example, you could have a Show
file which lights a bunch of lights sequentially in a circle pattern,
but you can have that circle "spin" as fast as you want depending on
how you play the show.)
Args:
priority: Integer value of the relative priority of this show. If
there's ever a situation where multiple shows want to control
the same item, the one with the higher priority will win.
("Higher" means a bigger number, so a show with priority 2 will
override a priority 1.)
speed: Float of how fast your show runs. Your Show files
specify step times in actual time values. When you play a
show, you specify a playback rate factor that is applied to
the time values in the show (it divides the relative show
times). The default value is 1.0 (uses the time values
specified in the show), but you can speed up (speed
values > 1.0) or slow down (speed values < 1.0) the
playback rate. If you want your show to play twice as fast
(finish in half the time), you want all your time values to
be half of the specified values, so you would use a speed
value of 2.0. To make the show take twice as long to
finish, you would use a speed value of 0.5.
start_step: Integer of which step in the show file the show
should start in. Usually this is 1 (start at the beginning
of the show), but you can start part way through. Also
used for restarting shows that you paused. A negative value
will count backwards from the end (-1 is the last position,
-2 is second to last, etc.). Note this is the "human readable"
step, so the first step is 1, not 0.
start_running: Boolean of whether this show should start in a running
state, i.e. begin advancing through steps. If false, the show will
load the first step and enter a paused state. Default value is true.
callback: A callback function that is invoked when the show is
stopped.
loops: Integer of how many times you want this show to repeat
before stopping. A value of -1 means that it repeats
indefinitely. If the show only has one step, loops will be set
to 0, regardless of the actual number of loops.
sync_ms: Number of ms of the show sync cycle. A value of zero means
this show will also start playing immediately. A value of None
means the mpf:default_show_sync_ms will be used.
manual_advance: Boolean that controls whether this show should be
advanced manually (e.g. time values are ignored and the show
doesn't move to the next step until it's told to.) Default is
False.
show_tokens: Replacement tokens for the show
events_when_played: Events to post when show is started
events_when_stopped: Events to post when show is stopped
events_when_looped: Events to post when show looped
events_when_paused: Events to post when show is paused
events_when_resumed: Events to post when show is resumed after it has been paused
events_when_advanced: Events to post when show is advanced
events_when_stepped_back: Events to post when show is stepped back
events_when_updated: Events to post when show is updated
events_when_completed: Events to post when show completed
start_time: Time when this show was started. This is used to synchronize shows
start_callback: Callback when the show is first started
Return the RunningShow() instance if this show plays now, or False if
the show is not loaded. (In this case the show will be loaded and will
automatically play once it's loaded.)
"""
if not show_tokens:
show_tokens = dict()
# todo if we want to enforce that show_tokens match the tokens in the
# show exactly, uncomment below and remove the following if.
# however we don't do this today because of the default 'off' show
# that's used since it has lights and leds, so we'll have to think
# about this.
# if set(show_tokens.keys()) != self.tokens:
# raise ValueError('Token mismatch while playing show "{}". Tokens '
# 'expected: {}. Tokens submitted: {}'.format(
# self.name, self.tokens, set(show_tokens.keys())))
if not set(show_tokens.keys()).issubset(self.tokens): # pragma: no cover
raise ValueError('Token mismatch while playing show "{}". Tokens '
'expected: {}. Tokens submitted: {}'.
format(self.name, self.tokens, set(show_tokens.keys())))
show_config = self.machine.show_controller.create_show_config(
self.name, priority, speed, loops, sync_ms, manual_advance, show_tokens, events_when_played,
events_when_stopped, events_when_looped, events_when_paused, events_when_resumed, events_when_advanced,
events_when_stepped_back, events_when_updated, events_when_completed)
return self.play_with_config(show_config, start_time, start_running, start_callback, callback, start_step)
def get_show_steps_with_token(self, show_tokens):
"""Get show steps and replace additional tokens."""
if show_tokens and self.tokens:
token_hash = hash(str(show_tokens))
if token_hash in self._step_cache:
return self._step_cache[token_hash]
show_steps = self.get_show_steps()
# if we need to replace more tokens copy the show
self._replace_token_values(show_steps, show_tokens)
self._replace_token_keys(show_steps, show_tokens)
for step in show_steps:
for key, value in step.items():
if key in self.machine.show_controller.show_players.keys():
step[key] = self.machine.show_controller.show_players[key].expand_config_entry(value)
self._step_cache[token_hash] = show_steps
return show_steps
# otherwise return show steps. the caller should not change them
return self.show_steps
def _replace_token_values(self, show_steps, show_tokens):
for token, replacement in show_tokens.items():
if token in self.token_values:
for token_path in self.token_values[token]:
target = show_steps
for x in token_path[:-1]:
target = target[x]
if isinstance(target[token_path[-1]], RuntimeToken):
target[token_path[-1]] = target[token_path[-1]].validator_function(replacement, None)
elif target[token_path[-1]] == "(" + token + ")":
target[token_path[-1]] = replacement
else:
target[token_path[-1]] = target[token_path[-1]].replace("(" + token + ")", replacement)
return show_steps
def _replace_token_keys(self, show_steps, show_tokens):
keys_replaced = dict()
# pylint: disable-msg=too-many-nested-blocks
for token, replacement in show_tokens.items():
if token in self.token_keys:
key_name = '({})'.format(token)
for token_path in self.token_keys[token]:
target = show_steps
token_str = ""
for x in token_path[:-1]:
if token_str in keys_replaced:
x = keys_replaced[token_str + str(x) + "-"]
token_str += str(x) + "-"
target = target[x]
use_string_replace = bool(token_path[-1] != "(" + token + ")")
final_key = token_path[-1]
# check if key has been replaced before
final_key = keys_replaced.get(final_key, final_key)
if use_string_replace:
replaced_key = final_key.replace("(" + token + ")", replacement)
else:
replaced_key = replacement
if final_key in target:
target[replaced_key] = target.pop(final_key)
else:
raise KeyError("Could not find token {} ({}) in {}".format(final_key, key_name, target))
keys_replaced[token_str] = replaced_key
return show_steps
# This class is more or less a container
# pylint: disable-msg=too-many-instance-attributes
class RunningShow:
"""A running instance of a show."""
__slots__ = ["machine", "show", "show_steps", "show_config", "callback", "start_step", "start_running",
"start_callback", "_delay_handler", "next_step_index", "current_step_index", "next_step_time",
"name", "loops", "id", "_players", "debug", "_stopped", "_total_steps", "context"]
# pylint: disable-msg=too-many-arguments
# pylint: disable-msg=too-many-locals
def __init__(self, machine, show, start_step, start_running, callback, start_time, start_callback, show_config):
"""Initialise an instance of a show."""
self.machine = machine
self.show = show
self.show_config = show_config
self.callback = callback
self.start_step = start_step
self.start_running = start_running
self.start_callback = start_callback
self._delay_handler = None
self.next_step_index = None
self.current_step_index = None
self.next_step_time = start_time
self.name = show.name
self.loops = self.show_config.loops
self.id = self.machine.show_controller.get_next_show_id()
self.context = "show_{}".format(self.id)
self._players = set()
self.debug = False
self._stopped = False
self._total_steps = None
self.show_steps = self.show.get_show_steps_with_token(self.show_config.show_tokens)
self._start_play()
def _start_play(self):
if self._stopped:
return
self._total_steps = len(self.show_steps)
if self.start_step > 0:
self.next_step_index = self.start_step - 1
elif self.start_step < 0:
self.next_step_index = self.start_step % self._total_steps
else:
self.next_step_index = 0
# Figure out the show start time
if self.show_config.sync_ms:
# calculate next step based on synchronized start time
self.next_step_time += (self.show_config.sync_ms / 1000.0) - (self.next_step_time %
(self.show_config.sync_ms / 1000.0))
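# e.g. with sync_ms=1000 and next_step_time=12.34 the start is deferred
# to t=13.0, the next whole sync boundary (illustrative numbers).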
# but wait relative to real time
delay_secs = self.next_step_time - self.machine.clock.get_time()
self._delay_handler = self.machine.clock.schedule_once(
self._start_now, delay_secs)
else: # run now
self._start_now()
def _post_events(self, events):
for event in events:
self.machine.events.post(event)
def __repr__(self):
"""Return str representation."""
return 'Running Show Instance: "{}" {} {}'.format(self.name, self.show_config.show_tokens, self.next_step_index)
@property
def stopped(self):
"""Return if stopped."""
return self._stopped
def stop(self):
"""Stop show."""
if self._stopped:
return
self._stopped = True
# if the start callback has never been called then call it now
if self.start_callback:
self.start_callback()
self.start_callback = None
self._remove_delay_handler()
# clear context in used players
for player in self._players:
self.machine.show_controller.show_players[player].show_stop_callback(self.context)
self._players = set()
if self.callback and callable(self.callback):
self.callback()
if self.show_config.events_when_stopped:
self._post_events(self.show_config.events_when_stopped)
def _remove_delay_handler(self):
if self._delay_handler:
self.machine.clock.unschedule(self._delay_handler)
self._delay_handler = None
def pause(self):
"""Pause show."""
self._remove_delay_handler()
if self.show_config.events_when_paused:
self._post_events(self.show_config.events_when_paused)
def resume(self):
"""Resume paused show."""
self.next_step_time = self.machine.clock.get_time()
self._run_next_step(post_events=self.show_config.events_when_resumed)
def update(self, **kwargs):
"""Update show.
Not implemented yet.
"""
# todo
raise NotImplementedError("Show update is not implemented yet. It's "
"coming though...")
# don't forget this when we implement this feature
# self._post_events(['updated'])
def advance(self, steps=1, show_step=None):
"""Manually advance this show to the next step."""
self._remove_delay_handler()
if steps != 1:
self.next_step_index += steps - 1
elif show_step is not None:
if not isinstance(show_step, int) or show_step < 0:
raise AssertionError('Cannot advance {} to step "{}" as that is '
'not a valid step number.'.format(self, show_step))
self.next_step_index = show_step - 1
self._run_next_step(post_events=self.show_config.events_when_advanced)
def step_back(self, steps=1):
"""Manually step back this show to a previous step."""
self._remove_delay_handler()
self.next_step_index -= steps + 1
self._run_next_step(post_events=self.show_config.events_when_stepped_back)
def _start_now(self) -> None:
"""Start playing the show."""
if self.start_callback:
self.start_callback()
self.start_callback = None
pause_after_step = not self.start_running
self._run_next_step(post_events=self.show_config.events_when_played,
pause_after_step=pause_after_step)
def _run_next_step(self, post_events=None, pause_after_step=False) -> None:
"""Run the next show step."""
events = []
if post_events:
events.extend(post_events)
if self.next_step_index < 0:
self.next_step_index %= self._total_steps
# if we're at the end of the show
if self.next_step_index >= self._total_steps:
if self.loops > 0:
self.loops -= 1
self.next_step_index = 0
if self.show_config.events_when_looped:
events.extend(self.show_config.events_when_looped)
elif self.loops < 0:
self.next_step_index = 0
if self.show_config.events_when_looped:
events.extend(self.show_config.events_when_looped)
else:
self.stop()
if self.show_config.events_when_completed:
events.extend(self.show_config.events_when_completed)
self._post_events(events)
return
self.current_step_index = self.next_step_index
for item_type, item_dict in self.show_steps[self.current_step_index].items():
if item_type == 'duration':
continue
try:
player = self.machine.show_controller.show_players[item_type]
except KeyError:
raise ValueError("Invalid entry in show: {}".format(item_type))
player.show_play_callback(
settings=item_dict,
context=self.context,
calling_context=self.current_step_index,
priority=self.show_config.priority,
show_tokens=self.show_config.show_tokens,
start_time=self.next_step_time)
self._players.add(item_type)
if events:
self._post_events(events)
self.next_step_index += 1
time_to_next_step = self.show_steps[self.current_step_index]['duration'] / self.show_config.speed
if not self.show_config.manual_advance and time_to_next_step > 0 and not pause_after_step:
self.next_step_time += time_to_next_step
self._delay_handler = self.machine.clock.loop.call_at(when=self.next_step_time,
callback=self._run_next_step)
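# Minimal runnable sketch (not part of MPF): Show._copy_recursive deep-copies
# nested dict/list step data, which is what get_show_steps() relies on so that
# token replacement never mutates the cached show_steps.
if __name__ == "__main__":
    original = [{"lights": {"(led)": "red"}, "duration": 1.0}]
    copied = [Show._copy_recursive(step) for step in original]
    copied[0]["lights"]["(led)"] = "blue"
    print(original[0]["lights"]["(led)"])  # still 'red'; the copy is independent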
|
#!/usr/bin/python
################################################################################
# 20cb92a6-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "20cb92a6-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = True
# Get Accounts
usernames = cli.get_secedit_account('NewAdministratorName')
# Output Lines
self.output = [("NewAdministratorName=")] + usernames
# Banned Usernames
banned_usernames = ("Administrator")
for user in usernames:
if user.lower().strip('"') in banned_usernames.lower():
self.is_compliant = False
return self.is_compliant
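# Illustrative sketch: exercising the check with a minimal stand-in for the
# real cli helper (the stub below is hypothetical, not part of the project).
if __name__ == "__main__":
    class _StubCli:
        def get_secedit_account(self, name):
            return ['"Administrator"']
    finding = Finding()
    print(finding.check(_StubCli()))  # False: the default name is banned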
|
import click
import concurrent.futures
import sqlite_utils
from sqlite_utils.db import OperationalError
try:
import osxphotos
except ImportError:
osxphotos = None
import sqlite3
import boto3
import json
import pathlib
from .utils import (
calculate_hash,
image_paths,
CONTENT_TYPES,
get_all_keys,
osxphoto_to_row,
osxphoto_to_score_row,
to_uuid,
s3_upload,
hash_and_size_path,
)
@click.group()
@click.version_option()
def cli():
"Save details of your photos to a SQLite database and upload them to S3"
@cli.command(name="s3-auth")
@click.option(
"-a",
"--auth",
type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
default="auth.json",
help="Path to save tokens to, defaults to auth.json",
)
def s3_auth(auth):
"Save S3 credentials to a JSON file"
click.echo("Create S3 credentials and paste them here:")
click.echo()
bucket = click.prompt("S3 bucket")
access_key_id = click.prompt("Access key ID")
secret_access_key = click.prompt("Secret access key")
s3_endpoint = click.prompt("S3 Endpoint (Press ENTER for default)", default="")
if pathlib.Path(auth).exists():
auth_data = json.load(open(auth))
else:
auth_data = {}
auth_data.update(
{
"photos_s3_bucket": bucket,
"photos_s3_endpoint": s3_endpoint or None,
"photos_s3_access_key_id": access_key_id,
"photos_s3_secret_access_key": secret_access_key,
}
)
open(auth, "w").write(json.dumps(auth_data, indent=4) + "\n")
@cli.command()
@click.argument(
"db_path",
type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
required=True,
)
@click.argument(
"directories",
nargs=-1,
type=click.Path(file_okay=False, dir_okay=True, allow_dash=False),
)
@click.option(
"-a",
"--auth",
type=click.Path(file_okay=True, dir_okay=False, allow_dash=True),
default="auth.json",
help="Path to auth.json token file",
)
@click.option(
"--no-progress", is_flag=True, help="Don't show progress bar",
)
@click.option(
"--dry-run", is_flag=True, help="Don't upload, just show what would happen",
)
def upload(db_path, directories, auth, no_progress, dry_run):
"Upload photos from directories to S3"
creds = json.load(open(auth))
db = sqlite_utils.Database(db_path)
endpoint_url = creds.get("photos_s3_endpoint")
client = boto3.client(
"s3",
endpoint_url=endpoint_url,
aws_access_key_id=creds["photos_s3_access_key_id"],
aws_secret_access_key=creds["photos_s3_secret_access_key"],
)
click.echo("Fetching existing keys from S3...")
existing_keys = {
key.split(".")[0] for key in get_all_keys(client, creds["photos_s3_bucket"])
}
click.echo("Got {:,} existing keys".format(len(existing_keys)))
# Now calculate sizes and hashes for files
paths = list(image_paths(directories))
hash_and_size = {}
hash_bar = None
if not no_progress:
hash_bar = click.progressbar(paths, label="Calculating hashes")
# hashlib docs say: 'For better multithreading performance, the Python GIL is
# released for data larger than 2047 bytes at object creation or on update'
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future_to_path = {
executor.submit(hash_and_size_path, path.resolve()): path for path in paths
}
for future in concurrent.futures.as_completed(future_to_path):
path, sha256, size = future.result()
if hash_bar:
hash_bar.update(1)
hash_and_size[path] = (sha256, size)
hashes = {v[0] for v in hash_and_size.values()}
new_paths = [p for p in hash_and_size if hash_and_size[p][0] not in existing_keys]
click.echo(
"\n{:,} hashed files, {:,} are not yet in S3".format(
len(hashes), len(new_paths)
)
)
uploads = db.table("uploads", pk="sha256")
total_size = None
bar = None
if dry_run or not no_progress:
# Calculate total size first
total_size = sum(hash_and_size[p][1] for p in new_paths)
click.echo(
"{verb} {num:,} files, {total_size:.2f} GB".format(
verb="Would upload" if dry_run else "Uploading",
num=len(new_paths),
total_size=total_size / (1024 * 1024 * 1024),
)
)
bar = click.progressbar(
length=len(new_paths),
label="Uploading {size:,} files".format(size=len(new_paths)),
show_eta=True,
show_pos=True,
)
if dry_run:
return
# Upload photos in a thread pool
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
future_to_path = {}
for path in new_paths:
ext = path.suffix.lstrip(".")
sha256, size = hash_and_size[path]
future = executor.submit(s3_upload, path, sha256, ext, creds)
future_to_path[future] = path
for future in concurrent.futures.as_completed(future_to_path):
path = future.result()
sha256, size = hash_and_size[path]
ext = path.suffix.lstrip(".")
uploads.upsert(
{"sha256": sha256, "filepath": str(path), "ext": ext, "size": size}
)
if bar:
bar.update(1)
@cli.command(name="apple-photos")
@click.argument(
"db_path",
type=click.Path(file_okay=True, dir_okay=False, allow_dash=False),
required=True,
)
@click.option(
"--library",
type=click.Path(file_okay=False, dir_okay=True, allow_dash=False),
help="Location of Photos library to import",
)
@click.option(
"--image-url-prefix",
help="URL prefix of hosted images - suffix will be sha256.ext",
)
@click.option(
"--image-url-suffix", help="URL suffix of hosted images, e.g. ?w=600", default=""
)
def apple_photos(db_path, library, image_url_prefix, image_url_suffix):
"Import photo metadata from Apple Photos"
if osxphotos is None:
raise click.ClickException("Missing dependency osxphotos")
db = sqlite_utils.Database(db_path)
# Ensure index
try:
db["uploads"].create_index(["filepath"])
except OperationalError:
pass
if library:
photosdb = osxphotos.PhotosDB(library)
else:
photosdb = osxphotos.PhotosDB()
db.conn.execute("ATTACH DATABASE '{}' AS attached".format(photosdb._tmp_db))
if "apple_photos_scores" in db.table_names():
db["apple_photos_scores"].drop()
db.conn.execute(
"""
CREATE TABLE apple_photos_scores(
ZUUID TEXT,
ZOVERALLAESTHETICSCORE REAL,
ZCURATIONSCORE REAL,
ZPROMOTIONSCORE REAL,
ZHIGHLIGHTVISIBILITYSCORE REAL,
ZBEHAVIORALSCORE REAL,
ZFAILURESCORE REAL,
ZHARMONIOUSCOLORSCORE REAL,
ZIMMERSIVENESSSCORE REAL,
ZINTERACTIONSCORE REAL,
ZINTERESTINGSUBJECTSCORE REAL,
ZINTRUSIVEOBJECTPRESENCESCORE REAL,
ZLIVELYCOLORSCORE REAL,
ZLOWLIGHT REAL,
ZNOISESCORE REAL,
ZPLEASANTCAMERATILTSCORE REAL,
ZPLEASANTCOMPOSITIONSCORE REAL,
ZPLEASANTLIGHTINGSCORE REAL,
ZPLEASANTPATTERNSCORE REAL,
ZPLEASANTPERSPECTIVESCORE REAL,
ZPLEASANTPOSTPROCESSINGSCORE REAL,
ZPLEASANTREFLECTIONSSCORE REAL,
ZPLEASANTSYMMETRYSCORE REAL,
ZSHARPLYFOCUSEDSUBJECTSCORE REAL,
ZTASTEFULLYBLURREDSCORE REAL,
ZWELLCHOSENSUBJECTSCORE REAL,
ZWELLFRAMEDSUBJECTSCORE REAL,
ZWELLTIMEDSHOTSCORE REAL
);
"""
)
db["apple_photos_scores"].create_index(["ZUUID"])
skipped = []
with click.progressbar(photosdb.photos()) as photos:
for photo in photos:
rows = list(db["uploads"].rows_where("filepath=?", [photo.path]))
if rows:
sha256 = rows[0]["sha256"]
else:
if photo.ismissing:
print("Missing: {}".format(photo))
continue
sha256 = calculate_hash(pathlib.Path(photo.path))
photo_row = osxphoto_to_row(sha256, photo)
db["apple_photos"].insert(
photo_row, pk="uuid", replace=True, alter=True,
)
score_row = osxphoto_to_score_row(photo)
db["apple_photos_scores"].insert(score_row, pk="ZUUID", replace=True, alter=True
)
# Ensure indexes
for column in ("date", "sha256"):
try:
db["apple_photos"].create_index([column])
except OperationalError:
pass
db.create_view(
"photos_with_apple_metadata",
"""
select
apple_photos.rowid,{}
apple_photos.uuid,
apple_photos.date,
apple_photos.albums,
apple_photos.persons,
uploads.ext,
uploads.sha256,
uploads.size,
latitude,
longitude,
favorite,
portrait,
screenshot,
slow_mo,
time_lapse,
hdr,
selfie,
panorama,
place_city,
place_state_province,
place_country,
apple_photos_scores.*
from
apple_photos
join
uploads on apple_photos.sha256 = uploads.sha256
left join
apple_photos_scores on apple_photos.uuid = apple_photos_scores.ZUUID
order by
apple_photos.date desc
""".format(
"""
json_object(
'img_src',
'{}' || uploads.sha256 || '.' || uploads.ext || '{}'
) as photo,""".format(
image_url_prefix, image_url_suffix
)
if image_url_prefix
else ""
),
replace=True,
)
# Last step: import the labels
labels_db_path = photosdb._dbfile_actual.parent / "search" / "psi.sqlite"
if labels_db_path.exists():
labels_db = sqlite3.connect(str(labels_db_path))
if db["labels"].exists():
db["labels"].drop()
def all_labels():
result = labels_db.execute(
"""
select
ga.rowid,
assets.uuid_0,
assets.uuid_1,
groups.rowid as groupid,
groups.category,
groups.owning_groupid,
groups.content_string,
groups.normalized_string,
groups.lookup_identifier
from
ga
join groups on groups.rowid = ga.groupid
join assets on ga.assetid = assets.rowid
order by
ga.rowid
"""
)
cols = [c[0] for c in result.description]
for row in result.fetchall():
record = dict(zip(cols, row))
id = record.pop("rowid")
uuid = to_uuid(record.pop("uuid_0"), record.pop("uuid_1"))
# Strip out the `\u0000` characters:
for key in record:
if isinstance(record[key], str):
record[key] = record[key].replace("\x00", "")
yield {"id": id, "uuid": uuid, **record}
db["labels"].insert_all(all_labels(), pk="id", replace=True)
db["labels"].create_index(["uuid"], if_not_exists=True)
db["labels"].create_index(["normalized_string"], if_not_exists=True)
@cli.command(name="create-subset")
@click.argument(
"db_path",
type=click.Path(file_okay=True, dir_okay=False, allow_dash=False, exists=True),
)
@click.argument(
"new_db_path",
type=click.Path(file_okay=True, dir_okay=False, allow_dash=False, exists=False),
)
@click.argument("sql",)
def create_subset(db_path, new_db_path, sql):
"Create a new subset database of photos with sha256 matching those returned by this SQL query"
db = sqlite_utils.Database(db_path)
new_db = sqlite_utils.Database(new_db_path)
# Use the schema from the old database to create tables in the new database
for result in db.conn.execute(
"select sql from sqlite_master where sql is not null"
):
new_db.conn.execute(result[0])
# Figure out the photos to copy across
sha256s = [r[0] for r in db.conn.execute(sql).fetchall()]
# Copy across apple_photos, apple_photos_scores, uploads
db.conn.execute("ATTACH DATABASE '{}' AS [{}]".format(str(new_db_path), "newdb"))
# First apple_photos
with db.conn:
sql = """
INSERT INTO
newdb.apple_photos
SELECT * FROM apple_photos WHERE sha256 in ({})
""".format(
", ".join("'{}'".format(sha256) for sha256 in sha256s)
)
db.conn.execute(sql)
# Now the other tables
for sql in (
"""
INSERT INTO
newdb.apple_photos_scores
SELECT * FROM apple_photos_scores WHERE ZUUID in (select uuid from newdb.apple_photos)
""",
"""INSERT INTO
newdb.labels
SELECT * FROM labels WHERE uuid in (select uuid from newdb.apple_photos)""",
"""
INSERT INTO
newdb.uploads
SELECT * FROM uploads WHERE sha256 in (select sha256 from newdb.apple_photos)
""",
):
with db.conn:
db.conn.execute(sql)
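# Illustrative sketch (hypothetical file and directory names): driving the CLI
# with click's test runner. Assumes auth.json has already been created via the
# s3-auth command; --dry-run reports what would be uploaded without uploading.
if __name__ == "__main__":
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(cli, ["upload", "photos.db", "photos/", "--dry-run"])
    print(result.output)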
|
import argparse
import json
import logging
import os
import random
from io import open
import math
import sys
import pandas as pd
import requests
from time import gmtime, strftime
from timeit import default_timer as timer
import numpy as np
from tqdm import tqdm, trange
import torch
from torch.utils.data import DataLoader, Dataset, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tensorboardX import SummaryWriter
from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule
import utils as utils
import torch.distributed as dist
def create_dataset_files(name, dataset_path):
annotations = []
captions = []
images = []
labels = ['Visible', 'Subjective', 'Action', 'Story', 'Meta', 'Irrelevant', 'Other', 'When', 'Where', 'How',
'Comment']
data = pd.read_csv(dataset_path + 'train.csv', sep=',')
counter = 0
for index, row in data.iterrows():
if not image_found(index, row['url']):
continue
captions.append(row['caption'])
images.append(row['url'])
instance_annotations = []
for l in labels:
instance_annotations.append(row[l])
annotations.append(instance_annotations)
annotations = np.array(annotations)
df_annotations = pd.DataFrame(annotations, columns=list(labels))
captions = np.array(captions)
images = np.array(images)
# np.save('all_annotations.npy', annotations)
df_annotations.to_csv(dataset_path + 'all_annotations_{}.csv'.format(name))
np.save(dataset_path + 'all_captions_{}.npy'.format(name), captions)
np.save(dataset_path + 'all_images_{}.npy'.format(name), images)
return annotations, captions, images
#
def image_found(ind, pic_url):
image_formats = ("image/png", "image/jpeg", "image/jpg")
try:
response = requests.get(pic_url, stream=True, timeout=60)
if response.headers["content-type"] in image_formats:
return True
else:
print(pic_url)
print("image with index: {} is not found \n".format(ind))
return False
except:
print(pic_url)
print("image with index: {} is not found \n".format(ind))
return False
def download_images(images_url, dir_url, output_path, train_test):
labels_headers = ["Visible", 'Subjective', 'Action', 'Story', 'Meta', 'Irrelevant', 'Other']
all_captions = np.load(dir_url + "all_captions.npy", allow_pickle=True)
# all_captions = np.load(dir_url + "all_captions_{}.npy".format(train_test), allow_pickle=True)
all_captions = all_captions.ravel()
# all_targets = pd.read_csv( dir_url + "all_annotations_{}.csv".formats(train_test), index_col= 0)
all_targets = pd.read_csv(dir_url + "all_annotations.csv", index_col=0)
print(len(all_captions))
print(len(images_url))
captions = {}
targets = {}
for ind, pic_url in enumerate(images_url):
# pic_url = images_url[10]
# print(pic_url)
image_formats = ("image/png", "image/jpeg", "image/jpg")
with open(output_path + '/pic{}.jpg'.format(ind), 'wb') as handle:
try:
response = requests.get(pic_url, stream=True, timeout=60)
if response.headers["content-type"] in image_formats:
pass
else:
print("image with index: {} is not found \n".format(ind))
print(False)
captions['pic{}'.format(ind)] = all_captions[ind]
targets['pic{}'.format(ind)] = {}
for l in labels_headers:
targets['pic{}'.format(ind)][l] = str(all_targets.at[ind,l])
for block in response.iter_content(1024):
if not block:
break
handle.write(block)
except:
print(images_url[ind])
print(response)
print(all_captions[ind])
print("[INFO] error downloading {}...skipping".format(ind))
with open( dir_url + 'captions_all_json.json', 'w') as outfile:
json.dump(captions, outfile)
with open( dir_url + 'all_targets_json.json', 'w') as outfile:
json.dump(targets, outfile)
def convert_target_json(target_json_file, main_annotation_csv):
target_json = json.load(open(target_json_file,'r'))
target_main = pd.read_csv(main_annotation_csv, index_col = 0)
for image_id in target_json:
id = image_id[3:]
id = int(id)
target_json[image_id]['Other'] = target_main['Other'][id]
with open( 'all_targets_json.json', 'w') as outfile:
json.dump(target_json, outfile)
def LoadDatasets(args, task_cfg, ids, split="trainval"):
tokenizer = BertTokenizer.from_pretrained(
"bert-base-uncased", do_lower_case=True
)
task_feature_reader1 = {}
task_feature_reader2 = {}
for i, task_id in enumerate(ids):
task = "TASK" + task_id + "1"
if task_cfg[task]["features_h5path1"] not in task_feature_reader1:
task_feature_reader1[task_cfg[task]["features_h5path1"]] = None
if task_cfg[task]["features_h5path2"] not in task_feature_reader2:
task_feature_reader2[task_cfg[task]["features_h5path2"]] = None
# initialize the feature reader
# for features_h5path in task_feature_reader1.keys():
# if features_h5path != "":
# task_feature_reader1[features_h5path] = ImageFeaturesH5Reader(
# features_h5path, args.in_memory
# )
# for features_h5path in task_feature_reader2.keys():
# if features_h5path != "":
# task_feature_reader2[features_h5path] = ImageFeaturesH5Reader(
# features_h5path, args.in_memory
# )
#
# task_datasets_train = {}
# task_datasets_val = {}
# task_dataloader_train = {}
# task_dataloader_val = {}
# task_ids = []
# task_batch_size = {}
# task_num_iters = {}
#
# for i, task_id in enumerate(ids):
# task = "TASK" + task_id
# task_name = task_cfg[task]["name"]
# task_ids.append(task)
# batch_size = task_cfg[task]["batch_size"] // args.gradient_accumulation_steps
# num_workers = args.num_workers
# if args.local_rank != -1:
# batch_size = int(batch_size / dist.get_world_size())
# num_workers = int(num_workers / dist.get_world_size())
#
# # num_workers = int(num_workers / len(ids))
# logger.info(
# "Loading %s Dataset with batch size %d"
# % (task_cfg[task]["name"], batch_size)
# )
#
# task_datasets_train[task] = None
# if "train" in split:
# task_datasets_train[task] = DatasetMapTrain[task_name](
# task=task_cfg[task]["name"],
# dataroot=task_cfg[task]["dataroot"],
# annotations_jsonpath=task_cfg[task]["train_annotations_jsonpath"],
# split=task_cfg[task]["train_split"],
# image_features_reader=task_feature_reader1[
# task_cfg[task]["features_h5path1"]
# ],
# gt_image_features_reader=task_feature_reader2[
# task_cfg[task]["features_h5path2"]
# ],
# tokenizer=tokenizer,
# bert_model=args.bert_model,
# clean_datasets=args.clean_train_sets,
# padding_index=0,
# max_seq_length=task_cfg[task]["max_seq_length"],
# max_region_num=task_cfg[task]["max_region_num"],
# )
#
# task_datasets_val[task] = None
# if "val" in split:
# task_datasets_val[task] = DatasetMapTrain[task_name](
# task=task_cfg[task]["name"],
# dataroot=task_cfg[task]["dataroot"],
# annotations_jsonpath=task_cfg[task]["val_annotations_jsonpath"],
# split=task_cfg[task]["val_split"],
# image_features_reader=task_feature_reader1[
# task_cfg[task]["features_h5path1"]
# ],
# gt_image_features_reader=task_feature_reader2[
# task_cfg[task]["features_h5path2"]
# ],
# tokenizer=tokenizer,
# bert_model=args.bert_model,
# clean_datasets=args.clean_train_sets,
# padding_index=0,
# max_seq_length=task_cfg[task]["max_seq_length"],
# max_region_num=task_cfg[task]["max_region_num"],
# )
#
# task_num_iters[task] = 0
# task_batch_size[task] = 0
# if "train" in split:
# if args.local_rank == -1:
# train_sampler = RandomSampler(task_datasets_train[task])
# else:
# # TODO: check if this works with current data generator from disk that relies on next(file)
# # (it doesn't return item back by index)
# train_sampler = DistributedSampler(task_datasets_train[task])
#
# task_dataloader_train[task] = DataLoader(
# task_datasets_train[task],
# sampler=train_sampler,
# batch_size=batch_size,
# num_workers=num_workers,
# pin_memory=True,
# )
#
# task_num_iters[task] = len(task_dataloader_train[task])
# task_batch_size[task] = batch_size
#
# if "val" in split:
# task_dataloader_val[task] = DataLoader(
# task_datasets_val[task],
# shuffle=False,
# batch_size=batch_size,
# num_workers=2,
# pin_memory=True,
# )
#
# return (
# task_batch_size,
# task_num_iters,
# task_ids,
# task_datasets_train,
# task_datasets_val,
# task_dataloader_train,
# task_dataloader_val,
# )
def create_train_test(datapath):
data = pd.read_csv(datapath, sep='\t')
labels_headers = ["Visible", 'Subjective', 'Action', 'Story', 'Meta', 'Irrelevant']
X = [i for i in range(data.shape[0])]
X = np.array(X)
y = data[labels_headers].values
(X_train, y_train), (X_test, y_test) = train_test_split(X,y)
train = data.iloc[X_train]
test = data.iloc[X_test]
train.to_csv('train.csv')
test.to_csv('test.csv')
def train_test_split(X, y, test_size=0.33):
from iterstrat.ml_stratifiers import MultilabelStratifiedShuffleSplit
msss = MultilabelStratifiedShuffleSplit(n_splits=2, test_size=test_size, random_state=0)
for train_index, test_index in msss.split(X, y):
print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
return (X_train, y_train), (X_test, y_test)
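# Minimal sketch of the stratified split on synthetic data (illustrative
# shapes; requires the iterative-stratification package):
#   X_demo = np.arange(10)
#   y_demo = np.random.randint(0, 2, size=(10, 6))
#   (X_tr, y_tr), (X_te, y_te) = train_test_split(X_demo, y_demo, test_size=0.3)
#   print(X_tr.shape, X_te.shape)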
if __name__ == '__main__':
print("here")
# create_dataset_files('train', '/Users/sinamalakouti/PycharmProjects/vilbert-multi-task/data/discoursedata/train/')
# create_train_test('/Users/sinamalakouti/PycharmProjects/DiscourseRelationProject/data/dataset123/data-both-04-08-cleaned.tsv')
# images = np.load("../data/discoursedata/test/all_images_test.npy").ravel()
# print(images)
# bert_model = "bert-base-uncased"
# bert_weight_name = json.load(
# open("./../config/" + bert_model + "_weight_name.json", "r")
# )
# task_batch_size, task_num_iters, task_ids, task_datasets_train, task_datasets_val, task_dataloader_train, task_dataloader_val = LoadDatasets(
# '', "task_cfg", ""
# )
# print(an.shape)
# print(cap.shape)
# print(images.shape)
# print(images)
print("ji")
print(os.path.abspath("./data/discoursedata/train/all_images.npy"))
images = np.load("./data/discoursedata/train/all_images.npy").ravel()
download_images(images,"./data/discoursedata/train/", "./data/discoursedata/train/images",'train')
|
import taxcalc
|
#
# Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import argparse
import os
import pathlib
import re
import sys
import subprocess
import time
import logging
CURRENT_PATH = pathlib.Path(os.path.dirname(__file__)).absolute()
# The engine root is based on the location of this file (<ENGINE_ROOT>/scripts/build/Platform/Android). Walk up to calculate the engine root
ENGINE_ROOT = CURRENT_PATH.parents[3]
class AndroidEmuError(Exception):
pass
def get_android_sdk_path():
try:
android_sdk_env = os.getenv('LY_ANDROID_SDK')
if not android_sdk_env:
raise AndroidEmuError("LY_ANDROID_SDK environment variable is not set")
android_sdk_path = pathlib.Path(android_sdk_env)
if not android_sdk_path.is_dir():
raise AndroidEmuError(f"Android SDK Path ('{android_sdk_path}') set with the LY_ANDROID_SDK variable is invalid")
#TODO: Sanity check on necessary files
return android_sdk_path
except Exception as err:
raise AndroidEmuError(f"Unable to determine android SDK path: {err}")
class Command(object):
def __init__(self, tool_name, tool_path, run_as_shell=True):
if not tool_path.is_file():
raise AndroidEmuError(f"Invalid path for {tool_name}. Cannot find ('{tool_path.absolute()}')")
self.tool_path = tool_path
self.run_as_shell = run_as_shell
def run_return_output(self, cmd_args):
args = [str(self.tool_path)]
if isinstance(cmd_args, str):
args.append(cmd_args)
elif isinstance(cmd_args, list):
args.extend(cmd_args)
else:
assert False, "run_return_output argument must be a string or list of strings"
full_cmd = subprocess.list2cmdline(args)
logging.debug(f"run_return_output: {full_cmd}")
run_result = subprocess.run(args,
capture_output=True,
encoding='UTF-8',
errors='ignore',
shell=self.run_as_shell)
if run_result.returncode != 0:
raise AndroidEmuError(f"Error executing command '{full_cmd}' (return code {run_result.returncode}): {run_result.stderr}")
return run_result.stdout
def run(self, cmd_args, cwd=None, suppress_output=False):
args = [str(self.tool_path)]
if isinstance(cmd_args, str):
args.append(cmd_args)
elif isinstance(cmd_args, list):
args.extend(cmd_args)
else:
assert False, "run_return_output argument must be a string or list of strings"
full_cmd = subprocess.list2cmdline(args)
logging.debug(f"run: {full_cmd}")
run_result = subprocess.run(args,
#stdout=subprocess.DEVNULL if suppress_output else subprocess.STDOUT,
capture_output=False,
shell=self.run_as_shell,
cwd=cwd)
if run_result.returncode != 0:
raise AndroidEmuError(f"Error executing command '{full_cmd}' (return code {run_result.returncode}): {run_result.stderr}")
def run_process(self, cmd_args):
args = [str(self.tool_path)]
if isinstance(cmd_args, str):
args.append(cmd_args)
elif isinstance(cmd_args, list):
args.extend(cmd_args)
else:
assert False, "run_return_output argument must be a string or list of strings"
full_cmd = subprocess.list2cmdline(args)
logging.debug(f"run_process: {full_cmd}")
process = subprocess.Popen(args,
shell=True,
stdout=subprocess.PIPE,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP | subprocess.NORMAL_PRIORITY_CLASS |
subprocess.CREATE_NO_WINDOW,
encoding='UTF-8',
errors='ignore')
return process
class AndroidEmulatorManager(object):
UNIT_TEST_AVD_NAME = "LY_UNITTEST_AVD"
UNIT_TEST_SYSTEM_IMAGE_PACKAGE = "android-30;google_apis;x86_64"
UNIT_TEST_DEVICE_TEMPLATE_NAME = "pixel_xl"
UNIT_TEST_DEVICE_SETTINGS_MAP = {
"disk.dataPartition.size": "32G",
"vm.heapSize": "1024",
"hw.ramSize": "2048",
"hw.sdCard": "no"
}
EMULATOR_STARTUP_TIMEOUT_SECS = 60*5 # Set the emulator startup timeout to 5 minutes
def __init__(self, base_android_sdk_path, hide_emulator_windows=True, force_avd_creation=False, emulator_startup_timeout=EMULATOR_STARTUP_TIMEOUT_SECS):
self.android_sdk_path = base_android_sdk_path
self.force_avd_creation = force_avd_creation
self.unit_test_avd_name = AndroidEmulatorManager.UNIT_TEST_AVD_NAME
self.unit_test_device_template_name = AndroidEmulatorManager.UNIT_TEST_DEVICE_TEMPLATE_NAME
self.unit_test_device_settings_map = AndroidEmulatorManager.UNIT_TEST_DEVICE_SETTINGS_MAP
self.unit_test_avd_system_image = AndroidEmulatorManager.UNIT_TEST_SYSTEM_IMAGE_PACKAGE
self.hide_emulator_windows = hide_emulator_windows
self.emulator_startup_timeout = emulator_startup_timeout
self.emulator_cmd = Command("Emulator", self.android_sdk_path / 'emulator' / 'emulator.exe')
self.avd_manager_cmd = Command("AVD Manager", self.android_sdk_path / 'tools' / 'bin' / 'avdmanager.bat')
self.sdk_manager_cmd = Command("SDK Manager", self.android_sdk_path / 'tools' / 'bin' / 'sdkmanager.bat')
self.adb_cmd = Command("ADB", self.android_sdk_path / 'platform-tools' / 'adb.exe')
def collect_android_sdk_list(self):
"""
Use the SDK Manager to get the list of installed, available, and updateable packages
:return: tuple of 3 lists: installed, available, and updateable packages
"""
result_str = self.sdk_manager_cmd.run_return_output(['--list'])
# the result will be listed out in 3 sections: Installed packages, Available Packages, and Available updates
# and each item is represented by 3 columns separated by a '|' character
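# Abridged example of the output being parsed (illustrative versions):
#   Installed packages:
#     Path           | Version | Description
#     platform-tools | 31.0.3  | Android SDK Platform-Tools
#   Available Packages:
#     ...
#   Available Updates:
#     ...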
installed_packages = []
available_packages = []
available_updates = []
current_append_list = None
for avd_item in result_str.split('\n'):
avd_item_stripped = avd_item.strip()
if not avd_item_stripped:
continue
if '|' not in avd_item_stripped:
if avd_item_stripped.upper() == 'INSTALLED PACKAGES:':
current_append_list = installed_packages
elif avd_item_stripped.upper() == 'AVAILABLE PACKAGES:':
current_append_list = available_packages
elif avd_item_stripped.upper() == 'AVAILABLE UPDATES:':
current_append_list = available_updates
else:
current_append_list = None
continue
item_parts = [split.strip() for split in avd_item_stripped.split('|')]
if len(item_parts) < 3:
continue
elif item_parts[1].upper() in ('VERSION', 'INSTALLED', '-------'):
continue
elif current_append_list is None:
continue
if current_append_list is not None:
current_append_list.append(item_parts)
return installed_packages, available_packages, available_updates
def update_installed_sdks(self):
"""
Run an SDK Manager update to make sure the SDKs are all up-to-date
"""
logging.info(f"Updating android SDK...")
self.sdk_manager_cmd.run(['--update'])
def install_system_package_if_necessary(self):
"""
Make sure that we have the correct system image installed, and install if not
"""
installed_packages, available_packages, _ = self.collect_android_sdk_list()
unit_test_sdk_package_name = f'system-images;{self.unit_test_avd_system_image}'
detected_sdk_package_version = None
for package_line_items in installed_packages:
if package_line_items[0] == unit_test_sdk_package_name:
detected_sdk_package_version = package_line_items[1]
if detected_sdk_package_version:
# Already installed
logging.info(f"Detected installed system image {self.unit_test_avd_system_image} version {detected_sdk_package_version}")
return
# Make sure it's an available image to install
detected_available_sdk_package_version = None
for package_line_items in available_packages:
if package_line_items[0] == unit_test_sdk_package_name:
detected_available_sdk_package_version = package_line_items[1]
if not detected_available_sdk_package_version:
raise AndroidEmuError(f"Unable to install required system image {self.unit_test_avd_system_image}, not found by the Android SDK Manager")
# Install the package
logging.info(f"Installing system image {self.unit_test_avd_system_image}...")
self.sdk_manager_cmd.run(['--install', unit_test_sdk_package_name])
logging.info(f"Installed Completed")
def find_device_id_by_name(self, device_name):
"""
Find a device id (from AVD Manager) by the device name
:param device_name: Name to lookup
:return: The device id
"""
result_str = self.avd_manager_cmd.run_return_output(['list', 'device'])
result_lines = [result_line.strip() for result_line in result_str.split('\n')]
result_line_count = len(result_lines)
current_index = 0
device_to_id_map = {}
while current_index < result_line_count:
current_line = result_lines[current_index]
current_index += 1
# This assumes the pattern "id: <id> or "<device name>"
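# e.g. a line like 'id: 9 or "pixel_xl"' maps to device_to_id_map["PIXEL_XL"] = "9"
# (the numeric id is illustrative and varies by SDK version).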
if current_line.startswith('id:') and 'or' in current_line:
id_and_name_combo = current_line.split('or')
id_and_value_combo = id_and_name_combo[0].split(' ')
name = id_and_name_combo[1].replace('"', '').strip().upper()
id = id_and_value_combo[1]
device_to_id_map[name] = id
if current_line.startswith('Available Android targets:'):
break
device_id = device_to_id_map.get(device_name.upper())
if not device_id:
raise AndroidEmuError(f"Unable to locate device id for '{device_name}'")
return device_id
def query_installed_avds(self):
"""
Get maps of all valid and invalid AVDs installed on the current system
:return: tuple of 2 maps (AVD Name -> Path): Valid and invalid
"""
result_str = self.avd_manager_cmd.run_return_output(['list', 'avd'])
result_lines = [result_line.strip() for result_line in result_str.split('\n')]
line_count = len(result_lines)
current_index = 0
current_name = None
current_path = None
valid_avd_to_path_map = {}
invalid_avd_to_path_map = {}
current_avd_to_path_map = valid_avd_to_path_map
while current_index < line_count:
current_line = result_lines[current_index]
current_index += 1
if current_line.startswith('Name:'):
name = current_line[6:].strip()
if current_name is not None:
current_avd_to_path_map[current_name] = current_path
current_path = None
current_name = name
elif current_line.startswith('Path:'):
current_path = current_line[6:].strip()
elif current_line.startswith('Device:'):
pass
elif 'could not be loaded:' in current_line:
if current_name is not None:
current_avd_to_path_map[current_name] = current_path
current_avd_to_path_map = invalid_avd_to_path_map
current_path = None
current_name = None
if current_name is not None:
current_avd_to_path_map[current_name] = current_path
return valid_avd_to_path_map, invalid_avd_to_path_map
def create_unitest_avd(self):
"""Create the unit test AVD"""
self.install_system_package_if_necessary()
device_id = self.find_device_id_by_name(self.unit_test_device_template_name)
self.avd_manager_cmd.run(['--silent',
'create', 'avd',
'--name', self.unit_test_avd_name,
'--package', f'system-images;{self.unit_test_avd_system_image}',
'--device', device_id])
valid_avd_map, _ = self.query_installed_avds()
unit_test_avd_path = valid_avd_map.get(self.unit_test_avd_name)
if not unit_test_avd_path:
raise AndroidEmuError(f"Unable to create unit test AVD {self.unit_test_avd_name}")
unit_test_avd_config_path = pathlib.Path(unit_test_avd_path) / 'config.ini'
if not unit_test_avd_config_path.is_file():
raise AndroidEmuError(f"Unable to create unit test AVD {self.unit_test_avd_name}: The expected config file '{unit_test_avd_config_path}' does not exist.")
config_content_full = unit_test_avd_config_path.read_text(encoding='UTF-8', errors='ignore')
for item, value in self.unit_test_device_settings_map.items():
regex_friendly_str = item.replace('.', '\\.')
repl_pattern = f"{regex_friendly_str}\\s*=\\s*[\\d]+"
repl_value = f"{item}={value}"
if re.search(repl_pattern, config_content_full):
config_content_full = re.sub(repl_pattern, repl_value, config_content_full)
else:
if not config_content_full.endswith('\n'):
config_content_full += '\n'
config_content_full += f"{repl_value}\n"
unit_test_avd_config_path.write_text(config_content_full)
def query_emulator_device_id(self):
result_str = self.adb_cmd.run_return_output(['devices', '-l'])
emulators = []
for result_line in result_str.split('\n'):
if not result_line.startswith('emulator-'):
continue
emulator = result_line[:result_line.find(' ')].strip()
emulators.append(emulator)
if len(emulators) > 1:
logging.warning(f"Found multiple emulators connect ({','.join(emulators)}). Defaulting to {emulators[0]}")
return emulators[0] if len(emulators) > 0 else None
def install_unit_test_avd(self):
"""
Install the unit test AVD (Android Virtual Device)
"""
valid_avd_map, invalid_avd_map = self.query_installed_avds()
if self.unit_test_avd_name not in valid_avd_map:
create_avd = True
elif self.force_avd_creation or self.unit_test_avd_name in invalid_avd_map:
logging.info(f"Deleting AVD {self.unit_test_avd_name}..")
self.avd_manager_cmd.run(['delete', 'avd', '--name', self.unit_test_avd_name])
create_avd = True
else:
create_avd = False
if create_avd:
self.create_unitest_avd()
def uninstall_unit_test_avd(self):
"""
Uninstall the unit test AVD
"""
logging.info(f"Uninstalling AVD {self.unit_test_avd_name}..")
self.avd_manager_cmd.run(['delete', 'avd', '--name', self.unit_test_avd_name])
def launch_emulator_process(self):
"""
Launch the emulator process for the unit test avd and return the process handle and its device id
:return: tuple of the process handle and the device id for the emulator
"""
emulator_device_id = None
process = None
try:
# Launch the emulator process
emulator_process_args = [
"-avd",
self.unit_test_avd_name
]
if self.hide_emulator_windows:
emulator_process_args.append("-no-window")
process = self.emulator_cmd.run_process(emulator_process_args)
# Wait for the emulator to signal that its bootup is complete
boot_completed = False
start_time = time.time()
timeout_secs = 360
while process.poll() is None:
elapsed_time = time.time() - start_time
if elapsed_time > timeout_secs > 0:
break
line = process.stdout.readline()
print(line, end='')
if "boot completed" in line:
boot_completed = True
break
if not boot_completed:
raise AndroidEmuError("Bootup of emulator timed out")
# query ADB to get the emulator ID
emulator_device_id = self.query_emulator_device_id()
return process, emulator_device_id
except Exception:
if process:
if emulator_device_id:
self.terminate_emulator_process(emulator_device_id)
else:
process.kill()
raise
def terminate_emulator_process(self, device_id):
# Terminate the emulator
kill_emu_args = [
'-s', device_id,
'emu', 'kill'
]
self.adb_cmd.run(kill_emu_args)
def run_emulation_process(self, process_func):
"""
Execute a function that relies on the session based android simulator.
:param process_func: The process function to execute. Function requires one argument which will be the device id
:return: The return value of the process function
"""
emulator_device_id = None
try:
emulator_process, emulator_device_id = self.launch_emulator_process()
return process_func(emulator_device_id)
finally:
if emulator_device_id is not None:
self.terminate_emulator_process(emulator_device_id)
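# Hedged usage sketch (not part of the original script): run_emulation_process
# boots the emulator, runs an arbitrary callback against the booted device and
# tears the emulator down afterwards. The callback name and the adb `getprop`
# query below are illustrative assumptions, not an API defined in this file.
def example_emulation_callback(manager):
    def _print_device_props(device_id):
        # Query a basic property from the booted emulator via adb
        return manager.adb_cmd.run_return_output(['-s', device_id, 'shell', 'getprop', 'ro.build.version.release'])
    return manager.run_emulation_process(_print_device_props)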
def process_unit_test_on_simulator(base_android_sdk_path, build_path, build_config):
"""
Run the android unit tests on a sessioned simulator
:param base_android_sdk_path: The path to where the Android SDK exists
:param build_path: The build path relative to the engine root where the android unit test project is configured and built
:param build_config: The configuration of the build unit test APK to run
"""
python_cmd = Command("Python", ENGINE_ROOT / 'python' / 'python.cmd')
android_script_root = ENGINE_ROOT / 'cmake' / 'Tools' / 'Platform' / 'Android'
assert android_script_root.is_dir(), "Missing the android scripts path in the engine folder hierarchy"
deploy_android_py_path = android_script_root / 'deploy_android.py'
assert deploy_android_py_path.is_file(), "Missing the android deployment script in the engine folder hierarchy"
launch_android_ptest_py_path = android_script_root / 'launch_android_test.py'
assert launch_android_ptest_py_path.is_file(), "Missing the android unit test launcher script in the engine folder hierarchy"
def _install_and_run_unit_tests(emulator_id):
# install unit test on the emulator
install_apk_args = [
str(deploy_android_py_path),
'-b', build_path,
'-c', build_config,
'--device-id-filter', emulator_id,
'--clean'
]
python_cmd.run(cmd_args=install_apk_args,
cwd=os.path.normpath(str(ENGINE_ROOT)))
try:
# Launch the unit test on the emulator
launch_apk_args = [
str(launch_android_ptest_py_path),
'-b', build_path,
'-c', build_config,
'--device-serial', emulator_id
]
python_cmd.run(cmd_args=launch_apk_args,
cwd=os.path.normpath(str(ENGINE_ROOT)))
return True
except AndroidEmuError:
print("\n\n")
raise AndroidEmuError("Unit Tests Failed")
# Prepare the emulator manager
manager = AndroidEmulatorManager(base_android_sdk_path=base_android_sdk_path,
force_avd_creation=True)
# Make sure that the android SDK is up to date
manager.update_installed_sdks()
# First Install or overwrite the unit test emulator
manager.install_unit_test_avd()
# Run the emulator-dependent process based on the session AVD created by the manager
manager.run_emulation_process(_install_and_run_unit_tests)
# Uninstall the AVD when done
manager.uninstall_unit_test_avd()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Install and an android unit test APK on a android simulator.")
parser.add_argument('--android-sdk-path',
help='Path to the Android SDK')
parser.add_argument('--build-path',
help='The build path (relative to the engine root) where the project was generated and the APK is built',
required=True)
parser.add_argument('--build-config',
help='The build config of the built APK',
required=True)
parser.add_argument('--debug',
help='Enable debug messages from this script',
action="store_true")
parsed_args = parser.parse_args(sys.argv[1:])
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG if parsed_args.debug else logging.INFO)
try:
base_android_sdk_path = pathlib.Path(parsed_args.android_sdk_path) if parsed_args.android_sdk_path else get_android_sdk_path()
process_unit_test_on_simulator(base_android_sdk_path=base_android_sdk_path,
build_path=parsed_args.build_path,
build_config=parsed_args.build_config)
exit(0)
except AndroidEmuError as e:
print(e)
exit(1)
|
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger('main_logger')
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold
import os
def get_update_df(conf):
"""
Load the dataset CSV, derive a per-image normal/abnormal target, assign stratified folds, and return the train and validation DataFrames for the training process.
"""
df = pd.read_csv(conf['dir_df'])
is_normal_df = df.groupby("image_id")["class_id"].agg(lambda s: (s == 14).sum()).reset_index().rename({
"class_id": "target"}, axis=1)
is_normal_df['target'] = (is_normal_df['target'] / 3).astype('int')
skf = StratifiedKFold(n_splits = conf['n_splits'], random_state = 0, shuffle = True)
folds = is_normal_df.copy()
for f, (tr_idx, val_idx) in enumerate(skf.split(folds, folds.target)):
folds.loc[val_idx, 'fold'] = int(f)
folds['fold'] = folds['fold'].astype(int)
folds.image_id = folds.image_id + ".png"
folds['path'] = [os.path.join(conf['DIR_TRAIN'], x) for x in folds.image_id]
df_train = folds[folds['fold']!=conf['fold']]
df_train = df_train.reset_index(drop=True)
df_val = folds[folds['fold']==conf['fold']]
df_val = df_val.reset_index(drop=True)
return df_train, df_val
conf = {'dir_df':'', 'dir_df_test':'', 'DIR_TRAIN':'', 'DIR_TEST':'', 'dir_submision':'',
'prob_mixup':0.8, 'num_classes':1, 'n_splits':5, 'fold':0,
'dir_best': '', 'dir_last':'', 'batch_size':8, 'num_epochs':15}
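# Hedged usage sketch (comments only): with 'dir_df' pointing at the annotation
# CSV and 'DIR_TRAIN' at the image folder (both intentionally left empty above),
# the split for the configured fold would be obtained as:
#   df_train, df_val = get_update_df(conf)
#   print(len(df_train), len(df_val))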
|
#!/bin/env python
"""Command-line interface (CLI)
SCL <scott@rerobots.net>
Copyright (c) 2019 rerobots, Inc.
"""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import glob
import json
import os.path
import subprocess
import sys
import tempfile
import uuid
import zipfile
from .__init__ import __version__
from .vm import start_vm
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
argparser = argparse.ArgumentParser(description='a tool for static analysis of Misty skills and offboard Misty REST API clients')
argparser.add_argument('FILE', nargs='*', default=None,
help='zip files or skill meta files')
argparser.add_argument('-V', '--version', dest='print_version',
action='store_true', default=False,
help='print version number and exit.')
argparser.add_argument('--check-deps', dest='check_dependencies',
action='store_true', default=False,
help='check for dependencies, like ESLint.')
argparser.add_argument('--vm', dest='run_vm',
action='store_true', default=False,
help='start Misty mock REST API server')
argparser.add_argument('--vm-port', dest='vm_port', default=8888,
help='if --vm, port on which to listen; default is 8888')
argparser.add_argument('--vm-addr', dest='vm_addr', default='127.0.0.1',
help=('if --vm, address on which to listen; default is 127.0.0.1, '
'also known as localhost'))
argparser.add_argument('--vm-trace', dest='trace_vm',
action='store_true', default=False,
help='echo any requests of unknown method, path to stdout.')
args = argparser.parse_args(argv)
if args.print_version:
print(__version__)
return 0
if args.run_vm:
return start_vm(addr=args.vm_addr, port=args.vm_port, trace_unknown=args.trace_vm)
if args.check_dependencies:
args = ['eslint', '--version']
try:
rc = subprocess.call(args, stdout=subprocess.PIPE)
except OSError:
print('ESLint not found. Try to install it as instructed at\n'
'https://eslint.org/docs/user-guide/getting-started')
print('It might suffice to use Yarn (https://yarnpkg.com/en/):\n\n'
' yarn global add eslint\n')
return 1
if rc != 0:
print('ESLint does not appear to be correctly installed. '
'Compare with instructions at\n'
'https://eslint.org/docs/user-guide/getting-started')
return 1
return 0
if not args.FILE:
files = glob.glob('*.zip')
files += glob.glob('*.ZIP')
else:
files = args.FILE
if len(files) == 0:
print('No files given. Try `-h` to get a usage summary.')
return 1
for ii, name in enumerate(files):
if ii > 0:
print('----')
original_dirname = os.path.dirname(name)
skillname = os.path.basename(name)
if skillname.endswith('.json') or skillname.endswith('.JSON'):
skillname = skillname[:-len('.json')]
elif skillname.endswith('.zip') or skillname.endswith('.ZIP'):
skillname = skillname[:-len('.zip')]
else:
print('ERROR: failed to extract skill name from {}'.format(name))
return 1
print('skill: {}'.format(skillname))
temporary_path = None
try:
fp = zipfile.ZipFile(name, mode='r')
temporary_path = tempfile.mkdtemp()
fp.extractall(path=temporary_path)
fp.close()
except zipfile.BadZipFile:
# not a ZIP file? try to treat as meta file
pass
if temporary_path:
parentpath = temporary_path
else:
parentpath = original_dirname
metafilepath = os.path.join(parentpath, '{}.json'.format(skillname))
if not os.path.exists(metafilepath):
metafilepath = os.path.join(parentpath, '{}.JSON'.format(skillname))
if not os.path.exists(metafilepath):
print('ERROR: no meta file found')
return 1
jsfilepath = os.path.join(parentpath, '{}.js'.format(skillname))
if not os.path.exists(jsfilepath):
jsfilepath = os.path.join(parentpath, '{}.JS'.format(skillname))
if not os.path.exists(jsfilepath):
print('ERROR: no JS file found')
return 1
with open(metafilepath, 'rt') as fp:
try:
skillmeta = json.load(fp)
except ValueError:
print('ERROR: meta file does not contain valid JSON')
return 1
print('comparing `Name` field in meta file with file names...')
if 'Name' not in skillmeta:
print('ERROR: meta file is missing name field')
return 1
if skillmeta['Name'] != skillname:
print('ERROR: unexpected name in meta file')
return 1
print('checking that GUID is well-formed...')
if 'UniqueId' not in skillmeta:
print('ERROR: meta file is missing GUID field')
return 1
try:
uuid.UUID(skillmeta['UniqueId'])
except ValueError:
print('ERROR: not well-formed GUID: {}'.format(skillmeta['UniqueId']))
return 1
print('checking syntax of main JS file...')
eslint_rulespath = os.path.join(os.path.dirname(__file__), 'eslint_rules')
args = ['eslint',
'--no-eslintrc',
'--rulesdir', eslint_rulespath,
'--rule', 'mistyobj-prop: 2',
jsfilepath]
subprocess.check_call(args)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
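# Illustrative note: the checks in main() expect the skill bundle to contain
# <SkillName>.js and <SkillName>.json, where the JSON meta file has at least a
# `Name` field matching the file name and a well-formed `UniqueId` GUID, e.g.
# (hypothetical values):
#   { "Name": "HelloSkill", "UniqueId": "3fa85f64-5717-4562-b3fc-2c963f66afa6" }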
|
import pandas as pd
from scipy import ndimage, signal
from sklearn.cluster import DBSCAN
import numpy as np
from scipy import stats as st
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
#import spacepy.plot as splot
#import seaborn as sns
import matplotlib.colors as mcolors
from matplotlib.ticker import MultipleLocator
import datetime as dt
#splot.style("spacepy_altgrid")
font = {"family": "serif", "color": "black", "weight": "normal", "size": 10}
fonttext = {"family": "serif", "color": "blue", "weight": "normal", "size": 10}
from matplotlib import font_manager
ticks_font = font_manager.FontProperties(family="serif", size=10, weight="normal")
matplotlib.rcParams["xtick.color"] = "k"
matplotlib.rcParams["ytick.color"] = "k"
matplotlib.rcParams["xtick.labelsize"] = 7
matplotlib.rcParams["ytick.labelsize"] = 7
matplotlib.rcParams["mathtext.default"] = "default"
def smooth(x, window_len=51, window="hanning"):
if x.ndim != 1: raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len: raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3: return x
if not window in ["flat", "hanning", "hamming", "bartlett", "blackman"]: raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
if window == "flat": w = numpy.ones(window_len,"d")
else: w = eval("np."+window+"(window_len)")
y = np.convolve(w/w.sum(),s,mode="valid")
d = window_len - 1
y = y[int(d/2):-int(d/2)]
return y
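# Hedged usage sketch of smooth(): a short synthetic check, not part of the
# analysis pipeline. With an odd window length the output keeps the input length.
def _smooth_example():
    x = np.linspace(0, 4 * np.pi, 200)
    noisy = np.sin(x) + 0.3 * np.random.randn(x.size)
    smoothed = smooth(noisy, window_len=21, window="hanning")
    return noisy.shape, smoothed.shape  # both (200,)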
def rand_jitter(arr):
stdev = .01*(max(arr)-min(arr))
return arr + np.random.randn(len(arr)) * stdev
def to_midlatitude_gate_summary(rad, df, gate_lims, names, smooth, fname, sb):
""" Plot gate distribution summary """
fig, axes = plt.subplots(figsize=(5,5), nrows=4, ncols=1, sharey="row", sharex=True, dpi=150)
attrs = ["p_l"]
beams = sb[1]
scans = sb[0]
count = sb[2]
xpt = 100./scans
labels = ["Power (dB)"]
for j, attr, lab in zip(range(1), attrs, labels):
ax = axes[j]
ax.scatter(rand_jitter(df.slist), rand_jitter(df[attr]), color="r", s=1)
ax.grid(color="gray", linestyle="--", linewidth=0.3)
ax.set_ylabel(lab, fontdict=font)
ax.set_xlim(0,110)
ax.xaxis.set_minor_locator(MultipleLocator(2))
ax = axes[-3]
ax.xaxis.set_minor_locator(MultipleLocator(2))
ax.scatter(df.groupby(["slist"]).count().reset_index()["slist"], xpt*df.groupby(["slist"]).count().reset_index()["p_l"], color="k", s=3)
ax.grid(color="gray", linestyle="--", linewidth=0.3)
ax.set_ylabel("%Count", fontdict=font)
ax.set_xlim(0,110)
fonttext["color"] = "k"
ax = axes[-2]
ax.scatter(smooth[0], xpt*smooth[1], color="k", s=3)
ax.grid(color="gray", linestyle="--", linewidth=0.3)
ax.xaxis.set_minor_locator(MultipleLocator(2))
ax.set_xlim(0,110)
ax.set_ylabel("<%Count>", fontdict=font)
ax = axes[-1]
ax.xaxis.set_minor_locator(MultipleLocator(2))
ax.grid(color="gray", linestyle="--", linewidth=0.3)
ax.set_xlim(0,110)
ax.set_xlabel("Gate", fontdict=font)
ax.set_ylabel(r"$d_{<\%Count>}$", fontdict=font)
ds = pd.DataFrame()
ds["x"], ds["y"] = smooth[0], smooth[1]
for k in gate_lims.keys():
l, u = gate_lims[k][0], gate_lims[k][1]
du = ds[(ds.x>=l) & (ds.x<=u)]
p = np.append(np.diff(du.y), [0])
p[np.argmax(du.y)] = 0
ax.scatter(du.x, xpt*p, color="k", s=3)
ax.axhline(0, color="b", lw=0.4, ls="--")
ax.scatter(smooth[0], smooth[1], color="r", s=1, alpha=.6)
ax.scatter(ds.x, ds.y, color="b", s=0.6, alpha=.5)
fonttext["size"] = 8
for k, n in zip(gate_lims.keys(), names.keys()):
if k >= 0:
for j in range(len(axes)):
ax = axes[j]
ax.axvline(x=gate_lims[k][0], color="b", lw=0.6, ls="--")
ax.axvline(x=gate_lims[k][1], color="darkgreen", lw=0.6, ls=":")
#ax.text(np.mean(gate_lims[k])/110, 0.7, names[n],
# horizontalalignment="center", verticalalignment="center",
# transform=ax.transAxes, fontdict=fonttext)
fig.suptitle("Rad-%s, %s [%s-%s] UT"%(rad, df.time.tolist()[0].strftime("%Y-%m-%d"),
df.time.tolist()[0].strftime("%H.%M"), df.time.tolist()[-1].strftime("%H.%M")) + "\n" +
r"$Beams=%s, N_{sounds}=%d, N_{gates}=%d$"%(beams, scans, 110), size=12)
fig.savefig(fname, bbox_inches="tight")
plt.close()
return
def beam_gate_boundary_plots(boundaries, clusters, clust_idf, glim, blim, title, fname):
""" Beam gate boundary plots showing the distribution of clusters """
fig, ax = plt.subplots(figsize=(6,4), nrows=1, ncols=1, dpi=240)
ax.set_ylabel("Gates", fontdict=font)
ax.set_xlabel("Beams", fontdict=font)
ax.set_xlim(blim[0]-1, blim[1] + 2)
ax.set_ylim(glim[0], glim[1])
for b in range(blim[0], blim[1] + 1):
ax.axvline(b, lw=0.3, color="gray", ls="--")
boundary = boundaries[b]
for bnd in boundary:
ax.plot([b, b+1], [bnd["lb"], bnd["lb"]], ls="--", color="b", lw=0.5)
ax.plot([b, b+1], [bnd["ub"], bnd["ub"]], ls="--", color="g", lw=0.5)
#ax.scatter([b+0.5], [bnd["peak"]], marker="*", color="k", s=3)
for x in clusters.keys():
C = clusters[x]
for _c in C:
if clust_idf is None: ax.text(_c["bmnum"]+(1./3.), (_c["ub"]+_c["lb"])/2, "%02d"%int(x),
horizontalalignment="center", verticalalignment="center",fontdict=fonttext)
else: ax.text(_c["bmnum"]+(1./3.), (_c["ub"]+_c["lb"])/2, clust_idf[x],
horizontalalignment="center", verticalalignment="center",fontdict=fonttext)
ax.axvline(b+1, lw=0.3, color="gray", ls="--")
ax.set_title(title)
ax.set_xticks(np.arange(blim[0], blim[1] + 1) + 0.5)
ax.set_xticklabels(np.arange(blim[0], blim[1] + 1))
fig.savefig(fname, bbox_inches="tight")
return
def cluster_stats(df, cluster, fname, title):
fig, axes = plt.subplots(figsize=(4,8), nrows=3, ncols=1, dpi=120, sharey=True)
v, w, p = [], [], []
for c in cluster:
v.extend(df[(df.bmnum==c["bmnum"]) & (df.slist>=c["lb"]) & (df.slist>=c["lb"])].v.tolist())
w.extend(df[(df.bmnum==c["bmnum"]) & (df.slist>=c["lb"]) & (df.slist>=c["lb"])].w_l.tolist())
p.extend(df[(df.bmnum==c["bmnum"]) & (df.slist>=c["lb"]) & (df.slist>=c["lb"])].p_l.tolist())
ax = axes[0]
v, w, p = np.array(v), np.array(w), np.array(p)
v[v<-1000] = -1000
v[v>1000] = 1000
l, u = np.quantile(v,0.1), np.quantile(v,0.9)
ax.axvline(np.sign(l)*np.log10(abs(l)), ls="--", lw=1., color="r")
ax.axvline(np.sign(u)*np.log10(abs(u)), ls="--", lw=1., color="r")
ax.hist(np.sign(v)*np.log10(abs(v)), bins=np.linspace(-3,3,101), histtype="step", density=False)
ax.set_ylabel("Counts")
ax.set_xlabel(r"V, $ms^{-1}$")
ax.set_xlim([-3,3])
ax.set_xticklabels([-1000,-100,-10,1,10,100,1000])
ax.text(0.7, 0.7, r"$V_{\mu}$=%.1f, $\hat{V}$=%.1f"%(np.mean(v[(v>l) & (v<u)]), np.median(v[(v>l) & (v<u)])) + "\n"\
+ r"$V_{max}$=%.1f, $V_{min}$=%.1f"%(np.max(v[(v>l) & (v<u)]), np.min(v[(v>l) & (v<u)])) + "\n"\
+ r"$V_{\sigma}$=%.1f, n=%d"%(np.std(v[(v>l) & (v<u)]),len(v[(v>l) & (v<u)])),
horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, fontdict={"size":8})
ax = axes[1]
w[w>100]=100
w[w<-100] = -100
l, u = np.quantile(w,0.1), np.quantile(w,0.9)
ax.axvline(l, ls="--", lw=1., color="r")
ax.axvline(u, ls="--", lw=1., color="r")
ax.hist(w, bins=range(-100,100,1), histtype="step", density=False)
ax.set_xlabel(r"W, $ms^{-1}$")
ax.set_xlim([-100,100])
ax.set_ylabel("Counts")
ax.text(0.75, 0.8, r"$W_{\mu}$=%.1f, $\hat{W}$=%.1f"%(np.mean(w[(w>l) & (w<u)]), np.median(w[(w>l) & (w<u)])) + "\n"\
+ r"$W_{max}$=%.1f, $W_{min}$=%.1f"%(np.max(w[(w>l) & (w<u)]), np.min(w[(w>l) & (w<u)])) + "\n"\
+ r"$W_{\sigma}$=%.1f, n=%d"%(np.std(w[(w>l) & (w<u)]),len(w[(w>l) & (w<u)])),
horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, fontdict={"size":8})
ax = axes[2]
p[p>30]=30
l, u = np.quantile(p,0.1), np.quantile(p,0.9)
ax.axvline(l, ls="--", lw=1., color="r")
ax.axvline(u, ls="--", lw=1., color="r")
ax.hist(p, bins=range(0,30,1), histtype="step", density=False)
ax.set_xlabel(r"P, $dB$")
ax.set_xlim([0,30])
ax.set_ylabel("Counts")
ax.text(0.75, 0.8, r"$P_{\mu}$=%.1f, $\hat{P}$=%.1f"%(np.mean(p[(p>l) & (p<u)]), np.median(p[(p>l) & (p<u)])) + "\n"\
+ r"$P_{max}$=%.1f, $P_{min}$=%.1f"%(np.max(p[(p>l) & (p<u)]), np.min(p[(p>l) & (p<u)])) + "\n"\
+ r"$P_{\sigma}$=%.1f, n=%d"%(np.std(p[(p>l) & (p<u)]),len(p[(p>l) & (p<u)])),
horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, fontdict={"size":8})
fig.suptitle(title)
fig.subplots_adjust(hspace=0.5)
fig.savefig(fname, bbox_inches="tight")
return
def general_stats(g_stats, fname):
fig, axes = plt.subplots(figsize=(6,5), nrows=2, ncols=1, dpi=120, sharex=True)
ax = axes[0]
width=0.2
df = pd.DataFrame.from_records(list(g_stats.values()))
ax.bar(df.bmnum-width, df.sound, width=0.3, color="r", label="S")
ax.set_ylabel(r"$N_{sounds}$", fontdict=font)
ax.legend(loc=2)
ax = ax.twinx()
ax.bar(df.bmnum+width, df.echo, width=0.3, color="b", label="E")
ax.set_ylabel(r"$N_{echo}$", fontdict=font)
ax.set_xlabel("Beams", fontdict=font)
ax.set_xticks(df.bmnum)
ax.legend(loc=1)
ax = axes[1]
ax.errorbar(df.bmnum, df.v, yerr=df.v_mad, color="r", elinewidth=2.5, ecolor="r", fmt="o", ls="None", label="V")
ax.errorbar(df.bmnum, df.w, yerr=df.w_mad, color="b", elinewidth=1.5, ecolor="b", fmt="o", ls="None", label="W")
ax.errorbar(df.bmnum, df.p, yerr=df.p_mad, color="k", elinewidth=0.5, ecolor="k", fmt="o", ls="None", label="P")
ax.set_ylim(-20, 40)
ax.set_ylabel(r"$V_{med},W_{med},P_{med}$", fontdict=font)
ax.set_xlabel("Beams", fontdict=font)
ax.set_xticks(df.bmnum)
ax.legend(loc=1)
fig.subplots_adjust(hspace=0.1)
fig.savefig(fname, bbox_inches="tight")
return
def individal_cluster_stats(cluster, df, fname, title):
fig = plt.figure(figsize=(8,12), dpi=120)
V = []
vbbox, vbox = {}, []
vecho, vsound, echo, sound = {}, {}, [], []
beams = []
for c in cluster:
v = np.array(df[(df.slist>=c["lb"]) & (df.slist<=c["ub"]) & (df.bmnum==c["bmnum"])].v)
V.extend(v.tolist())
v[v<-1000] = -1000
v[v>1000] = 1000
if c["bmnum"] not in vbbox.keys():
vbbox[c["bmnum"]] = v.tolist()
vecho[c["bmnum"]] = c["echo"]
vsound[c["bmnum"]] = c["sound"]
else:
vbbox[c["bmnum"]].extend(v.tolist())
vecho[c["bmnum"]] += c["echo"]
#vsound[c["bmnum"]] += c["sound"]
beams = sorted(vbbox.keys())
avbox = []
for b in beams:
if b!=15: avbox.append(vbbox[b])
vbox.append(vbbox[b])
echo.append(vecho[b])
sound.append(vsound[b])
from scipy import stats
pval = -1.
if len(vbox) > 1:
H, pval = stats.f_oneway(*avbox)
print(H,pval)
ax = plt.subplot2grid((4, 2), (0, 1), colspan=1)
V = np.array(V)
V[V<-1000] = -1000
V[V>1000] = 1000
l, u = np.quantile(V,0.05), np.quantile(V,0.95)
ax.axvline(np.sign(l)*np.log10(abs(l)), ls="--", lw=1., color="r")
ax.axvline(np.sign(u)*np.log10(abs(u)), ls="--", lw=1., color="r")
ax.hist(np.sign(V)*np.log10(abs(V)), bins=np.linspace(-3,3,101), histtype="step", density=False)
ax.text(0.7, 0.8, r"$V_{min}$=%.1f, $V_{max}$=%.1f"%(np.min(V[(V>l) & (V<u)]), np.max(V[(V>l) & (V<u)])) + "\n"\
+ r"$V_{\mu}$=%.1f, $\hat{V}$=%.1f"%(np.mean(V[(V>l) & (V<u)]), np.median(V[(V>l) & (V<u)])) + "\n"\
+ r"$V_{\sigma}$=%.1f, n=%d"%(np.std(V[(V>l) & (V<u)]),len(V[(V>l) & (V<u)])),
horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, fontdict={"size":8})
ax.set_xlabel(r"V, $ms^{-1}$", fontdict=font)
ax.set_yticklabels([])
ax.set_xlim([-3,3])
ax.set_xticklabels([-1000,-100,-10,1,10,100,1000])
ax = plt.subplot2grid((4, 2), (0, 0), colspan=1)
ax.hist(np.sign(V)*np.log10(abs(V)), bins=np.linspace(-3,3,101), histtype="step", density=False)
ax.text(0.7, 0.8, r"$V_{min}$=%.1f, $V_{max}$=%.1f"%(np.min(V), np.max(V)) + "\n"\
+ r"$V_{\mu}$=%.1f, $\hat{V}$=%.1f"%(np.mean(V), np.median(V)) + "\n"\
+ r"$V_{\sigma}$=%.1f, n=%d"%(np.std(V),len(V)),
horizontalalignment="center", verticalalignment="center", transform=ax.transAxes, fontdict={"size":8})
ax.set_ylabel("Counts", fontdict=font)
ax.set_xlabel(r"V, $ms^{-1}$", fontdict=font)
ax.set_xlim([-3,3])
ax.set_xticklabels([-1000,-100,-10,1,10,100,1000])
ax = plt.subplot2grid((4, 2), (1, 0), colspan=2)
ax.boxplot(vbox, flierprops = dict(marker="o", markerfacecolor="none", markersize=0.8, linestyle="none"))
ax.set_ylim(-100,100)
ax.set_xlabel(r"Beams", fontdict=font)
ax.set_ylabel(r"V, $ms^{-1}$", fontdict=font)
ax.set_xticklabels(beams)
ax.text(1.05,0.5, "p-val=%.2f"%pval, horizontalalignment="center", verticalalignment="center",
transform=ax.transAxes, fontdict={"size":10}, rotation=90)
ax.axhline(0, ls="--", lw=0.5, color="k")
fig.suptitle(title, y=0.92)
ax = plt.subplot2grid((4, 2), (2, 0), colspan=2)
ax.boxplot(vbox, flierprops = dict(marker="o", markerfacecolor="none", markersize=0.8, linestyle="none"),
showbox=False, showcaps=False)
ax.set_xticklabels(beams)
ax.axhline(0, ls="--", lw=0.5, color="k")
ax.set_xlabel(r"Beams", fontdict=font)
ax.set_ylabel(r"V, $ms^{-1}$", fontdict=font)
ax = plt.subplot2grid((4, 2), (3, 0), colspan=2)
width=0.2
ax.bar(np.array(beams)-width, sound, width=0.3, color="r", label="S")
ax.set_ylabel(r"$N_{sounds}$", fontdict=font)
ax.set_xlabel("Beams", fontdict=font)
ax.legend(loc=2)
ax = ax.twinx()
ax.bar(np.array(beams)+width, echo, width=0.3, color="b", label="E")
ax.set_ylabel(r"$N_{echo}$", fontdict=font)
ax.set_xlabel("Beams", fontdict=font)
ax.set_xticks(beams)
ax.legend(loc=1)
fig.subplots_adjust(hspace=0.2, wspace=0.2)
fig.savefig(fname, bbox_inches="tight")
return
class MiddleLatFilter(object):
""" Class to filter middle latitude radars """
def __init__(self, rad, scans, eps=2, min_samples=10, plot=False):
"""
initialize variables
rad: Radar code
scans: List of scans
eps: Radius of DBSCAN
min_samples: min samplese of DBSCAN
"""
self.rad = rad
self.scans = scans
self.eps = eps
self.min_samples = min_samples
self.boundaries = {}
self.plot = plot
return
def _reset_(self, rad, scans, plot=False):
""" Reset existing parameters """
self.rad = rad
self.scans = scans
self.plot = plot
return
def extract_gatelims(self, df):
"""
Extract gate limits for individual clusters
"""
glims = {}
for l in set(df.labels):
if l >= 0:
u = df[df.labels==l]
if len(u) > 0: glims[l] = [np.min(u.slist) + 1, np.max(u.slist) - 1]
return glims
def filter_by_dbscan(self, df, bm):
"""
Filter a single beam's echoes by clustering range gates with DBSCAN
"""
du, sb = df[df.bmnum==bm], [np.nan, np.nan, np.nan]
sb[0] = len(du.groupby("time"))
self.gen_summary[bm] = {"bmnum": bm, "v": np.median(du.v), "p": np.median(du.p_l), "w": np.median(du.w_l),
"sound":sb[0], "echo":len(du), "v_mad": st.median_absolute_deviation(du.v),
"p_mad": st.median_absolute_deviation(du.p_l), "w_mad": st.median_absolute_deviation(du.w_l)}
xpt = 100./sb[0]
if bm == "all":
sb[1] = "[" + str(int(np.min(df.bmnum))) + "-" + str(int(np.max(df.bmnum))) + "]"
sb[2] = len(self.scans) * int((np.max(df.bmnum)-np.min(df.bmnum)+1))
else:
sb[1] = "[" + str(int(bm)) + "]"
sb[2] = len(self.scans)
print(" Beam Analysis: ", bm, len(du.groupby("time")))
rng, eco = np.array(du.groupby(["slist"]).count().reset_index()["slist"]),\
np.array(du.groupby(["slist"]).count().reset_index()["p_l"])
Rng, Eco = np.arange(np.max(du.slist)+1), np.zeros((np.max(du.slist)+1))
for e, r in zip(eco, rng):
Eco[Rng.tolist().index(r)] = e
if len(eco) > self.window: eco, Eco = smooth(eco, self.window), smooth(Eco, self.window)
glims, labels = {}, []
ds = DBSCAN(eps=self.eps, min_samples=self.min_samples).fit(du[["slist"]].values)
du["labels"] = ds.labels_
names = {}
print(eco, Eco, bm)
for j, r in enumerate(set(ds.labels_)):
x = du[du.labels==r]
glims[r] = [np.min(x.slist), np.max(x.slist)]
names[r] = "C"+str(j)
if r >= 0: self.boundaries[bm].append({"peak": Rng[np.min(x.slist) + Eco[np.min(x.slist):np.max(x.slist)].argmax()],
"ub": np.max(x.slist), "lb": np.min(x.slist), "value": np.max(eco)*xpt, "bmnum": bm, "echo": len(x), "sound": sb[0]})
print(" Individual clster detected: ", set(du.labels))
if self.plot: to_midlatitude_gate_summary(self.rad, du, glims, names, (rng,eco),
"figs/{r}_gate_{bm}_summary.png".format(r=self.rad, bm="%02d"%bm), sb)
return
def filter_by_SMF(self, df, bm, method=None):
"""
Filter by Simple Minded Filter
method: np/ndimage
"""
def local_minima_ndimage(array, min_distance = 1, periodic=False, edges_allowed=True):
"""Find all local maxima of the array, separated by at least min_distance."""
array = np.asarray(array)
cval = 0
if periodic: mode = "wrap"
elif edges_allowed: mode = "nearest"
else: mode = "constant"
cval = array.max()+1
min_points = array == ndimage.minimum_filter(array, 1+2*min_distance, mode=mode, cval=cval)
troughs = [indices[min_points] for indices in np.indices(array.shape)][0]
if troughs[0] != 0: troughs = np.insert(troughs, 0, 0)
if troughs[-1] <= np.max(du.slist) - min_distance: troughs = np.append(troughs, [np.max(du.slist)])
troughs[-2] = troughs[-2] + 1
return troughs
def local_maxima_ndimage(array, min_distance = 1, periodic=False, edges_allowed=True):
array = np.asarray(array)
cval = 0
if periodic: mode = "wrap"
elif edges_allowed: mode = "nearest"
else: mode = "constant"
cval = array.max()+1
max_points = array == ndimage.maximum_filter(array, 1+2*min_distance, mode=mode, cval=cval)
peaks = [indices[max_points] for indices in np.indices(array.shape)][0]
return peaks
def local_minima_np(array):
""" Local minima by numpy stats """
troughs = signal.argrelmin(eco, order=self.order)[0]
if troughs[0] != 0: troughs = np.insert(troughs, 0, 0)
if troughs[-1] != np.max(du.slist): troughs = np.append(troughs, [np.max(du.slist)])
troughs[-2] = troughs[-2] + 1
return troughs
if bm == "all": du = df.copy()
else: du = df[df.bmnum==bm]
print(" Beam Analysis: ", bm, len(du.groupby("time")))
sb = [np.nan, np.nan, np.nan]
sb[0] = len(du.groupby("time"))
self.gen_summary[bm] = {"bmnum": bm, "v": np.median(du.v), "p": np.median(du.p_l), "w": np.median(du.w_l),
"sound":sb[0], "echo":len(du), "v_mad": st.median_absolute_deviation(du.v),
"p_mad": st.median_absolute_deviation(du.p_l), "w_mad": st.median_absolute_deviation(du.w_l)}
xpt = 100./sb[0]
if bm == "all":
sb[1] = "[" + str(int(np.min(df.bmnum))) + "-" + str(int(np.max(df.bmnum))) + "]"
sb[2] = len(self.scans) * int((np.max(df.bmnum)-np.min(df.bmnum)+1))
else:
sb[1] = "[" + str(int(bm)) + "]"
sb[2] = len(self.scans)
rng, eco = np.array(du.groupby(["slist"]).count().reset_index()["slist"]),\
np.array(du.groupby(["slist"]).count().reset_index()["p_l"])
glims, labels = {}, []
Rng, Eco = np.arange(np.max(du.slist)+1), np.zeros((np.max(du.slist)+1))
for e, r in zip(eco, rng):
Eco[Rng.tolist().index(r)] = e
eco, Eco = smooth(eco, self.window), smooth(Eco, self.window)
du["labels"] = [np.nan] * len(du)
names = {}
if method == "np": troughs = local_minima_np(np.array(eco))
else: troughs = local_minima_ndimage(np.array(eco), min_distance=5)
peaks = local_maxima_ndimage(np.array(eco), min_distance=5)
print(" Gate bounds: ", troughs, peaks)
peaks = np.append(peaks, np.median(troughs[-2:]))
if len(troughs) > len(peaks):
troughs = troughs[:len(peaks)]
if len(peaks) > len(troughs): peaks = peaks[:len(troughs)]
for r in range(len(troughs)-1):
glims[r] = [troughs[r], troughs[r+1]]
du["labels"] = np.where((du["slist"]<=troughs[r+1]) & (du["slist"]>=troughs[r]), r, du["labels"])
names[r] = "C" + str(r)
if r >= 0: self.boundaries[bm].append({"peak": peaks[r],
"ub": troughs[r+1], "lb": troughs[r], "value": np.max(eco)*xpt, "bmnum": bm,
"echo": len(du[(du["slist"]<=troughs[r+1]) & (du["slist"]>=troughs[r])]), "sound": sb[0]})
du["labels"] = np.where(np.isnan(du["labels"]), -1, du["labels"])
print(" Individual clster detected: ", set(du.labels))
if self.plot: to_midlatitude_gate_summary(self.rad, du, glims, names, (rng,eco),
"figs/{r}_gate_{bm}_summary.png".format(r=self.rad, bm="%02d"%bm), sb)
return
def doFilter(self, io, window=11, order=1, beams=range(4,24), dbeam=15):
"""
Do filter for sub-auroral scatter
"""
self.gen_summary = {}
self.clust_idf = {}
self.order = order
self.window = window
df = pd.DataFrame()
for i, fsc in enumerate(self.scans):
dx = io.convert_to_pandas(fsc.beams, v_params=["p_l", "v", "w_l", "slist"])
df = df.append(dx)
beams = range(np.min(df.bmnum), np.max(df.bmnum))
print(" Beam range - ", np.min(df.bmnum), np.max(df.bmnum))
if beams is None or len(beams) == 0:
bm = "all"
self.boundaries[bm] = []
self.filter_by_SMF(df, bm)
else:
for bm in beams:
self.boundaries[bm] = []
self.gen_summary[bm] = {}
if (dbeam is not None) and (bm==dbeam): self.filter_by_SMF(df, bm)
else: self.filter_by_dbscan(df, bm)
title = "Date: %s [%s-%s] UT | %s"%(df.time.tolist()[0].strftime("%Y-%m-%d"),
df.time.tolist()[0].strftime("%H.%M"), df.time.tolist()[-1].strftime("%H.%M"), self.rad.upper())
fname = "figs/%s_gate_boundary.png"%(self.rad)
self.gc = []
for x in self.boundaries.keys():
for j, m in enumerate(self.boundaries[x]):
self.gc.append(m)
self.ubeam, self.lbeam = np.max(df.bmnum)-1, np.min(df.bmnum)
self.sma_bgspace()
df["labels"] = [np.nan] * len(df)
for ck in self.clusters.keys():
cluster = self.clusters[ck]
for c in cluster:
df["labels"] = np.where((df["bmnum"]==c["bmnum"]) & (df["slist"]<=c["ub"]) & (df["slist"]>=c["lb"]), ck, df["labels"])
#self.cluster_identification(df)
if self.plot: beam_gate_boundary_plots(self.boundaries, self.clusters, None,
glim=(0, 100), blim=(np.min(df.bmnum), np.max(df.bmnum)), title=title,
fname=fname)
#if self.plot: beam_gate_boundary_plots(self.boundaries, self.clusters, self.clust_idf,
# glim=(0, 100), blim=(np.min(df.bmnum), np.max(df.bmnum)), title=title,
# fname=fname.replace("gate_boundary", "gate_boundary_id"))
#if self.plot: general_stats(self.gen_summary, fname="figs/%s_general_stats.png"%(self.rad))
#for c in self.clusters.keys():
# cluster = self.clusters[c]
# fname = "figs/%s_clust_%02d_stats.png"%(self.rad, c)
# cluster_stats(df, cluster, fname, title+" | Cluster# %02d"%c)
# individal_cluster_stats(self.clusters[c], df, "figs/%s_ind_clust_%02d_stats.png"%(self.rad, c),
# title+" | Cluster# %02d"%c+"\n"+"Cluster ID: _%s_"%self.clust_idf[c].upper())
return df
def cluster_identification(self, df, qntl=[0.05,0.95]):
""" Idenitify the cluster based on Idenitification """
for c in self.clusters.keys():
V = []
for cls in self.clusters[c]:
v = np.array(df[(df.slist>=cls["lb"]) & (df.slist<=cls["ub"]) & (df.bmnum==cls["bmnum"])].v)
V.extend(v.tolist())
V = np.array(V)
l, u = np.quantile(V,qntl[0]), np.quantile(V,qntl[1])
Vmin, Vmax = np.min(V[(V>l) & (V<u)]), np.max(V[(V>l) & (V<u)])
Vmean, Vmed = np.mean(V[(V>l) & (V<u)]), np.median(V[(V>l) & (V<u)])
Vsig = np.std(V[(V>l) & (V<u)])
self.clust_idf[c] = "us"
if Vmin > -20 and Vmax < 20: self.clust_idf[c] = "gs"
elif (Vmin > -50 and Vmax < 50) and (Vmed-Vsig < -20 or Vmed+Vsig > 20): self.clust_idf[c] = "sais"
return
def sma_bgspace(self):
"""
Simple minded algorithm in B.G space
"""
def range_comp(x, y, pcnt=0.7):
_cx = False
insc = set(x).intersection(y)
if len(x) < len(y) and len(insc) >= len(x)*pcnt: _cx = True
if len(x) > len(y) and len(insc) >= len(y)*pcnt: _cx = True
return _cx
def find_adjucent(lst, mxx):
mxl = []
for l in lst:
if l["peak"] >= mxx["lb"] and l["peak"] <= mxx["ub"]: mxl.append(l)
elif mxx["peak"] >= l["lb"] and mxx["peak"] <= l["ub"]: mxl.append(l)
elif range_comp(range(l["lb"], l["ub"]+1), range(mxx["lb"], mxx["ub"]+1)): mxl.append(l)
return mxl
def nested_cluster_find(bm, mx, j, case=-1):
if bm < self.lbeam or bm > self.ubeam: return
else:
if (case == -1 and bm >= self.lbeam) or (case == 1 and bm <= self.ubeam):
mxl = find_adjucent(self.boundaries[bm], mx)
for m in mxl:
if m in self.gc:
del self.gc[self.gc.index(m)]
self.clusters[j].append(m)
nested_cluster_find(m["bmnum"] + case, m, j, case)
nested_cluster_find(m["bmnum"] + (-1*case), m, j, (-1*case))
return
self.clusters = {}
j = 0
while len(self.gc) > 0:
self.clusters[j] = []
mx = max(self.gc, key=lambda x:x["value"])
self.clusters[j].append(mx)
if mx in self.gc: del self.gc[self.gc.index(mx)]
nested_cluster_find(mx["bmnum"] - 1, mx, j, case=-1)
nested_cluster_find(mx["bmnum"] + 1, mx, j, case=1)
j += 1
return
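# Hedged usage sketch (comments only): MiddleLatFilter is driven through
# doFilter(), which clusters echoes per beam (DBSCAN, or the simple-minded
# filter for a chosen beam) and then merges the per-beam boundaries across
# beams with sma_bgspace(). `rad`, `scans` and `io` stand for the radar code,
# the list of scans and the reader object produced elsewhere; none of them is
# defined in this file.
#   mlf = MiddleLatFilter(rad, scans, eps=2, min_samples=10, plot=False)
#   df = mlf.doFilter(io, window=11)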
class TimeFilter(object):
""" Class to time filter middle latitude radars """
def __init__(self, df, beam=7, tw=15, eps=2, min_samples=10):
self.df = df[df.bmnum==beam]
self.tw = tw
self.eps = eps
self.min_samples = min_samples
self.boundaries = {}
return
def run_codes(self):
start = self.df.time.tolist()[0]
end = start + dt.timedelta(minutes=self.tw)
k, j = 0, 0
labels = []
time_index = []
while start <= self.df.time.tolist()[-1]:
u = self.df[(self.df.time>=start) & (self.df.time<=end)]
ds = DBSCAN(eps=self.eps, min_samples=self.min_samples).fit(u[["slist"]].values)
labs = ds.labels_
labs[labs>=0] = labs[labs>=0] + k
labels.extend(labs.tolist())
start = end
end = start + dt.timedelta(minutes=self.tw)
k += len(set(labs[labs>=0]))
time_index.extend([j]*len(labs))
j += 1
self.df["gate_labels"] = labels
self.df["labels"] = labels#[-1]*len(self.df)
self.df["time_index"] = time_index
K = len(self.df)
for ti in np.unique(time_index):
u = self.df[self.df.time_index==ti]
self.boundaries[ti] = []
for ix in np.unique(u.gate_labels):
du = u[u.gate_labels==ix]
if ix >= 0 and len(du)/len(u) > 0.5:
self.boundaries[ti].append({"peak": du.slist.mean(), "ub": du.slist.max(), "lb": du.slist.min(),
"value":len(du)/len(u), "time_index": ti, "gc": ix})
print(ti, np.unique(u.gate_labels), len(self.boundaries[ti]))
self.ltime, self.utime = np.min(time_index), np.max(time_index)
self.gc = []
for ti in np.unique(time_index):
clust = self.boundaries[ti]
for cl in clust:
self.gc.append(cl)
print(len(self.gc))
#for cx in np.unique(self.df["gate_labels"]):
# if cx >= 0:
# u = self.df[(self.df.gate_labels==cx)]
# self.gc.append({"peak": u.slist.mean(), "ub": u.slist.max(), "lb": u.slist.min(), "value":len(u)/K,
# "time_index": u.time_index.tolist()[0]})
self.sma_bgspace()
return
def sma_bgspace(self):
"""
Simple minded algorithm in B.G space
"""
def range_comp(x, y, pcnt=0.7):
_cx = False
insc = set(x).intersection(y)
if len(x) < len(y) and len(insc) >= len(x)*pcnt: _cx = True
if len(x) > len(y) and len(insc) >= len(y)*pcnt: _cx = True
return _cx
def find_adjucent(lst, mxx):
mxl = []
for l in lst:
if l["peak"] >= mxx["lb"] and l["peak"] <= mxx["ub"]: mxl.append(l)
elif mxx["peak"] >= l["lb"] and mxx["peak"] <= l["ub"]: mxl.append(l)
elif range_comp(range(l["lb"], l["ub"]+1), range(mxx["lb"], mxx["ub"]+1)): mxl.append(l)
return mxl
def nested_cluster_find(ti, mx, j, case=-1):
if ti < self.ltime or ti > self.utime: return
else:
if (case == -1 and ti >= self.ltime) or (case == 1 and ti <= self.utime):
mxl = find_adjucent(self.boundaries[ti], mx)
for m in mxl:
if m in self.gc:
del self.gc[self.gc.index(m)]
self.clusters[j].append(m)
nested_cluster_find(m["time_index"] + case, m, j, case)
nested_cluster_find(m["time_index"] + (-1*case), m, j, (-1*case))
return
self.clusters = {}
j = 0
while len(self.gc) > 0:
self.clusters[j] = []
mx = max(self.gc, key=lambda x:x["value"])
self.clusters[j].append(mx)
if mx in self.gc: del self.gc[self.gc.index(mx)]
nested_cluster_find(mx["time_index"] - 1, mx, j, case=-1)
nested_cluster_find(mx["time_index"] + 1, mx, j, case=1)
j += 1
for c in self.clusters.keys():
clust = self.clusters[c]
for cl in clust:
print(c, cl)
self.df["labels"] = np.where((self.df.slist<=cl["ub"]) & (self.df.slist>=cl["lb"]) &
(self.df.time_index==cl["time_index"]) & (self.df.gate_labels==cl["gc"])
, c, self.df["labels"])
print(set(self.df["labels"]), self.clusters.keys())
return
class ScatterTypeDetection(object):
""" Detecting scatter type """
def __init__(self, df):
""" kind: 0- individual, 2- KDE by grouping """
self.df = df
return
def run(self, kind=0, thresh=[1./3.,2./3.], case=0, mod=False):
self.kind = kind
self.thresh = thresh
self.case = case
if self.kind == 0: self.indp()
if self.kind == 1: self.group()
if self.kind == 11: self.new_group()
if self.kind == 2: self.kde()
if mod: self.gs_flg[self.gs_flg==2] = 0
self.df["gflg"] = self.gs_flg
return self.df
def kde(self):
from scipy.stats import beta
import warnings
warnings.filterwarnings('ignore', 'The iteration is not making good progress')
vel = np.hstack(self.df["v"])
wid = np.hstack(self.df["w_l"])
clust_flg_1d = np.hstack(self.df["labels"])
self.gs_flg = np.zeros(len(clust_flg_1d))
for c in np.unique(clust_flg_1d):
clust_mask = c == clust_flg_1d
if c == -1: self.gs_flg[clust_mask] = -1
else:
v, w = vel[clust_mask], wid[clust_mask]
if self.case == 0: f = 1/(1+np.exp(np.abs(v)+w/3-30))
if self.case == 1: f = 1/(1+np.exp(np.abs(v)+w/4-60))
if self.case == 2: f = 1/(1+np.exp(np.abs(v)-0.139*w+0.00113*w**2-33.1))
#if self.case == 3: f = 1/(1+np.exp(np.abs(w)-0.1*(v-0)**2-10))
if self.case == 3:
f = 1/(1+np.exp(np.abs(v)-20.))
gflg = np.median(f)
if gflg <= self.thresh[0]: gflg=0.
elif gflg >= self.thresh[1]: gflg=1.
else: gflg=-1
self.gs_flg[clust_mask] = gflg
return
def indp(self):
vel = np.hstack(self.df["v"])
wid = np.hstack(self.df["w_l"])
clust_flg_1d = np.hstack(self.df["labels"])
self.gs_flg = np.zeros(len(clust_flg_1d))
for c in np.unique(clust_flg_1d):
clust_mask = c == clust_flg_1d
if c == -1: self.gs_flg[clust_mask] = -1
else:
v, w = vel[clust_mask], wid[clust_mask]
gflg = np.zeros(len(v))
if self.case == 0: gflg = (np.abs(v)+w/3 < 30).astype(int)
if self.case == 1: gflg = (np.abs(v)+w/4 < 60).astype(int)
if self.case == 2: gflg = (np.abs(v)-0.139*w+0.00113*w**2<33.1).astype(int)
#if self.case == 3: gflg = (np.abs(w)-0.1*(v-0)**2<10).astype(int)
if self.case == 3:
for i, vi, wi in zip(range(len(v)),v,w):
if np.abs(vi)<10: gflg[i] = 1
elif np.abs(vi)>=15 and np.abs(vi)<50: gflg[i] = 2
elif np.abs(vi)>=50: gflg[i] = 0
self.gs_flg[clust_mask] = gflg
return
def new_group(self):
print("here", self.kind, self.case)
vel = np.hstack(self.df["v"])
wid = np.hstack(self.df["w_l"])
beams = np.hstack(self.df["bmnum"])
clust_flg_1d = np.hstack(self.df["labels"])
self.gs_flg = np.zeros(len(clust_flg_1d))
for c in np.unique(clust_flg_1d):
for bm in np.unique(beams):
clust_mask = c == clust_flg_1d
if c == -1: self.gs_flg[clust_mask] = -1
else:
v, w = np.mean(vel[clust_mask]), np.mean(wid[clust_mask])
v, w = vel[clust_mask], wid[clust_mask]
gflg = np.zeros(len(v))
if self.case == 0: gflg = (np.abs(v)+w/3 < 30).astype(int)
if self.case == 1: gflg = (np.abs(v)+w/4 < 60).astype(int)
if self.case == 2: gflg = (np.abs(v)-0.139*w+0.00113*w**2<33.1).astype(int)
if self.case == 3:
for i, vi, wi in zip(range(len(v)),v,w):
if np.abs(vi)<10: gflg[i] = 1
elif np.abs(vi)>=15 and np.abs(vi)<50: gflg[i] = 2
elif np.abs(vi)>=50: gflg[i] = 0
#gflg = (np.logical_or(np.abs(v)<20., np.abs(w)<30.)).astype(int)
self.gs_flg[clust_mask] = max(set(gflg.tolist()), key = gflg.tolist().count)
return
def group(self, type="median"):
vel = np.hstack(self.df["v"])
wid = np.hstack(self.df["w_l"])
clust_flg_1d = np.hstack(self.df["labels"])
self.gs_flg = np.zeros(len(clust_flg_1d))
for c in np.unique(clust_flg_1d):
clust_mask = c == clust_flg_1d
if c == -1: self.gs_flg[clust_mask] = -1
else:
v, w = np.mean(vel[clust_mask]), np.mean(wid[clust_mask])
v, w = vel[clust_mask], wid[clust_mask]
gflg = np.zeros(len(v))
if self.case == 0: gflg = (np.abs(v)+w/3 < 30).astype(int)
if self.case == 1: gflg = (np.abs(v)+w/4 < 60).astype(int)
if self.case == 2: gflg = (np.abs(v)-0.139*w+0.00113*w**2<33.1).astype(int)
#if self.case == 3:
# vl, vu = np.quantile(vel[clust_mask],0.25), np.quantile(vel[clust_mask],0.75)
# wl, wu = np.quantile(vel[clust_mask],0.25), np.quantile(vel[clust_mask],0.75)
# v, w = vel[clust_mask], wid[clust_mask]
# v, w = v[(v>vl) & (v<vu)], w[(w>wl) & (w<wu)]
# gflg = -1
# #if ((vu < 10) and (vl > -10.)) and (wu < 25.): gflg = 1
# if np.mean(np.abs(v))<5: gflg=1
# elif np.mean(np.abs(v))>=5 and np.mean(np.abs(v))<20: gflg = 2
# elif np.mean(np.abs(v))>=20: gflg = 0
#self.gs_flg[clust_mask] = gflg#max(set(gflg.tolist()), key = gflg.tolist().count)
if self.case == 3:
for i, vi, wi in zip(range(len(v)),v,w):
if np.abs(vi)<5: gflg[i] = 1
elif np.abs(vi)>=5 and np.abs(vi)<50: gflg[i] = 2
elif np.abs(vi)>=50: gflg[i] = 0
#gflg = (np.logical_or(np.abs(v)<20., np.abs(w)<30.)).astype(int)
self.gs_flg[clust_mask] = max(set(gflg.tolist()), key = gflg.tolist().count)
return
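# Hedged illustration of the case-0 ground-scatter criterion used above
# (|v| + w/3 < 30): a tiny self-contained check on made-up velocity/width
# values, independent of the ScatterTypeDetection class.
def _gs_rule_case0_example():
    v = np.array([5.0, 40.0, -12.0])   # line-of-sight velocities, m/s
    w = np.array([20.0, 90.0, 30.0])   # spectral widths, m/s
    gflg = (np.abs(v) + w / 3 < 30).astype(int)
    return gflg  # array([1, 0, 1]) -> 1 marks ground scatter under this rule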
|
import numpy as np
import math
def splitdata_train_test(data, fraction_training):
#randomize data set order
np.random.seed(0)
np.random.shuffle(data)
#find split point
training_rows = math.floor(len(data) * fraction_training) #int(...) would be enough
training_set = data[0:training_rows] #or data[:training_rows]
testing_set = data[training_rows:len(data)] #or data[training_rows:]
return (training_set, testing_set)
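# Note (assumption): sklearn's train_test_split can produce an equivalent split
# in one call, e.g.
#   training, testing = train_test_split(data, train_size=fraction_training, shuffle=True, random_state=0)
# the manual version above is kept here for clarity.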
if __name__ == "__main__":
data = np.load('galaxy_catalogue.npy')
# set the fraction of data which should be in the training set
fraction_training = 0.7
# split the data using your function
training, testing = splitdata_train_test(data, fraction_training)
# print the key values
print('Number of data galaxies:', len(data))
print('Train fraction:', fraction_training)
print('Number of galaxies in training set:', len(training))
print('Number of galaxies in testing set:', len(testing))
|
#!/usr/bin/env python3
"""Download images provided in wedding-list.csv"""
import csv
import subprocess
WEDDING_LIST_IMG_DIR = "static/img/wedding-list/"
WEDDING_LIST_CSV = "backend/db/wedding-list.csv"
with open(WEDDING_LIST_CSV) as csvfile:
reader = csv.reader(csvfile, quotechar='"')
next(reader)
images_urls = {row[3] for row in reader}
cmd = ['wget', '--no-clobber', '-P', WEDDING_LIST_IMG_DIR] + list(images_urls)
subprocess.run(cmd)
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../test/.'))
# -- Project information -----------------------------------------------------
project = 'sensparse'
copyright = '2019, dh'
author = 'dh'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'sensparsedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sensparse.tex', 'sensparse Documentation',
'dh', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sensparse', 'sensparse Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sensparse', 'sensparse Documentation',
author, 'sensparse', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
|
import cv2 as cv
import const
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from collections import deque
def get_neighbors(height, width, pixel):
return np.mgrid[
max(0, pixel[0] - 1):min(height, pixel[0] + 2),
max(0, pixel[1] - 1):min(width, pixel[1] + 2)
].reshape(2, -1).T
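# Worked example (comments only): for a 5x5 image, get_neighbors(5, 5, (0, 0))
# returns the 2x2 corner block [[0, 0], [0, 1], [1, 0], [1, 1]], i.e. the
# in-bounds neighbours including the pixel itself; an interior pixel yields all
# nine coordinates of its 3x3 neighbourhood.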
class Watershed(object):
MASK = -2
WSHD = 0
INIT = -1
INQE = -3
def __init__(self, levels=256):
self.levels = levels
# Neighbour (coordinates of) pixels, including the given pixel.
def apply(self, image):
current_label = 0
flag = False
fifo = deque()
shape = image.shape
height = shape[0]
width = shape[1]
total = height * width
labels = np.full((height, width), self.INIT, np.int32)
reshaped_image = image.reshape(total)
# [y, x] pairs of pixel coordinates of the flattened image.
pixels = np.mgrid[0:height, 0:width].reshape(2, -1).T
# Coordinates of neighbour pixels for each pixel.
neighbours = np.array([get_neighbors(height, width, p) for p in pixels])
if len(neighbours.shape) == 3:
# Case where all pixels have the same number of neighbours.
neighbours = neighbours.reshape(height, width, -1, 2)
else:
# Case where pixels may have a different number of neighbours.
neighbours = neighbours.reshape(height, width)
indices = np.argsort(reshaped_image)
sorted_image = reshaped_image[indices]
sorted_pixels = pixels[indices]
# self.levels evenly spaced steps from minimum to maximum.
levels = np.linspace(sorted_image[0], sorted_image[-1], self.levels)
level_indices = []
current_level = 0
# Get the indices that delimit pixels with different values.
for i in range(total):
if sorted_image[i] > levels[current_level]:
# Skip levels until the next highest one is reached.
while sorted_image[i] > levels[current_level]: current_level += 1
level_indices.append(i)
level_indices.append(total)
start_index = 0
for stop_index in level_indices:
# Mask all pixels at the current level.
for p in sorted_pixels[start_index:stop_index]:
labels[p[0], p[1]] = self.MASK
# Initialize queue with neighbours of existing basins at the current level.
for q in neighbours[p[0], p[1]]:
# p == q is ignored here because labels[p] < WSHD
if labels[q[0], q[1]] >= self.WSHD:
labels[p[0], p[1]] = self.INQE
fifo.append(p)
break
# Extend basins.
while fifo:
p = fifo.popleft()
# Label p by inspecting neighbours.
for q in neighbours[p[0], p[1]]:
# Don't set lab_p in the outer loop because it may change.
lab_p = labels[p[0], p[1]]
lab_q = labels[q[0], q[1]]
if lab_q > 0:
if lab_p == self.INQE or (lab_p == self.WSHD and flag):
labels[p[0], p[1]] = lab_q
elif lab_p > 0 and lab_p != lab_q:
labels[p[0], p[1]] = self.WSHD
flag = False
elif lab_q == self.WSHD:
if lab_p == self.INQE:
labels[p[0], p[1]] = self.WSHD
flag = True
elif lab_q == self.MASK:
labels[q[0], q[1]] = self.INQE
fifo.append(q)
# Detect and process new minima at the current level.
for p in sorted_pixels[start_index:stop_index]:
# p is inside a new minimum. Create a new label.
if labels[p[0], p[1]] == self.MASK:
current_label += 1
fifo.append(p)
labels[p[0], p[1]] = current_label
while fifo:
q = fifo.popleft()
for r in neighbours[q[0], q[1]]:
if labels[r[0], r[1]] == self.MASK:
fifo.append(r)
labels[r[0], r[1]] = current_label
start_index = stop_index
return labels
def main():
input_file = "../assets/seg_test.jpg"
image = cv.imread(input_file, cv.IMREAD_GRAYSCALE)
w = Watershed()
labels = w.apply(image)
fig = plt.figure()
fig.clf()
ax1 = fig.add_subplot(1, 1, 1)
ax1.imshow(labels, cmap='Paired', interpolation='nearest')
ax1.set_axis_off()
plt.show()
if __name__ == "__main__":
main()
|
from django import forms
from django.contrib.auth.forms import AuthenticationForm
class CustomAuthenticationForm(AuthenticationForm):
def confirm_login_allowed(self, user):
if not user.is_active or not user.is_validated:
raise forms.ValidationError('There was a problem with your login.', code='invalid_login')
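# Hedged usage note: this form can be plugged into Django's built-in login view,
# e.g. in urls.py (illustrative wiring, not defined in this project):
#   from django.contrib.auth.views import LoginView
#   path('login/', LoginView.as_view(authentication_form=CustomAuthenticationForm))
# `user.is_validated` above is assumed to be a custom field on the user model.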
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('flexbe_input')
import rospy
import pickle
import actionlib
import threading
from flexbe_msgs.msg import BehaviorInputAction, BehaviorInputFeedback, BehaviorInputResult, BehaviorInputGoal
from .complex_action_server import ComplexActionServer
'''
Created on 02/13/2015
@author: Philipp Schillinger, Brian Wright
'''
class BehaviorInput(object):
def __init__(self):
'''
Constructor
'''
#onboard connection
self._as = ComplexActionServer('flexbe/behavior_input', BehaviorInputAction, execute_cb=self.execute_cb, auto_start = False)
self._as.start()
rospy.loginfo("Ready for data requests...")
    def execute_cb(self, goal, goal_handle):
rospy.loginfo("--> Got a request!")
rospy.loginfo('"%s"' % goal.msg)
relay_ocs_client_ = actionlib.SimpleActionClient('flexbe/operator_input', BehaviorInputAction)
# wait for data msg
print("waiting")
relay_ocs_client_.wait_for_server()
print("done")
# Fill in the goal here
relay_ocs_client_.send_goal(goal)
print("waiting for result")
relay_ocs_client_.wait_for_result()
print("got result")
        result = relay_ocs_client_.get_result()
#result.data now serialized
data_str = result.data
print(data_str)
if(result.result_code == BehaviorInputResult.RESULT_OK):
self._as.set_succeeded(BehaviorInputResult(result_code=BehaviorInputResult.RESULT_OK, data=data_str), "ok",goal_handle)
elif(result.result_code == BehaviorInputResult.RESULT_FAILED):
# remove
self._as.set_succeeded(BehaviorInputResult(result_code=BehaviorInputResult.RESULT_FAILED, data=data_str),"failed",goal_handle)
rospy.loginfo("<-- Replied with FAILED")
elif(result.result_code == BehaviorInputResult.RESULT_ABORTED ):
self._as.set_succeeded(BehaviorInputResult(result_code=BehaviorInputResult.RESULT_ABORTED, data=data_str),"Aborted",goal_handle)
rospy.loginfo("<-- Replied with ABORT")
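# Hypothetical operator-side counterpart (illustration only, not part of FlexBE):
# the relay above expects some node to serve 'flexbe/operator_input' with the same
# action type and to fill in result_code and data. The fixed string payload below
# is an assumption; the real OCS side serializes the operator's input before replying.
def _example_operator_stub():
    def _answer(goal):
        server.set_succeeded(BehaviorInputResult(
            result_code=BehaviorInputResult.RESULT_OK, data="42"))

    rospy.init_node('operator_input_stub')
    server = actionlib.SimpleActionServer('flexbe/operator_input',
                                          BehaviorInputAction, _answer,
                                          auto_start=False)
    server.start()
    rospy.spin()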
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from scipy.optimize import fmin
import emcee
__all__ = ['mcmc_fit', 'initial_odr_fit']
def v_vector(theta):
"""
Hogg+ 2010, Eqn 29.
"""
return [[-np.sin(theta)], [np.cos(theta)]]
def lnprior(p, max_theta=1.55, min_theta=1.5, min_lnf=0):
theta, b, lnf = p
if not ((min_theta < theta < max_theta) and (-0.5 < b < 0.5) and
(lnf > min_lnf)):
return -np.inf
else:
return 0
def ln_likelihood(p, x, y, x_err, y_err):
"""
Hogg+ 2010, Eqn 30., with an additional parameter that scales up the
uncertainty in the x dimension, ``x_err``, by a constant factor.
The likelihood has been written assuming x and y uncertainties are
uncorrelated.
"""
    theta, b, lnf = p
    # The prior bounds on theta, b and lnf are enforced via lnprior() below.
v = v_vector(theta)
f = np.exp(lnf)
lnp = lnprior(p)
if not np.isfinite(lnp):
return lnp
delta = v[0][0] * x + v[1][0] * y - b * np.cos(theta)
sigma_sq = v[0][0]**2 * (f * x_err)**2 + v[1][0]**2 * y_err**2
# sigma_sq = v[0][0]**2 * x_err**2 + v[1][0]**2 * y_err**2
ln_like = np.sum(-0.5 * (delta**2 / sigma_sq + np.log(sigma_sq) +
np.log(2*np.pi)))
return ln_like
def initial_odr_fit(s_apo, s_mwo, init_guess):
"""
    Use `~scipy.optimize.fmin` to minimize the negative log-likelihood and obtain initial parameter estimates.
Parameters
----------
s_apo : `Measurement`
s_mwo : `Measurement`
init_guess : list or `~numpy.ndarray`
Returns
-------
initial_params : `~numpy.ndarray`
"""
initial_params = fmin(lambda *args, **kwargs: -ln_likelihood(*args, **kwargs),
init_guess, args=(s_apo.value, s_mwo.value,
s_apo.err, s_mwo.err))
return initial_params
def mcmc_fit(s_apo, s_mwo, init_guess, nwalkers, n_steps_burnin=2000,
n_steps_postburnin=5000, ln_likelihood=ln_likelihood):
ndim = len(init_guess)
p0 = []
while len(p0) < nwalkers:
trial = [init_guess[0] + 0.05 * np.random.randn(),
init_guess[1] + 0.01 * np.random.randn(),
init_guess[2] + 0.001 * np.random.randn()]
if np.isfinite(lnprior(trial)):
p0.append(trial)
args = (s_apo.value, s_mwo.value, s_apo.err, s_mwo.err)
sampler = emcee.EnsembleSampler(nwalkers, ndim, ln_likelihood, args=args,
threads=2)
    # Two rounds of burn-in, resetting the sampler after each:
p1 = sampler.run_mcmc(p0, n_steps_burnin)[0]
sampler.reset()
p2 = sampler.run_mcmc(p1, n_steps_burnin)[0]
sampler.reset()
# Now run for this many more steps:
sampler.run_mcmc(p2, n_steps_postburnin)
samples = sampler.chain[:, :, :].reshape((-1, ndim))
return samples
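# Minimal end-to-end sketch (illustration only): the fit functions above only need
# objects exposing `.value` and `.err` arrays, so a namedtuple stands in for the
# project's Measurement class, and the walker/step counts are kept tiny.
if __name__ == "__main__":
    from collections import namedtuple

    Measurement = namedtuple("Measurement", ["value", "err"])

    rng = np.random.RandomState(42)
    x = np.linspace(0.1, 1.0, 50)
    # Slope ~20 so that theta = arctan(slope) sits inside lnprior's (1.5, 1.55) window.
    y = 20.0 * x + 0.05 + 0.01 * rng.randn(x.size)
    s_apo = Measurement(value=x, err=np.full_like(x, 0.01))
    s_mwo = Measurement(value=y, err=np.full_like(y, 0.01))

    init_guess = [1.52, 0.05, 0.1]  # theta, b, lnf (all inside the prior)
    p_init = initial_odr_fit(s_apo, s_mwo, init_guess)
    samples = mcmc_fit(s_apo, s_mwo, p_init, nwalkers=16,
                       n_steps_burnin=50, n_steps_postburnin=100)
    print(samples.shape)  # (nwalkers * n_steps_postburnin, ndim)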
|
#!/usr/bin/env python3
import math
import sys
from typing import Any, Dict
import numpy as np
from selfdrive.controls.lib.vehicle_model import ACCELERATION_DUE_TO_GRAVITY
from selfdrive.locationd.models.constants import ObservationKind
from selfdrive.swaglog import cloudlog
from rednose.helpers.kalmanfilter import KalmanFilter
if __name__ == '__main__': # Generating sympy
import sympy as sp
from rednose.helpers.ekf_sym import gen_code
else:
from rednose.helpers.ekf_sym_pyx import EKF_sym # pylint: disable=no-name-in-module, import-error
i = 0
def _slice(n):
global i
s = slice(i, i + n)
i += n
return s
class States():
# Vehicle model params
STIFFNESS = _slice(1) # [-]
STEER_RATIO = _slice(1) # [-]
ANGLE_OFFSET = _slice(1) # [rad]
ANGLE_OFFSET_FAST = _slice(1) # [rad]
VELOCITY = _slice(2) # (x, y) [m/s]
YAW_RATE = _slice(1) # [rad/s]
STEER_ANGLE = _slice(1) # [rad]
ROAD_ROLL = _slice(1) # [rad]
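# With the module-level _slice counter above, these evaluate to contiguous,
# non-overlapping ranges into the 9-element state vector:
#   STIFFNESS=slice(0, 1), STEER_RATIO=slice(1, 2), ANGLE_OFFSET=slice(2, 3),
#   ANGLE_OFFSET_FAST=slice(3, 4), VELOCITY=slice(4, 6), YAW_RATE=slice(6, 7),
#   STEER_ANGLE=slice(7, 8), ROAD_ROLL=slice(8, 9)
# so indexing like x_init[States.STEER_RATIO] addresses exactly the intended entries.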
class CarKalman(KalmanFilter):
name = 'car'
initial_x = np.array([
1.0,
15.0,
0.0,
0.0,
10.0, 0.0,
0.0,
0.0,
0.0
])
# process noise
Q = np.diag([
(.05 / 100)**2,
.01**2,
math.radians(0.02)**2,
math.radians(0.25)**2,
.1**2, .01**2,
math.radians(0.1)**2,
math.radians(0.1)**2,
math.radians(1)**2,
])
P_initial = Q.copy()
obs_noise: Dict[int, Any] = {
ObservationKind.STEER_ANGLE: np.atleast_2d(math.radians(0.01)**2),
ObservationKind.ANGLE_OFFSET_FAST: np.atleast_2d(math.radians(10.0)**2),
ObservationKind.ROAD_ROLL: np.atleast_2d(math.radians(1.0)**2),
ObservationKind.STEER_RATIO: np.atleast_2d(5.0**2),
ObservationKind.STIFFNESS: np.atleast_2d(5.0**2),
ObservationKind.ROAD_FRAME_X_SPEED: np.atleast_2d(0.1**2),
}
global_vars = [
'mass',
'rotational_inertia',
'center_to_front',
'center_to_rear',
'stiffness_front',
'stiffness_rear',
]
@staticmethod
def generate_code(generated_dir):
dim_state = CarKalman.initial_x.shape[0]
name = CarKalman.name
    # The vehicle model comes from The Science of Vehicle Dynamics: Handling, Braking, and Ride of Road and Race Cars
    # The model used is the one in 6.15, with the formula from 6.198
# globals
global_vars = [sp.Symbol(name) for name in CarKalman.global_vars]
m, j, aF, aR, cF_orig, cR_orig = global_vars
# make functions and jacobians with sympy
# state variables
state_sym = sp.MatrixSymbol('state', dim_state, 1)
state = sp.Matrix(state_sym)
# Vehicle model constants
x = state[States.STIFFNESS, :][0, 0]
cF, cR = x * cF_orig, x * cR_orig
angle_offset = state[States.ANGLE_OFFSET, :][0, 0]
angle_offset_fast = state[States.ANGLE_OFFSET_FAST, :][0, 0]
theta = state[States.ROAD_ROLL, :][0, 0]
sa = state[States.STEER_ANGLE, :][0, 0]
sR = state[States.STEER_RATIO, :][0, 0]
u, v = state[States.VELOCITY, :]
r = state[States.YAW_RATE, :][0, 0]
A = sp.Matrix(np.zeros((2, 2)))
A[0, 0] = -(cF + cR) / (m * u)
A[0, 1] = -(cF * aF - cR * aR) / (m * u) - u
A[1, 0] = -(cF * aF - cR * aR) / (j * u)
A[1, 1] = -(cF * aF**2 + cR * aR**2) / (j * u)
B = sp.Matrix(np.zeros((2, 1)))
B[0, 0] = cF / m / sR
B[1, 0] = (cF * aF) / j / sR
C = sp.Matrix(np.zeros((2, 1)))
C[0, 0] = ACCELERATION_DUE_TO_GRAVITY
C[1, 0] = 0
x = sp.Matrix([v, r]) # lateral velocity, yaw rate
x_dot = A * x + B * (sa - angle_offset - angle_offset_fast) - C * theta
dt = sp.Symbol('dt')
state_dot = sp.Matrix(np.zeros((dim_state, 1)))
state_dot[States.VELOCITY.start + 1, 0] = x_dot[0]
state_dot[States.YAW_RATE.start, 0] = x_dot[1]
    # Basic discretization, 1st-order integrator
# Can be pretty bad if dt is big
f_sym = state + dt * state_dot
#
# Observation functions
#
obs_eqs = [
[sp.Matrix([r]), ObservationKind.ROAD_FRAME_YAW_RATE, None],
[sp.Matrix([u, v]), ObservationKind.ROAD_FRAME_XY_SPEED, None],
[sp.Matrix([u]), ObservationKind.ROAD_FRAME_X_SPEED, None],
[sp.Matrix([sa]), ObservationKind.STEER_ANGLE, None],
[sp.Matrix([angle_offset_fast]), ObservationKind.ANGLE_OFFSET_FAST, None],
[sp.Matrix([sR]), ObservationKind.STEER_RATIO, None],
[sp.Matrix([x]), ObservationKind.STIFFNESS, None],
[sp.Matrix([theta]), ObservationKind.ROAD_ROLL, None],
]
gen_code(generated_dir, name, f_sym, dt, state_sym, obs_eqs, dim_state, dim_state, global_vars=global_vars)
def __init__(self, generated_dir, steer_ratio=15, stiffness_factor=1, angle_offset=0, P_initial=None): # pylint: disable=super-init-not-called
dim_state = self.initial_x.shape[0]
dim_state_err = self.P_initial.shape[0]
x_init = self.initial_x
x_init[States.STEER_RATIO] = steer_ratio
x_init[States.STIFFNESS] = stiffness_factor
x_init[States.ANGLE_OFFSET] = angle_offset
if P_initial is not None:
self.P_initial = P_initial
# init filter
self.filter = EKF_sym(generated_dir, self.name, self.Q, self.initial_x, self.P_initial, dim_state, dim_state_err, global_vars=self.global_vars, logger=cloudlog)
if __name__ == "__main__":
generated_dir = sys.argv[2]
CarKalman.generate_code(generated_dir)
|
#!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
from MACE.Parsers.VCF import CollectionVCF
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", action="store", dest="input", required=True,
help="Input vcf file with variants")
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix", required=True,
help="Prefix of output files")
args = parser.parse_args()
variants = CollectionVCF(in_file=args.input, from_file=True)
homozygous, heterozygous = variants.filter_by_zygoty()
homozygous.write("%s.homo.vcf" % args.output_prefix)
heterozygous.write("%s.hetero.vcf" % args.output_prefix)
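# Example invocation (script name assumed for illustration):
#   python filter_by_zygoty.py -i variants.vcf -o sample
# which writes sample.homo.vcf and sample.hetero.vcf.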
|
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import sys
import pytest
import numpy
import awkward1
numba = pytest.importorskip("numba")
awkward1_numba = pytest.importorskip("awkward1._connect._numba")
awkward1_numba_arrayview = pytest.importorskip("awkward1._connect._numba.arrayview")
awkward1_numba.register_and_check()
def test_view():
aslist = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
asarray = awkward1.repartition(awkward1.Array(aslist), 3)
asview = awkward1_numba_arrayview.ArrayView.fromarray(asarray)
for start in range(10):
for stop in range(start, 10):
asview.start = start
asview.stop = stop
assert awkward1.to_list(asview.toarray()) == aslist[start:stop]
asarray = awkward1.repartition(awkward1.Array(aslist), [3, 2, 0, 1, 4])
asview = awkward1_numba_arrayview.ArrayView.fromarray(asarray)
for start in range(10):
for stop in range(start, 10):
asview.start = start
asview.stop = stop
assert awkward1.to_list(asview.toarray()) == aslist[start:stop]
aslist = [[1, 2, 3], [], [4, 5], [6], [7, 8, 9, 10]]
asarray = awkward1.repartition(awkward1.Array(aslist), 3)
asview = awkward1_numba_arrayview.ArrayView.fromarray(asarray)
for start in range(5):
for stop in range(start, 5):
asview.start = start
asview.stop = stop
assert awkward1.to_list(asview.toarray()) == aslist[start:stop]
def test_boxing1():
asnumpy = numpy.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert sys.getrefcount(asnumpy) == 2
aslayout = awkward1.layout.NumpyArray(asnumpy)
aspart = awkward1.repartition(aslayout, 3, highlevel=False)
asarray = awkward1.Array(aspart)
aspart = asarray._layout
assert (sys.getrefcount(asnumpy), sys.getrefcount(aslayout), sys.getrefcount(aspart)) == (3, 2, 3)
@numba.njit
def f1(x):
return 3.14
for i in range(5):
f1(asarray)
assert (sys.getrefcount(asnumpy), sys.getrefcount(aslayout), sys.getrefcount(aspart)) == (3, 2, 3)
del asarray
del aspart
del aslayout
import gc
gc.collect()
assert sys.getrefcount(asnumpy) == 2
def test_boxing2():
asnumpy = numpy.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert sys.getrefcount(asnumpy) == 2
aslayout = awkward1.layout.NumpyArray(asnumpy)
aspart = awkward1.repartition(aslayout, 3, highlevel=False)
asarray = awkward1.Array(aspart)
aspart = asarray._layout
assert (sys.getrefcount(asnumpy), sys.getrefcount(aslayout), sys.getrefcount(aspart)) == (3, 2, 3)
@numba.njit
def f2(x):
return x
for i in range(10):
out = f2(asarray)
assert isinstance(out.layout, awkward1.partition.PartitionedArray)
assert awkward1.to_list(out) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert (sys.getrefcount(asnumpy), sys.getrefcount(aslayout), sys.getrefcount(aspart)) == (3, 2, 3)
del out
del asarray
del aspart
del aslayout
import gc
gc.collect()
assert sys.getrefcount(asnumpy) == 2
def test_boxing3():
asnumpy = numpy.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert sys.getrefcount(asnumpy) == 2
aslayout = awkward1.layout.NumpyArray(asnumpy)
aspart = awkward1.repartition(aslayout, 3, highlevel=False)
asarray = awkward1.Array(aspart)
aspart = asarray._layout
assert (sys.getrefcount(asnumpy), sys.getrefcount(aslayout), sys.getrefcount(aspart)) == (3, 2, 3)
@numba.njit
def f3(x):
return x, x
for i in range(10):
out1, out2 = f3(asarray)
assert isinstance(out1.layout, awkward1.partition.PartitionedArray)
assert isinstance(out2.layout, awkward1.partition.PartitionedArray)
assert awkward1.to_list(out1) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert awkward1.to_list(out2) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert (sys.getrefcount(asnumpy), sys.getrefcount(aslayout), sys.getrefcount(aspart)) == (3, 2, 3)
del out1
del out2
del asarray
del aspart
del aslayout
import gc
gc.collect()
assert sys.getrefcount(asnumpy) == 2
def test_getitem_1a():
array = awkward1.repartition(awkward1.Array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]), 3)
@numba.njit
def f1(x, i):
return x[i]
assert [f1(array, i) for i in range(10)] == [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]
assert [f1(array, -i) for i in range(1, 11)] == [9.9, 8.8, 7.7, 6.6, 5.5, 4.4, 3.3, 2.2, 1.1, 0.0]
def test_getitem_1b():
asnumpy = numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
array = awkward1.repartition(awkward1.Array(asnumpy), 3)
assert sys.getrefcount(asnumpy) == 3
@numba.njit
def f2(x, i1, i2):
out = x[i1:i2]
return out
assert isinstance(f2(array, 0, 10).layout, awkward1.partition.PartitionedArray)
assert isinstance(f2(array, 4, 5).layout, awkward1.partition.PartitionedArray)
assert isinstance(f2(array, 5, 5).layout, awkward1.partition.PartitionedArray)
for start in range(-10, 10):
for stop in range(-10, 10):
assert awkward1.to_list(f2(array, start, stop)) == [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9][start:stop]
assert sys.getrefcount(asnumpy) == 3
del array
assert sys.getrefcount(asnumpy) == 2
def test_getitem_2():
aslist = [{"x": 0.0, "y": []}, {"x": 1.1, "y": [1]}, {"x": 2.2, "y": [2, 2]},
{"x": 3.3, "y": [3, 3, 3]}, {"x": 4.4, "y": [4, 4, 4, 4]}, {"x": 5.5, "y": [5, 5, 5]},
{"x": 6.6, "y": [6, 6]}, {"x": 7.7, "y": [7]}, {"x": 8.8, "y": []}]
asarray = awkward1.repartition(awkward1.Array(aslist), 2)
@numba.njit
def f3a(x):
return x["x"]
assert awkward1.to_list(f3a(asarray)) == [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8]
@numba.njit
def f3b(x):
return x.x
assert awkward1.to_list(f3b(asarray)) == [0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8]
@numba.njit
def f4a(x):
return x["y"]
assert awkward1.to_list(f4a(asarray)) == [[], [1], [2, 2], [3, 3, 3], [4, 4, 4, 4], [5, 5, 5], [6, 6], [7], []]
@numba.njit
def f4b(x):
return x.y
assert awkward1.to_list(f4b(asarray)) == [[], [1], [2, 2], [3, 3, 3], [4, 4, 4, 4], [5, 5, 5], [6, 6], [7], []]
@numba.njit
def f5a(x, i):
return x["x"][i]
assert [f5a(asarray, i) for i in range(-9, 9)]
@numba.njit
def f5b(x, i):
return x[i]["x"]
assert [f5b(asarray, i) for i in range(-9, 9)]
@numba.njit
def f5c(x, i):
return x.x[i]
assert [f5c(asarray, i) for i in range(-9, 9)]
@numba.njit
def f5d(x, i):
return x[i].x
assert [f5d(asarray, i) for i in range(-9, 9)]
@numba.njit
def f6a(x, i):
return x["y"][i]
assert awkward1.to_list(f6a(asarray, 6)) == [6, 6]
assert awkward1.to_list(f6a(asarray, -3)) == [6, 6]
@numba.njit
def f6b(x, i):
return x[i]["y"]
assert awkward1.to_list(f6b(asarray, 6)) == [6, 6]
assert awkward1.to_list(f6b(asarray, -3)) == [6, 6]
@numba.njit
def f6c(x, i):
return x.y[i]
assert awkward1.to_list(f6c(asarray, 6)) == [6, 6]
assert awkward1.to_list(f6c(asarray, -3)) == [6, 6]
@numba.njit
def f6d(x, i):
return x[i].y
assert awkward1.to_list(f6d(asarray, 6)) == [6, 6]
assert awkward1.to_list(f6d(asarray, -3)) == [6, 6]
def test_len():
array = awkward1.repartition(awkward1.Array([1.1, 2.2, 3.3, 4.4, 5.5]), 3)
@numba.njit
def f1(x):
return len(x)
assert f1(array) == 5
aslist = [{"x": 0.0, "y": []}, {"x": 1.1, "y": [1]}, {"x": 2.2, "y": [2, 2]},
{"x": 3.3, "y": [3, 3, 3]}, {"x": 4.4, "y": [4, 4, 4, 4]}, {"x": 5.5, "y": [5, 5, 5]},
{"x": 6.6, "y": [6, 6]}, {"x": 7.7, "y": [7]}, {"x": 8.8, "y": []}]
asarray = awkward1.repartition(awkward1.Array(aslist), 2)
assert f1(asarray) == 9
def test_iter():
asnumpy = numpy.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
assert sys.getrefcount(asnumpy) == 2
array = awkward1.repartition(awkward1.Array(asnumpy), 3)
assert sys.getrefcount(asnumpy) == 3
@numba.njit
def f1(x):
out = 0
for xi in x:
out += xi
return out
for i in range(10):
assert f1(array) == 45
assert sys.getrefcount(asnumpy) == 3
del array
assert sys.getrefcount(asnumpy) == 2
aslist = [{"x": 0.0, "y": []}, {"x": 1.1, "y": [1]}, {"x": 2.2, "y": [2, 2]},
{"x": 3.3, "y": [3, 3, 3]}, {"x": 4.4, "y": [4, 4, 4, 4]}, {"x": 5.5, "y": [5, 5, 5]},
{"x": 6.6, "y": [6, 6]}, {"x": 7.7, "y": [7]}, {"x": 8.8, "y": []}]
asarray = awkward1.repartition(awkward1.Array(aslist), 2)
@numba.njit
def f2(x):
i = 0
for xi in x:
if i == 6:
return xi["y"]
i += 1
assert awkward1.to_list(f2(asarray)) == [6, 6]
@numba.njit
def f3(x):
i = 0
for xi in x:
if i == 6:
return xi
i += 1
assert awkward1.to_list(f3(asarray)) == {"x": 6.6, "y": [6, 6]}
|
import pygame
import random
class Apple:
def __init__(self):
self.apple_skin = pygame.Surface((10, 10))
self.apple_skin.fill((255, 0, 0))
self.pos = (random.randint(0, 400) // 10 * 10, random.randint(0, 400) // 10 * 10)
def new_pos(self):
self.pos = (random.randint(0, 400) // 10 * 10, random.randint(0, 400) // 10 * 10)
def apple_collected(self, snake, player, frame_rate):
if self.pos == snake.snake_size[0]:
self.new_pos()
player.increase_score()
frame_rate += 1
snake.snake_size.append((0, 0))
return frame_rate
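# Minimal usage sketch (illustration only): the stubs below are hypothetical
# stand-ins providing just what apple_collected() touches -- snake.snake_size
# with the head at index 0, and player.increase_score().
class _SnakeStub:
    def __init__(self):
        self.snake_size = [(100, 100)]


class _PlayerStub:
    def __init__(self):
        self.score = 0

    def increase_score(self):
        self.score += 1


if __name__ == "__main__":
    apple = Apple()
    snake, player = _SnakeStub(), _PlayerStub()
    snake.snake_size[0] = apple.pos  # force a collision for the demo
    frame_rate = apple.apple_collected(snake, player, frame_rate=10)
    print(player.score, frame_rate, len(snake.snake_size))  # -> 1 11 2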
|
import numpy as np
from skimage.measure import points_in_poly
import pandas as pd
from ._base import GeneAssignmentAlgorithm
class PointInPoly(GeneAssignmentAlgorithm):
def __init__(self, verbose=False, **kwargs):
self.verbose = verbose
@classmethod
def add_arguments(cls, parser):
pass
@staticmethod
def _assign(cells_region, spots, use_hull=True, verbose=False):
res = pd.DataFrame({'spot_id': range(0, spots.shape[0])})
res['cell_id'] = None
for cell_id in range(cells_region.count):
if use_hull:
verts = cells_region[cell_id].hull
else:
verts = cells_region[cell_id].coordinates
verts = np.array(verts)
in_poly = points_in_poly(spots, verts)
res.loc[res.spot_id[in_poly], 'cell_id'] = cell_id
if verbose:
cnt = np.sum(in_poly)
print(cell_id, cnt)
return res
def assign_genes(self, intensity_table, regions):
x = intensity_table.coords['features'].x.values
y = intensity_table.coords['features'].y.values
points = pd.DataFrame(dict(x=x, y=y))
return self._assign(regions, points, use_hull=True, verbose=self.verbose)
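# Minimal sketch of the assignment step (assumed interfaces): _assign() only needs
# a regions object exposing `.count` and item access yielding cells with `.hull`
# (or `.coordinates`) vertex arrays, so tiny stand-ins are used here.
if __name__ == "__main__":
    from collections import namedtuple

    _Cell = namedtuple("_Cell", ["hull", "coordinates"])

    class _RegionsStub:
        def __init__(self, cells):
            self._cells = cells
            self.count = len(cells)

        def __getitem__(self, i):
            return self._cells[i]

    square = np.array([[0.0, 0.0], [0.0, 10.0], [10.0, 10.0], [10.0, 0.0]])
    regions = _RegionsStub([_Cell(hull=square, coordinates=square)])
    spots = pd.DataFrame({"x": [2.0, 20.0], "y": [3.0, 20.0]})
    # Spot 0 falls inside the square and is assigned cell_id 0; spot 1 stays None.
    print(PointInPoly._assign(regions, spots, use_hull=True, verbose=True))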
|
from __future__ import annotations
import asyncio
import bisect
import builtins
import concurrent.futures
import errno
import heapq
import logging
import os
import random
import sys
import threading
import warnings
import weakref
from collections import defaultdict, deque, namedtuple
from collections.abc import Callable, Hashable, Iterable, MutableMapping
from contextlib import suppress
from datetime import timedelta
from inspect import isawaitable
from pickle import PicklingError
from typing import TYPE_CHECKING, Any, ClassVar
if TYPE_CHECKING:
from .client import Client
from tlz import first, keymap, merge, pluck # noqa: F401
from tornado.ioloop import IOLoop, PeriodicCallback
import dask
from dask.core import istask
from dask.system import CPU_COUNT
from dask.utils import (
apply,
format_bytes,
funcname,
parse_bytes,
parse_timedelta,
stringify,
typename,
)
from . import comm, preloading, profile, system, utils
from .batched import BatchedSend
from .comm import connect, get_address_host
from .comm.addressing import address_from_user_args, parse_address
from .comm.utils import OFFLOAD_THRESHOLD
from .core import (
CommClosedError,
Status,
coerce_to_address,
error_message,
pingpong,
send_recv,
)
from .diagnostics import nvml
from .diagnostics.plugin import _get_plugin_name
from .diskutils import WorkSpace
from .http import get_handlers
from .metrics import time
from .node import ServerNode
from .proctitle import setproctitle
from .protocol import pickle, to_serialize
from .pubsub import PubSubWorkerExtension
from .security import Security
from .sizeof import safe_sizeof as sizeof
from .threadpoolexecutor import ThreadPoolExecutor
from .threadpoolexecutor import secede as tpe_secede
from .utils import (
LRU,
TimeoutError,
_maybe_complex,
get_ip,
has_arg,
import_file,
iscoroutinefunction,
json_load_robust,
key_split,
log_errors,
offload,
parse_ports,
silence_logging,
thread_state,
warn_on_duration,
)
from .utils_comm import gather_from_workers, pack_data, retry_operation
from .utils_perf import ThrottledGC, disable_gc_diagnosis, enable_gc_diagnosis
from .versions import get_versions
logger = logging.getLogger(__name__)
LOG_PDB = dask.config.get("distributed.admin.pdb-on-err")
no_value = "--no-value-sentinel--"
PROCESSING = {
"waiting",
"ready",
"constrained",
"executing",
"long-running",
"cancelled",
"resumed",
}
READY = {"ready", "constrained"}
DEFAULT_EXTENSIONS: list[type] = [PubSubWorkerExtension]
DEFAULT_METRICS: dict[str, Callable[[Worker], Any]] = {}
DEFAULT_STARTUP_INFORMATION: dict[str, Callable[[Worker], Any]] = {}
DEFAULT_DATA_SIZE = parse_bytes(
dask.config.get("distributed.scheduler.default-data-size")
)
SerializedTask = namedtuple("SerializedTask", ["function", "args", "kwargs", "task"])
class InvalidTransition(Exception):
pass
class TaskState:
"""Holds volatile state relating to an individual Dask task
* **dependencies**: ``set(TaskState instances)``
The data needed by this key to run
* **dependents**: ``set(TaskState instances)``
The keys that use this dependency.
* **duration**: ``float``
        Expected duration of the task
* **priority**: ``tuple``
        The priority of this task as given by the scheduler. Determines run order.
* **state**: ``str``
The current state of the task. One of ["waiting", "ready", "executing",
"fetch", "memory", "flight", "long-running", "rescheduled", "error"]
* **who_has**: ``set(worker)``
Workers that we believe have this data
* **coming_from**: ``str``
The worker that current task data is coming from if task is in flight
* **waiting_for_data**: ``set(keys of dependencies)``
A dynamic version of dependencies. All dependencies that we still don't
have for a particular key.
* **resource_restrictions**: ``{str: number}``
Abstract resources required to run a task
* **exception**: ``str``
The exception caused by running a task if it erred
* **traceback**: ``str``
        The traceback from running a task if it erred
* **type**: ``type``
The type of a particular piece of data
* **suspicious_count**: ``int``
The number of times a dependency has not been where we expected it
* **startstops**: ``[{startstop}]``
Log of transfer, load, and compute times for a task
* **start_time**: ``float``
Time at which task begins running
* **stop_time**: ``float``
Time at which task finishes running
* **metadata**: ``dict``
Metadata related to task. Stored metadata should be msgpack
serializable (e.g. int, string, list, dict).
* **nbytes**: ``int``
The size of a particular piece of data
* **annotations**: ``dict``
Task annotations
Parameters
----------
key: str
runspec: SerializedTask
A named tuple containing the ``function``, ``args``, ``kwargs`` and
``task`` associated with this `TaskState` instance. This defaults to
``None`` and can remain empty if it is a dependency that this worker
will receive from another worker.
"""
def __init__(self, key, runspec=None):
assert key is not None
self.key = key
self.runspec = runspec
self.dependencies = set()
self.dependents = set()
self.duration = None
self.priority = None
self.state = "released"
self.who_has = set()
self.coming_from = None
self.waiting_for_data = set()
self.waiters = set()
self.resource_restrictions = {}
self.exception = None
self.exception_text = ""
self.traceback = None
self.traceback_text = ""
self.type = None
self.suspicious_count = 0
self.startstops = []
self.start_time = None
self.stop_time = None
self.metadata = {}
self.nbytes = None
self.annotations = None
self.done = False
self._next = None
def __repr__(self):
return f"<Task {self.key!r} {self.state}>"
def get_nbytes(self) -> int:
nbytes = self.nbytes
return nbytes if nbytes is not None else DEFAULT_DATA_SIZE
def is_protected(self) -> bool:
return self.state in PROCESSING or any(
dep_ts.state in PROCESSING for dep_ts in self.dependents
)
class Worker(ServerNode):
"""Worker node in a Dask distributed cluster
Workers perform two functions:
1. **Serve data** from a local dictionary
2. **Perform computation** on that data and on data from peers
Workers keep the scheduler informed of their data and use that scheduler to
gather data from other workers when necessary to perform a computation.
You can start a worker with the ``dask-worker`` command line application::
$ dask-worker scheduler-ip:port
Use the ``--help`` flag to see more options::
$ dask-worker --help
    The rest of this docstring is about the internal state that the worker uses
to manage and track internal computations.
**State**
**Informational State**
These attributes don't change significantly during execution.
* **nthreads:** ``int``:
Number of nthreads used by this worker process
* **executors:** ``Dict[str, concurrent.futures.Executor]``:
Executors used to perform computation. Always contains the default
executor.
* **local_directory:** ``path``:
Path on local machine to store temporary files
* **scheduler:** ``rpc``:
Location of scheduler. See ``.ip/.port`` attributes.
* **name:** ``string``:
Alias
* **services:** ``{str: Server}``:
Auxiliary web servers running on this worker
* **service_ports:** ``{str: port}``:
* **total_out_connections**: ``int``
The maximum number of concurrent outgoing requests for data
* **total_in_connections**: ``int``
The maximum number of concurrent incoming requests for data
* **comm_threshold_bytes**: ``int``
As long as the total number of bytes in flight is below this threshold
        we will not limit the number of outgoing connections for a single task's
dependency fetch.
* **batched_stream**: ``BatchedSend``
A batched stream along which we communicate to the scheduler
* **log**: ``[(message)]``
A structured and queryable log. See ``Worker.story``
**Volatile State**
These attributes track the progress of tasks that this worker is trying to
complete. In the descriptions below a ``key`` is the name of a task that
we want to compute and ``dep`` is the name of a piece of dependent data
that we want to collect from others.
* **tasks**: ``{key: TaskState}``
The tasks currently executing on this worker (and any dependencies of those tasks)
* **data:** ``{key: object}``:
        Prefer using the **host** attribute instead of this, unless
        memory_limit and at least one of memory_target_fraction or
        memory_spill_fraction are defined; in that case this attribute
        is a zict.Buffer, from which information on the LRU cache can be queried.
* **data.memory:** ``{key: object}``:
Dictionary mapping keys to actual values stored in memory. Only
available if condition for **data** being a zict.Buffer is met.
* **data.disk:** ``{key: object}``:
Dictionary mapping keys to actual values stored on disk. Only
available if condition for **data** being a zict.Buffer is met.
* **data_needed**: deque(keys)
The keys which still require data in order to execute, arranged in a deque
* **ready**: [keys]
Keys that are ready to run. Stored in a LIFO stack
* **constrained**: [keys]
Keys for which we have the data to run, but are waiting on abstract
resources like GPUs. Stored in a FIFO deque
* **executing_count**: ``int``
A count of tasks currently executing on this worker
* **executed_count**: int
A number of tasks that this worker has run in its lifetime
* **long_running**: {keys}
A set of keys of tasks that are running and have started their own
long-running clients.
* **has_what**: ``{worker: {deps}}``
The data that we care about that we think a worker has
* **pending_data_per_worker**: ``{worker: [dep]}``
The data on each worker that we still want, prioritized as a deque
* **in_flight_tasks**: ``int``
A count of the number of tasks that are coming to us in current
peer-to-peer connections
* **in_flight_workers**: ``{worker: {task}}``
The workers from which we are currently gathering data and the
dependencies we expect from those connections
* **comm_bytes**: ``int``
The total number of bytes in flight
* **threads**: ``{key: int}``
The ID of the thread on which the task ran
* **active_threads**: ``{int: key}``
The keys currently running on active threads
* **waiting_for_data_count**: ``int``
A count of how many tasks are currently waiting for data
Parameters
----------
scheduler_ip: str
scheduler_port: int
ip: str, optional
data: MutableMapping, type, None
The object to use for storage, builds a disk-backed LRU dict by default
nthreads: int, optional
loop: tornado.ioloop.IOLoop
local_directory: str, optional
Directory where we place local resources
name: str, optional
memory_limit: int, float, string
Number of bytes of memory that this worker should use.
Set to zero for no limit. Set to 'auto' to calculate
as system.MEMORY_LIMIT * min(1, nthreads / total_cores)
Use strings or numbers like 5GB or 5e9
memory_target_fraction: float
Fraction of memory to try to stay beneath
memory_spill_fraction: float
Fraction of memory at which we start spilling to disk
memory_pause_fraction: float
Fraction of memory at which we stop running new tasks
executor: concurrent.futures.Executor, dict[str, concurrent.futures.Executor], str
The executor(s) to use. Depending on the type, it has the following meanings:
- Executor instance: The default executor.
- Dict[str, Executor]: mapping names to Executor instances. If the
"default" key isn't in the dict, a "default" executor will be created
using ``ThreadPoolExecutor(nthreads)``.
            - Str: The string "offload", which refers to the same thread pool used for
offloading communications. This results in the same thread being used
for deserialization and computation.
resources: dict
Resources that this worker has like ``{'GPU': 2}``
nanny: str
Address on which to contact nanny, if it exists
lifetime: str
Amount of time like "1 hour" after which we gracefully shut down the worker.
This defaults to None, meaning no explicit shutdown time.
lifetime_stagger: str
Amount of time like "5 minutes" to stagger the lifetime value
The actual lifetime will be selected uniformly at random between
lifetime +/- lifetime_stagger
lifetime_restart: bool
Whether or not to restart a worker after it has reached its lifetime
Default False
Examples
--------
Use the command line to start a worker::
$ dask-scheduler
Start scheduler at 127.0.0.1:8786
$ dask-worker 127.0.0.1:8786
Start worker at: 127.0.0.1:1234
Registered with scheduler at: 127.0.0.1:8786
See Also
--------
distributed.scheduler.Scheduler
distributed.nanny.Nanny
"""
_instances: ClassVar[weakref.WeakSet[Worker]] = weakref.WeakSet()
_initialized_clients: ClassVar[weakref.WeakSet[Client]] = weakref.WeakSet()
def __init__(
self,
scheduler_ip=None,
scheduler_port=None,
scheduler_file=None,
ncores=None,
nthreads=None,
loop=None,
local_dir=None,
local_directory=None,
services=None,
service_ports=None,
service_kwargs=None,
name=None,
reconnect=True,
memory_limit="auto",
executor=None,
resources=None,
silence_logs=None,
death_timeout=None,
preload=None,
preload_argv=None,
security=None,
contact_address=None,
memory_monitor_interval="200ms",
extensions=None,
metrics=DEFAULT_METRICS,
startup_information=DEFAULT_STARTUP_INFORMATION,
data=None,
interface=None,
host=None,
port=None,
protocol=None,
dashboard_address=None,
dashboard=False,
http_prefix="/",
nanny=None,
plugins=(),
low_level_profiler=dask.config.get("distributed.worker.profile.low-level"),
validate=None,
profile_cycle_interval=None,
lifetime=None,
lifetime_stagger=None,
lifetime_restart=None,
**kwargs,
):
self.tasks = {}
self.waiting_for_data_count = 0
self.has_what = defaultdict(set)
self.pending_data_per_worker = defaultdict(deque)
self.nanny = nanny
self._lock = threading.Lock()
self.data_needed = []
self.in_flight_workers = {}
self.total_out_connections = dask.config.get(
"distributed.worker.connections.outgoing"
)
self.total_in_connections = dask.config.get(
"distributed.worker.connections.incoming"
)
self.comm_threshold_bytes = 10e6
self.comm_nbytes = 0
self._missing_dep_flight = set()
self.threads = {}
self.active_threads_lock = threading.Lock()
self.active_threads = {}
self.active_keys = set()
self.profile_keys = defaultdict(profile.create)
self.profile_keys_history = deque(maxlen=3600)
self.profile_recent = profile.create()
self.profile_history = deque(maxlen=3600)
self.generation = 0
self.ready = []
self.constrained = deque()
self._executing = set()
self._in_flight_tasks = set()
self.executed_count = 0
self.long_running = set()
self.recent_messages_log = deque(
maxlen=dask.config.get("distributed.comm.recent-messages-log-length")
)
self.target_message_size = 50e6 # 50 MB
self.log = deque(maxlen=100000)
if validate is None:
validate = dask.config.get("distributed.scheduler.validate")
self.validate = validate
self._transitions_table = {
("cancelled", "resumed"): self.transition_cancelled_resumed,
("cancelled", "fetch"): self.transition_cancelled_fetch,
("cancelled", "released"): self.transition_cancelled_released,
("cancelled", "waiting"): self.transition_cancelled_waiting,
("cancelled", "forgotten"): self.transition_cancelled_forgotten,
("cancelled", "memory"): self.transition_cancelled_memory,
("cancelled", "error"): self.transition_generic_error,
("resumed", "memory"): self.transition_generic_memory,
("resumed", "error"): self.transition_generic_error,
("resumed", "released"): self.transition_generic_released,
("resumed", "waiting"): self.transition_rescheduled_next,
("resumed", "fetch"): self.transition_rescheduled_next,
("constrained", "executing"): self.transition_constrained_executing,
("constrained", "released"): self.transition_constrained_released,
("error", "released"): self.transition_generic_released,
("executing", "error"): self.transition_executing_error,
("executing", "long-running"): self.transition_executing_long_running,
("executing", "memory"): self.transition_executing_memory,
("executing", "released"): self.transition_executing_released,
("executing", "rescheduled"): self.transition_executing_rescheduled,
("fetch", "flight"): self.transition_fetch_flight,
("fetch", "missing"): self.transition_fetch_missing,
("fetch", "released"): self.transition_generic_released,
("flight", "error"): self.transition_flight_error,
("flight", "fetch"): self.transition_flight_fetch,
("flight", "memory"): self.transition_flight_memory,
("flight", "released"): self.transition_flight_released,
("long-running", "error"): self.transition_generic_error,
("long-running", "memory"): self.transition_long_running_memory,
("long-running", "rescheduled"): self.transition_executing_rescheduled,
("long-running", "released"): self.transition_executing_released,
("memory", "released"): self.transition_memory_released,
("missing", "fetch"): self.transition_missing_fetch,
("missing", "released"): self.transition_missing_released,
("missing", "error"): self.transition_generic_error,
("ready", "error"): self.transition_generic_error,
("ready", "executing"): self.transition_ready_executing,
("ready", "released"): self.transition_generic_released,
("released", "error"): self.transition_generic_error,
("released", "fetch"): self.transition_released_fetch,
("released", "forgotten"): self.transition_released_forgotten,
("released", "memory"): self.transition_released_memory,
("released", "waiting"): self.transition_released_waiting,
("waiting", "constrained"): self.transition_waiting_constrained,
("waiting", "ready"): self.transition_waiting_ready,
("waiting", "released"): self.transition_generic_released,
}
self._transition_counter = 0
self.incoming_transfer_log = deque(maxlen=100000)
self.incoming_count = 0
self.outgoing_transfer_log = deque(maxlen=100000)
self.outgoing_count = 0
self.outgoing_current_count = 0
self.repetitively_busy = 0
self.bandwidth = parse_bytes(dask.config.get("distributed.scheduler.bandwidth"))
self.bandwidth_workers = defaultdict(
lambda: (0, 0)
) # bw/count recent transfers
self.bandwidth_types = defaultdict(lambda: (0, 0)) # bw/count recent transfers
self.latency = 0.001
self._client = None
if profile_cycle_interval is None:
profile_cycle_interval = dask.config.get("distributed.worker.profile.cycle")
profile_cycle_interval = parse_timedelta(profile_cycle_interval, default="ms")
self._setup_logging(logger)
if local_dir is not None:
warnings.warn("The local_dir keyword has moved to local_directory")
local_directory = local_dir
if not local_directory:
local_directory = dask.config.get("temporary-directory") or os.getcwd()
os.makedirs(local_directory, exist_ok=True)
local_directory = os.path.join(local_directory, "dask-worker-space")
with warn_on_duration(
"1s",
"Creating scratch directories is taking a surprisingly long time. "
"This is often due to running workers on a network file system. "
"Consider specifying a local-directory to point workers to write "
"scratch data to a local disk.",
):
self._workspace = WorkSpace(os.path.abspath(local_directory))
self._workdir = self._workspace.new_work_dir(prefix="worker-")
self.local_directory = self._workdir.dir_path
if not preload:
preload = dask.config.get("distributed.worker.preload")
if not preload_argv:
preload_argv = dask.config.get("distributed.worker.preload-argv")
self.preloads = preloading.process_preloads(
self, preload, preload_argv, file_dir=self.local_directory
)
if scheduler_file:
cfg = json_load_robust(scheduler_file)
scheduler_addr = cfg["address"]
elif scheduler_ip is None and dask.config.get("scheduler-address", None):
scheduler_addr = dask.config.get("scheduler-address")
elif scheduler_port is None:
scheduler_addr = coerce_to_address(scheduler_ip)
else:
scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
self.contact_address = contact_address
if protocol is None:
protocol_address = scheduler_addr.split("://")
if len(protocol_address) == 2:
protocol = protocol_address[0]
self._start_port = port
self._start_host = host
if host:
# Helpful error message if IPv6 specified incorrectly
_, host_address = parse_address(host)
if host_address.count(":") > 1 and not host_address.startswith("["):
raise ValueError(
"Host address with IPv6 must be bracketed like '[::1]'; "
f"got {host_address}"
)
self._interface = interface
self._protocol = protocol
if ncores is not None:
warnings.warn("the ncores= parameter has moved to nthreads=")
nthreads = ncores
self.nthreads = nthreads or CPU_COUNT
if resources is None:
resources = dask.config.get("distributed.worker.resources", None)
self.total_resources = resources or {}
self.available_resources = (resources or {}).copy()
self.death_timeout = parse_timedelta(death_timeout)
self.extensions = {}
if silence_logs:
silence_logging(level=silence_logs)
if isinstance(security, dict):
security = Security(**security)
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("worker")
self.memory_limit = parse_memory_limit(memory_limit, self.nthreads)
if "memory_target_fraction" in kwargs:
self.memory_target_fraction = kwargs.pop("memory_target_fraction")
else:
self.memory_target_fraction = dask.config.get(
"distributed.worker.memory.target"
)
if "memory_spill_fraction" in kwargs:
self.memory_spill_fraction = kwargs.pop("memory_spill_fraction")
else:
self.memory_spill_fraction = dask.config.get(
"distributed.worker.memory.spill"
)
if "memory_pause_fraction" in kwargs:
self.memory_pause_fraction = kwargs.pop("memory_pause_fraction")
else:
self.memory_pause_fraction = dask.config.get(
"distributed.worker.memory.pause"
)
if isinstance(data, MutableMapping):
self.data = data
elif callable(data):
self.data = data()
elif isinstance(data, tuple):
self.data = data[0](**data[1])
elif self.memory_limit and (
self.memory_target_fraction or self.memory_spill_fraction
):
from .spill import SpillBuffer
self.data = SpillBuffer(
os.path.join(self.local_directory, "storage"),
target=int(
self.memory_limit
* (self.memory_target_fraction or self.memory_spill_fraction)
)
or sys.maxsize,
)
else:
self.data = {}
self.actors = {}
self.loop = loop or IOLoop.current()
self.reconnect = reconnect
# Common executors always available
self.executors: dict[str, concurrent.futures.Executor] = {
"offload": utils._offload_executor,
"actor": ThreadPoolExecutor(1, thread_name_prefix="Dask-Actor-Threads"),
}
if nvml.device_get_count() > 0:
self.executors["gpu"] = ThreadPoolExecutor(
1, thread_name_prefix="Dask-GPU-Threads"
)
# Find the default executor
if executor == "offload":
self.executors["default"] = self.executors["offload"]
elif isinstance(executor, dict):
self.executors.update(executor)
elif executor is not None:
self.executors["default"] = executor
if "default" not in self.executors:
self.executors["default"] = ThreadPoolExecutor(
self.nthreads, thread_name_prefix="Dask-Default-Threads"
)
self.batched_stream = BatchedSend(interval="2ms", loop=self.loop)
self.name = name
self.scheduler_delay = 0
self.stream_comms = {}
self.heartbeat_active = False
self._ipython_kernel = None
if self.local_directory not in sys.path:
sys.path.insert(0, self.local_directory)
self.services = {}
self.service_specs = services or {}
self._dashboard_address = dashboard_address
self._dashboard = dashboard
self._http_prefix = http_prefix
self.metrics = dict(metrics) if metrics else {}
self.startup_information = (
dict(startup_information) if startup_information else {}
)
self.low_level_profiler = low_level_profiler
handlers = {
"gather": self.gather,
"run": self.run,
"run_coroutine": self.run_coroutine,
"get_data": self.get_data,
"update_data": self.update_data,
"free_keys": self.handle_free_keys,
"terminate": self.close,
"ping": pingpong,
"upload_file": self.upload_file,
"start_ipython": self.start_ipython,
"call_stack": self.get_call_stack,
"profile": self.get_profile,
"profile_metadata": self.get_profile_metadata,
"get_logs": self.get_logs,
"keys": self.keys,
"versions": self.versions,
"actor_execute": self.actor_execute,
"actor_attribute": self.actor_attribute,
"plugin-add": self.plugin_add,
"plugin-remove": self.plugin_remove,
"get_monitor_info": self.get_monitor_info,
}
stream_handlers = {
"close": self.close,
"cancel-compute": self.handle_cancel_compute,
"acquire-replicas": self.handle_acquire_replicas,
"compute-task": self.handle_compute_task,
"free-keys": self.handle_free_keys,
"remove-replicas": self.handle_remove_replicas,
"steal-request": self.handle_steal_request,
}
super().__init__(
handlers=handlers,
stream_handlers=stream_handlers,
io_loop=self.loop,
connection_args=self.connection_args,
**kwargs,
)
self.scheduler = self.rpc(scheduler_addr)
self.execution_state = {
"scheduler": self.scheduler.address,
"ioloop": self.loop,
"worker": self,
}
pc = PeriodicCallback(self.heartbeat, 1000)
self.periodic_callbacks["heartbeat"] = pc
pc = PeriodicCallback(
lambda: self.batched_stream.send({"op": "keep-alive"}), 60000
)
self.periodic_callbacks["keep-alive"] = pc
pc = PeriodicCallback(self.find_missing, 1000)
self.periodic_callbacks["find-missing"] = pc
self._suspicious_count_limit = 10
self._address = contact_address
self.memory_monitor_interval = parse_timedelta(
memory_monitor_interval, default="ms"
)
if self.memory_limit:
self._memory_monitoring = False
pc = PeriodicCallback(
self.memory_monitor, self.memory_monitor_interval * 1000
)
self.periodic_callbacks["memory"] = pc
if extensions is None:
extensions = DEFAULT_EXTENSIONS
for ext in extensions:
ext(self)
self._throttled_gc = ThrottledGC(logger=logger)
setproctitle("dask-worker [not started]")
profile_trigger_interval = parse_timedelta(
dask.config.get("distributed.worker.profile.interval"), default="ms"
)
pc = PeriodicCallback(self.trigger_profile, profile_trigger_interval * 1000)
self.periodic_callbacks["profile"] = pc
pc = PeriodicCallback(self.cycle_profile, profile_cycle_interval * 1000)
self.periodic_callbacks["profile-cycle"] = pc
self.plugins = {}
self._pending_plugins = plugins
self.lifetime = lifetime or dask.config.get(
"distributed.worker.lifetime.duration"
)
lifetime_stagger = lifetime_stagger or dask.config.get(
"distributed.worker.lifetime.stagger"
)
self.lifetime_restart = lifetime_restart or dask.config.get(
"distributed.worker.lifetime.restart"
)
if isinstance(self.lifetime, str):
self.lifetime = parse_timedelta(self.lifetime)
if isinstance(lifetime_stagger, str):
lifetime_stagger = parse_timedelta(lifetime_stagger)
if self.lifetime:
self.lifetime += (random.random() * 2 - 1) * lifetime_stagger
self.io_loop.call_later(self.lifetime, self.close_gracefully)
Worker._instances.add(self)
##################
# Administrative #
##################
def __repr__(self):
return "<%s: %r, %s, %s, stored: %d, running: %d/%d, ready: %d, comm: %d, waiting: %d>" % (
self.__class__.__name__,
self.address,
self.name,
self.status,
len(self.data),
self.executing_count,
self.nthreads,
len(self.ready),
self.in_flight_tasks,
self.waiting_for_data_count,
)
@property
def logs(self):
return self._deque_handler.deque
def log_event(self, topic, msg):
self.batched_stream.send(
{
"op": "log-event",
"topic": topic,
"msg": msg,
}
)
@property
def executing_count(self) -> int:
return len(self._executing)
@property
def in_flight_tasks(self) -> int:
return len(self._in_flight_tasks)
@property
def worker_address(self):
"""For API compatibility with Nanny"""
return self.address
@property
def local_dir(self):
"""For API compatibility with Nanny"""
warnings.warn(
"The local_dir attribute has moved to local_directory", stacklevel=2
)
return self.local_directory
@property
def executor(self):
return self.executors["default"]
@ServerNode.status.setter # type: ignore
def status(self, value):
"""Override Server.status to notify the Scheduler of status changes"""
ServerNode.status.__set__(self, value)
if (
self.batched_stream
and self.batched_stream.comm
and not self.batched_stream.comm.closed()
):
self.batched_stream.send(
{"op": "worker-status-change", "status": self._status.name}
)
async def get_metrics(self):
out = dict(
executing=self.executing_count,
in_memory=len(self.data),
ready=len(self.ready),
in_flight=self.in_flight_tasks,
bandwidth={
"total": self.bandwidth,
"workers": dict(self.bandwidth_workers),
"types": keymap(typename, self.bandwidth_types),
},
spilled_nbytes=getattr(self.data, "spilled_total", 0),
)
out.update(self.monitor.recent())
for k, metric in self.metrics.items():
try:
result = metric(self)
if isawaitable(result):
result = await result
# In case of collision, prefer core metrics
out.setdefault(k, result)
except Exception: # TODO: log error once
pass
return out
async def get_startup_information(self):
result = {}
for k, f in self.startup_information.items():
try:
v = f(self)
if isawaitable(v):
v = await v
result[k] = v
except Exception: # TODO: log error once
pass
return result
def identity(self, comm=None):
return {
"type": type(self).__name__,
"id": self.id,
"scheduler": self.scheduler.address,
"nthreads": self.nthreads,
"ncores": self.nthreads, # backwards compatibility
"memory_limit": self.memory_limit,
}
#####################
# External Services #
#####################
async def _register_with_scheduler(self):
self.periodic_callbacks["keep-alive"].stop()
self.periodic_callbacks["heartbeat"].stop()
start = time()
if self.contact_address is None:
self.contact_address = self.address
logger.info("-" * 49)
while True:
try:
_start = time()
comm = await connect(self.scheduler.address, **self.connection_args)
comm.name = "Worker->Scheduler"
comm._server = weakref.ref(self)
await comm.write(
dict(
op="register-worker",
reply=False,
address=self.contact_address,
keys=list(self.data),
nthreads=self.nthreads,
name=self.name,
nbytes={
ts.key: ts.get_nbytes()
for ts in self.tasks.values()
                            # Report nbytes only for tasks already in memory;
                            # otherwise this would just submit the default
                            # value
if ts.state == "memory"
},
types={k: typename(v) for k, v in self.data.items()},
now=time(),
resources=self.total_resources,
memory_limit=self.memory_limit,
local_directory=self.local_directory,
services=self.service_ports,
nanny=self.nanny,
pid=os.getpid(),
versions=get_versions(),
metrics=await self.get_metrics(),
extra=await self.get_startup_information(),
),
serializers=["msgpack"],
)
future = comm.read(deserializers=["msgpack"])
response = await future
if response.get("warning"):
logger.warning(response["warning"])
_end = time()
middle = (_start + _end) / 2
self._update_latency(_end - start)
self.scheduler_delay = response["time"] - middle
self.status = Status.running
break
except OSError:
logger.info("Waiting to connect to: %26s", self.scheduler.address)
await asyncio.sleep(0.1)
except TimeoutError:
logger.info("Timed out when connecting to scheduler")
if response["status"] != "OK":
raise ValueError(f"Unexpected response from register: {response!r}")
else:
await asyncio.gather(
*(
self.plugin_add(name=name, plugin=plugin)
for name, plugin in response["worker-plugins"].items()
)
)
logger.info(" Registered to: %26s", self.scheduler.address)
logger.info("-" * 49)
self.batched_stream.start(comm)
self.periodic_callbacks["keep-alive"].start()
self.periodic_callbacks["heartbeat"].start()
self.loop.add_callback(self.handle_scheduler, comm)
def _update_latency(self, latency):
self.latency = latency * 0.05 + self.latency * 0.95
if self.digests is not None:
self.digests["latency"].add(latency)
async def heartbeat(self):
if self.heartbeat_active:
logger.debug("Heartbeat skipped: channel busy")
return
self.heartbeat_active = True
logger.debug("Heartbeat: %s", self.address)
try:
start = time()
response = await retry_operation(
self.scheduler.heartbeat_worker,
address=self.contact_address,
now=start,
metrics=await self.get_metrics(),
executing={
key: start - self.tasks[key].start_time
for key in self.active_keys
if key in self.tasks
},
)
end = time()
middle = (start + end) / 2
self._update_latency(end - start)
if response["status"] == "missing":
for i in range(10):
if self.status not in (Status.running, Status.paused):
break
else:
await asyncio.sleep(0.05)
else:
await self._register_with_scheduler()
return
self.scheduler_delay = response["time"] - middle
self.periodic_callbacks["heartbeat"].callback_time = (
response["heartbeat-interval"] * 1000
)
self.bandwidth_workers.clear()
self.bandwidth_types.clear()
except CommClosedError:
logger.warning("Heartbeat to scheduler failed", exc_info=True)
if not self.reconnect:
await self.close(report=False)
except OSError as e:
# Scheduler is gone. Respect distributed.comm.timeouts.connect
if "Timed out trying to connect" in str(e):
await self.close(report=False)
else:
raise e
finally:
self.heartbeat_active = False
async def handle_scheduler(self, comm):
try:
await self.handle_stream(
comm, every_cycle=[self.ensure_communicating, self.ensure_computing]
)
except Exception as e:
logger.exception(e)
raise
finally:
if self.reconnect and self.status in (Status.running, Status.paused):
logger.info("Connection to scheduler broken. Reconnecting...")
self.loop.add_callback(self.heartbeat)
else:
await self.close(report=False)
def start_ipython(self, comm):
"""Start an IPython kernel
Returns Jupyter connection info dictionary.
"""
from ._ipython_utils import start_ipython
if self._ipython_kernel is None:
self._ipython_kernel = start_ipython(
ip=self.ip, ns={"worker": self}, log=logger
)
return self._ipython_kernel.get_connection_info()
async def upload_file(self, comm, filename=None, data=None, load=True):
out_filename = os.path.join(self.local_directory, filename)
def func(data):
if isinstance(data, str):
data = data.encode()
with open(out_filename, "wb") as f:
f.write(data)
f.flush()
return data
if len(data) < 10000:
data = func(data)
else:
data = await offload(func, data)
if load:
try:
import_file(out_filename)
cache_loads.data.clear()
except Exception as e:
logger.exception(e)
raise e
return {"status": "OK", "nbytes": len(data)}
def keys(self, comm=None):
return list(self.data)
async def gather(self, comm=None, who_has=None):
who_has = {
k: [coerce_to_address(addr) for addr in v]
for k, v in who_has.items()
if k not in self.data
}
result, missing_keys, missing_workers = await gather_from_workers(
who_has, rpc=self.rpc, who=self.address
)
self.update_data(data=result, report=False)
if missing_keys:
logger.warning(
"Could not find data: %s on workers: %s (who_has: %s)",
missing_keys,
missing_workers,
who_has,
)
return {"status": "partial-fail", "keys": missing_keys}
else:
return {"status": "OK"}
def get_monitor_info(self, comm=None, recent=False, start=0):
result = dict(
range_query=(
self.monitor.recent()
if recent
else self.monitor.range_query(start=start)
),
count=self.monitor.count,
last_time=self.monitor.last_time,
)
if nvml.device_get_count() > 0:
result["gpu_name"] = self.monitor.gpu_name
result["gpu_memory_total"] = self.monitor.gpu_memory_total
return result
#############
# Lifecycle #
#############
async def start(self):
if self.status and self.status in (
Status.closed,
Status.closing,
Status.closing_gracefully,
):
return
assert self.status is Status.undefined, self.status
await super().start()
enable_gc_diagnosis()
thread_state.on_event_loop_thread = True
ports = parse_ports(self._start_port)
for port in ports:
start_address = address_from_user_args(
host=self._start_host,
port=port,
interface=self._interface,
protocol=self._protocol,
security=self.security,
)
kwargs = self.security.get_listen_args("worker")
if self._protocol in ("tcp", "tls"):
kwargs = kwargs.copy()
kwargs["default_host"] = get_ip(
get_address_host(self.scheduler.address)
)
try:
await self.listen(start_address, **kwargs)
except OSError as e:
if len(ports) > 1 and e.errno == errno.EADDRINUSE:
continue
else:
raise
else:
self._start_address = start_address
break
else:
            raise ValueError(
                f"Could not start Worker on host {self._start_host} "
                f"with port {self._start_port}"
)
# Start HTTP server associated with this Worker node
routes = get_handlers(
server=self,
modules=dask.config.get("distributed.worker.http.routes"),
prefix=self._http_prefix,
)
self.start_http_server(routes, self._dashboard_address)
if self._dashboard:
try:
import distributed.dashboard.worker
except ImportError:
logger.debug("To start diagnostics web server please install Bokeh")
else:
distributed.dashboard.worker.connect(
self.http_application,
self.http_server,
self,
prefix=self._http_prefix,
)
self.ip = get_address_host(self.address)
if self.name is None:
self.name = self.address
for preload in self.preloads:
await preload.start()
# Services listen on all addresses
# Note Nanny is not a "real" service, just some metadata
# passed in service_ports...
self.start_services(self.ip)
try:
listening_address = "%s%s:%d" % (self.listener.prefix, self.ip, self.port)
except Exception:
listening_address = f"{self.listener.prefix}{self.ip}"
logger.info(" Start worker at: %26s", self.address)
logger.info(" Listening to: %26s", listening_address)
for k, v in self.service_ports.items():
logger.info(" {:>16} at: {:>26}".format(k, self.ip + ":" + str(v)))
logger.info("Waiting to connect to: %26s", self.scheduler.address)
logger.info("-" * 49)
logger.info(" Threads: %26d", self.nthreads)
if self.memory_limit:
logger.info(" Memory: %26s", format_bytes(self.memory_limit))
logger.info(" Local Directory: %26s", self.local_directory)
setproctitle("dask-worker [%s]" % self.address)
await asyncio.gather(
*(self.plugin_add(plugin=plugin) for plugin in self._pending_plugins)
)
self._pending_plugins = ()
await self._register_with_scheduler()
self.start_periodic_callbacks()
return self
def _close(self, *args, **kwargs):
warnings.warn("Worker._close has moved to Worker.close", stacklevel=2)
return self.close(*args, **kwargs)
async def close(
self, report=True, timeout=30, nanny=True, executor_wait=True, safe=False
):
with log_errors():
if self.status in (Status.closed, Status.closing):
await self.finished()
return
self.reconnect = False
disable_gc_diagnosis()
try:
logger.info("Stopping worker at %s", self.address)
except ValueError: # address not available if already closed
logger.info("Stopping worker")
if self.status not in (
Status.running,
Status.paused,
Status.closing_gracefully,
):
logger.info("Closed worker has not yet started: %s", self.status)
self.status = Status.closing
for preload in self.preloads:
await preload.teardown()
if nanny and self.nanny:
with self.rpc(self.nanny) as r:
await r.close_gracefully()
setproctitle("dask-worker [closing]")
teardowns = [
plugin.teardown(self)
for plugin in self.plugins.values()
if hasattr(plugin, "teardown")
]
await asyncio.gather(*(td for td in teardowns if isawaitable(td)))
for pc in self.periodic_callbacks.values():
pc.stop()
if self._client:
# If this worker is the last one alive, clean up the worker
# initialized clients
if not any(
w
for w in Worker._instances
if w != self and w.status in (Status.running, Status.paused)
):
for c in Worker._initialized_clients:
                        # Regardless of what the client was initialized with,
                        # we'll require the result as a future. This is
                        # necessary since the heuristics of asynchronous are
                        # not reliable and we might deadlock here
c._asynchronous = True
if c.asynchronous:
await c.close()
else:
# There is still the chance that even with us
# telling the client to be async, itself will decide
# otherwise
c.close()
with suppress(EnvironmentError, TimeoutError):
if report and self.contact_address is not None:
await asyncio.wait_for(
self.scheduler.unregister(
address=self.contact_address, safe=safe
),
timeout,
)
await self.scheduler.close_rpc()
self._workdir.release()
self.stop_services()
if (
self.batched_stream
and self.batched_stream.comm
and not self.batched_stream.comm.closed()
):
self.batched_stream.send({"op": "close-stream"})
if self.batched_stream:
with suppress(TimeoutError):
await self.batched_stream.close(timedelta(seconds=timeout))
for executor in self.executors.values():
if executor is utils._offload_executor:
continue # Never shutdown the offload executor
if isinstance(executor, ThreadPoolExecutor):
executor._work_queue.queue.clear()
executor.shutdown(wait=executor_wait, timeout=timeout)
else:
executor.shutdown(wait=executor_wait)
self.stop()
await self.rpc.close()
self.status = Status.closed
await super().close()
setproctitle("dask-worker [closed]")
return "OK"
async def close_gracefully(self, restart=None):
"""Gracefully shut down a worker
This first informs the scheduler that we're shutting down, and asks it
to move our data elsewhere. Afterwards, we close as normal
"""
if self.status in (Status.closing, Status.closing_gracefully):
await self.finished()
if self.status == Status.closed:
return
if restart is None:
restart = self.lifetime_restart
logger.info("Closing worker gracefully: %s", self.address)
self.status = Status.closing_gracefully
await self.scheduler.retire_workers(workers=[self.address], remove=False)
await self.close(safe=True, nanny=not restart)
async def terminate(self, comm=None, report=True, **kwargs):
await self.close(report=report, **kwargs)
return "OK"
async def wait_until_closed(self):
warnings.warn("wait_until_closed has moved to finished()")
await self.finished()
assert self.status == Status.closed
################
# Worker Peers #
################
def send_to_worker(self, address, msg):
if address not in self.stream_comms:
bcomm = BatchedSend(interval="1ms", loop=self.loop)
self.stream_comms[address] = bcomm
async def batched_send_connect():
comm = await connect(
address, **self.connection_args # TODO, serialization
)
comm.name = "Worker->Worker"
await comm.write({"op": "connection_stream"})
bcomm.start(comm)
self.loop.add_callback(batched_send_connect)
self.stream_comms[address].send(msg)
async def get_data(
self, comm, keys=None, who=None, serializers=None, max_connections=None
):
start = time()
if max_connections is None:
max_connections = self.total_in_connections
# Allow same-host connections more liberally
if (
max_connections
and comm
and get_address_host(comm.peer_address) == get_address_host(self.address)
):
max_connections = max_connections * 2
if self.status == Status.paused:
max_connections = 1
throttle_msg = " Throttling outgoing connections because worker is paused."
else:
throttle_msg = ""
if (
max_connections is not False
and self.outgoing_current_count >= max_connections
):
logger.debug(
"Worker %s has too many open connections to respond to data request "
"from %s (%d/%d).%s",
self.address,
who,
self.outgoing_current_count,
max_connections,
throttle_msg,
)
return {"status": "busy"}
self.outgoing_current_count += 1
data = {k: self.data[k] for k in keys if k in self.data}
if len(data) < len(keys):
for k in set(keys) - set(data):
if k in self.actors:
from .actor import Actor
data[k] = Actor(type(self.actors[k]), self.address, k, worker=self)
msg = {"status": "OK", "data": {k: to_serialize(v) for k, v in data.items()}}
nbytes = {k: self.tasks[k].nbytes for k in data if k in self.tasks}
stop = time()
if self.digests is not None:
self.digests["get-data-load-duration"].add(stop - start)
start = time()
try:
compressed = await comm.write(msg, serializers=serializers)
response = await comm.read(deserializers=serializers)
assert response == "OK", response
except OSError:
logger.exception(
"failed during get data with %s -> %s", self.address, who, exc_info=True
)
comm.abort()
raise
finally:
self.outgoing_current_count -= 1
stop = time()
if self.digests is not None:
self.digests["get-data-send-duration"].add(stop - start)
total_bytes = sum(filter(None, nbytes.values()))
self.outgoing_count += 1
duration = (stop - start) or 0.5 # windows
self.outgoing_transfer_log.append(
{
"start": start + self.scheduler_delay,
"stop": stop + self.scheduler_delay,
"middle": (start + stop) / 2,
"duration": duration,
"who": who,
"keys": nbytes,
"total": total_bytes,
"compressed": compressed,
"bandwidth": total_bytes / duration,
}
)
return Status.dont_reply
###################
# Local Execution #
###################
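    # Handler used when data is scattered to this worker, e.g. (illustrative):
    #   worker.update_data(data={"x": 123}, report=True)
    # Values are put straight into memory and, if report=True, the scheduler is
    # informed via an "add-keys" message.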
def update_data(
self, comm=None, data=None, report=True, serializers=None, stimulus_id=None
):
if stimulus_id is None:
stimulus_id = f"update-data-{time()}"
recommendations = {}
scheduler_messages = []
for key, value in data.items():
try:
ts = self.tasks[key]
recommendations[ts] = ("memory", value)
except KeyError:
self.tasks[key] = ts = TaskState(key)
recs, smsgs = self._put_key_in_memory(
ts, value, stimulus_id=stimulus_id
)
recommendations.update(recs)
scheduler_messages += smsgs
ts.priority = None
ts.duration = None
self.log.append((key, "receive-from-scatter"))
if report:
scheduler_messages.append(
{"op": "add-keys", "keys": list(data), "stimulus_id": stimulus_id}
)
self.transitions(recommendations, stimulus_id=stimulus_id)
for msg in scheduler_messages:
self.batched_stream.send(msg)
return {"nbytes": {k: sizeof(v) for k, v in data.items()}, "status": "OK"}
def handle_free_keys(self, comm=None, keys=None, reason=None):
"""
Handler to be called by the scheduler.
The given keys are no longer referred to and required by the scheduler.
The worker is now allowed to release the key, if applicable.
        This does not guarantee that the memory is actually released, since the
        worker may still decide to hold on to the data and the task if they are
        required by an upstream dependency.
"""
self.log.append(("free-keys", keys, reason))
recommendations = {}
for key in keys:
ts = self.tasks.get(key)
if ts:
recommendations[ts] = "released" if ts.dependents else "forgotten"
self.transitions(recommendations, stimulus_id=reason)
def handle_remove_replicas(self, keys, stimulus_id):
"""Stream handler notifying the worker that it might be holding unreferenced,
superfluous data.
This should not actually happen during ordinary operations and is only intended
to correct any erroneous state. An example where this is necessary is if a
worker fetches data for a downstream task but that task is released before the
data arrives. In this case, the scheduler will notify the worker that it may be
        holding this unnecessary data, if the worker hasn't already released the
        data itself.
        This handler does not guarantee that the task or the data will actually
        be released; it only asks the worker to release the data on a
        best-effort basis. This protects against race conditions where the given
        keys may already have been rescheduled for compute, in which case the
        compute wins and this handler is ignored.
        For stronger guarantees, see the free_keys handler.
"""
self.log.append(("remove-replicas", keys, stimulus_id))
recommendations = {}
rejected = []
for key in keys:
ts = self.tasks.get(key)
if ts is None or ts.state != "memory":
continue
if not ts.is_protected():
self.log.append(("remove-replica-confirmed", ts.key, stimulus_id))
recommendations[ts] = "released" if ts.dependents else "forgotten"
else:
rejected.append(key)
if rejected:
self.log.append(("remove-replica-rejected", rejected, stimulus_id))
self.batched_stream.send(
{"op": "add-keys", "keys": rejected, "stimulus_id": stimulus_id}
)
self.transitions(recommendations=recommendations, stimulus_id=stimulus_id)
return "OK"
async def set_resources(self, **resources):
for r, quantity in resources.items():
if r in self.total_resources:
self.available_resources[r] += quantity - self.total_resources[r]
else:
self.available_resources[r] = quantity
self.total_resources[r] = quantity
await retry_operation(
self.scheduler.set_resources,
resources=self.total_resources,
worker=self.contact_address,
)
###################
# Task Management #
###################
def handle_cancel_compute(self, key, reason):
"""
Cancel a task on a best effort basis. This is only possible while a task
is in state `waiting` or `ready`.
Nothing will happen otherwise.
"""
ts = self.tasks.get(key)
if ts and ts.state in ("waiting", "ready"):
self.log.append((key, "cancel-compute", reason))
ts.scheduler_holds_ref = False
# All possible dependents of TS should not be in state Processing on
# scheduler side and therefore should not be assigned to a worker,
# yet.
assert not ts.dependents
self.transition(ts, "released", stimulus_id=reason)
def handle_acquire_replicas(
self, comm=None, keys=None, priorities=None, who_has=None, stimulus_id=None
):
recommendations = {}
scheduler_msgs = []
for k in keys:
recs, smsgs = self.register_acquire_internal(
k,
stimulus_id=stimulus_id,
priority=priorities[k],
)
recommendations.update(recs)
scheduler_msgs += smsgs
self.update_who_has(who_has, stimulus_id=stimulus_id)
for msg in scheduler_msgs:
self.batched_stream.send(msg)
self.transitions(recommendations, stimulus_id=stimulus_id)
def register_acquire_internal(self, key, priority, stimulus_id):
try:
ts = self.tasks[key]
logger.debug(
"Data task already known %s", {"task": ts, "stimulus_id": stimulus_id}
)
except KeyError:
self.tasks[key] = ts = TaskState(key)
self.log.append((key, "register-replica", ts.state, stimulus_id, time()))
ts.priority = ts.priority or priority
recommendations = {}
scheduler_msgs = []
if ts.state in ("released", "cancelled", "error"):
recommendations[ts] = "fetch"
return recommendations, scheduler_msgs
def handle_compute_task(
self,
*,
key,
# FIXME: This will break protocol
function=None,
args=None,
kwargs=None,
task=no_value,
who_has=None,
nbytes=None,
priority=None,
duration=None,
resource_restrictions=None,
actor=False,
annotations=None,
stimulus_id=None,
):
self.log.append((key, "compute-task", stimulus_id, time()))
try:
ts = self.tasks[key]
logger.debug(
"Asked to compute an already known task %s",
{"task": ts, "stimulus_id": stimulus_id},
)
except KeyError:
self.tasks[key] = ts = TaskState(key)
ts.runspec = SerializedTask(function, args, kwargs, task)
if priority is not None:
priority = tuple(priority) + (self.generation,)
self.generation -= 1
if actor:
self.actors[ts.key] = None
ts.exception = None
ts.traceback = None
ts.exception_text = ""
ts.traceback_text = ""
ts.priority = priority
ts.duration = duration
if resource_restrictions:
ts.resource_restrictions = resource_restrictions
ts.annotations = annotations
recommendations = {}
scheduler_msgs = []
for dependency in who_has:
recs, smsgs = self.register_acquire_internal(
key=dependency,
stimulus_id=stimulus_id,
priority=priority,
)
recommendations.update(recs)
scheduler_msgs += smsgs
dep_ts = self.tasks[dependency]
# link up to child / parents
ts.dependencies.add(dep_ts)
dep_ts.dependents.add(ts)
if ts.state in {"ready", "executing", "waiting", "resumed"}:
pass
elif ts.state == "memory":
recommendations[ts] = "memory"
scheduler_msgs.append(self.get_task_state_for_scheduler(ts))
elif ts.state in {"released", "fetch", "flight", "missing", "cancelled"}:
recommendations[ts] = "waiting"
else:
raise RuntimeError(f"Unexpected task state encountered {ts} {stimulus_id}")
for msg in scheduler_msgs:
self.batched_stream.send(msg)
self.transitions(recommendations, stimulus_id=stimulus_id)
# We received new info, that's great but not related to the compute-task
# instruction
self.update_who_has(who_has, stimulus_id=stimulus_id)
if nbytes is not None:
for key, value in nbytes.items():
self.tasks[key].nbytes = value
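    # The transition_<start>_<finish> methods below implement the edges of the
    # worker state machine. Each one mutates the TaskState and returns a tuple
    # (recommendations, scheduler_messages): further transitions to apply and
    # messages to send over the batched stream.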
def transition_missing_fetch(self, ts, *, stimulus_id):
self._missing_dep_flight.discard(ts)
ts.state = "fetch"
heapq.heappush(self.data_needed, (ts.priority, ts.key))
return {}, []
def transition_missing_released(self, ts, *, stimulus_id):
self._missing_dep_flight.discard(ts)
recommendations = self.release_key(ts.key, reason="missing->released")
assert ts.key in self.tasks
return recommendations, []
def transition_fetch_missing(self, ts, *, stimulus_id):
# handle_missing will append to self.data_needed if new workers are found
ts.state = "missing"
self._missing_dep_flight.add(ts)
return {}, []
def transition_released_fetch(self, ts, *, stimulus_id):
for w in ts.who_has:
self.pending_data_per_worker[w].append(ts.key)
ts.state = "fetch"
heapq.heappush(self.data_needed, (ts.priority, ts.key))
return {}, []
def transition_generic_released(self, ts, *, stimulus_id):
recs = self.release_key(ts.key, reason=stimulus_id)
return recs, []
def transition_released_waiting(self, ts, *, stimulus_id):
if self.validate:
assert ts.state == "released"
assert all(d.key in self.tasks for d in ts.dependencies)
recommendations = {}
ts.waiting_for_data.clear()
for dep_ts in ts.dependencies:
if not dep_ts.state == "memory":
ts.waiting_for_data.add(dep_ts)
dep_ts.waiters.add(ts)
if ts.waiting_for_data:
self.waiting_for_data_count += 1
elif ts.resource_restrictions:
recommendations[ts] = "constrained"
else:
recommendations[ts] = "ready"
ts.state = "waiting"
return recommendations, []
def transition_fetch_flight(self, ts, worker, *, stimulus_id):
if self.validate:
assert ts.state == "fetch"
assert ts.who_has
assert ts.key not in self.data_needed
ts.state = "flight"
ts.coming_from = worker
self._in_flight_tasks.add(ts)
return {}, []
def transition_memory_released(self, ts, *, stimulus_id):
recs = self.release_key(ts.key, reason=stimulus_id)
smsgs = [{"op": "release-worker-data", "key": ts.key}]
return recs, smsgs
def transition_waiting_constrained(self, ts, *, stimulus_id):
if self.validate:
assert ts.state == "waiting"
assert not ts.waiting_for_data
assert all(
dep.key in self.data or dep.key in self.actors
for dep in ts.dependencies
)
assert all(dep.state == "memory" for dep in ts.dependencies)
assert ts.key not in self.ready
ts.state = "constrained"
self.constrained.append(ts.key)
return {}, []
def transition_long_running_rescheduled(self, ts, *, stimulus_id):
recs = {ts: "released"}
smsgs = [{"op": "reschedule", "key": ts.key, "worker": self.address}]
return recs, smsgs
def transition_executing_rescheduled(self, ts, *, stimulus_id):
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] += quantity
self._executing.discard(ts)
recs = {ts: "released"}
smsgs = [{"op": "reschedule", "key": ts.key, "worker": self.address}]
return recs, smsgs
def transition_waiting_ready(self, ts, *, stimulus_id):
if self.validate:
assert ts.state == "waiting"
assert ts.key not in self.ready
assert not ts.waiting_for_data
for dep in ts.dependencies:
assert dep.key in self.data or dep.key in self.actors
assert dep.state == "memory"
ts.state = "ready"
heapq.heappush(self.ready, (ts.priority, ts.key))
return {}, []
def transition_generic_error(
self, ts, exception, traceback, exception_text, traceback_text, *, stimulus_id
):
ts.exception = exception
ts.traceback = traceback
ts.exception_text = exception_text
ts.traceback_text = traceback_text
smsgs = [self.get_task_state_for_scheduler(ts)]
ts.state = "error"
return {}, smsgs
def transition_executing_error(
self, ts, exception, traceback, exception_text, traceback_text, *, stimulus_id
):
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] += quantity
self._executing.discard(ts)
return self.transition_generic_error(
ts,
exception,
traceback,
exception_text,
traceback_text,
stimulus_id=stimulus_id,
)
def transition_rescheduled_next(self, ts, *, stimulus_id):
next_state = ts._next
recs = self.release_key(ts.key, reason=stimulus_id)
recs[ts] = next_state
return recs, []
def transition_cancelled_fetch(self, ts, *, stimulus_id):
if ts.done:
return {ts: "released"}, []
elif ts._previous == "flight":
ts.state = ts._previous
return {}, []
else:
assert ts._previous == "executing"
return {ts: ("resumed", "fetch")}, []
def transition_cancelled_resumed(self, ts, next, *, stimulus_id):
ts._next = next
ts.state = "resumed"
return {}, []
def transition_cancelled_waiting(self, ts, *, stimulus_id):
if ts.done:
return {ts: "released"}, []
elif ts._previous == "executing":
ts.state = ts._previous
return {}, []
else:
assert ts._previous == "flight"
return {ts: ("resumed", "waiting")}, []
def transition_cancelled_forgotten(self, ts, *, stimulus_id):
ts._next = "forgotten"
if not ts.done:
return {}, []
return {ts: "released"}, []
def transition_cancelled_released(self, ts, *, stimulus_id):
if not ts.done:
ts._next = "released"
return {}, []
next_state = ts._next
self._executing.discard(ts)
self._in_flight_tasks.discard(ts)
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] += quantity
recommendations = self.release_key(ts.key, reason=stimulus_id)
recommendations[ts] = next_state or "released"
return recommendations, []
def transition_executing_released(self, ts, *, stimulus_id):
ts._previous = ts.state
# See https://github.com/dask/distributed/pull/5046#discussion_r685093940
ts.state = "cancelled"
ts.done = False
return {}, []
def transition_long_running_memory(self, ts, value=no_value, *, stimulus_id):
self.executed_count += 1
return self.transition_generic_memory(ts, value=value, stimulus_id=stimulus_id)
def transition_generic_memory(self, ts, value=no_value, *, stimulus_id):
if value is no_value and ts.key not in self.data:
raise RuntimeError(
f"Tried to transition task {ts} to `memory` without data available"
)
if ts.resource_restrictions is not None:
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] += quantity
self._executing.discard(ts)
self._in_flight_tasks.discard(ts)
ts.coming_from = None
recs, smsgs = self._put_key_in_memory(ts, value, stimulus_id=stimulus_id)
smsgs.append(self.get_task_state_for_scheduler(ts))
return recs, smsgs
def transition_executing_memory(self, ts, value=no_value, *, stimulus_id):
if self.validate:
assert ts.state == "executing" or ts.key in self.long_running
assert not ts.waiting_for_data
assert ts.key not in self.ready
self._executing.discard(ts)
self.executed_count += 1
return self.transition_generic_memory(ts, value=value, stimulus_id=stimulus_id)
def transition_constrained_released(self, ts, *, stimulus_id):
recs = self.release_key(ts.key, reason=stimulus_id)
return recs, []
def transition_constrained_executing(self, ts, *, stimulus_id):
if self.validate:
assert not ts.waiting_for_data
assert ts.key not in self.data
assert ts.state in READY
assert ts.key not in self.ready
for dep in ts.dependencies:
assert dep.key in self.data or dep.key in self.actors
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] -= quantity
ts.state = "executing"
self._executing.add(ts)
self.loop.add_callback(self.execute, ts.key, stimulus_id=stimulus_id)
return {}, []
def transition_ready_executing(self, ts, *, stimulus_id):
if self.validate:
assert not ts.waiting_for_data
assert ts.key not in self.data
assert ts.state in READY
assert ts.key not in self.ready
assert all(
dep.key in self.data or dep.key in self.actors
for dep in ts.dependencies
)
ts.state = "executing"
self._executing.add(ts)
self.loop.add_callback(self.execute, ts.key, stimulus_id=stimulus_id)
return {}, []
def transition_flight_fetch(self, ts, *, stimulus_id):
self._in_flight_tasks.discard(ts)
ts.coming_from = None
for w in ts.who_has:
self.pending_data_per_worker[w].append(ts.key)
ts.state = "fetch"
heapq.heappush(self.data_needed, (ts.priority, ts.key))
return {}, []
def transition_flight_error(
self, ts, exception, traceback, exception_text, traceback_text, *, stimulus_id
):
self._in_flight_tasks.discard(ts)
ts.coming_from = None
return self.transition_generic_error(
ts,
exception,
traceback,
exception_text,
traceback_text,
stimulus_id=stimulus_id,
)
def transition_flight_released(self, ts, *, stimulus_id):
ts._previous = "flight"
# See https://github.com/dask/distributed/pull/5046#discussion_r685093940
ts.state = "cancelled"
return {}, []
def transition_cancelled_memory(self, ts, value, *, stimulus_id):
return {ts: ts._next}, []
def transition_executing_long_running(self, ts, compute_duration, *, stimulus_id):
ts.state = "long-running"
self._executing.discard(ts)
self.long_running.add(ts.key)
smsgs = [
{
"op": "long-running",
"key": ts.key,
"compute_duration": compute_duration,
}
]
self.io_loop.add_callback(self.ensure_computing)
return {}, smsgs
def transition_released_memory(self, ts, value, *, stimulus_id):
recs, smsgs = self._put_key_in_memory(ts, value, stimulus_id=stimulus_id)
smsgs.append({"op": "add-keys", "keys": [ts.key], "stimulus_id": stimulus_id})
return recs, smsgs
def transition_flight_memory(self, ts, value, *, stimulus_id):
self._in_flight_tasks.discard(ts)
ts.coming_from = None
recs, smsgs = self._put_key_in_memory(ts, value, stimulus_id=stimulus_id)
smsgs.append({"op": "add-keys", "keys": [ts.key], "stimulus_id": stimulus_id})
return recs, smsgs
def transition_released_forgotten(self, ts, *, stimulus_id):
recommendations = {}
# Dependents _should_ be released by the scheduler before this
if self.validate:
assert not any(d.state != "forgotten" for d in ts.dependents)
for dep in ts.dependencies:
dep.dependents.discard(ts)
if dep.state == "released" and not dep.dependents:
recommendations[dep] = "forgotten"
# Mark state as forgotten in case it is still referenced
ts.state = "forgotten"
self.tasks.pop(ts.key, None)
return recommendations, []
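    # _transition looks up the (start, finish) pair in self._transitions_table.
    # If there is no direct edge and neither state is "released", the task is
    # first transitioned to "released" and from there to the requested finish
    # state, with the recommendations of both steps merged.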
def _transition(self, ts, finish, *args, stimulus_id, **kwargs):
if isinstance(finish, tuple):
# the concatenated transition path might need to access the tuple
assert not args
finish, *args = finish
if ts is None or ts.state == finish:
return {}, []
start = ts.state
func = self._transitions_table.get((start, finish))
if func is not None:
self._transition_counter += 1
recs, smsgs = func(ts, *args, stimulus_id=stimulus_id, **kwargs)
self._notify_plugins("transition", ts.key, start, finish, **kwargs)
elif "released" not in (start, finish):
# start -> "released" -> finish
try:
recs, smsgs = self._transition(ts, "released", stimulus_id=stimulus_id)
v = recs.get(ts, (finish, *args))
if isinstance(v, tuple):
v_state, *v_args = v
else:
v_state, v_args = v, ()
b_recs, b_smsgs = self._transition(
ts, v_state, *v_args, stimulus_id=stimulus_id
)
recs.update(b_recs)
smsgs += b_smsgs
except InvalidTransition:
raise InvalidTransition(
f"Impossible transition from {start} to {finish} for {ts.key}"
) from None
else:
raise InvalidTransition(
f"Impossible transition from {start} to {finish} for {ts.key}"
)
self.log.append(
(
ts.key,
start,
ts.state,
{ts.key: new for ts, new in recs.items()},
stimulus_id,
time(),
)
)
return recs, smsgs
def transition(self, ts, finish: str, *, stimulus_id, **kwargs):
"""Transition a key from its current state to the finish state
        Any recommendations produced by the transition are processed
        immediately and any resulting messages are sent to the scheduler over
        the batched stream.
        Examples
        --------
        >>> self.transition(ts, "waiting", stimulus_id="test")  # doctest: +SKIP
See Also
--------
Scheduler.transitions: transitive version of this function
"""
recs, smsgs = self._transition(ts, finish, stimulus_id=stimulus_id, **kwargs)
for msg in smsgs:
self.batched_stream.send(msg)
self.transitions(recs, stimulus_id=stimulus_id)
def transitions(self, recommendations: dict, *, stimulus_id):
"""Process transitions until none are left
This includes feedback from previous transitions and continues until we
reach a steady state
"""
smsgs = []
remaining_recs = recommendations.copy()
tasks = set()
while remaining_recs:
ts, finish = remaining_recs.popitem()
tasks.add(ts)
a_recs, a_smsgs = self._transition(ts, finish, stimulus_id=stimulus_id)
remaining_recs.update(a_recs)
smsgs += a_smsgs
if self.validate:
# Full state validation is very expensive
for ts in tasks:
self.validate_task(ts)
if not self.batched_stream.closed():
for msg in smsgs:
self.batched_stream.send(msg)
else:
logger.debug(
"BatchedSend closed while transitioning tasks. %d tasks not sent.",
len(smsgs),
)
def maybe_transition_long_running(self, ts, *, stimulus_id, compute_duration=None):
if ts.state == "executing":
self.transition(
ts,
"long-running",
compute_duration=compute_duration,
stimulus_id=stimulus_id,
)
assert ts.state == "long-running"
def stateof(self, key):
ts = self.tasks[key]
return {
"executing": ts.state == "executing",
"waiting_for_data": bool(ts.waiting_for_data),
"heap": key in pluck(1, self.ready),
"data": key in self.data,
}
def story(self, *keys):
keys = [key.key if isinstance(key, TaskState) else key for key in keys]
return [
msg
for msg in self.log
if any(key in msg for key in keys)
or any(
key in c
for key in keys
for c in msg
if isinstance(c, (tuple, list, set))
)
]
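    # ensure_communicating drains the data_needed heap in priority order and
    # schedules gather_dep calls while we are below total_out_connections open
    # connections or below comm_threshold_bytes of in-flight data; tasks whose
    # holders are all currently in flight are pushed back onto the heap.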
def ensure_communicating(self):
stimulus_id = f"ensure-communicating-{time()}"
skipped_worker_in_flight = []
while self.data_needed and (
len(self.in_flight_workers) < self.total_out_connections
or self.comm_nbytes < self.comm_threshold_bytes
):
logger.debug(
"Ensure communicating. Pending: %d. Connections: %d/%d",
len(self.data_needed),
len(self.in_flight_workers),
self.total_out_connections,
)
_, key = heapq.heappop(self.data_needed)
try:
ts = self.tasks[key]
except KeyError:
continue
if ts.state != "fetch":
continue
if not ts.who_has:
self.transition(ts, "missing", stimulus_id=stimulus_id)
continue
workers = [w for w in ts.who_has if w not in self.in_flight_workers]
if not workers:
skipped_worker_in_flight.append((ts.priority, ts.key))
continue
host = get_address_host(self.address)
local = [w for w in workers if get_address_host(w) == host]
if local:
worker = random.choice(local)
else:
worker = random.choice(list(workers))
assert worker != self.address
to_gather, total_nbytes = self.select_keys_for_gather(worker, ts.key)
self.log.append(
("gather-dependencies", worker, to_gather, "stimulus", time())
)
self.comm_nbytes += total_nbytes
self.in_flight_workers[worker] = to_gather
recommendations = {self.tasks[d]: ("flight", worker) for d in to_gather}
self.transitions(recommendations=recommendations, stimulus_id=stimulus_id)
self.loop.add_callback(
self.gather_dep,
worker=worker,
to_gather=to_gather,
total_nbytes=total_nbytes,
stimulus_id=stimulus_id,
)
for el in skipped_worker_in_flight:
heapq.heappush(self.data_needed, el)
def get_task_state_for_scheduler(self, ts):
if ts.key in self.data or self.actors.get(ts.key):
typ = ts.type
if ts.nbytes is None or typ is None:
try:
value = self.data[ts.key]
except KeyError:
value = self.actors[ts.key]
ts.nbytes = sizeof(value)
typ = ts.type = type(value)
del value
try:
typ_serialized = dumps_function(typ)
except PicklingError:
# Some types fail pickling (example: _thread.lock objects),
# send their name as a best effort.
typ_serialized = pickle.dumps(typ.__name__, protocol=4)
d = {
"op": "task-finished",
"status": "OK",
"key": ts.key,
"nbytes": ts.nbytes,
"thread": self.threads.get(ts.key),
"type": typ_serialized,
"typename": typename(typ),
"metadata": ts.metadata,
}
elif ts.exception is not None:
d = {
"op": "task-erred",
"status": "error",
"key": ts.key,
"thread": self.threads.get(ts.key),
"exception": ts.exception,
"traceback": ts.traceback,
"exception_text": ts.exception_text,
"traceback_text": ts.traceback_text,
}
else:
logger.error("Key not ready to send to worker, %s: %s", ts.key, ts.state)
return None
if ts.startstops:
d["startstops"] = ts.startstops
return d
def _put_key_in_memory(self, ts, value, *, stimulus_id):
if ts.key in self.data:
ts.state = "memory"
return {}, []
recommendations = {}
scheduler_messages = []
if ts.key in self.actors:
self.actors[ts.key] = value
else:
start = time()
try:
self.data[ts.key] = value
except Exception as e:
msg = error_message(e)
ts.exception = msg["exception"]
ts.traceback = msg["traceback"]
recommendations[ts] = ("error", msg["exception"], msg["traceback"])
return recommendations, []
stop = time()
if stop - start > 0.020:
ts.startstops.append(
{"action": "disk-write", "start": start, "stop": stop}
)
ts.state = "memory"
if ts.nbytes is None:
ts.nbytes = sizeof(value)
ts.type = type(value)
for dep in ts.dependents:
dep.waiting_for_data.discard(ts)
if not dep.waiting_for_data and dep.state == "waiting":
self.waiting_for_data_count -= 1
recommendations[dep] = "ready"
self.log.append((ts.key, "put-in-memory", stimulus_id, time()))
return recommendations, scheduler_messages
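    # select_keys_for_gather batches additional pending keys held by the same
    # worker into one transfer, stopping once the combined size would exceed
    # target_message_size.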
def select_keys_for_gather(self, worker, dep):
assert isinstance(dep, str)
deps = {dep}
total_bytes = self.tasks[dep].get_nbytes()
L = self.pending_data_per_worker[worker]
while L:
d = L.popleft()
ts = self.tasks.get(d)
if ts is None or ts.state != "fetch":
continue
if total_bytes + ts.get_nbytes() > self.target_message_size:
break
deps.add(d)
total_bytes += ts.get_nbytes()
return deps, total_bytes
@property
def total_comm_bytes(self):
warnings.warn(
"The attribute `Worker.total_comm_bytes` has been renamed to `comm_threshold_bytes`. "
"Future versions will only support the new name.",
DeprecationWarning,
)
return self.comm_threshold_bytes
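    # If the remote worker answers {"status": "busy"}, gather_dep increments
    # repetitively_busy and sleeps with exponential backoff before refreshing
    # who_has and calling ensure_communicating again.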
async def gather_dep(
self,
worker: str,
to_gather: Iterable[str],
total_nbytes: int,
*,
stimulus_id,
):
"""Gather dependencies for a task from a worker who has them
Parameters
----------
worker : str
Address of worker to gather dependencies from
to_gather : list
Keys of dependencies to gather from worker -- this is not
            necessarily equivalent to the full list of dependencies of the task
as some dependencies may already be present on this worker.
total_nbytes : int
Total number of bytes for all the dependencies in to_gather combined
"""
cause = None
if self.status not in (Status.running, Status.paused):
return
with log_errors():
response = {}
to_gather_keys = set()
try:
found_dependent_for_cause = False
for dependency_key in to_gather:
dependency_ts = self.tasks.get(dependency_key)
if dependency_ts and dependency_ts.state == "flight":
to_gather_keys.add(dependency_key)
if not found_dependent_for_cause:
cause = dependency_ts
# For diagnostics we want to attach the transfer to
                            # a single task. This task is typically the next to
# be executed but since we're fetching tasks for
# potentially many dependents, an exact match is not
# possible. If there are no dependents, this is a
# pure replica fetch
for dependent in dependency_ts.dependents:
cause = dependent
found_dependent_for_cause = True
break
if not to_gather_keys:
return
# Keep namespace clean since this func is long and has many
# dep*, *ts* variables
del to_gather, dependency_key, dependency_ts
self.log.append(
("request-dep", worker, to_gather_keys, stimulus_id, time())
)
logger.debug(
"Request %d keys for task %s from %s",
len(to_gather_keys),
cause,
worker,
)
start = time()
response = await get_data_from_worker(
self.rpc, to_gather_keys, worker, who=self.address
)
stop = time()
if response["status"] == "busy":
return
data = {k: v for k, v in response["data"].items() if k in self.tasks}
lost_keys = response["data"].keys() - data.keys()
if lost_keys:
self.log.append(("lost-during-gather", lost_keys, stimulus_id))
total_bytes = sum(self.tasks[key].get_nbytes() for key in data)
cause.startstops.append(
{
"action": "transfer",
"start": start + self.scheduler_delay,
"stop": stop + self.scheduler_delay,
"source": worker,
}
)
duration = (stop - start) or 0.010
bandwidth = total_bytes / duration
self.incoming_transfer_log.append(
{
"start": start + self.scheduler_delay,
"stop": stop + self.scheduler_delay,
"middle": (start + stop) / 2.0 + self.scheduler_delay,
"duration": duration,
"keys": {key: self.tasks[key].nbytes for key in data},
"total": total_bytes,
"bandwidth": bandwidth,
"who": worker,
}
)
if total_bytes > 1000000:
self.bandwidth = self.bandwidth * 0.95 + bandwidth * 0.05
bw, cnt = self.bandwidth_workers[worker]
self.bandwidth_workers[worker] = (bw + bandwidth, cnt + 1)
types = set(map(type, response["data"].values()))
if len(types) == 1:
[typ] = types
bw, cnt = self.bandwidth_types[typ]
self.bandwidth_types[typ] = (bw + bandwidth, cnt + 1)
if self.digests is not None:
self.digests["transfer-bandwidth"].add(total_bytes / duration)
self.digests["transfer-duration"].add(duration)
self.counters["transfer-count"].add(len(response["data"]))
self.incoming_count += 1
self.log.append(
("receive-dep", worker, set(response["data"]), stimulus_id, time())
)
except OSError:
logger.exception("Worker stream died during communication: %s", worker)
has_what = self.has_what.pop(worker)
self.pending_data_per_worker.pop(worker)
self.log.append(
("receive-dep-failed", worker, has_what, stimulus_id, time())
)
for d in has_what:
ts = self.tasks[d]
ts.who_has.remove(worker)
except Exception as e:
logger.exception(e)
if self.batched_stream and LOG_PDB:
import pdb
pdb.set_trace()
raise
finally:
self.comm_nbytes -= total_nbytes
busy = response.get("status", "") == "busy"
data = response.get("data", {})
if busy:
self.log.append(
("busy-gather", worker, to_gather_keys, stimulus_id, time())
)
recommendations: dict[TaskState, str | tuple] = {}
deps_to_iter = set(self.in_flight_workers.pop(worker)) & to_gather_keys
for d in deps_to_iter:
ts = self.tasks.get(d)
assert ts, (d, self.story(d))
ts.done = True
if d in data:
recommendations[ts] = ("memory", data[d])
elif not busy:
ts.who_has.discard(worker)
self.has_what[worker].discard(ts.key)
self.log.append(("missing-dep", d))
self.batched_stream.send(
{"op": "missing-data", "errant_worker": worker, "key": d}
)
if ts.state != "memory" and ts not in recommendations:
recommendations[ts] = "fetch"
del data, response
self.transitions(
recommendations=recommendations, stimulus_id=stimulus_id
)
self.ensure_computing()
if not busy:
self.repetitively_busy = 0
else:
# Exponential backoff to avoid hammering scheduler/worker
self.repetitively_busy += 1
await asyncio.sleep(0.100 * 1.5 ** self.repetitively_busy)
await self.query_who_has(*to_gather_keys, stimulus_id=stimulus_id)
self.ensure_communicating()
async def find_missing(self):
with log_errors():
if not self._missing_dep_flight:
return
try:
if self.validate:
for ts in self._missing_dep_flight:
assert not ts.who_has
stimulus_id = f"find-missing-{time()}"
who_has = await retry_operation(
self.scheduler.who_has,
keys=[ts.key for ts in self._missing_dep_flight],
)
who_has = {k: v for k, v in who_has.items() if v}
self.update_who_has(who_has, stimulus_id=stimulus_id)
finally:
# This is quite arbitrary but the heartbeat has scaling implemented
self.periodic_callbacks[
"find-missing"
].callback_time = self.periodic_callbacks["heartbeat"].callback_time
self.ensure_communicating()
self.ensure_computing()
async def query_who_has(self, *deps, stimulus_id):
with log_errors():
who_has = await retry_operation(self.scheduler.who_has, keys=deps)
self.update_who_has(who_has, stimulus_id=stimulus_id)
return who_has
def update_who_has(self, who_has, *, stimulus_id):
try:
recommendations = {}
for dep, workers in who_has.items():
if not workers:
continue
if dep in self.tasks:
if self.address in workers and self.tasks[dep].state != "memory":
logger.debug(
"Scheduler claims worker %s holds data for task %s which is not true.",
self.name,
dep,
)
# Do not mutate the input dict. That's rude
workers = set(workers) - {self.address}
dep_ts = self.tasks[dep]
dep_ts.who_has.update(workers)
if dep_ts.state == "missing":
recommendations[dep_ts] = "fetch"
for worker in workers:
self.has_what[worker].add(dep)
if dep_ts.state in ("fetch", "flight", "missing"):
self.pending_data_per_worker[worker].append(dep_ts.key)
self.transitions(recommendations=recommendations, stimulus_id=stimulus_id)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def handle_steal_request(self, key):
# There may be a race condition between stealing and releasing a task.
        # In this case the task may already have been removed from self.tasks. The `None` will be
# registered as `already-computing` on the other end
ts = self.tasks.get(key)
state = ts.state if ts is not None else None
response = {"op": "steal-response", "key": key, "state": state}
self.batched_stream.send(response)
if state in {"ready", "waiting", "constrained"}:
# If task is marked as "constrained" we haven't yet assigned it an
# `available_resources` to run on, that happens in
# `transition_constrained_executing`
self.transition(ts, "forgotten", stimulus_id=f"steal-request-{time()}")
def release_key(
self,
key: Hashable,
cause: TaskState | None = None,
reason: str | None = None,
report: bool = True,
):
recommendations = {}
try:
if self.validate:
assert not isinstance(key, TaskState)
ts = self.tasks[key]
# needed for legacy notification support
state_before = ts.state
ts.state = "released"
logger.debug(
"Release key %s", {"key": key, "cause": cause, "reason": reason}
)
if cause:
self.log.append((key, "release-key", {"cause": cause}, reason))
else:
self.log.append((key, "release-key", reason))
if key in self.data:
try:
del self.data[key]
except FileNotFoundError:
logger.error("Tried to delete %s but no file found", exc_info=True)
if key in self.actors:
del self.actors[key]
for worker in ts.who_has:
self.has_what[worker].discard(ts.key)
ts.who_has.clear()
if key in self.threads:
del self.threads[key]
if ts.resource_restrictions is not None:
if ts.state == "executing":
for resource, quantity in ts.resource_restrictions.items():
self.available_resources[resource] += quantity
for d in ts.dependencies:
ts.waiting_for_data.discard(d)
d.waiters.discard(ts)
if not d.waiters and d.state in {"flight", "fetch", "missing"}:
recommendations[d] = "forgotten"
ts.waiting_for_data.clear()
ts.nbytes = None
ts._previous = None
ts._next = None
ts.done = False
self._executing.discard(ts)
self._in_flight_tasks.discard(ts)
self._notify_plugins(
"release_key", key, state_before, cause, reason, report
)
except CommClosedError:
# Batched stream send might raise if it was already closed
pass
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
return recommendations
################
# Execute Task #
################
def run(self, comm, function, args=(), wait=True, kwargs=None):
return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait)
def run_coroutine(self, comm, function, args=(), kwargs=None, wait=True):
return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait)
async def plugin_add(self, comm=None, plugin=None, name=None):
with log_errors(pdb=False):
if isinstance(plugin, bytes):
plugin = pickle.loads(plugin)
if name is None:
name = _get_plugin_name(plugin)
assert name
if name in self.plugins:
await self.plugin_remove(comm=comm, name=name)
self.plugins[name] = plugin
logger.info("Starting Worker plugin %s" % name)
if hasattr(plugin, "setup"):
try:
result = plugin.setup(worker=self)
if isawaitable(result):
result = await result
except Exception as e:
msg = error_message(e)
return msg
return {"status": "OK"}
async def plugin_remove(self, comm=None, name=None):
with log_errors(pdb=False):
logger.info(f"Removing Worker plugin {name}")
try:
plugin = self.plugins.pop(name)
if hasattr(plugin, "teardown"):
result = plugin.teardown(worker=self)
if isawaitable(result):
result = await result
except Exception as e:
msg = error_message(e)
return msg
return {"status": "OK"}
async def actor_execute(
self,
comm=None,
actor=None,
function=None,
args=(),
kwargs: dict | None = None,
):
kwargs = kwargs or {}
separate_thread = kwargs.pop("separate_thread", True)
key = actor
actor = self.actors[key]
func = getattr(actor, function)
name = key_split(key) + "." + function
try:
if iscoroutinefunction(func):
result = await func(*args, **kwargs)
elif separate_thread:
result = await self.loop.run_in_executor(
self.executors["actor"],
apply_function_actor,
func,
args,
kwargs,
self.execution_state,
name,
self.active_threads,
self.active_threads_lock,
)
else:
result = func(*args, **kwargs)
return {"status": "OK", "result": to_serialize(result)}
except Exception as ex:
return {"status": "error", "exception": to_serialize(ex)}
def actor_attribute(self, comm=None, actor=None, attribute=None):
try:
value = getattr(self.actors[actor], attribute)
return {"status": "OK", "result": to_serialize(value)}
except Exception as ex:
return {"status": "error", "exception": to_serialize(ex)}
def meets_resource_constraints(self, key: str) -> bool:
ts = self.tasks[key]
if not ts.resource_restrictions:
return True
for resource, needed in ts.resource_restrictions.items():
if self.available_resources[resource] < needed:
return False
return True
async def _maybe_deserialize_task(self, ts, *, stimulus_id):
if not isinstance(ts.runspec, SerializedTask):
return ts.runspec
try:
start = time()
# Offload deserializing large tasks
if sizeof(ts.runspec) > OFFLOAD_THRESHOLD:
function, args, kwargs = await offload(_deserialize, *ts.runspec)
else:
function, args, kwargs = _deserialize(*ts.runspec)
stop = time()
if stop - start > 0.010:
ts.startstops.append(
{"action": "deserialize", "start": start, "stop": stop}
)
return function, args, kwargs
except Exception as e:
logger.error("Could not deserialize task", exc_info=True)
self.log.append((ts.key, "deserialize-error"))
emsg = error_message(e)
emsg.pop("status")
self.transition(
ts,
"error",
**emsg,
stimulus_id=stimulus_id,
)
raise
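    # ensure_computing fills free execution slots (executing_count < nthreads),
    # first from the constrained queue (tasks with resource restrictions) and
    # then from the ready heap, transitioning each picked task to "executing".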
def ensure_computing(self):
if self.status == Status.paused:
return
try:
stimulus_id = f"ensure-computing-{time()}"
while self.constrained and self.executing_count < self.nthreads:
key = self.constrained[0]
ts = self.tasks.get(key, None)
if ts is None or ts.state != "constrained":
self.constrained.popleft()
continue
if self.meets_resource_constraints(key):
self.constrained.popleft()
self.transition(ts, "executing", stimulus_id=stimulus_id)
else:
break
while self.ready and self.executing_count < self.nthreads:
priority, key = heapq.heappop(self.ready)
ts = self.tasks.get(key)
if ts is None:
# It is possible for tasks to be released while still remaining on
                    # `ready`. The scheduler might have re-routed to a new worker and
# told this worker to release. If the task has "disappeared" just
# continue through the heap
continue
elif ts.key in self.data:
self.transition(ts, "memory", stimulus_id=stimulus_id)
elif ts.state in READY:
self.transition(ts, "executing", stimulus_id=stimulus_id)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
async def execute(self, key, *, stimulus_id):
if self.status in (Status.closing, Status.closed, Status.closing_gracefully):
return
if key not in self.tasks:
return
ts = self.tasks[key]
try:
if ts.state == "cancelled":
# This might happen if keys are canceled
logger.debug(
"Trying to execute task %s which is not in executing state anymore",
ts,
)
ts.done = True
self.transition(ts, "released", stimulus_id=stimulus_id)
return
if self.validate:
assert not ts.waiting_for_data
assert ts.state == "executing"
assert ts.runspec is not None
function, args, kwargs = await self._maybe_deserialize_task(
ts, stimulus_id=stimulus_id
)
args2, kwargs2 = self._prepare_args_for_execution(ts, args, kwargs)
if ts.annotations is not None and "executor" in ts.annotations:
executor = ts.annotations["executor"]
else:
executor = "default"
assert executor in self.executors
assert key == ts.key
self.active_keys.add(ts.key)
result: dict
try:
e = self.executors[executor]
ts.start_time = time()
if iscoroutinefunction(function):
result = await apply_function_async(
function,
args2,
kwargs2,
self.scheduler_delay,
)
elif "ThreadPoolExecutor" in str(type(e)):
result = await self.loop.run_in_executor(
e,
apply_function,
function,
args2,
kwargs2,
self.execution_state,
ts.key,
self.active_threads,
self.active_threads_lock,
self.scheduler_delay,
)
else:
result = await self.loop.run_in_executor(
e,
apply_function_simple,
function,
args2,
kwargs2,
self.scheduler_delay,
)
finally:
self.active_keys.discard(ts.key)
key = ts.key
            # key *must* still be in tasks. Releasing it directly is forbidden
# without going through cancelled
ts = self.tasks.get(key)
assert ts, self.story(key)
ts.done = True
result["key"] = ts.key
value = result.pop("result", None)
ts.startstops.append(
{"action": "compute", "start": result["start"], "stop": result["stop"]}
)
self.threads[ts.key] = result["thread"]
recommendations = {}
if result["op"] == "task-finished":
ts.nbytes = result["nbytes"]
ts.type = result["type"]
recommendations[ts] = ("memory", value)
if self.digests is not None:
self.digests["task-duration"].add(result["stop"] - result["start"])
elif isinstance(result.pop("actual-exception"), Reschedule):
recommendations[ts] = "rescheduled"
else:
logger.warning(
"Compute Failed\n"
"Function: %s\n"
"args: %s\n"
"kwargs: %s\n"
"Exception: %r\n",
str(funcname(function))[:1000],
convert_args_to_str(args2, max_len=1000),
convert_kwargs_to_str(kwargs2, max_len=1000),
result["exception_text"],
)
recommendations[ts] = (
"error",
result["exception"],
result["traceback"],
result["exception_text"],
result["traceback_text"],
)
self.transitions(recommendations, stimulus_id=stimulus_id)
logger.debug("Send compute response to scheduler: %s, %s", ts.key, result)
if self.validate:
assert ts.state != "executing"
assert not ts.waiting_for_data
except Exception as exc:
assert ts
logger.error(
"Exception during execution of task %s.", ts.key, exc_info=True
)
emsg = error_message(exc)
emsg.pop("status")
self.transition(
ts,
"error",
**emsg,
stimulus_id=stimulus_id,
)
finally:
self.ensure_computing()
self.ensure_communicating()
def _prepare_args_for_execution(self, ts, args, kwargs):
start = time()
data = {}
for dep in ts.dependencies:
k = dep.key
try:
data[k] = self.data[k]
except KeyError:
from .actor import Actor # TODO: create local actor
data[k] = Actor(type(self.actors[k]), self.address, k, self)
args2 = pack_data(args, data, key_types=(bytes, str))
kwargs2 = pack_data(kwargs, data, key_types=(bytes, str))
stop = time()
if stop - start > 0.005:
ts.startstops.append({"action": "disk-read", "start": start, "stop": stop})
if self.digests is not None:
self.digests["disk-load-duration"].add(stop - start)
return args2, kwargs2
##################
# Administrative #
##################
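    # memory_monitor compares process RSS against memory_limit: above
    # memory_pause_fraction the worker pauses task execution, and above
    # memory_spill_fraction it evicts data from the fast buffer to disk until
    # usage falls back to memory_limit * memory_target_fraction.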
async def memory_monitor(self):
"""Track this process's memory usage and act accordingly
If we rise above 70% memory use, start dumping data to disk.
If we rise above 80% memory use, stop execution of new tasks
"""
if self._memory_monitoring:
return
self._memory_monitoring = True
total = 0
proc = self.monitor.proc
memory = proc.memory_info().rss
frac = memory / self.memory_limit
def check_pause(memory):
frac = memory / self.memory_limit
# Pause worker threads if above 80% memory use
if self.memory_pause_fraction and frac > self.memory_pause_fraction:
# Try to free some memory while in paused state
self._throttled_gc.collect()
if self.status == Status.running:
logger.warning(
"Worker is at %d%% memory usage. Pausing worker. "
"Process memory: %s -- Worker memory limit: %s",
int(frac * 100),
format_bytes(memory),
format_bytes(self.memory_limit)
if self.memory_limit is not None
else "None",
)
self.status = Status.paused
elif self.status == Status.paused:
logger.warning(
"Worker is at %d%% memory usage. Resuming worker. "
"Process memory: %s -- Worker memory limit: %s",
int(frac * 100),
format_bytes(memory),
format_bytes(self.memory_limit)
if self.memory_limit is not None
else "None",
)
self.status = Status.running
self.ensure_computing()
check_pause(memory)
# Dump data to disk if above 70%
if self.memory_spill_fraction and frac > self.memory_spill_fraction:
logger.debug(
"Worker is at %.0f%% memory usage. Start spilling data to disk.",
frac * 100,
)
start = time()
target = self.memory_limit * self.memory_target_fraction
count = 0
need = memory - target
while memory > target:
if not self.data.fast:
logger.warning(
"Unmanaged memory use is high. This may indicate a memory leak "
"or the memory may not be released to the OS; see "
"https://distributed.dask.org/en/latest/worker.html#memtrim "
"for more information. "
"-- Unmanaged memory: %s -- Worker memory limit: %s",
format_bytes(memory),
format_bytes(self.memory_limit),
)
break
k, v, weight = self.data.fast.evict()
del k, v
total += weight
count += 1
# If the current buffer is filled with a lot of small values,
# evicting one at a time is very slow and the worker might
# generate new data faster than it is able to evict. Therefore,
# only pass on control if we spent at least 0.5s evicting
if time() - start > 0.5:
await asyncio.sleep(0)
start = time()
memory = proc.memory_info().rss
if total > need and memory > target:
# Issue a GC to ensure that the evicted data is actually
# freed from memory and taken into account by the monitor
# before trying to evict even more data.
self._throttled_gc.collect()
memory = proc.memory_info().rss
check_pause(memory)
if count:
logger.debug(
"Moved %d tasks worth %s to disk",
count,
format_bytes(total),
)
self._memory_monitoring = False
return total
def cycle_profile(self):
now = time() + self.scheduler_delay
prof, self.profile_recent = self.profile_recent, profile.create()
self.profile_history.append((now, prof))
self.profile_keys_history.append((now, dict(self.profile_keys)))
self.profile_keys.clear()
def trigger_profile(self):
"""
Get a frame from all actively computing threads
Merge these frames into existing profile counts
"""
if not self.active_threads: # hope that this is thread-atomic?
return
start = time()
with self.active_threads_lock:
active_threads = self.active_threads.copy()
frames = sys._current_frames()
frames = {ident: frames[ident] for ident in active_threads}
llframes = {}
if self.low_level_profiler:
llframes = {ident: profile.ll_get_stack(ident) for ident in active_threads}
for ident, frame in frames.items():
if frame is not None:
key = key_split(active_threads[ident])
llframe = llframes.get(ident)
state = profile.process(
frame, True, self.profile_recent, stop="distributed/worker.py"
)
profile.llprocess(llframe, None, state)
profile.process(
frame, True, self.profile_keys[key], stop="distributed/worker.py"
)
stop = time()
if self.digests is not None:
self.digests["profile-duration"].add(stop - start)
async def get_profile(
self, comm=None, start=None, stop=None, key=None, server=False
):
now = time() + self.scheduler_delay
if server:
history = self.io_loop.profile
elif key is None:
history = self.profile_history
else:
history = [(t, d[key]) for t, d in self.profile_keys_history if key in d]
if start is None:
istart = 0
else:
istart = bisect.bisect_left(history, (start,))
if stop is None:
istop = None
else:
istop = bisect.bisect_right(history, (stop,)) + 1
if istop >= len(history):
istop = None # include end
if istart == 0 and istop is None:
history = list(history)
else:
iistop = len(history) if istop is None else istop
history = [history[i] for i in range(istart, iistop)]
prof = profile.merge(*pluck(1, history))
if not history:
return profile.create()
if istop is None and (start is None or start < now):
if key is None:
recent = self.profile_recent
else:
recent = self.profile_keys[key]
prof = profile.merge(prof, recent)
return prof
async def get_profile_metadata(self, comm=None, start=0, stop=None):
add_recent = stop is None
now = time() + self.scheduler_delay
stop = stop or now
start = start or 0
result = {
"counts": [
(t, d["count"]) for t, d in self.profile_history if start < t < stop
],
"keys": [
(t, {k: d["count"] for k, d in v.items()})
for t, v in self.profile_keys_history
if start < t < stop
],
}
if add_recent:
result["counts"].append((now, self.profile_recent["count"]))
result["keys"].append(
(now, {k: v["count"] for k, v in self.profile_keys.items()})
)
return result
def get_call_stack(self, comm=None, keys=None):
with self.active_threads_lock:
frames = sys._current_frames()
active_threads = self.active_threads.copy()
frames = {k: frames[ident] for ident, k in active_threads.items()}
if keys is not None:
frames = {k: frame for k, frame in frames.items() if k in keys}
result = {k: profile.call_stack(frame) for k, frame in frames.items()}
return result
def _notify_plugins(self, method_name, *args, **kwargs):
for name, plugin in self.plugins.items():
if hasattr(plugin, method_name):
if method_name == "release_key":
warnings.warn(
"The `WorkerPlugin.release_key` hook is depreacted and will be "
"removed in a future version. A similar event can now be "
"caught by filtering for a `finish=='released'` event in the "
"`WorkerPlugin.transition` hook.",
DeprecationWarning,
)
try:
getattr(plugin, method_name)(*args, **kwargs)
except Exception:
logger.info(
"Plugin '%s' failed with exception", name, exc_info=True
)
##############
# Validation #
##############
def validate_task_memory(self, ts):
assert ts.key in self.data or ts.key in self.actors
assert isinstance(ts.nbytes, int)
assert not ts.waiting_for_data
assert ts.key not in self.ready
assert ts.state == "memory"
def validate_task_executing(self, ts):
assert ts.state == "executing"
assert ts.runspec is not None
assert ts.key not in self.data
assert not ts.waiting_for_data
for dep in ts.dependencies:
assert dep.state == "memory", self.story(dep)
assert dep.key in self.data or dep.key in self.actors
def validate_task_ready(self, ts):
assert ts.key in pluck(1, self.ready)
assert ts.key not in self.data
assert ts.state != "executing"
assert not ts.waiting_for_data
assert all(
dep.key in self.data or dep.key in self.actors for dep in ts.dependencies
)
def validate_task_waiting(self, ts):
assert ts.key not in self.data
assert ts.state == "waiting"
if ts.dependencies and ts.runspec:
assert not all(dep.key in self.data for dep in ts.dependencies)
def validate_task_flight(self, ts):
assert ts.key not in self.data
assert ts in self._in_flight_tasks
assert not any(dep.key in self.ready for dep in ts.dependents)
assert ts.coming_from
assert ts.coming_from in self.in_flight_workers
assert ts.key in self.in_flight_workers[ts.coming_from]
def validate_task_fetch(self, ts):
assert ts.key not in self.data
assert self.address not in ts.who_has
for w in ts.who_has:
assert ts.key in self.has_what[w]
def validate_task_missing(self, ts):
assert ts.key not in self.data
assert not ts.who_has
assert not any(ts.key in has_what for has_what in self.has_what.values())
assert ts.key in self._missing_dep_flight
def validate_task_cancelled(self, ts):
assert ts.key not in self.data
assert ts._previous
def validate_task_resumed(self, ts):
assert ts.key not in self.data
assert ts._next
assert ts._previous
def validate_task_released(self, ts):
assert ts.key not in self.data
assert not ts._next
assert not ts._previous
assert ts not in self._executing
assert ts not in self._in_flight_tasks
assert ts not in self._missing_dep_flight
assert not ts.who_has
assert not any(ts.key in has_what for has_what in self.has_what.values())
assert not ts.waiting_for_data
assert not ts.done
assert not ts.exception
assert not ts.traceback
def validate_task(self, ts):
try:
if ts.key in self.tasks:
assert self.tasks[ts.key] == ts
if ts.state == "memory":
self.validate_task_memory(ts)
elif ts.state == "waiting":
self.validate_task_waiting(ts)
elif ts.state == "missing":
self.validate_task_missing(ts)
elif ts.state == "cancelled":
self.validate_task_cancelled(ts)
elif ts.state == "resumed":
self.validate_task_resumed(ts)
elif ts.state == "ready":
self.validate_task_ready(ts)
elif ts.state == "executing":
self.validate_task_executing(ts)
elif ts.state == "flight":
self.validate_task_flight(ts)
elif ts.state == "fetch":
self.validate_task_fetch(ts)
elif ts.state == "released":
self.validate_task_released(ts)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def validate_state(self):
if self.status not in (Status.running, Status.paused):
return
try:
assert self.executing_count >= 0
waiting_for_data_count = 0
for ts in self.tasks.values():
assert ts.state is not None
# check that worker has task
for worker in ts.who_has:
assert ts.key in self.has_what[worker]
# check that deps have a set state and that dependency<->dependent links
# are there
for dep in ts.dependencies:
# self.tasks was just a dict of tasks
# and this check was originally that the key was in `task_state`
# so we may have popped the key out of `self.tasks` but the
# dependency can still be in `memory` before GC grabs it...?
# Might need better bookkeeping
assert dep.state is not None
assert ts in dep.dependents, ts
if ts.waiting_for_data:
waiting_for_data_count += 1
for ts_wait in ts.waiting_for_data:
assert ts_wait.key in self.tasks
assert (
ts_wait.state
in {"ready", "executing", "flight", "fetch", "missing"}
or ts_wait.key in self._missing_dep_flight
or ts_wait.who_has.issubset(self.in_flight_workers)
), (ts, ts_wait, self.story(ts), self.story(ts_wait))
if ts.state == "memory":
assert isinstance(ts.nbytes, int)
assert not ts.waiting_for_data
assert ts.key in self.data or ts.key in self.actors
assert self.waiting_for_data_count == waiting_for_data_count
for worker, keys in self.has_what.items():
for k in keys:
assert worker in self.tasks[k].who_has
for ts in self.tasks.values():
self.validate_task(ts)
except Exception as e:
self.loop.add_callback(self.close)
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
#######################################
# Worker Clients (advanced workloads) #
#######################################
@property
def client(self) -> Client:
with self._lock:
if self._client:
return self._client
else:
return self._get_client()
def _get_client(self, timeout=None) -> Client:
"""Get local client attached to this worker
If no such client exists, create one
See Also
--------
get_client
"""
if timeout is None:
timeout = dask.config.get("distributed.comm.timeouts.connect")
timeout = parse_timedelta(timeout, "s")
try:
from .client import default_client
client = default_client()
except ValueError: # no clients found, need to make a new one
pass
else:
# must be lazy import otherwise cyclic import
from distributed.deploy.cluster import Cluster
if (
client.scheduler
and client.scheduler.address == self.scheduler.address
# The below conditions should only happen in case a second
# cluster is alive, e.g. if a submitted task spawned its own
# LocalCluster, see gh4565
or (
isinstance(client._start_arg, str)
and client._start_arg == self.scheduler.address
or isinstance(client._start_arg, Cluster)
and client._start_arg.scheduler_address == self.scheduler.address
)
):
self._client = client
if not self._client:
from .client import Client
asynchronous = self.loop is IOLoop.current()
self._client = Client(
self.scheduler,
loop=self.loop,
security=self.security,
set_as_default=True,
asynchronous=asynchronous,
direct_to_workers=True,
name="worker",
timeout=timeout,
)
Worker._initialized_clients.add(self._client)
if not asynchronous:
assert self._client.status == "running"
return self._client
def get_current_task(self):
"""Get the key of the task we are currently running
This only makes sense to run within a task
Examples
--------
>>> from dask.distributed import get_worker
>>> def f():
... return get_worker().get_current_task()
>>> future = client.submit(f) # doctest: +SKIP
>>> future.result() # doctest: +SKIP
'f-1234'
See Also
--------
get_worker
"""
return self.active_threads[threading.get_ident()]
def get_worker() -> Worker:
"""Get the worker currently running this task
Examples
--------
>>> def f():
... worker = get_worker() # The worker on which this task is running
... return worker.address
>>> future = client.submit(f) # doctest: +SKIP
>>> future.result() # doctest: +SKIP
'tcp://127.0.0.1:47373'
See Also
--------
get_client
worker_client
"""
try:
return thread_state.execution_state["worker"]
except AttributeError:
try:
return first(
w
for w in Worker._instances
if w.status in (Status.running, Status.paused)
)
except StopIteration:
raise ValueError("No workers found")
def get_client(address=None, timeout=None, resolve_address=True) -> Client:
"""Get a client while within a task.
This client connects to the same scheduler to which the worker is connected
Parameters
----------
address : str, optional
The address of the scheduler to connect to. Defaults to the scheduler
the worker is connected to.
timeout : int or str
Timeout (in seconds) for getting the Client. Defaults to the
``distributed.comm.timeouts.connect`` configuration value.
resolve_address : bool, default True
Whether to resolve `address` to its canonical form.
Returns
-------
Client
Examples
--------
>>> def f():
... client = get_client(timeout="10s")
... futures = client.map(lambda x: x + 1, range(10)) # spawn many tasks
... results = client.gather(futures)
... return sum(results)
>>> future = client.submit(f) # doctest: +SKIP
>>> future.result() # doctest: +SKIP
55
See Also
--------
get_worker
worker_client
secede
"""
if timeout is None:
timeout = dask.config.get("distributed.comm.timeouts.connect")
timeout = parse_timedelta(timeout, "s")
if address and resolve_address:
address = comm.resolve_address(address)
try:
worker = get_worker()
except ValueError: # could not find worker
pass
else:
if not address or worker.scheduler.address == address:
return worker._get_client(timeout=timeout)
from .client import Client
try:
client = Client.current() # TODO: assumes the same scheduler
except ValueError:
client = None
if client and (not address or client.scheduler.address == address):
return client
elif address:
return Client(address, timeout=timeout)
else:
raise ValueError("No global client found and no address provided")
def secede():
"""
Have this task secede from the worker's thread pool
This opens up a new scheduling slot and a new thread for a new task. This
enables the client to schedule tasks on this node, which is
especially useful while waiting for other jobs to finish (e.g., with
``client.gather``).
Examples
--------
>>> def mytask(x):
... # do some work
... client = get_client()
... futures = client.map(...) # do some remote work
... secede() # while that work happens, remove ourself from the pool
... return client.gather(futures) # return gathered results
See Also
--------
get_client
get_worker
"""
worker = get_worker()
tpe_secede() # have this thread secede from the thread pool
duration = time() - thread_state.start_time
worker.loop.add_callback(
worker.maybe_transition_long_running,
worker.tasks[thread_state.key],
compute_duration=duration,
stimulus_id=f"secede-{thread_state.key}-{time()}",
)
class Reschedule(Exception):
"""Reschedule this task
Raising this exception will stop the current execution of the task and ask
the scheduler to reschedule this task, possibly on a different machine.
This does not guarantee that the task will move onto a different machine.
The scheduler will proceed through its normal heuristics to determine the
optimal machine to accept this task. The machine will likely change if the
load across the cluster has significantly changed since first scheduling
the task.
"""
def parse_memory_limit(memory_limit, nthreads, total_cores=CPU_COUNT) -> int | None:
if memory_limit is None:
return None
if memory_limit == "auto":
memory_limit = int(system.MEMORY_LIMIT * min(1, nthreads / total_cores))
with suppress(ValueError, TypeError):
memory_limit = float(memory_limit)
if isinstance(memory_limit, float) and memory_limit <= 1:
memory_limit = int(memory_limit * system.MEMORY_LIMIT)
if isinstance(memory_limit, str):
memory_limit = parse_bytes(memory_limit)
else:
memory_limit = int(memory_limit)
return min(memory_limit, system.MEMORY_LIMIT)
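# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module): how the accepted
# ``memory_limit`` spellings are resolved by parse_memory_limit above. The
# concrete numbers depend on the machine this runs on.
def _example_parse_memory_limit():
    print(parse_memory_limit(None, nthreads=4))           # None -> no limit
    print(parse_memory_limit("auto", nthreads=4))         # share of system RAM by thread count
    print(parse_memory_limit(0.5, nthreads=4))            # fraction <= 1 -> 50% of system RAM
    print(parse_memory_limit("4 GiB", nthreads=4))        # string -> dask's parse_bytes
    print(parse_memory_limit(2_000_000_000, nthreads=4))  # raw byte count, capped at system RAM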
async def get_data_from_worker(
rpc,
keys,
worker,
who=None,
max_connections=None,
serializers=None,
deserializers=None,
):
"""Get keys from worker
The worker has a two step handshake to acknowledge when data has been fully
delivered. This function implements that handshake.
See Also
--------
Worker.get_data
Worker.gather_deps
utils_comm.gather_data_from_workers
"""
if serializers is None:
serializers = rpc.serializers
if deserializers is None:
deserializers = rpc.deserializers
async def _get_data():
comm = await rpc.connect(worker)
comm.name = "Ephemeral Worker->Worker for gather"
try:
response = await send_recv(
comm,
serializers=serializers,
deserializers=deserializers,
op="get_data",
keys=keys,
who=who,
max_connections=max_connections,
)
try:
status = response["status"]
except KeyError:
raise ValueError("Unexpected response", response)
else:
if status == "OK":
await comm.write("OK")
return response
finally:
rpc.reuse(worker, comm)
return await retry_operation(_get_data, operation="get_data_from_worker")
job_counter = [0]
cache_loads = LRU(maxsize=100)
def loads_function(bytes_object):
"""Load a function from bytes, cache bytes"""
if len(bytes_object) < 100000:
try:
result = cache_loads[bytes_object]
except KeyError:
result = pickle.loads(bytes_object)
cache_loads[bytes_object] = result
return result
return pickle.loads(bytes_object)
def _deserialize(function=None, args=None, kwargs=None, task=no_value):
"""Deserialize task inputs and regularize to func, args, kwargs"""
if function is not None:
function = loads_function(function)
if args and isinstance(args, bytes):
args = pickle.loads(args)
if kwargs and isinstance(kwargs, bytes):
kwargs = pickle.loads(kwargs)
if task is not no_value:
assert not function and not args and not kwargs
function = execute_task
args = (task,)
return function, args or (), kwargs or {}
def execute_task(task):
"""Evaluate a nested task
>>> inc = lambda x: x + 1
>>> execute_task((inc, 1))
2
>>> execute_task((sum, [1, 2, (inc, 3)]))
7
"""
if istask(task):
func, args = task[0], task[1:]
return func(*map(execute_task, args))
elif isinstance(task, list):
return list(map(execute_task, task))
else:
return task
cache_dumps = LRU(maxsize=100)
_cache_lock = threading.Lock()
def dumps_function(func) -> bytes:
"""Dump a function to bytes, cache functions"""
try:
with _cache_lock:
result = cache_dumps[func]
except KeyError:
result = pickle.dumps(func, protocol=4)
if len(result) < 100000:
with _cache_lock:
cache_dumps[func] = result
except TypeError: # Unhashable function
result = pickle.dumps(func, protocol=4)
return result
def dumps_task(task):
"""Serialize a dask task
Returns a dict of bytestrings that can each be loaded with ``loads``
Examples
--------
Either returns a task as a function, args, kwargs dict
>>> from operator import add
>>> dumps_task((add, 1)) # doctest: +SKIP
{'function': b'\x80\x04\x95\x00\x8c\t_operator\x94\x8c\x03add\x94\x93\x94.',
'args': b'\x80\x04\x95\x07\x00\x00\x00K\x01K\x02\x86\x94.'}
Or as a single task blob if it can't easily decompose the result. This
happens either if the task is highly nested, or if it isn't a task at all
>>> dumps_task(1) # doctest: +SKIP
{'task': b'\x80\x04\x95\x03\x00\x00\x00\x00\x00\x00\x00K\x01.'}
"""
if istask(task):
if task[0] is apply and not any(map(_maybe_complex, task[2:])):
d = {"function": dumps_function(task[1]), "args": warn_dumps(task[2])}
if len(task) == 4:
d["kwargs"] = warn_dumps(task[3])
return d
elif not any(map(_maybe_complex, task[1:])):
return {"function": dumps_function(task[0]), "args": warn_dumps(task[1:])}
return to_serialize(task)
_warn_dumps_warned = [False]
def warn_dumps(obj, dumps=pickle.dumps, limit=1e6):
"""Dump an object to bytes, warn if those bytes are large"""
b = dumps(obj, protocol=4)
if not _warn_dumps_warned[0] and len(b) > limit:
_warn_dumps_warned[0] = True
s = str(obj)
if len(s) > 70:
s = s[:50] + " ... " + s[-15:]
warnings.warn(
"Large object of size %s detected in task graph: \n"
" %s\n"
"Consider scattering large objects ahead of time\n"
"with client.scatter to reduce scheduler burden and \n"
"keep data on workers\n\n"
" future = client.submit(func, big_data) # bad\n\n"
" big_future = client.scatter(big_data) # good\n"
" future = client.submit(func, big_future) # good"
% (format_bytes(len(b)), s)
)
return b
def apply_function(
function,
args,
kwargs,
execution_state,
key,
active_threads,
active_threads_lock,
time_delay,
):
"""Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..
"""
ident = threading.get_ident()
with active_threads_lock:
active_threads[ident] = key
thread_state.start_time = time()
thread_state.execution_state = execution_state
thread_state.key = key
msg = apply_function_simple(function, args, kwargs, time_delay)
with active_threads_lock:
del active_threads[ident]
return msg
def apply_function_simple(
function,
args,
kwargs,
time_delay,
):
"""Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..
"""
ident = threading.get_ident()
start = time()
try:
result = function(*args, **kwargs)
except Exception as e:
msg = error_message(e)
msg["op"] = "task-erred"
msg["actual-exception"] = e
else:
msg = {
"op": "task-finished",
"status": "OK",
"result": result,
"nbytes": sizeof(result),
"type": type(result) if result is not None else None,
}
finally:
end = time()
msg["start"] = start + time_delay
msg["stop"] = end + time_delay
msg["thread"] = ident
return msg
async def apply_function_async(
function,
args,
kwargs,
time_delay,
):
"""Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..
"""
ident = threading.get_ident()
start = time()
try:
result = await function(*args, **kwargs)
except Exception as e:
msg = error_message(e)
msg["op"] = "task-erred"
msg["actual-exception"] = e
else:
msg = {
"op": "task-finished",
"status": "OK",
"result": result,
"nbytes": sizeof(result),
"type": type(result) if result is not None else None,
}
finally:
end = time()
msg["start"] = start + time_delay
msg["stop"] = end + time_delay
msg["thread"] = ident
return msg
def apply_function_actor(
function, args, kwargs, execution_state, key, active_threads, active_threads_lock
):
"""Run a function, collect information
Returns
-------
msg: dictionary with status, result/error, timings, etc..
"""
ident = threading.get_ident()
with active_threads_lock:
active_threads[ident] = key
thread_state.execution_state = execution_state
thread_state.key = key
thread_state.actor = True
result = function(*args, **kwargs)
with active_threads_lock:
del active_threads[ident]
return result
def get_msg_safe_str(msg):
"""Make a worker msg, which contains args and kwargs, safe to cast to str:
allowing for some arguments to raise exceptions during conversion and
ignoring them.
"""
class Repr:
def __init__(self, f, val):
self._f = f
self._val = val
def __repr__(self):
return self._f(self._val)
msg = msg.copy()
if "args" in msg:
msg["args"] = Repr(convert_args_to_str, msg["args"])
if "kwargs" in msg:
msg["kwargs"] = Repr(convert_kwargs_to_str, msg["kwargs"])
return msg
def convert_args_to_str(args, max_len: int | None = None) -> str:
"""Convert args to a string, allowing for some arguments to raise
exceptions during conversion and ignoring them.
"""
length = 0
strs = ["" for i in range(len(args))]
for i, arg in enumerate(args):
try:
sarg = repr(arg)
except Exception:
sarg = "< could not convert arg to str >"
strs[i] = sarg
length += len(sarg) + 2
if max_len is not None and length > max_len:
return "({}".format(", ".join(strs[: i + 1]))[:max_len]
else:
return "({})".format(", ".join(strs))
def convert_kwargs_to_str(kwargs: dict, max_len: int | None = None) -> str:
"""Convert kwargs to a string, allowing for some arguments to raise
exceptions during conversion and ignoring them.
"""
length = 0
strs = ["" for i in range(len(kwargs))]
for i, (argname, arg) in enumerate(kwargs.items()):
try:
sarg = repr(arg)
except Exception:
sarg = "< could not convert arg to str >"
skwarg = repr(argname) + ": " + sarg
strs[i] = skwarg
length += len(skwarg) + 2
if max_len is not None and length > max_len:
return "{{{}".format(", ".join(strs[: i + 1]))[:max_len]
else:
return "{{{}}}".format(", ".join(strs))
async def run(server, comm, function, args=(), kwargs=None, is_coro=None, wait=True):
kwargs = kwargs or {}
function = pickle.loads(function)
if is_coro is None:
is_coro = iscoroutinefunction(function)
else:
warnings.warn(
"The is_coro= parameter is deprecated. "
"We now automatically detect coroutines/async functions"
)
assert wait or is_coro, "Combination not supported"
if args:
args = pickle.loads(args)
if kwargs:
kwargs = pickle.loads(kwargs)
if has_arg(function, "dask_worker"):
kwargs["dask_worker"] = server
if has_arg(function, "dask_scheduler"):
kwargs["dask_scheduler"] = server
logger.info("Run out-of-band function %r", funcname(function))
try:
if not is_coro:
result = function(*args, **kwargs)
else:
if wait:
result = await function(*args, **kwargs)
else:
server.loop.add_callback(function, *args, **kwargs)
result = None
except Exception as e:
logger.warning(
"Run Failed\nFunction: %s\nargs: %s\nkwargs: %s\n",
str(funcname(function))[:1000],
convert_args_to_str(args, max_len=1000),
convert_kwargs_to_str(kwargs, max_len=1000),
exc_info=True,
)
response = error_message(e)
else:
response = {"status": "OK", "result": to_serialize(result)}
return response
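# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the handler above is
# what ultimately serves ``Client.run``. A typical call from the user side
# looks roughly like this and requires an already connected Client.
def _example_client_run(client):
    import os

    # A plain function runs once on every worker; results are keyed by address.
    pids = client.run(os.getpid)
    # A ``dask_worker`` parameter is filled in by the handler above with the
    # Worker instance itself.
    addresses = client.run(lambda dask_worker: dask_worker.address)
    return pids, addresses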
_global_workers = Worker._instances
try:
if nvml.device_get_count() < 1:
raise RuntimeError
except Exception:
pass
else:
async def gpu_metric(worker):
result = await offload(nvml.real_time)
return result
DEFAULT_METRICS["gpu"] = gpu_metric
def gpu_startup(worker):
return nvml.one_time()
DEFAULT_STARTUP_INFORMATION["gpu"] = gpu_startup
def print(*args, **kwargs):
"""Dask print function
This prints both wherever this function is run, and also in the user's
client session
"""
try:
worker = get_worker()
except ValueError:
pass
else:
msg = {
"args": tuple(stringify(arg) for arg in args),
"kwargs": {k: stringify(v) for k, v in kwargs.items()},
}
worker.log_event("print", msg)
builtins.print(*args, **kwargs)
def warn(*args, **kwargs):
"""Dask warn function
This raises a warning both wherever this function is run, and also
in the user's client session
"""
try:
worker = get_worker()
except ValueError:
pass
else:
worker.log_event("warn", {"args": args, "kwargs": kwargs})
warnings.warn(*args, **kwargs)
|
from pony.orm import *
from datetime import datetime
from model.group import Group
from model.contact import Contact
from pymysql.converters import decoders
import random
class ORMFixture:
db = Database()
class ORMGroup(db.Entity):
_table_ = 'group_list'
id = PrimaryKey(int, column='group_id')
name = Optional(str, column='group_name')
header = Optional(str, column='group_header')
footer = Optional(str, column='group_footer')
contacts = Set(lambda: ORMFixture.ORMContact, table='address_in_groups', column='id', reverse='groups', lazy=True)
class ORMContact(db.Entity):
_table_ = 'addressbook'
id = PrimaryKey(int, column='id')
first_name = Optional(str, column='firstname')
last_name = Optional(str, column='lastname')
deprecated = Optional(str, column='deprecated')
groups = Set(lambda: ORMFixture.ORMGroup, table='address_in_groups', column='group_id', reverse='contacts', lazy=True)
def __init__(self, host, name, user, password):
self.db.bind('mysql', host=host, database=name, user=user, password=password, conv=decoders)
self.db.generate_mapping()
sql_debug(True)
def convert_groups_to_model(self, groups):
def convert(group):
return Group(id=str(group.id), name=group.name, header=group.header, footer=group.footer)
return list(map(convert, groups))
@db_session
def get_group_list(self):
return self.convert_groups_to_model(select(g for g in ORMFixture.ORMGroup))
def convert_contacts_to_model(self, contacts):
def convert(contact):
return Contact(id=str(contact.id), last_name=contact.last_name, first_name=contact.first_name)
return list(map(convert, contacts))
@db_session
def get_contact_list(self):
return self.convert_contacts_to_model(select(c for c in ORMFixture.ORMContact if c.deprecated is None))
@db_session
def get_contacts_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(orm_group.contacts)
@db_session
def get_contacts_not_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(
select(c for c in ORMFixture.ORMContact if c.deprecated is None and orm_group not in c.groups))
@db_session
def get_groups_with_contacts(self):
groups_with_contacts = []
group_id = self.db.select("group_id FROM address_in_groups")
for i in group_id:
groups_with_contacts.append(Group(id=str(i)))
return groups_with_contacts
def checker_that_we_have_groups_with_contacts(self, app):
if len(self.get_groups_with_contacts()) == 0:
old_contacts = self.get_contact_list()
old_groups = self.get_group_list()
app.contact.checker_that_old_contacts_not_zero(old_contacts)
app.group.checker_that_old_groups_not_zero(old_groups)
old_contactsNEW = self.get_contact_list()
old_groupsNEW = self.get_group_list()
available_groups = app.group.get_available_groups(old_groupsNEW, old_contactsNEW, self)
group = random.choice(available_groups)
contacts_not_in_group = self.get_contacts_not_in_group(group)
contact = random.choice(contacts_not_in_group)
app.contact.add_contact_to_group(contact.id, group)
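# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original fixture): the connection
# parameters below are placeholders for a local addressbook database.
if __name__ == "__main__":
    orm = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
    groups = orm.get_group_list()
    contacts = orm.get_contact_list()
    print("%d groups, %d non-deprecated contacts" % (len(groups), len(contacts)))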
|
# Copyright 2004-present, Facebook. All Rights Reserved.
import os
from core.models import BaseModel
from django.conf import settings
from django.db import models
from shop.models import Store
from shop.models.choices import Currency
from .choices import BusinessVertical, FBEChannel
class FacebookMetadata(BaseModel):
"""All FB specific metadata the platform needs to store
fields:
store: store this set of metadata belongs to
fbe_external_business_id: unique id for FB to identify this store
fbe_business_vertical: always 'ECOMMERCE' for reference implementation
fbe_domain: Domain used for Instagram manual approval. required for COMMERCE and COMMERCE_OFFSITE channels
fbe_channel: Not optional for RI. Only options are COMMERCE and COMMERCE_OFFSITE.
fbe_pixel: Pixel ID for the user's existing Pixel that a partner can pass in to preselect for the user in the setup flow.
"""
_FB_COMMERCE_MANAGER_URL = "https://www.facebook.com/commerce_manager/{}"
_FB_FBE_MANAGEMENT_VIEW_URL = "https://www.facebook.com/facebook_business_extension/management/?app_id={}&external_business_id={}&tab=Commerce"
_FB_BUSINESS_MANAGER_URL = "https://business.facebook.com/settings/?business_id={}"
_FB_CATALOG_MANAGER_URL = "https://www.facebook.com/products/catalogs/{}/products"
class GraphUserTokenType(models.TextChoices):
USER = "USER"
SYSTEM_USER = "SYSTEM_USER"
store = models.OneToOneField(
Store,
on_delete=models.CASCADE,
primary_key=True,
)
# the Facebook commerce account id for the Store
commerce_account_id = models.CharField(max_length=50, blank=True, null=True)
# Not currently used, but should store webhook information for this metadata object
webhook = models.TextField(blank=True, null=True)
# Also not currently used
app_info = models.TextField(blank=True, null=True)
fbe_external_business_id = models.CharField(max_length=255, blank=True, default="")
""" The following fields are needed when creating the URL to launch FBE """
fbe_timezone = models.TextField(
default="UTC",
)
fbe_currency = models.CharField(
max_length=3,
choices=Currency.choices,
default=Currency.USD,
)
fbe_business_vertical = models.CharField(
max_length=9,
choices=BusinessVertical.choices,
default="",
)
fbe_domain = models.CharField(
max_length=255,
default="",
)
fbe_channel = models.CharField(
max_length=16,
choices=FBEChannel.choices,
default="",
)
""" The following fields are set after the FBE setup is complete.
This information is needed for API calls and troubleshooting."""
fbe_business_manager_id = models.CharField(max_length=50, blank=True, default="")
fbe_ad_account_id = models.CharField(max_length=50, blank=True, default="")
fbe_page_id = models.CharField(max_length=50, blank=True, default="")
fbe_ig_profile_id = models.CharField(max_length=50, blank=True, default="")
fbe_pixel_id = models.CharField(max_length=50, blank=True, default="")
# including fb_catalog_id since this is the FB side catalog id as opposed to the local RI id
fb_catalog_id = models.CharField(max_length=50, blank=True, default="")
fb_shop_setup_status = models.CharField(max_length=12, blank=True, default="")
fb_shop_payment_setup_status = models.CharField(
max_length=12, blank=True, default=""
)
fb_shop_review_status = models.CharField(max_length=12, blank=True, default="")
token_info = models.TextField(blank=True, null=True)
token_creation_date = models.DateTimeField(null=True, blank=True)
token_expiration_date = models.DateTimeField(null=True, blank=True)
token_type = models.CharField(
max_length=20,
default=GraphUserTokenType.USER,
choices=GraphUserTokenType.choices,
)
# The following properties generate URLs based on the current FacebookMetadata's other fields
@property
def commerce_manager_url(self):
return self._FB_COMMERCE_MANAGER_URL.format(self.commerce_account_id)
@property
def business_manager_url(self):
return self._FB_BUSINESS_MANAGER_URL.format(self.fbe_business_manager_id)
@property
def fbe_management_view_url(self):
return self._FB_FBE_MANAGEMENT_VIEW_URL.format(
settings.APP_ID, self.fbe_external_business_id
)
@property
def catalog_manager_url(self):
return self._FB_CATALOG_MANAGER_URL.format(self.fb_catalog_id)
def __str__(self):
return "Store: [{}] Commerce Account: [{}]".format(
self.store, self.commerce_account_id
)
|
__all__ = ['CovidDashboard']
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from dash_oop_components import DashFigureFactory, DashComponent, DashComponentTabs, DashApp
import pandas as pd
import plotly.express as px
class CovidPlots(DashFigureFactory):
def __init__(self, datafile="covid.csv", exclude_countries=[]):
super().__init__()
self.df = pd.read_csv(datafile)
if exclude_countries:
self.df = self.df[~self.df.countriesAndTerritories.isin(exclude_countries)]
self.countries = self.df.countriesAndTerritories.unique().tolist()
self.metrics = ['cases', 'deaths']
def plot_time_series(self, countries, metric):
return px.line(
data_frame=self.df[self.df.countriesAndTerritories.isin(countries)],
x='dateRep',
y=metric,
color='countriesAndTerritories',
labels={'countriesAndTerritories':'Countries', 'dateRep':'date'},
)
def plot_pie_chart(self, countries, metric):
return px.pie(
data_frame=self.df[self.df.countriesAndTerritories.isin(countries)],
names='countriesAndTerritories',
values=metric,
hole=.3,
labels={'countriesAndTerritories':'Countries'}
)
class CovidTimeSeries(DashComponent):
def __init__(self, plot_factory,
hide_country_dropdown=False, include_countries=None, countries=None,
hide_metric_dropdown=False, include_metrics=None, metric='cases'):
super().__init__()
if not self.include_countries:
self.include_countries = self.plot_factory.countries
if not self.countries:
self.countries = self.include_countries
if not self.include_metrics:
self.include_metrics = self.plot_factory.metrics
if not self.metric:
self.metric = self.include_metrics[0]
def layout(self):
return dbc.Container([
dbc.Row([
dbc.Col([
html.H3("Covid Time Series"),
self.make_hideable(
dcc.Dropdown(
id='timeseries-metric-dropdown-'+self.name,
options=[{'label': metric, 'value': metric} for metric in self.include_metrics],
value=self.metric,
), hide=self.hide_metric_dropdown),
self.make_hideable(
dcc.Dropdown(
id='timeseries-country-dropdown-'+self.name,
options=[{'label': country, 'value': country} for country in self.include_countries],
value=self.countries,
multi=True,
), hide=self.hide_country_dropdown),
dcc.Graph(id='timeseries-figure-'+self.name)
]),
])
])
def _register_callbacks(self, app):
@app.callback(
Output('timeseries-figure-'+self.name, 'figure'),
Input('timeseries-country-dropdown-'+self.name, 'value'),
Input('timeseries-metric-dropdown-'+self.name, 'value')
)
def update_timeseries_plot(countries, metric):
if countries and metric:
return self.plot_factory.plot_time_series(countries, metric)
raise PreventUpdate
class CovidPieChart(DashComponent):
def __init__(self, plot_factory,
hide_country_dropdown=False, include_countries=None, countries=None,
hide_metric_dropdown=False, include_metrics=None, metric='cases'):
super().__init__()
if not self.include_countries:
self.include_countries = self.plot_factory.countries
if not self.countries:
self.countries = self.include_countries
if not self.include_metrics:
self.include_metrics = self.plot_factory.metrics
if not self.metric:
self.metric = self.include_metrics[0]
def layout(self, params=None):
return dbc.Container([
dbc.Row([
dbc.Col([
html.H3("Covid Pie Chart"),
self.make_hideable(
dcc.Dropdown(
id='piechart-metric-dropdown-'+self.name,
options=[{'label': metric, 'value': metric} for metric in self.include_metrics],
value=self.metric,
), hide=self.hide_metric_dropdown),
self.make_hideable(
dcc.Dropdown(
id='piechart-country-dropdown-'+self.name,
options=[{'label': country, 'value': country} for country in self.include_countries],
value=self.countries,
multi=True
), hide=self.hide_country_dropdown),
dcc.Graph(id='piechart-figure-'+self.name)
]),
])
])
def _register_callbacks(self, app):
@app.callback(
Output('piechart-figure-'+self.name, 'figure'),
Input('piechart-country-dropdown-'+self.name, 'value'),
Input('piechart-metric-dropdown-'+self.name, 'value')
)
def update_piechart_plot(countries, metric):
if countries and metric:
return self.plot_factory.plot_pie_chart(countries, metric)
raise PreventUpdate
class CovidComposite(DashComponent):
def __init__(self, plot_factory, title="Covid Analysis",
hide_country_dropdown=False,
include_countries=None, countries=None,
hide_metric_dropdown=False,
include_metrics=None, metric='cases', name=None):
super().__init__(title=title)
if not self.include_countries:
self.include_countries = self.plot_factory.countries
if not self.countries:
self.countries = self.include_countries
if not self.include_metrics:
self.include_metrics = self.plot_factory.metrics
if not self.metric:
self.metric = self.include_metrics[0]
self.timeseries = CovidTimeSeries(
plot_factory,
hide_country_dropdown=True, countries=self.countries,
hide_metric_dropdown=True, metric=self.metric)
self.piechart = CovidPieChart(
plot_factory,
hide_country_dropdown=True, countries=self.countries,
hide_metric_dropdown=True, metric=self.metric)
def layout(self, params=None):
return dbc.Container([
dbc.Row([
dbc.Col([
html.H1(self.title),
self.make_hideable(
self.querystring(params)(dcc.Dropdown)(
id='dashboard-metric-dropdown-'+self.name,
options=[{'label': metric, 'value': metric} for metric in self.include_metrics],
value=self.metric,
), hide=self.hide_metric_dropdown),
self.make_hideable(
self.querystring(params)(dcc.Dropdown)(
id='dashboard-country-dropdown-'+self.name,
options=[{'label': country, 'value': country} for country in self.include_countries],
value=self.countries,
multi=True,
), hide=self.hide_country_dropdown),
], md=6),
], justify="center"),
dbc.Row([
dbc.Col([
self.timeseries.layout(),
], md=6),
dbc.Col([
self.piechart.layout(),
], md=6)
])
], fluid=True)
def _register_callbacks(self, app):
@app.callback(
Output('timeseries-country-dropdown-'+self.timeseries.name, 'value'),
Output('piechart-country-dropdown-'+self.piechart.name, 'value'),
Input('dashboard-country-dropdown-'+self.name, 'value'),
)
def update_country_dropdowns(countries):
return countries, countries
@app.callback(
Output('timeseries-metric-dropdown-'+self.timeseries.name, 'value'),
Output('piechart-metric-dropdown-'+self.piechart.name, 'value'),
Input('dashboard-metric-dropdown-'+self.name, 'value'),
)
def update_metric_dropdowns(metric):
return metric, metric
class CovidDashboard(DashComponent):
def __init__(self, plot_factory,
europe_countries = ['Italy', 'Spain', 'Germany', 'France',
'United_Kingdom', 'Switzerland', 'Netherlands',
'Belgium', 'Austria', 'Portugal', 'Norway'],
asia_countries = ['China', 'Vietnam', 'Malaysia', 'Philippines',
'Taiwan', 'Myanmar', 'Thailand', 'South_Korea', 'Japan']):
super().__init__(title="Covid Dashboard")
self.europe = CovidComposite(self.plot_factory, "Europe",
include_countries=self.europe_countries, name="eur")
self.asia = CovidComposite(self.plot_factory, "Asia",
include_countries=self.asia_countries, name="asia")
self.cases_only = CovidComposite(self.plot_factory, "Cases Only",
metric='cases', hide_metric_dropdown=True,
countries=['China', 'Italy', 'Brazil'], name="cases")
self.deaths_only = CovidComposite(self.plot_factory, "Deaths Only",
metric='deaths', hide_metric_dropdown=True,
countries=['China', 'Italy', 'Brazil'], name="deaths")
def layout(self, params=None):
return dbc.Container([
dbc.Row([
html.H1("Covid Dashboard"),
]),
dbc.Row([
dbc.Col([
self.querystring(params)(DashComponentTabs)(id="tabs",
tabs=[self.europe, self.asia, self.cases_only, self.deaths_only],
params=params, component=self, single_tab_querystrings=True)
])
])
], fluid=True)
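# ---------------------------------------------------------------------------
# Hedged wiring sketch (not part of the original module): one way the classes
# above are typically assembled with dash_oop_components. "covid.csv" is a
# placeholder path, and the DashApp keyword arguments shown here are
# assumptions about that library's API.
if __name__ == "__main__":
    plot_factory = CovidPlots(datafile="covid.csv")
    dashboard = CovidDashboard(plot_factory)
    app = DashApp(dashboard, querystrings=True, bootstrap=dbc.themes.BOOTSTRAP)
    app.run()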
|
"""
This is a command line application that allows you to scrape twitter!
"""
import csv
import json
import argparse
import collections.abc
import datetime as dt
from os.path import isfile
from twitterscraper.query import query_tweets, query_tweets_from_user
from twitterscraper.ts_logger import logger
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, '__json__'):
return obj.__json__()
elif isinstance(obj, collections.abc.Iterable):
return list(obj)
elif isinstance(obj, dt.datetime):
return obj.isoformat()
elif hasattr(obj, '__getitem__') and hasattr(obj, 'keys'):
return dict(obj)
elif hasattr(obj, '__dict__'):
return {member: getattr(obj, member)
for member in dir(obj)
if not member.startswith('_') and
not hasattr(getattr(obj, member), '__call__')}
return json.JSONEncoder.default(self, obj)
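# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original script): a quick check of the
# fallback rules above. Datetimes become ISO strings and plain objects are
# reduced to their public, non-callable attributes. ``Dummy`` is hypothetical.
def _example_jsonencoder():
    class Dummy:
        def __init__(self):
            self.user = "alice"
            self.timestamp = dt.datetime(2019, 1, 1, 12, 0)

    return json.dumps(Dummy(), cls=JSONEncoder)
    # -> '{"timestamp": "2019-01-01T12:00:00", "user": "alice"}'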
def valid_date(s):
try:
return dt.datetime.strptime(s, "%Y-%m-%d").date()
except ValueError:
msg = "Not a valid date: '{0}'.".format(s)
raise argparse.ArgumentTypeError(msg)
def main():
try:
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
description=__doc__
)
parser.add_argument("query", type=str, help="Advanced twitter query")
parser.add_argument("-o", "--output", type=str, default="tweets.json",
help="Path to a JSON file to store the gathered "
"tweets to.")
parser.add_argument("-l", "--limit", type=int, default=None,
help="Number of minimum tweets to gather.")
parser.add_argument("-a", "--all", action='store_true',
help="Set this flag if you want to get all tweets "
"in the history of twitter. Begindate is set to 2006-03-01."
"This may take a while. You can increase the number of parallel"
"processes depending on the computational power you have.")
parser.add_argument("-c", "--csv", action='store_true',
help="Set this flag if you want to save the results to a CSV format.")
parser.add_argument("-u", "--user", action='store_true',
help="Set this flag to if you want to scrape tweets from a specific user"
"The query should then consist of the profilename you want to scrape without @")
parser.add_argument("--lang", type=str, default=None,
help="Set this flag if you want to query tweets in \na specific language. You can choose from:\n"
"en (English)\nar (Arabic)\nbn (Bengali)\n"
"cs (Czech)\nda (Danish)\nde (German)\nel (Greek)\nes (Spanish)\n"
"fa (Persian)\nfi (Finnish)\nfil (Filipino)\nfr (French)\n"
"he (Hebrew)\nhi (Hindi)\nhu (Hungarian)\n"
"id (Indonesian)\nit (Italian)\nja (Japanese)\n"
"ko (Korean)\nmsa (Malay)\nnl (Dutch)\n"
"no (Norwegian)\npl (Polish)\npt (Portuguese)\n"
"ro (Romanian)\nru (Russian)\nsv (Swedish)\n"
"th (Thai)\ntr (Turkish)\nuk (Ukranian)\n"
"ur (Urdu)\nvi (Vietnamese)\n"
"zh-cn (Chinese Simplified)\n"
"zh-tw (Chinese Traditional)"
)
parser.add_argument("-d", "--dump", action="store_true",
help="Set this flag if you want to dump the tweets \nto the console rather than outputting to a file")
parser.add_argument("-bd", "--begindate", type=valid_date, default="2006-03-21",
help="Scrape for tweets starting from this date. Format YYYY-MM-DD. \nDefault value is 2006-03-21", metavar='\b')
parser.add_argument("-ed", "--enddate", type=valid_date, default=dt.date.today(),
help="Scrape for tweets until this date. Format YYYY-MM-DD. \nDefault value is the date of today.", metavar='\b')
parser.add_argument("-p", "--poolsize", type=int, default=20, help="Specify the number of parallel process you want to run. \n"
"Default value is set to 20. \nYou can change this number if you have more computing power available. \n"
"Set to 1 if you dont want to run any parallel processes.", metavar='\b')
parser.add_argument("--connect-timeout", type=float, default=-1.0, dest="ctimeout",
help="Maximum time (in seconds) to allow for establishing connections.")
parser.add_argument("--read-timeout", type=float, default=-1.0, dest="rtimeout",
help="Maximum time (in seconds) to allow for each request.")
args = parser.parse_args()
if isfile(args.output) and not args.dump:
logger.error("Output file already exists! Aborting.")
exit(-1)
if args.all:
args.begindate = dt.date(2006,3,1)
timeout = (
args.ctimeout if args.ctimeout != -1.0 else None,
args.rtimeout if args.rtimeout != -1.0 else None
)
if args.user:
tweets = query_tweets_from_user(user = args.query, limit = args.limit)
else:
tweets = query_tweets(
query=args.query, limit=args.limit, begindate=args.begindate,
enddate=args.enddate, poolsize=args.poolsize, lang=args.lang,
timeout=timeout
)
if args.dump:
for tweet in tweets:
print(json.dumps(tweet, cls=JSONEncoder))
else:
if tweets:
with open(args.output, "w", encoding="utf-8") as output:
if args.csv:
f = csv.writer(output)
f.writerow(["user", "fullname", "tweet-id", "timestamp", "url", "likes", "replies", "retweets", "text", "html"])
for x in tweets:
f.writerow([x.user, x.fullname, x.id, x.timestamp, x.url,
x.likes, x.replies, x.retweets,
x.text, x.html])
else:
json.dump(tweets, output, cls=JSONEncoder)
except KeyboardInterrupt:
logger.info("Program interrupted by user. Quitting...")
|
from math import log
import numpy as np
import functools
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.utils import MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT, \
SMALL_NUMBER, try_import_tree
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils.framework import try_import_tf, try_import_tfp
from ray.rllib.utils.spaces.space_utils import get_base_struct_from_space
tf = try_import_tf()
tfp = try_import_tfp()
tree = try_import_tree()
@DeveloperAPI
class TFActionDistribution(ActionDistribution):
"""TF-specific extensions for building action distributions."""
@DeveloperAPI
def __init__(self, inputs, model):
super().__init__(inputs, model)
self.sample_op = self._build_sample_op()
self.sampled_action_logp_op = self.logp(self.sample_op)
@DeveloperAPI
def _build_sample_op(self):
"""Implement this instead of sample(), to enable op reuse.
This is needed since the sample op is non-deterministic and is shared
between sample() and sampled_action_logp().
"""
raise NotImplementedError
@override(ActionDistribution)
def sample(self):
"""Draw a sample from the action distribution."""
return self.sample_op
@override(ActionDistribution)
def sampled_action_logp(self):
"""Returns the log probability of the sampled action."""
return self.sampled_action_logp_op
class Categorical(TFActionDistribution):
"""Categorical distribution for discrete action spaces."""
@DeveloperAPI
def __init__(self, inputs, model=None, temperature=1.0):
assert temperature > 0.0, "Categorical `temperature` must be > 0.0!"
# Allow softmax formula w/ temperature != 1.0:
# Divide inputs by temperature.
super().__init__(inputs / temperature, model)
@override(ActionDistribution)
def deterministic_sample(self):
return tf.math.argmax(self.inputs, axis=1)
@override(ActionDistribution)
def logp(self, x):
return -tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.inputs, labels=tf.cast(x, tf.int32))
@override(ActionDistribution)
def entropy(self):
a0 = self.inputs - tf.reduce_max(self.inputs, axis=1, keepdims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, axis=1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (tf.math.log(z0) - a0), axis=1)
@override(ActionDistribution)
def kl(self, other):
a0 = self.inputs - tf.reduce_max(self.inputs, axis=1, keepdims=True)
a1 = other.inputs - tf.reduce_max(other.inputs, axis=1, keepdims=True)
ea0 = tf.exp(a0)
ea1 = tf.exp(a1)
z0 = tf.reduce_sum(ea0, axis=1, keepdims=True)
z1 = tf.reduce_sum(ea1, axis=1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(
p0 * (a0 - tf.math.log(z0) - a1 + tf.math.log(z1)), axis=1)
@override(TFActionDistribution)
def _build_sample_op(self):
return tf.squeeze(tf.multinomial(self.inputs, 1), axis=1)
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return action_space.n
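# ---------------------------------------------------------------------------
# Hedged numerical check (not part of RLlib): entropy() above computes
# sum(p0 * (log z0 - a0)) for numerical stability. The plain-numpy sketch
# below verifies that this equals the textbook -sum(p * log p) for one row.
def _example_categorical_entropy_check():
    logits = np.array([2.0, 0.5, -1.0])
    a0 = logits - logits.max()
    ea0 = np.exp(a0)
    z0 = ea0.sum()
    p0 = ea0 / z0
    stable = np.sum(p0 * (np.log(z0) - a0))
    naive = -np.sum(p0 * np.log(p0))
    assert np.isclose(stable, naive)
    return stable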
class MultiCategorical(TFActionDistribution):
"""MultiCategorical distribution for MultiDiscrete action spaces."""
def __init__(self, inputs, model, input_lens):
# skip TFActionDistribution init
ActionDistribution.__init__(self, inputs, model)
self.cats = [
Categorical(input_, model)
for input_ in tf.split(inputs, input_lens, axis=1)
]
self.sample_op = self._build_sample_op()
self.sampled_action_logp_op = self.logp(self.sample_op)
@override(ActionDistribution)
def deterministic_sample(self):
return tf.stack(
[cat.deterministic_sample() for cat in self.cats], axis=1)
@override(ActionDistribution)
def logp(self, actions):
# If tensor is provided, unstack it into list.
if isinstance(actions, tf.Tensor):
actions = tf.unstack(tf.cast(actions, tf.int32), axis=1)
logps = tf.stack(
[cat.logp(act) for cat, act in zip(self.cats, actions)])
return tf.reduce_sum(logps, axis=0)
@override(ActionDistribution)
def multi_entropy(self):
return tf.stack([cat.entropy() for cat in self.cats], axis=1)
@override(ActionDistribution)
def entropy(self):
return tf.reduce_sum(self.multi_entropy(), axis=1)
@override(ActionDistribution)
def multi_kl(self, other):
return tf.stack(
[cat.kl(oth_cat) for cat, oth_cat in zip(self.cats, other.cats)],
axis=1)
@override(ActionDistribution)
def kl(self, other):
return tf.reduce_sum(self.multi_kl(other), axis=1)
@override(TFActionDistribution)
def _build_sample_op(self):
return tf.stack([cat.sample() for cat in self.cats], axis=1)
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return np.sum(action_space.nvec)
class GumbelSoftmax(TFActionDistribution):
"""GumbelSoftmax distr. (for differentiable sampling in discr. actions
The Gumbel Softmax distribution [1] (also known as the Concrete [2]
distribution) is a close cousin of the relaxed one-hot categorical
distribution, whose tfp implementation we will use here plus
adjusted `sample_...` and `log_prob` methods. See discussion at [0].
[0] https://stackoverflow.com/questions/56226133/
soft-actor-critic-with-discrete-action-space
[1] Categorical Reparametrization with Gumbel-Softmax (Jang et al, 2017):
https://arxiv.org/abs/1611.01144
[2] The Concrete Distribution: A Continuous Relaxation of Discrete Random
Variables (Maddison et al, 2017) https://arxiv.org/abs/1611.00712
"""
@DeveloperAPI
def __init__(self, inputs, model=None, temperature=1.0):
"""Initializes a GumbelSoftmax distribution.
Args:
temperature (float): Temperature parameter. For low temperatures,
the expected value approaches a categorical random variable.
For high temperatures, the expected value approaches a uniform
distribution.
"""
assert temperature >= 0.0
self.dist = tfp.distributions.RelaxedOneHotCategorical(
temperature=temperature, logits=inputs)
super().__init__(inputs, model)
@override(ActionDistribution)
def deterministic_sample(self):
# Return the dist object's prob values.
return self.dist._distribution.probs
@override(ActionDistribution)
def logp(self, x):
# Override since the implementation of tfp.RelaxedOneHotCategorical
# yields positive values.
if x.shape != self.dist.logits.shape:
values = tf.one_hot(
x, self.dist.logits.shape.as_list()[-1], dtype=tf.float32)
assert values.shape == self.dist.logits.shape, (
values.shape, self.dist.logits.shape)
# [0]'s implementation (see line below) seems to be an approximation
# to the actual Gumbel Softmax density.
return -tf.reduce_sum(
-x * tf.nn.log_softmax(self.dist.logits, axis=-1), axis=-1)
@override(TFActionDistribution)
def _build_sample_op(self):
return self.dist.sample()
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return action_space.n
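# ---------------------------------------------------------------------------
# Hedged illustration (not part of RLlib): a plain-numpy version of the
# reparameterization the tfp distribution above implements. Logits are
# perturbed with Gumbel noise, divided by the temperature and softmaxed;
# lower temperatures push the sample towards a one-hot vector.
def _example_gumbel_softmax_sample(logits, temperature=1.0, rng=np.random):
    gumbel = -np.log(-np.log(rng.uniform(1e-20, 1.0, size=np.shape(logits))))
    y = (np.asarray(logits) + gumbel) / temperature
    y = y - y.max()
    e = np.exp(y)
    return e / e.sum()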
class DiagGaussian(TFActionDistribution):
"""Action distribution where each vector element is a gaussian.
The first half of the input vector defines the gaussian means, and the
second half the gaussian standard deviations.
"""
def __init__(self, inputs, model):
mean, log_std = tf.split(inputs, 2, axis=1)
self.mean = mean
self.log_std = log_std
self.std = tf.exp(log_std)
super().__init__(inputs, model)
@override(ActionDistribution)
def deterministic_sample(self):
return self.mean
@override(ActionDistribution)
def logp(self, x):
return -0.5 * tf.reduce_sum(
tf.math.square((tf.cast(x, tf.float32) - self.mean) / self.std),
axis=1
) - 0.5 * np.log(2.0 * np.pi) * tf.cast(tf.shape(x)[1], tf.float32) - \
tf.reduce_sum(self.log_std, axis=1)
@override(ActionDistribution)
def kl(self, other):
assert isinstance(other, DiagGaussian)
return tf.reduce_sum(
other.log_std - self.log_std +
(tf.math.square(self.std) +
tf.math.square(self.mean - other.mean)) /
(2.0 * tf.math.square(other.std)) - 0.5,
axis=1)
@override(ActionDistribution)
def entropy(self):
return tf.reduce_sum(
self.log_std + .5 * np.log(2.0 * np.pi * np.e), axis=1)
@override(TFActionDistribution)
def _build_sample_op(self):
return self.mean + self.std * tf.random.normal(tf.shape(self.mean))
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return np.prod(action_space.shape) * 2
class SquashedGaussian(TFActionDistribution):
"""A tanh-squashed Gaussian distribution defined by: mean, std, low, high.
The distribution will never return low or high exactly, but
`low`+SMALL_NUMBER or `high`-SMALL_NUMBER respectively.
"""
def __init__(self, inputs, model, low=-1.0, high=1.0):
"""Parameterizes the distribution via `inputs`.
Args:
low (float): The lowest possible sampling value
(excluding this value).
high (float): The highest possible sampling value
(excluding this value).
"""
assert tfp is not None
mean, log_std = tf.split(inputs, 2, axis=-1)
# Clip `scale` values (coming from NN) to reasonable values.
log_std = tf.clip_by_value(log_std, MIN_LOG_NN_OUTPUT,
MAX_LOG_NN_OUTPUT)
std = tf.exp(log_std)
self.distr = tfp.distributions.Normal(loc=mean, scale=std)
assert np.all(np.less(low, high))
self.low = low
self.high = high
super().__init__(inputs, model)
@override(ActionDistribution)
def deterministic_sample(self):
mean = self.distr.mean()
return self._squash(mean)
@override(TFActionDistribution)
def _build_sample_op(self):
return self._squash(self.distr.sample())
@override(ActionDistribution)
def logp(self, x):
# Unsquash values (from [low,high] to ]-inf,inf[)
unsquashed_values = self._unsquash(x)
# Get log prob of unsquashed values from our Normal.
log_prob_gaussian = self.distr.log_prob(unsquashed_values)
# For numerical safety, clamp the Gaussian log-probs before summing.
log_prob_gaussian = tf.clip_by_value(log_prob_gaussian, -100, 100)
log_prob_gaussian = tf.reduce_sum(log_prob_gaussian, axis=-1)
# Get log-prob for squashed Gaussian.
unsquashed_values_tanhd = tf.math.tanh(unsquashed_values)
log_prob = log_prob_gaussian - tf.reduce_sum(
tf.math.log(1 - unsquashed_values_tanhd**2 + SMALL_NUMBER),
axis=-1)
return log_prob
def _squash(self, raw_values):
# Returned values are within [low, high] (including `low` and `high`).
squashed = ((tf.math.tanh(raw_values) + 1.0) / 2.0) * \
(self.high - self.low) + self.low
return tf.clip_by_value(squashed, self.low, self.high)
def _unsquash(self, values):
normed_values = (values - self.low) / (self.high - self.low) * 2.0 - \
1.0
# Stabilize input to atanh.
save_normed_values = tf.clip_by_value(
normed_values, -1.0 + SMALL_NUMBER, 1.0 - SMALL_NUMBER)
unsquashed = tf.math.atanh(save_normed_values)
return unsquashed
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return np.prod(action_space.shape) * 2
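# ---------------------------------------------------------------------------
# Hedged numerical check (not part of RLlib): a numpy mirror of _squash and
# _unsquash above, confirming they are inverses inside the (low, high) range.
def _example_squash_roundtrip(low=-2.0, high=3.0):
    raw = np.linspace(-4.0, 4.0, 9)
    squashed = (np.tanh(raw) + 1.0) / 2.0 * (high - low) + low
    normed = (squashed - low) / (high - low) * 2.0 - 1.0
    recovered = np.arctanh(np.clip(normed, -1.0 + SMALL_NUMBER, 1.0 - SMALL_NUMBER))
    assert np.allclose(recovered, raw, atol=1e-3)
    return squashed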
class Beta(TFActionDistribution):
"""
A Beta distribution is defined on the interval [0, 1] and parameterized by
shape parameters alpha and beta (also called concentration parameters).
PDF(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z
with Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta)
and Gamma(n) = (n - 1)!
"""
def __init__(self, inputs, model, low=0.0, high=1.0):
# Stabilize input parameters (possibly coming from a linear layer).
inputs = tf.clip_by_value(inputs, log(SMALL_NUMBER),
-log(SMALL_NUMBER))
inputs = tf.math.log(tf.math.exp(inputs) + 1.0) + 1.0
self.low = low
self.high = high
alpha, beta = tf.split(inputs, 2, axis=-1)
# Note: concentration0==beta, concentration1=alpha (!)
self.dist = tfp.distributions.Beta(
concentration1=alpha, concentration0=beta)
super().__init__(inputs, model)
@override(ActionDistribution)
def deterministic_sample(self):
mean = self.dist.mean()
return self._squash(mean)
@override(TFActionDistribution)
def _build_sample_op(self):
return self._squash(self.dist.sample())
@override(ActionDistribution)
def logp(self, x):
unsquashed_values = self._unsquash(x)
return tf.math.reduce_sum(
self.dist.log_prob(unsquashed_values), axis=-1)
def _squash(self, raw_values):
return raw_values * (self.high - self.low) + self.low
def _unsquash(self, values):
return (values - self.low) / (self.high - self.low)
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return np.prod(action_space.shape) * 2
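# ---------------------------------------------------------------------------
# Hedged numerical check (not part of RLlib): spot-check of the PDF formula
# quoted in the Beta docstring using only the standard library's gamma.
def _example_beta_pdf(x=0.3, alpha=2.0, beta=5.0):
    from math import gamma

    z = gamma(alpha) * gamma(beta) / gamma(alpha + beta)
    return x ** (alpha - 1) * (1 - x) ** (beta - 1) / z
# _example_beta_pdf(0.3, 2.0, 5.0) is roughly 2.16.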
class Deterministic(TFActionDistribution):
"""Action distribution that returns the input values directly.
This is similar to DiagGaussian with standard deviation zero (thus only
requiring the "mean" values as NN output).
"""
@override(ActionDistribution)
def deterministic_sample(self):
return self.inputs
@override(TFActionDistribution)
def logp(self, x):
return tf.zeros_like(self.inputs)
@override(TFActionDistribution)
def _build_sample_op(self):
return self.inputs
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return np.prod(action_space.shape)
class MultiActionDistribution(TFActionDistribution):
"""Action distribution that operates on a set of actions.
Args:
inputs (Tensor list): A list of tensors from which to compute samples.
"""
def __init__(self, inputs, model, *, child_distributions, input_lens,
action_space):
ActionDistribution.__init__(self, inputs, model)
self.action_space_struct = get_base_struct_from_space(action_space)
input_lens = np.array(input_lens, dtype=np.int32)
split_inputs = tf.split(inputs, input_lens, axis=1)
self.flat_child_distributions = tree.map_structure(
lambda dist, input_: dist(input_, model), child_distributions,
split_inputs)
@override(ActionDistribution)
def logp(self, x):
# Single tensor input (all merged).
if isinstance(x, (tf.Tensor, np.ndarray)):
split_indices = []
for dist in self.flat_child_distributions:
if isinstance(dist, Categorical):
split_indices.append(1)
else:
split_indices.append(tf.shape(dist.sample())[1])
split_x = tf.split(x, split_indices, axis=1)
# Structured or flattened (by single action component) input.
else:
split_x = tree.flatten(x)
def map_(val, dist):
# Remove extra categorical dimension.
if isinstance(dist, Categorical):
val = tf.cast(tf.squeeze(val, axis=-1), tf.int32)
return dist.logp(val)
# Remove extra categorical dimension and take the logp of each
# component.
flat_logps = tree.map_structure(map_, split_x,
self.flat_child_distributions)
return functools.reduce(lambda a, b: a + b, flat_logps)
@override(ActionDistribution)
def kl(self, other):
kl_list = [
d.kl(o) for d, o in zip(self.flat_child_distributions,
other.flat_child_distributions)
]
return functools.reduce(lambda a, b: a + b, kl_list)
@override(ActionDistribution)
def entropy(self):
entropy_list = [d.entropy() for d in self.flat_child_distributions]
return functools.reduce(lambda a, b: a + b, entropy_list)
@override(ActionDistribution)
def sample(self):
child_distributions = tree.unflatten_as(self.action_space_struct,
self.flat_child_distributions)
return tree.map_structure(lambda s: s.sample(), child_distributions)
@override(ActionDistribution)
def deterministic_sample(self):
child_distributions = tree.unflatten_as(self.action_space_struct,
self.flat_child_distributions)
return tree.map_structure(lambda s: s.deterministic_sample(),
child_distributions)
@override(TFActionDistribution)
def sampled_action_logp(self):
p = self.flat_child_distributions[0].sampled_action_logp()
for c in self.flat_child_distributions[1:]:
p += c.sampled_action_logp()
return p
class Dirichlet(TFActionDistribution):
"""Dirichlet distribution for continuous actions that are between
[0,1] and sum to 1.
e.g. actions that represent resource allocation."""
def __init__(self, inputs, model):
"""Input is a tensor of logits. The exponential of logits is used to
parametrize the Dirichlet distribution as all parameters need to be
positive. A small epsilon is added to the concentration parameters so
that none of them become zero due to numerical error.
See issue #4440 for more details.
"""
self.epsilon = 1e-7
concentration = tf.exp(inputs) + self.epsilon
self.dist = tf.distributions.Dirichlet(
concentration=concentration,
validate_args=True,
allow_nan_stats=False,
)
super().__init__(concentration, model)
@override(ActionDistribution)
def logp(self, x):
# Support of Dirichlet are positive real numbers. x is already
# an array of positive numbers, but we clip to avoid zeros due to
# numerical errors.
x = tf.maximum(x, self.epsilon)
x = x / tf.reduce_sum(x, axis=-1, keepdims=True)
return self.dist.log_prob(x)
@override(ActionDistribution)
def entropy(self):
return self.dist.entropy()
@override(ActionDistribution)
def kl(self, other):
return self.dist.kl_divergence(other.dist)
@override(TFActionDistribution)
def _build_sample_op(self):
return self.dist.sample()
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(action_space, model_config):
return np.prod(action_space.shape)
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.16.14
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kubernetes_asyncio.client.api_client import ApiClient
from kubernetes_asyncio.client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class DiscoveryV1alpha1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_endpoint_slice(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_endpoint_slice # noqa: E501
create an EndpointSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_endpoint_slice(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1alpha1EndpointSlice body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1EndpointSlice
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_namespaced_endpoint_slice_with_http_info(namespace, body, **kwargs) # noqa: E501
def create_namespaced_endpoint_slice_with_http_info(self, namespace, body, **kwargs): # noqa: E501
"""create_namespaced_endpoint_slice # noqa: E501
create an EndpointSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_endpoint_slice_with_http_info(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1alpha1EndpointSlice body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1EndpointSlice, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_endpoint_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `create_namespaced_endpoint_slice`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_namespaced_endpoint_slice`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/discovery.k8s.io/v1alpha1/namespaces/{namespace}/endpointslices', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1EndpointSlice', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
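# Illustrative usage sketch (not generated code): creating an EndpointSlice.
# The namespace and field manager values below are hypothetical, and the
# construction of the V1alpha1EndpointSlice body is omitted.
#
#   slice_body = ...  # a V1alpha1EndpointSlice built elsewhere
#   created = await api.create_namespaced_endpoint_slice(
#       namespace="default",
#       body=slice_body,
#       field_manager="example-controller",
#   )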
def delete_collection_namespaced_endpoint_slice(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_endpoint_slice # noqa: E501
delete collection of EndpointSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_endpoint_slice(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_namespaced_endpoint_slice_with_http_info(namespace, **kwargs) # noqa: E501
def delete_collection_namespaced_endpoint_slice_with_http_info(self, namespace, **kwargs): # noqa: E501
"""delete_collection_namespaced_endpoint_slice # noqa: E501
delete collection of EndpointSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_endpoint_slice_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_endpoint_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_endpoint_slice`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/discovery.k8s.io/v1alpha1/namespaces/{namespace}/endpointslices', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
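# Illustrative usage sketch (not generated code): deleting every EndpointSlice
# in a namespace that matches a label selector. The selector value is hypothetical.
#
#   status = await api.delete_collection_namespaced_endpoint_slice(
#       namespace="default",
#       label_selector="app=example",
#   )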
def delete_namespaced_endpoint_slice(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_endpoint_slice # noqa: E501
delete an EndpointSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_endpoint_slice(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the EndpointSlice (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_namespaced_endpoint_slice_with_http_info(name, namespace, **kwargs) # noqa: E501
def delete_namespaced_endpoint_slice_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_endpoint_slice # noqa: E501
delete an EndpointSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_endpoint_slice_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the EndpointSlice (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'dry_run',
'grace_period_seconds',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_endpoint_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_namespaced_endpoint_slice`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `delete_namespaced_endpoint_slice`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/discovery.k8s.io/v1alpha1/namespaces/{namespace}/endpointslices/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
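# Illustrative usage sketch (not generated code): deleting a single EndpointSlice
# by name with foreground cascading deletion. The object name is hypothetical.
#
#   status = await api.delete_namespaced_endpoint_slice(
#       name="example-slice",
#       namespace="default",
#       propagation_policy="Foreground",
#   )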
def get_api_resources(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/discovery.k8s.io/v1alpha1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
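# Illustrative usage sketch (not generated code): discovering which resources
# the discovery.k8s.io/v1alpha1 group serves on this cluster.
#
#   resources = await api.get_api_resources()
#   for resource in resources.resources:
#       print(resource.name, resource.namespaced)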
def list_endpoint_slice_for_all_namespaces(self, **kwargs): # noqa: E501
"""list_endpoint_slice_for_all_namespaces # noqa: E501
list or watch objects of kind EndpointSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_endpoint_slice_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored. This field is beta.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1EndpointSliceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_endpoint_slice_for_all_namespaces_with_http_info(**kwargs) # noqa: E501
def list_endpoint_slice_for_all_namespaces_with_http_info(self, **kwargs): # noqa: E501
"""list_endpoint_slice_for_all_namespaces # noqa: E501
list or watch objects of kind EndpointSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_endpoint_slice_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored. This field is beta.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1EndpointSliceList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'pretty',
'resource_version',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_endpoint_slice_for_all_namespaces" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/discovery.k8s.io/v1alpha1/endpointslices', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1EndpointSliceList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
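# Illustrative usage sketch (not generated code): listing EndpointSlices across
# all namespaces in pages of 50, following the `continue` token as described in
# the docstring above. The variable names are hypothetical.
#
#   token = None
#   while True:
#       page = await api.list_endpoint_slice_for_all_namespaces(limit=50, _continue=token)
#       for item in page.items:
#           print(item.metadata.namespace, item.metadata.name)
#       token = page.metadata._continue
#       if not token:
#           break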
def list_namespaced_endpoint_slice(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_endpoint_slice # noqa: E501
list or watch objects of kind EndpointSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_endpoint_slice(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored. This field is beta.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1EndpointSliceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_namespaced_endpoint_slice_with_http_info(namespace, **kwargs) # noqa: E501
def list_namespaced_endpoint_slice_with_http_info(self, namespace, **kwargs): # noqa: E501
"""list_namespaced_endpoint_slice # noqa: E501
list or watch objects of kind EndpointSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_endpoint_slice_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored. This field is beta.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1EndpointSliceList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'namespace',
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_endpoint_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `list_namespaced_endpoint_slice`") # noqa: E501
collection_formats = {}
path_params = {}
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/discovery.k8s.io/v1alpha1/namespaces/{namespace}/endpointslices', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1EndpointSliceList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
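# Usage sketch: the `limit`/`_continue` parameters documented above implement
# chunked listing. Assuming this class is exposed as
# kubernetes.client.DiscoveryV1alpha1Api and an ApiClient is configured, a
# paginated list might look like:
#
#   api = kubernetes.client.DiscoveryV1alpha1Api()
#   page = api.list_namespaced_endpoint_slice("default", limit=50)
#   items = list(page.items)
#   while page.metadata._continue:
#       page = api.list_namespaced_endpoint_slice(
#           "default", limit=50, _continue=page.metadata._continue)
#       items.extend(page.items)
#
# A watch can then be started from the returned resourceVersion, e.g. with
# kubernetes.watch.Watch().stream(api.list_namespaced_endpoint_slice, "default",
# resource_version=page.metadata.resource_version).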
def patch_namespaced_endpoint_slice(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_endpoint_slice # noqa: E501
partially update the specified EndpointSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_endpoint_slice(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the EndpointSlice (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than 128 characters long and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1EndpointSlice
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_namespaced_endpoint_slice_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def patch_namespaced_endpoint_slice_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""patch_namespaced_endpoint_slice # noqa: E501
partially update the specified EndpointSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_endpoint_slice_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the EndpointSlice (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than 128 characters long and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
:param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
:param _return_http_data_only: return the response data only, without
the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1EndpointSlice, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_endpoint_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_namespaced_endpoint_slice`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `patch_namespaced_endpoint_slice`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_namespaced_endpoint_slice`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/discovery.k8s.io/v1alpha1/namespaces/{namespace}/endpointslices/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1EndpointSlice', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
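# Usage sketch: the Content-Type list above means the patch body may be a JSON
# Patch operation list, a (strategic) merge patch dict, or a server-side apply
# manifest. With the default header selection shown above (the first entry,
# application/json-patch+json), the body is a list of operations, e.g.:
#
#   body = [{"op": "add", "path": "/metadata/labels/env", "value": "dev"}]
#   api.patch_namespaced_endpoint_slice("my-slice", "default", body)
#
# (Illustrative names; a merge-patch dict such as
#  {"metadata": {"labels": {"env": "dev"}}} assumes the corresponding
#  merge-patch content type is selected instead.)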
def read_namespaced_endpoint_slice(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_endpoint_slice # noqa: E501
read the specified EndpointSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_endpoint_slice(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the EndpointSlice (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1EndpointSlice
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_namespaced_endpoint_slice_with_http_info(name, namespace, **kwargs) # noqa: E501
def read_namespaced_endpoint_slice_with_http_info(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_endpoint_slice # noqa: E501
read the specified EndpointSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_endpoint_slice_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the EndpointSlice (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
:param bool export: Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
:param _return_http_data_only: return the response data only, without
the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1EndpointSlice, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'pretty',
'exact',
'export'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_endpoint_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_namespaced_endpoint_slice`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `read_namespaced_endpoint_slice`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'exact' in local_var_params and local_var_params['exact'] is not None: # noqa: E501
query_params.append(('exact', local_var_params['exact'])) # noqa: E501
if 'export' in local_var_params and local_var_params['export'] is not None: # noqa: E501
query_params.append(('export', local_var_params['export'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/discovery.k8s.io/v1alpha1/namespaces/{namespace}/endpointslices/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1EndpointSlice', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_endpoint_slice(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_endpoint_slice # noqa: E501
replace the specified EndpointSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_endpoint_slice(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the EndpointSlice (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1alpha1EndpointSlice body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than 128 characters long and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1alpha1EndpointSlice
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_namespaced_endpoint_slice_with_http_info(name, namespace, body, **kwargs) # noqa: E501
def replace_namespaced_endpoint_slice_with_http_info(self, name, namespace, body, **kwargs): # noqa: E501
"""replace_namespaced_endpoint_slice # noqa: E501
replace the specified EndpointSlice # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_endpoint_slice_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the EndpointSlice (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1alpha1EndpointSlice body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than 128 characters long and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: return the response data only, without
the status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1alpha1EndpointSlice, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'namespace',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_endpoint_slice" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_namespaced_endpoint_slice`") # noqa: E501
# verify the required parameter 'namespace' is set
if self.api_client.client_side_validation and ('namespace' not in local_var_params or # noqa: E501
local_var_params['namespace'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `namespace` when calling `replace_namespaced_endpoint_slice`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_namespaced_endpoint_slice`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
if 'namespace' in local_var_params:
path_params['namespace'] = local_var_params['namespace'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/discovery.k8s.io/v1alpha1/namespaces/{namespace}/endpointslices/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1alpha1EndpointSlice', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
|
import random
import numpy as np
import numpy.random
from PIL import Image
from faker import Faker
from CNNScan.Ballot import BallotDefinitions, MarkedBallots, Positions
import CNNScan.Mark.Marks
# Create a single, fixed fake race with 4 candidates.
def create_fake_contest(pagenum, contest_index=0, min_candidate=1, max_candidates=8, min_xy_per_candidate=(18,16), max_xy_per_candidate=(64,16)):
min_x, min_y = min_xy_per_candidate
max_x, max_y = max_xy_per_candidate
candidate_number = random.randint(min_candidate, max_candidates)
fake = Faker()
name = fake.catch_phrase()
description = fake.text()
options = []
x_size = random.randint(min_x, max_x)
y_rolling = 0
sizes = [random.randint(min_y, max_y) for y in range(candidate_number)]
max_y_size = np.sum(sizes)
for i in range(candidate_number):
y_size = sizes[i]
rel_bound = Positions.to_percent_pos(0, y_rolling/max_y_size, 1, (y_rolling+y_size)/max_y_size, pagenum)
abs_bound = Positions.to_pixel_pos(0, y_rolling, max_x, y_rolling+y_size, pagenum)
new_option = BallotDefinitions.Option(i, fake.name(), rel_bounding_rect=(rel_bound))
new_option.abs_bounding_rect = abs_bound
options.append(new_option)
y_rolling += y_size
print(f"{candidate_number} candidates, with a ballot that is {x_size}x{y_rolling}")
abs_bound = Positions.to_pixel_pos(0,0, x_size, y_rolling, pagenum)
contest = BallotDefinitions.Contest(contest_index, name=name, description=description,
options=options, rel_bounding_rect=Positions.to_percent_pos(0,0,1,1,pagenum))
contest.abs_bounding_rect = abs_bound
return contest
def create_fake_ballot(factory, min_contests=2, max_contests=8)->BallotDefinitions.Ballot:
contests = random.randint(min_contests, max_contests)
contests_list = []
for i in range(0, contests):
current = create_fake_contest(i,contest_index=i)
contests_list.append(current)
ballot = factory.Ballot(contests_list)
return ballot
# Create random noise with a dimensions matching that of the ballot.
def create_fake_contest_image(contest):
# Pictures are stored in column major order, but numpy arrays are stored in row major order.
# Must transpose for both kinds of images to compute correctly.
# See:
# https://stackoverflow.com/questions/19016144/conversion-between-pillow-image-object-and-numpy-array-changes-dimension
# Additionally, PNGs contain data in [0, 255], so we must create an integer distribution to approximate this.
shape = (contest.abs_bounding_rect.lower_right.y, contest.abs_bounding_rect.lower_right.x)
# Image.fromarray with mode='RGBA' expects a uint8 (H, W, 4) array.
r_data = numpy.random.randint(0, 255, shape, dtype=numpy.uint8) # Test data
alpha = numpy.full(shape, 255, dtype=numpy.uint8) # Fully opaque alpha channel
r_data = numpy.stack((r_data, r_data, r_data, alpha), axis=2)
return Image.fromarray(r_data, mode='RGBA')
#return r_data
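# Quick check of the axis-order note above: PIL image sizes are (width, height),
# while the corresponding numpy array shape is (rows, cols) = (height, width).
#
#   img = Image.new('RGBA', (4, 3))      # width=4, height=3
#   numpy.asarray(img).shape             # -> (3, 4, 4)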
|
"""
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, which characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
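# Quick check of the claim above: cosine distance is invariant to a positive
# rescaling, so proportional waveforms are indistinguishable under this metric.
from sklearn.metrics.pairwise import cosine_distances
_x = np.linspace(0.0, 1.0, 5).reshape(1, -1)
assert np.allclose(cosine_distances(_x, 3 * _x), 0.0)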
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = []
y = []
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
|
from enum import Enum
import tensorflow as tf
import cv2
regularizer_conv = 0.004
regularizer_dsconv = 0.0004
batchnorm_fused = True
activation_fn = tf.nn.relu
class CocoPart(Enum):
Nose = 0
Neck = 1
RShoulder = 2
RElbow = 3
RWrist = 4
LShoulder = 5
LElbow = 6
LWrist = 7
RHip = 8
RKnee = 9
RAnkle = 10
LHip = 11
LKnee = 12
LAnkle = 13
REye = 14
LEye = 15
REar = 16
LEar = 17
Background = 18
class MPIIPart(Enum):
RAnkle = 0
RKnee = 1
RHip = 2
LHip = 3
LKnee = 4
LAnkle = 5
RWrist = 6
RElbow = 7
RShoulder = 8
LShoulder = 9
LElbow = 10
LWrist = 11
Neck = 12
Head = 13
@staticmethod
def from_coco(human):
# t = {
# MPIIPart.RAnkle: CocoPart.RAnkle,
# MPIIPart.RKnee: CocoPart.RKnee,
# MPIIPart.RHip: CocoPart.RHip,
# MPIIPart.LHip: CocoPart.LHip,
# MPIIPart.LKnee: CocoPart.LKnee,
# MPIIPart.LAnkle: CocoPart.LAnkle,
# MPIIPart.RWrist: CocoPart.RWrist,
# MPIIPart.RElbow: CocoPart.RElbow,
# MPIIPart.RShoulder: CocoPart.RShoulder,
# MPIIPart.LShoulder: CocoPart.LShoulder,
# MPIIPart.LElbow: CocoPart.LElbow,
# MPIIPart.LWrist: CocoPart.LWrist,
# MPIIPart.Neck: CocoPart.Neck,
# MPIIPart.Nose: CocoPart.Nose,
# }
t = [
(MPIIPart.Head, CocoPart.Nose),
(MPIIPart.Neck, CocoPart.Neck),
(MPIIPart.RShoulder, CocoPart.RShoulder),
(MPIIPart.RElbow, CocoPart.RElbow),
(MPIIPart.RWrist, CocoPart.RWrist),
(MPIIPart.LShoulder, CocoPart.LShoulder),
(MPIIPart.LElbow, CocoPart.LElbow),
(MPIIPart.LWrist, CocoPart.LWrist),
(MPIIPart.RHip, CocoPart.RHip),
(MPIIPart.RKnee, CocoPart.RKnee),
(MPIIPart.RAnkle, CocoPart.RAnkle),
(MPIIPart.LHip, CocoPart.LHip),
(MPIIPart.LKnee, CocoPart.LKnee),
(MPIIPart.LAnkle, CocoPart.LAnkle),
]
pose_2d_mpii = []
visibility = []
for mpi, coco in t:
if coco.value not in human.body_parts.keys():
pose_2d_mpii.append((0, 0))
visibility.append(False)
continue
pose_2d_mpii.append((human.body_parts[coco.value].x, human.body_parts[coco.value].y))
visibility.append(True)
return pose_2d_mpii, visibility
CocoPairs = [
(1, 2), (1, 5), (2, 3), (3, 4), (5, 6), (6, 7), (1, 8), (8, 9), (9, 10), (1, 11),
(11, 12), (12, 13), (1, 0), (0, 14), (14, 16), (0, 15), (15, 17), (2, 16), (5, 17)
] # = 19
CocoPairsRender = CocoPairs[:-2]
# CocoPairsNetwork = [
# (12, 13), (20, 21), (14, 15), (16, 17), (22, 23), (24, 25), (0, 1), (2, 3), (4, 5),
# (6, 7), (8, 9), (10, 11), (28, 29), (30, 31), (34, 35), (32, 33), (36, 37), (18, 19), (26, 27)
# ] # = 19
CocoColors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
def read_imgfile(path, width=None, height=None):
val_image = cv2.imread(path, cv2.IMREAD_COLOR)
if width is not None and height is not None:
val_image = cv2.resize(val_image, (width, height))
return val_image
def get_sample_images(w, h):
val_image = [
read_imgfile('./images/p1.jpg', w, h),
read_imgfile('./images/p2.jpg', w, h),
read_imgfile('./images/p3.jpg', w, h),
read_imgfile('./images/golf.jpg', w, h),
read_imgfile('./images/hand1.jpg', w, h),
read_imgfile('./images/hand2.jpg', w, h),
read_imgfile('./images/apink1_crop.jpg', w, h),
read_imgfile('./images/ski.jpg', w, h),
read_imgfile('./images/apink2.jpg', w, h),
read_imgfile('./images/apink3.jpg', w, h),
read_imgfile('./images/handsup1.jpg', w, h),
read_imgfile('./images/p3_dance.png', w, h),
]
return val_image
def to_str(s):
if not isinstance(s, str):
return s.decode('utf-8')
return s
|
#!/usr/bin/env python3
# Copyright (c) 2020-2021 The Eleccoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Wallet encryption"""
import time
from test_framework.test_framework import EleccoinTestFramework
from test_framework.util import (
assert_raises_rpc_error,
assert_greater_than,
assert_greater_than_or_equal,
)
class WalletEncryptionTest(EleccoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
passphrase = "WalletPassphrase"
passphrase2 = "SecondWalletPassphrase"
# Make sure the wallet isn't encrypted first
msg = "test message"
address = self.nodes[0].getnewaddress(address_type='legacy')
sig = self.nodes[0].signmessage(address, msg)
assert self.nodes[0].verifymessage(address, sig, msg)
assert_raises_rpc_error(-15, "Error: running with an unencrypted wallet, but walletpassphrase was called", self.nodes[0].walletpassphrase, 'ff', 1)
assert_raises_rpc_error(-15, "Error: running with an unencrypted wallet, but walletpassphrasechange was called.", self.nodes[0].walletpassphrasechange, 'ff', 'ff')
# Encrypt the wallet
assert_raises_rpc_error(-8, "passphrase can not be empty", self.nodes[0].encryptwallet, '')
self.nodes[0].encryptwallet(passphrase)
# Test that the wallet is encrypted
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].signmessage, address, msg)
assert_raises_rpc_error(-15, "Error: running with an encrypted wallet, but encryptwallet was called.", self.nodes[0].encryptwallet, 'ff')
assert_raises_rpc_error(-8, "passphrase can not be empty", self.nodes[0].walletpassphrase, '', 1)
assert_raises_rpc_error(-8, "passphrase can not be empty", self.nodes[0].walletpassphrasechange, '', 'ff')
# Check that walletpassphrase works
self.nodes[0].walletpassphrase(passphrase, 2)
sig = self.nodes[0].signmessage(address, msg)
assert self.nodes[0].verifymessage(address, sig, msg)
# Check that the timeout is right
time.sleep(3)
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].signmessage, address, msg)
# Test wrong passphrase
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10)
# Test walletlock
self.nodes[0].walletpassphrase(passphrase, 84600)
sig = self.nodes[0].signmessage(address, msg)
assert self.nodes[0].verifymessage(address, sig, msg)
self.nodes[0].walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].signmessage, address, msg)
# Test passphrase changes
self.nodes[0].walletpassphrasechange(passphrase, passphrase2)
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10)
self.nodes[0].walletpassphrase(passphrase2, 10)
sig = self.nodes[0].signmessage(address, msg)
assert self.nodes[0].verifymessage(address, sig, msg)
self.nodes[0].walletlock()
# Test timeout bounds
assert_raises_rpc_error(-8, "Timeout cannot be negative.", self.nodes[0].walletpassphrase, passphrase2, -10)
self.log.info('Check a timeout less than the limit')
MAX_VALUE = 100000000
expected_time = int(time.time()) + MAX_VALUE - 600
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE - 600)
# give buffer for walletpassphrase, since it iterates over all crypted keys
expected_time_with_buffer = time.time() + MAX_VALUE - 600
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_greater_than_or_equal(actual_time, expected_time)
assert_greater_than(expected_time_with_buffer, actual_time)
self.log.info('Check a timeout greater than the limit')
expected_time = int(time.time()) + MAX_VALUE - 1
self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE + 1000)
expected_time_with_buffer = time.time() + MAX_VALUE
actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
assert_greater_than_or_equal(actual_time, expected_time)
assert_greater_than(expected_time_with_buffer, actual_time)
if __name__ == '__main__':
WalletEncryptionTest().main()
|
"""
Shelly platform for the cover component.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/shelly/
"""
#pylint: disable=import-error
from homeassistant.components.cover import (ATTR_POSITION,
SUPPORT_CLOSE,
SUPPORT_OPEN, SUPPORT_STOP,
SUPPORT_SET_POSITION)
try:
from homeassistant.components.cover import CoverEntity
except ImportError:
from homeassistant.components.cover import \
CoverDevice as CoverEntity
from .device import ShellyDevice
from homeassistant.helpers.dispatcher import async_dispatcher_connect
#def setup_platform(hass, _config, add_devices, discovery_info=None):
# """Set up the Shelly cover platform."""
# dev = get_device_from_hass(hass, discovery_info)
# add_devices([ShellyCover(dev, instance)])
async def async_setup_entry(hass, _config_entry, async_add_entities):
"""Set up Shelly cover dynamically."""
async def async_discover_cover(dev, instance):
"""Discover and add a discovered cover."""
async_add_entities([ShellyCover(dev, instance)])
async_dispatcher_connect(
hass,
"shelly_new_cover",
async_discover_cover
)
class ShellyCover(ShellyDevice, CoverEntity):
"""Shelly cover device."""
def __init__(self, dev, instance):
"""Initialize the cover."""
ShellyDevice.__init__(self, dev, instance)
self.entity_id = "cover" + self.entity_id
self._position = None
self._last_direction = None
self._motion_state = None
self._support_position = None
self._state = None
self._master_unit = True
self.update()
@property
def should_poll(self):
"""No polling needed."""
return True
@property
def current_cover_position(self):
"""Return current position"""
if self._support_position:
return self._position
return None
@property
def is_closed(self):
"""Return if the cover is closed or not."""
if self._support_position:
return self._position == 0
return None
@property
def is_closing(self):
"""Return if the cover is closing."""
return self._motion_state == "close"
@property
def is_opening(self):
"""Return if the cover is opening."""
return self._motion_state == "open"
@property
def supported_features(self):
"""Flag supported features."""
supported_features = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP
if self._support_position:
supported_features |= SUPPORT_SET_POSITION
return supported_features
def close_cover(self, **_kwargs):
"""Close the cover."""
self._dev.down()
def open_cover(self, **_kwargs):
"""Open the cover."""
self._dev.up()
def set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
pos = kwargs[ATTR_POSITION]
self._dev.set_position(pos)
self._position = pos
self._update_ha_state()
def stop_cover(self, **_kwargs):
"""Stop the cover."""
self._dev.stop()
def update(self):
"""Fetch new state data for this light.
This is the only method that should fetch new data for Home Assistant.
"""
self._state = self._dev.state
self._position = self._dev.position
self._last_direction = self._dev.last_direction
self._motion_state = self._dev.motion_state
self._support_position = self._dev.support_position
|
import os
import sys
import numpy as np
import matplotlib.pyplot as pp
import warnings
from datetime import datetime
#try:
# from PIL import Image
#except ImportError:
# print ("Python module 'PIL' not available.")
# pass
class EOS1_Img:
"""Python class for analyzing spectra images taken with the EOS 1 open-source spectrometer.
Jiansheng Feng ( jfeng@uakron.edu www.erieopen.tech )
To initialize, need input:
- file name of the image
- file path (from the Python folder)
- whether to trim edge #default False
Functions:
- show_RGB
- show_hmap: requires running cal_heatmap first
- cal_heatmap
- find_ref: requires running cal_heatmap first
- norm_sam: requires running find_ref (and therefore cal_heatmap) first
- cal_I: requires running norm_sam (and therefore find_ref and cal_heatmap) first
- test_N: runs cal_heatmap, find_ref, norm_sam, cal_I; requires nitrate_calibration.csv
"""
def __init__( self, filename, filepath, trim_edge=False ):
self.filename = filename
self.filepath = filepath
self.xi = pp.imread( self.filepath+self.filename )
if self.xi.ndim != 3:
raise ValueError( "Image array dimension incorrect. Expect an RGB image." )
else:
pass
if self.xi.dtype != np.uint8:
raise ValueError( "Image array datatype incorrect. Expect an 8-bit image." )
else:
pass
self.h, self.w, self.d = self.xi.shape
if self.h < self.w:
warnings.warn( "Image appears to be landscape." )
proceed = raw_input( "Continue? (y/N): " )
if proceed=='y' or proceed=='Y':
pass
else:
raise RuntimeError( "Program terminated by user." )
else:
pass
if trim_edge == True:
self.xi = self.xi[self.h/4:self.h*3/4, self.w/4:self.w*3/4, :]
self.show_RGB( fig_dpi=100 )
proceed = raw_input( "Continue with this image? (y/N): " )
if proceed=='y' or proceed=='Y':
pass
else:
raise RuntimeError( "Restart and set 'trim_edge' to False." )
else:
pass
def show_RGB( self, fig_dpi=200 ):
pp.figure( dpi=fig_dpi )
pp.style.use( "seaborn-dark" )
pp.imshow( self.xi )
def show_hmap( self, fig_dpi=200, x_max=510 ):
pp.figure( dpi=fig_dpi )
pp.style.use( "seaborn-dark" )
pp.imshow( self.xh, cmap="gray", vmin=0, vmax=x_max )
pp.colorbar()
def cal_heatmap( self ):
self.xf = self.xi.astype( float )
self.xh = abs( self.xf[:,:,0] - self.xf[:,:,1] )
self.xh += abs( self.xf[:,:,0] - self.xf[:,:,2] )
self.xh += abs( self.xf[:,:,1] - self.xf[:,:,2] )
def find_ref( self, n=0.25 ):
n = float( n )
if n<0.1 or n>0.9:
n = 0.25
else:
pass
self.x0 = self.xh.mean( axis=0 )
self.x0thres = np.argwhere( self.x0 > self.x0.max()*n ).flatten()
self.x0diff = self.x0thres[1:] - self.x0thres[:-1]
self.x0gap = np.where( self.x0diff > 2. )[0].flatten()
if len( self.x0gap )==0:
self.l_edge, self.r_edge = self.x0thres[0], self.x0thres[-1]
else:
self.d_to_center = []
for i in self.x0gap:
self.d_to_center.append( abs( self.w/2. - self.x0thres[i:i+2].mean() ) )
self.d_min = np.argmin( self.d_to_center )
if self.d_min==0:
self.l_edge, self.r_edge = self.x0thres[0], self.x0thres[ self.x0gap[0] ]
else:
self.l_edge = self.x0thres[ self.x0gap[self.d_min-1]+1 ]
self.r_edge = self.x0thres[ self.x0gap[self.d_min] ]
self.x_ref_band = self.xh[ :, self.l_edge:self.r_edge+1 ]
self.x1 = self.x_ref_band.mean( axis=1 )
self.x1thres = np.argwhere( self.x1 > self.x1.max()*n ).flatten()
self.t_edge, self.b_edge = self.x1thres[0], self.x1thres[-1]
def norm_sam( self, bpeak_chl='r', trim_margin=True, gapcal='p' ):
self.ref_wid = self.r_edge - self.l_edge
if trim_margin == True:
self.mrg = int( self.ref_wid/10. )
else:
self.mrg = 0
self.half_hgt = int( (self.b_edge - self.t_edge)/2. )
self.x_ref = self.xi[ self.t_edge:self.b_edge,
self.l_edge+self.mrg:self.r_edge-self.mrg, : ]
self.y_ref = self.x_ref.mean( axis=1 )
if bpeak_chl == 'r':
self.peak_r = self.y_ref[:self.half_hgt,0].argmax()
self.peak_b = self.y_ref[self.half_hgt:,0].argmax()+self.half_hgt
else:
self.peak_rgb = self.y_ref.argmax( axis=0 )
self.peak_r, self.peak_b = self.peak_rgb[[0,2]]
if gapcal == 'w':
self.gap = int( self.ref_wid*0.901 )
else:
self.gap = int( ( self.peak_b-self.peak_r )*0.368 )
self.x_sam = self.xi[ self.t_edge:self.b_edge,
self.r_edge+self.gap+self.mrg:self.r_edge+self.gap+self.ref_wid-self.mrg, : ]
self.y_sam = self.x_sam.mean( axis=1 )
self.max_rgb = self.y_ref.max( axis=0 )
self.peak_px = np.array([self.peak_r, self.peak_b]).flatten()
self.peak_nm = np.array([610.65, 449.1])
self.f = np.polyfit( self.peak_px, self.peak_nm, 1 )
self.wl = np.arange(self.b_edge-self.t_edge)*self.f[0]+self.f[1]
self.y_sam_norm_r = self.y_sam[:, 0]/self.max_rgb[0]
self.y_sam_norm_g = self.y_sam[:, 1]/self.max_rgb[1]
self.y_sam_norm_b = self.y_sam[:, 2]/self.max_rgb[2]
self.y_sam_norm = np.dstack((self.y_sam_norm_r, self.y_sam_norm_g, self.y_sam_norm_b))[0]
def cal_I( self, chl='g', wl_low=525., wl_high=535. ):
if chl=='r' or chl=='R':
self.sam_arr = self.y_sam_norm_r
elif chl=='g' or chl=='G':
self.sam_arr = self.y_sam_norm_g
elif chl=='b' or chl=='B':
self.sam_arr = self.y_sam_norm_b
else:
raise ValueError( "Color channel should be 'r', 'g', or 'b'." )
arg_low = np.where( self.wl < wl_high )[0][0]
arg_high = np.where( self.wl > wl_low )[0][-1]
I_sum = self.sam_arr[arg_low:arg_high+1].sum()
I_ave = I_sum/(arg_high-arg_low+1)
return I_ave
def test_N( self, wlc=530., wlhw=5., cali="nitrate_calibration.csv" ):
wl_l = wlc-wlhw
wl_h = wlc+wlhw
self.cal_heatmap()
self.find_ref()
self.norm_sam()
I_ave = self.cal_I( wl_low=wl_l, wl_high=wl_h )
if os.path.isfile(cali) == True:
f = open( cali )
ls = f.readlines()
f.close()
cali_date = ls[0].strip().split(',')[1]
cali_time = ls[1].strip().split(',')[1]
k_N = float( ls[2].strip().split(',')[1] )
b_N = float( ls[3].strip().split(',')[1] )
print ("Using calibration record from "+cali_date+" "+cali_time)
else:
print ("Calibration record not found.")
input_kb = raw_input( "Manually input k and b values? (y/N): " )
if input_kb == 'y' or input_kb == 'Y':
k_N = float( raw_input("Please specify k_N: ") )
b_N = float( raw_input("Please specify b_N: ") )
else:
k_N = -7.8279
b_N = -0.14917
lgI = np.log10(I_ave)
nc = lgI*k_N + b_N
return nc
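# Worked example of the linear model used in test_N, nc = log10(I_ave)*k_N + b_N,
# with the fallback coefficients above (k_N = -7.8279, b_N = -0.14917):
#   I_ave = 0.5  ->  log10(0.5) ~ -0.3010
#   nc ~ (-0.3010)*(-7.8279) + (-0.14917) ~ 2.21 mg/L nitrate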
def cali_N( img_arr, nc_arr, f_path, wl=530., fig_out=True ):
if len(img_arr) != len(nc_arr):
raise ValueError( "img_arr and nc_arr should have the same length." )
else:
pass
nc_arr = np.array( nc_arr )
I_arr = []
for img in img_arr:
eos = EOS1_Img( img, f_path )
eos.cal_heatmap()
eos.find_ref()
eos.norm_sam()
I_arr.append( eos.cal_I('g', wl-5., wl+5.) )
I_arr = np.array( I_arr )
lgI = np.log10(I_arr)
k, b = np.polyfit( lgI, nc_arr, 1 )
cali_rec = "nitrate_calibration.csv"
if os.path.isfile( cali_rec ) == True:
print ("Calibration record '"+cali_rec+"' already exist.")
proceed = raw_input("Overwrite this record? (Y/n): ")
if proceed == 'n' or proceed == 'N':
cali_rec = raw_input("File name of new record (including file extension): ")
else:
pass
else:
pass
print ("Writing to calibration record: "+cali_rec+'.')
f = open( cali_rec, 'w' )
day_str, time_str = str( datetime.now() ).split()
time_str = time_str.split('.')[0]
f.write("date,"+day_str+'\n' )
f.write("time,"+time_str+'\n' )
f.write("k,"+str(round(k, 5))+'\n' )
f.write("b,"+str(round(b, 5))+'\n' )
f.close()
print ("Done writing to calibration record.")
if fig_out == True:
Ab_arr = (-1.)*lgI
kf, bf = np.polyfit( nc_arr, Ab_arr, 1 )
pp.style.use( "seaborn-darkgrid" )
pp.figure( dpi=150 )
pp.plot( nc_arr, Ab_arr, 'k.', label="Calibration Data" )
pp.plot( nc_arr, nc_arr*kf+bf, 'k-', label="Linear Fit" )
pp.xlabel( "Nitrate Concentration (mg/L)", size=12)
pp.ylabel( "Absorbance ("+str(wl-5)+"nm $-$ "+str(wl+5)+"nm)", size=12 )
pp.legend( loc="upper left" )
pp.show()
else:
pass
if __name__ == "__main__":
print ("###############")
print ("This script is written for Python v2.7.15")
print ("You are using Python "+str( sys.version ))
print ("###############")
print ('\n')
print ("Please make sure: ")
print ("-1- The picture is portrait (i.e., height > width).")
print ("-2- The reference spectrum is on the left-hand-side.")
print ("-3- The images are 8-bit RGB images (usually .jpg or .tif, but not .png).")
print ('\n')
update_cali = raw_input( "Generate or update calibration record? (y/N): " )
if update_cali=='y' or update_cali=='Y':
img_path = raw_input( "Please specify the path to the images: " )
img_list_str = raw_input( "Please list images, separated by commas: " )
nc_list_str = raw_input( "Please list concentrations, separated by commas: " )
wavelength = float( raw_input( "Please specify wavelength (nm): " ) )
img_list, nc_list = [], []
for img_str in img_list_str.strip().split(','):
img_list.append( img_str.strip() )
for nc_str in nc_list_str.strip().split(','):
nc_list.append( float( nc_str.strip() ) )
cali_N( img_list, nc_list, img_path, wavelength )
else:
pass
meas_N = raw_input( "Measure nitrate? (y/N): " )
if meas_N=='y' or meas_N=='Y':
file_path = raw_input( "Please input image file path: " )
file_name = raw_input( "Please input image file name: " )
wavelength = float( raw_input( "Please input wavelength (nm): " ) )
eos = EOS1_Img( file_name, file_path )
nc = eos.test_N( wlc=wavelength )
print ("Nitrate Concentration: "+str(round(nc, 2))+" mg/L" )
else:
pass
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import subprocess
import sys
def initial_setup():
# Root access is necessary to install programs and edit protected files.
if os.geteuid() != 0:
print("Script must need root access")
subprocess.call(['sudo', 'python3', *sys.argv])
# Install pip for python3
subprocess.call("sudo apt-get install -y python3-distutils", shell=True)
subprocess.call("wget https://bootstrap.pypa.io/get-pip.py", shell=True)
subprocess.call("sudo python3 get-pip.py", shell=True)
subprocess.call("rm get-pip.py", shell=True)
# install virtualenv and create one
subprocess.call("sudo pip install virtualenv", shell=True)
subprocess.call("sudo pip install cryptography --upgrade", shell=True)
subprocess.call("virtualenv --system-site-packages venv", shell=True)
subprocess.call("venv/bin/pip install --editable .", shell=True)
# Create an alias for using the script
subprocess.call("echo 'alias ansible_tools=$(pwd)/venv/bin/ansible_tools' >> ~/.bashrc",
shell=True)
subprocess.call("source ~/.bashrc", shell=True)
# Maybe use ansible from now on to install and configure files
# Install openssh-server
subprocess.call("sudo apt install openssh-server", shell=True)
filename = '/etc/ssh/sshd_config'
subprocess.call("sudo cp {0} {0}.original".format(filename), shell=True)
with open(filename, 'a') as file:
file.write("Port 22\n")
file.close()
subprocess.call("sudo systemctl restart sshd.service", shell=True)
# TODO: Include a self-removal option to clean up the whole repo. For slave machines only.
if __name__ == "__main__":
initial_setup()
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from .acl_create_or_update_parameters_py3 import AclCreateOrUpdateParameters
from .acl_delete_parameters_py3 import AclDeleteParameters
from .acl_py3 import Acl
from .data_lake_analytics_catalog_secret_create_or_update_parameters_py3 import DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters
from .data_lake_analytics_catalog_credential_create_parameters_py3 import DataLakeAnalyticsCatalogCredentialCreateParameters
from .data_lake_analytics_catalog_credential_delete_parameters_py3 import DataLakeAnalyticsCatalogCredentialDeleteParameters
from .data_lake_analytics_catalog_credential_update_parameters_py3 import DataLakeAnalyticsCatalogCredentialUpdateParameters
from .usql_secret_py3 import USqlSecret
from .usql_external_data_source_py3 import USqlExternalDataSource
from .usql_credential_py3 import USqlCredential
from .usql_procedure_py3 import USqlProcedure
from .usql_table_column_py3 import USqlTableColumn
from .usql_directed_column_py3 import USqlDirectedColumn
from .usql_distribution_info_py3 import USqlDistributionInfo
from .usql_index_py3 import USqlIndex
from .ddl_name_py3 import DdlName
from .entity_id_py3 import EntityId
from .external_table_py3 import ExternalTable
from .type_field_info_py3 import TypeFieldInfo
from .usql_table_preview_py3 import USqlTablePreview
from .usql_table_py3 import USqlTable
from .usql_table_fragment_py3 import USqlTableFragment
from .usql_table_type_py3 import USqlTableType
from .usql_view_py3 import USqlView
from .usql_package_py3 import USqlPackage
from .usql_table_partition_py3 import USqlTablePartition
from .usql_table_statistics_py3 import USqlTableStatistics
from .usql_type_py3 import USqlType
from .usql_table_valued_function_py3 import USqlTableValuedFunction
from .usql_assembly_file_info_py3 import USqlAssemblyFileInfo
from .usql_assembly_dependency_info_py3 import USqlAssemblyDependencyInfo
from .usql_assembly_py3 import USqlAssembly
from .usql_assembly_clr_py3 import USqlAssemblyClr
from .usql_schema_py3 import USqlSchema
from .usql_database_py3 import USqlDatabase
from .catalog_item_py3 import CatalogItem
from .catalog_item_list_py3 import CatalogItemList
except (SyntaxError, ImportError):
from .acl_create_or_update_parameters import AclCreateOrUpdateParameters
from .acl_delete_parameters import AclDeleteParameters
from .acl import Acl
from .data_lake_analytics_catalog_secret_create_or_update_parameters import DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters
from .data_lake_analytics_catalog_credential_create_parameters import DataLakeAnalyticsCatalogCredentialCreateParameters
from .data_lake_analytics_catalog_credential_delete_parameters import DataLakeAnalyticsCatalogCredentialDeleteParameters
from .data_lake_analytics_catalog_credential_update_parameters import DataLakeAnalyticsCatalogCredentialUpdateParameters
from .usql_secret import USqlSecret
from .usql_external_data_source import USqlExternalDataSource
from .usql_credential import USqlCredential
from .usql_procedure import USqlProcedure
from .usql_table_column import USqlTableColumn
from .usql_directed_column import USqlDirectedColumn
from .usql_distribution_info import USqlDistributionInfo
from .usql_index import USqlIndex
from .ddl_name import DdlName
from .entity_id import EntityId
from .external_table import ExternalTable
from .type_field_info import TypeFieldInfo
from .usql_table_preview import USqlTablePreview
from .usql_table import USqlTable
from .usql_table_fragment import USqlTableFragment
from .usql_table_type import USqlTableType
from .usql_view import USqlView
from .usql_package import USqlPackage
from .usql_table_partition import USqlTablePartition
from .usql_table_statistics import USqlTableStatistics
from .usql_type import USqlType
from .usql_table_valued_function import USqlTableValuedFunction
from .usql_assembly_file_info import USqlAssemblyFileInfo
from .usql_assembly_dependency_info import USqlAssemblyDependencyInfo
from .usql_assembly import USqlAssembly
from .usql_assembly_clr import USqlAssemblyClr
from .usql_schema import USqlSchema
from .usql_database import USqlDatabase
from .catalog_item import CatalogItem
from .catalog_item_list import CatalogItemList
from .usql_credential_paged import USqlCredentialPaged
from .usql_external_data_source_paged import USqlExternalDataSourcePaged
from .usql_procedure_paged import USqlProcedurePaged
from .usql_table_fragment_paged import USqlTableFragmentPaged
from .usql_table_paged import USqlTablePaged
from .usql_table_statistics_paged import USqlTableStatisticsPaged
from .usql_table_type_paged import USqlTableTypePaged
from .usql_package_paged import USqlPackagePaged
from .usql_view_paged import USqlViewPaged
from .usql_table_partition_paged import USqlTablePartitionPaged
from .usql_type_paged import USqlTypePaged
from .usql_table_valued_function_paged import USqlTableValuedFunctionPaged
from .usql_assembly_clr_paged import USqlAssemblyClrPaged
from .usql_schema_paged import USqlSchemaPaged
from .acl_paged import AclPaged
from .usql_database_paged import USqlDatabasePaged
from .data_lake_analytics_catalog_management_client_enums import (
AclType,
PermissionType,
FileType,
)
__all__ = [
'AclCreateOrUpdateParameters',
'AclDeleteParameters',
'Acl',
'DataLakeAnalyticsCatalogSecretCreateOrUpdateParameters',
'DataLakeAnalyticsCatalogCredentialCreateParameters',
'DataLakeAnalyticsCatalogCredentialDeleteParameters',
'DataLakeAnalyticsCatalogCredentialUpdateParameters',
'USqlSecret',
'USqlExternalDataSource',
'USqlCredential',
'USqlProcedure',
'USqlTableColumn',
'USqlDirectedColumn',
'USqlDistributionInfo',
'USqlIndex',
'DdlName',
'EntityId',
'ExternalTable',
'TypeFieldInfo',
'USqlTablePreview',
'USqlTable',
'USqlTableFragment',
'USqlTableType',
'USqlView',
'USqlPackage',
'USqlTablePartition',
'USqlTableStatistics',
'USqlType',
'USqlTableValuedFunction',
'USqlAssemblyFileInfo',
'USqlAssemblyDependencyInfo',
'USqlAssembly',
'USqlAssemblyClr',
'USqlSchema',
'USqlDatabase',
'CatalogItem',
'CatalogItemList',
'USqlCredentialPaged',
'USqlExternalDataSourcePaged',
'USqlProcedurePaged',
'USqlTableFragmentPaged',
'USqlTablePaged',
'USqlTableStatisticsPaged',
'USqlTableTypePaged',
'USqlPackagePaged',
'USqlViewPaged',
'USqlTablePartitionPaged',
'USqlTypePaged',
'USqlTableValuedFunctionPaged',
'USqlAssemblyClrPaged',
'USqlSchemaPaged',
'AclPaged',
'USqlDatabasePaged',
'AclType',
'PermissionType',
'FileType',
]
|
"""
The main file used to train student and teacher models. Mainly based on [GitHub repository](https://github.com/intersun/PKD-for-BERT-Model-Compression) for [Patient Knowledge Distillation for BERT Model Compression](https://arxiv.org/abs/1908.09355).
"""
import logging
import os
import random
import pickle
import numpy as np
import torch
from torch.utils.data import RandomSampler, SequentialSampler
from tqdm import tqdm, trange
import torch.nn as nn
from BERT.pytorch_pretrained_bert.modeling import BertConfig
from BERT.pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
from BERT.pytorch_pretrained_bert.tokenization import BertTokenizer
from BERT.pytorch_pretrained_bert.quantization_modules import calculate_next_quantization_parts
from utils.argument_parser import default_parser, get_predefine_argv, complete_argument
from utils.nli_data_processing import processors, output_modes
from utils.data_processing import init_model, get_task_dataloader
from utils.modeling import BertForSequenceClassificationEncoder, FCClassifierForSequenceClassification, FullFCClassifierForSequenceClassification
from utils.utils import load_model, count_parameters, eval_model_dataloader_nli, eval_model_dataloader, compute_metrics, load_model_finetune
from utils.KD_loss import distillation_loss, patience_loss
from envs import HOME_DATA_FOLDER
from BERT.pytorch_pretrained_bert.quantization_modules import quantization
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
#########################################################################
# Prepare Parser
##########################################################################
parser = default_parser()
DEBUG = True
logger.info("IN CMD MODE")
args = parser.parse_args()
# The code might not be clean in the current form.
train_seed_fixed = args.train_seed
saving_criterion_acc_fixed = args.saving_criterion_acc
saving_criterion_loss_fixed = args.saving_criterion_loss
train_batch_size_fixed = args.train_batch_size
eval_batch_size_fixed = args.eval_batch_size
model_type_fixed = args.model_type
save_model_dir_fixed = args.save_model_dir
output_dir_fixed = args.output_dir
load_model_dir_fixed = args.load_model_dir
layer_initialization_fixed = args.layer_initialization
freeze_layer_fixed = args.freeze_layer
fp16_fixed = args.fp16
learning_rate_fixed = args.learning_rate
teacher_prediction_fixed = args.teacher_prediction
#teacher_num = args.teacher_numb
task_name_fixed = args.task
if DEBUG:
logger.info("IN DEBUG MODE")
argv = get_predefine_argv(args, 'glue', args.task, args.train_type, args.student_hidden_layers)
try:
args = parser.parse_args(argv)
except NameError:
raise ValueError('please uncomment one of option above to start training')
else:
logger.info("IN CMD MODE")
args = parser.parse_args()
args.output_dir = output_dir_fixed
if load_model_dir_fixed is not None:
args.load_model_dir = load_model_dir_fixed
args = complete_argument(args, args.output_dir, args.load_model_dir)
if train_seed_fixed is not None:
args.train_seed = train_seed_fixed
if saving_criterion_acc_fixed is not None:
args.saving_criterion_acc = saving_criterion_acc_fixed
if saving_criterion_loss_fixed is not None:
args.saving_criterion_loss = saving_criterion_loss_fixed
if train_batch_size_fixed is not None:
args.train_batch_size = train_batch_size_fixed
if eval_batch_size_fixed is not None:
args.eval_batch_size = eval_batch_size_fixed
if save_model_dir_fixed is not None:
args.save_model_dir = save_model_dir_fixed
if args.load_model_dir is not None:
args.encoder_checkpoint = args.load_model_dir
if task_name_fixed is not None:
args.task_name = task_name_fixed
args.task = task_name_fixed
if layer_initialization_fixed is not None:
args.layer_initialization = layer_initialization_fixed
if freeze_layer_fixed is not None:
args.freeze_layer = freeze_layer_fixed
if fp16_fixed is not None:
args.fp16 = fp16_fixed
if learning_rate_fixed is not None:
args.learning_rate = learning_rate_fixed
if teacher_prediction_fixed is not None:
args.teacher_prediction = teacher_prediction_fixed
args.model_type = model_type_fixed
args.raw_data_dir = os.path.join(HOME_DATA_FOLDER, 'data_raw', args.task_name)
args.feat_data_dir = os.path.join(HOME_DATA_FOLDER, 'data_feat', args.task_name)
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
logger.info('actual batch size on all GPU = %d' % args.train_batch_size)
device, n_gpu = args.device, args.n_gpu
###################################################################################################################################
random.seed(args.train_seed)
np.random.seed(args.train_seed)
torch.manual_seed(args.train_seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.train_seed)
if args.model_type == 'Original':
if args.student_hidden_layers == 3:
args.fc_layer_idx = '1,3'
elif args.student_hidden_layers == 6:
args.fc_layer_idx = '1,3,5,7,9'
logger.info('Input Argument Information')
args_dict = vars(args)
for a in args_dict:
logger.info('%-28s %s' % (a, args_dict[a]))
#########################################################################
# Prepare Data
##########################################################################
task_name = args.task_name.lower()
if task_name not in processors and 'race' not in task_name:
raise ValueError("Task not found: %s" % (task_name))
if 'race' in task_name:
pass
else:
processor = processors[task_name]()
output_mode = output_modes[task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=True)
if args.do_train:
train_sampler = SequentialSampler if DEBUG else RandomSampler
read_set = 'train'
if args.teacher_prediction is not None and args.alpha > 0:
logger.info('loading teacher\'s prediction')
teacher_predictions = pickle.load(open(args.teacher_prediction, 'rb'))['train'] if args.teacher_prediction is not None else None
#teacher_predictions = pickle.load(open(args.real_teacher, 'rb'))['train'] if args.real_teacher is not None else logger.info("shibal")
logger.info('teacher acc = %.2f, teacher loss = %.5f' % (teacher_predictions['acc']*100, teacher_predictions['loss']))
teacher_predictions_ = pickle.load(open(args.teacher_prediction, 'rb'))['dev'] if args.teacher_prediction is not None else None
#teacher_predictions_ = pickle.load(open(args.real_teacher, 'rb'))['dev'] if args.real_teacher is not None else None
logger.info('teacher acc = %.2f, teacher loss = %.5f' % (teacher_predictions_['acc']*100, teacher_predictions_['loss']))
if args.kd_model == 'kd':
train_examples, train_dataloader, _ = get_task_dataloader(task_name, read_set, tokenizer, args, SequentialSampler,
batch_size=args.train_batch_size,
knowledge=teacher_predictions['pred_logit'])
else:
train_examples, train_dataloader, _ = get_task_dataloader(task_name, read_set, tokenizer, args, SequentialSampler,
batch_size=args.train_batch_size,
knowledge=teacher_predictions['pred_logit'],
extra_knowledge=teacher_predictions['feature_maps'])
else:
if args.alpha > 0:
raise ValueError('please specify teacher\'s prediction file for KD training')
        logger.info('running simple fine-tuning because teacher\'s prediction is not provided')
train_examples, train_dataloader, _ = get_task_dataloader(task_name, read_set, tokenizer, args, SequentialSampler,
batch_size=args.train_batch_size)
num_train_optimization_steps = int(len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
# Run prediction for full data
eval_examples, eval_dataloader, eval_label_ids = get_task_dataloader(task_name, 'dev', tokenizer, args, SequentialSampler, batch_size=args.eval_batch_size)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
# if args.do_eval:
# test_examples, test_dataloader, test_label_ids = get_task_dataloader(task_name, 'test', tokenizer, args, SequentialSampler, batch_size=args.eval_batch_size)
# logger.info("***** Running evaluation *****")
# logger.info(" Num examples = %d", len(test_examples))
# logger.info(" Batch size = %d", args.eval_batch_size)
#########################################################################
# Prepare model
#########################################################################
student_config = BertConfig(os.path.join(args.bert_model, 'bert_config.json'))
if args.kd_model.lower() in ['kd', 'kd.cls', 'kd.u', 'kd.i']:
logger.info('using normal Knowledge Distillation')
output_all_layers = (args.kd_model.lower() in ['kd.cls', 'kd.u', 'kd.i'])
# if original model
if args.model_type == 'Original':
student_encoder, student_classifier = init_model(task_name, output_all_layers, args.student_hidden_layers, student_config)
n_student_layer = len(student_encoder.bert.encoder.layer)
layer_initialization = args.layer_initialization.split(',')
for i in range(len(layer_initialization)):
layer_initialization[i] = int(layer_initialization[i])
args.encoder_checkpoint = '/home/ikhyuncho23/NL_BERT/data/outputs/KD/TinyBERT/tinybert_pytorch_model.bin'
student_encoder = load_model_finetune(student_encoder, layer_initialization, args.encoder_checkpoint, args, 'student', verbose= True)
logger.info('*' * 77)
student_classifier = load_model(student_classifier, args.cls_checkpoint, args, 'classifier', verbose= True)
elif args.kd_model.lower() == 'kd.full':
logger.info('using FULL Knowledge Distillation')
layer_idx = [int(i) for i in args.fc_layer_idx.split(',')]
num_fc_layer = len(layer_idx)
if args.weights is None or args.weights.lower() in ['none']:
weights = np.array([1] * (num_fc_layer-1) + [num_fc_layer-1]) / 2 / (num_fc_layer-1)
else:
weights = [float(w) for w in args.weights.split(',')]
weights = np.array(weights) / sum(weights)
assert len(weights) == num_fc_layer, 'number of weights and number of FC layer must be equal to each other'
# weights = torch.tensor(np.array([1, 1, 1, 1, 2, 6])/12, dtype=torch.float, device=device, requires_grad=False)
if args.fp16:
weights = weights.half()
student_encoder = BertForSequenceClassificationEncoder(student_config, output_all_encoded_layers=True,
num_hidden_layers=args.student_hidden_layers,
fix_pooler=True)
n_student_layer = len(student_encoder.bert.encoder.layer)
student_encoder = load_model(student_encoder, args.encoder_checkpoint, args, 'student', verbose=True)
logger.info('*' * 77)
student_classifier = FullFCClassifierForSequenceClassification(student_config, num_labels, student_config.hidden_size,
student_config.hidden_size, 6)
student_classifier = load_model(student_classifier, args.cls_checkpoint, args, 'exact', verbose=True)
assert max(layer_idx) <= n_student_layer - 1, 'selected FC layer idx cannot exceed the number of transformers'
else:
    raise ValueError('%s KD not found, please use kd or kd.full' % args.kd_model)
n_param_student = count_parameters(student_encoder) + count_parameters(student_classifier)
logger.info('number of layers in student model = %d' % n_student_layer)
logger.info('num parameters in student model are %d and %d' % (count_parameters(student_encoder), count_parameters(student_classifier)))
#########################################################################
# Prepare optimizer
#########################################################################
if task_name == 'rte':
log_per_step = 1
elif task_name == 'mrpc':
log_per_step = 1
elif task_name == 'cola':
log_per_step = 10
elif task_name == 'sst-2':
log_per_step = 10
else:
log_per_step = 200
if args.do_train:
param_optimizer = list(student_encoder.named_parameters()) + list(student_classifier.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
logger.info('FP16 activate, use apex FusedAdam')
try:
from apex.contrib.optimizers import FP16_Optimizer
from apex.contrib.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
logger.info('FP16 is not activated, use BertAdam')
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
#########################################################################
# Model Training
#########################################################################
if args.do_train:
global_step = 0
nb_tr_steps = 0
tr_loss = 0
student_encoder.train()
student_classifier.train()
log_train = open(os.path.join(args.output_dir, 'train_log.txt'), 'w', buffering=1)
log_eval = open(os.path.join(args.output_dir, 'eval_log.txt'), 'w', buffering=1)
print('epoch,global_steps,step,acc,loss,kd_loss,ce_loss,AT_loss', file=log_train)
print('epoch,acc,loss', file=log_eval)
eval_loss_min = 100
eval_best_acc = 0
eval_best_acc_and_f1 = 0
eval_best_f1 = 0
loss_acc = 0
for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss, tr_ce_loss, tr_kd_loss, tr_acc = 0, 0, 0, 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
student_encoder.train()
student_classifier.train()
batch = tuple(t.to(device) for t in batch)
if args.alpha == 0:
input_ids, input_mask, segment_ids, label_ids = batch
teacher_pred, teacher_patience = None, None
else:
if args.kd_model == 'kd':
input_ids, input_mask, segment_ids, label_ids, teacher_pred = batch
teacher_patience = None
else:
input_ids, input_mask, segment_ids, label_ids, teacher_pred, teacher_patience = batch
if args.fp16:
teacher_patience = teacher_patience.half()
if args.fp16:
teacher_pred = teacher_pred.half()
full_output, pooled_output = student_encoder(input_ids, segment_ids, input_mask)
if args.kd_model.lower() in['kd', 'kd.cls']:
logits_pred_student = student_classifier(pooled_output)
if args.kd_model.lower() == 'kd.cls':
student_patience = torch.stack(full_output[:-1]).transpose(0,1)
else:
student_patience = None
elif args.kd_model.lower() == 'kd.full':
logits_pred_student = student_classifier(full_output, weights, layer_idx)
else:
raise ValueError(f'{args.kd_model} not implemented yet')
loss_dl, kd_loss, ce_loss = distillation_loss(logits_pred_student, label_ids, teacher_pred, T=args.T, alpha=args.alpha)
if args.beta > 0:
if student_patience.shape[0] != input_ids.shape[0]:
# For RACE
n_layer = student_patience.shape[1]
student_patience = student_patience.transpose(0, 1).contiguous().view(n_layer, input_ids.shape[0], -1).transpose(0,1)
pt_loss = args.beta * patience_loss(teacher_patience, student_patience, args.normalize_patience)
loss = loss_dl + pt_loss
else:
pt_loss = torch.tensor(0.0)
loss = loss_dl
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
n_sample = input_ids.shape[0]
tr_loss += loss.item() * n_sample
if isinstance(kd_loss, float):
tr_kd_loss += kd_loss * n_sample
else:
tr_kd_loss += kd_loss.item() * n_sample
tr_ce_loss += ce_loss.item() * n_sample
tr_loss_pt = pt_loss.item() * n_sample
pred_cls = logits_pred_student.data.max(1)[1]
tr_acc += pred_cls.eq(label_ids).sum().cpu().item()
nb_tr_examples += n_sample
nb_tr_steps += 1
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
lr_this_step = args.learning_rate * warmup_linear(global_step / num_train_optimization_steps,
args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
else:
lr_this_step = args.learning_rate * warmup_linear(global_step / num_train_optimization_steps, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
if args.num_train_epochs == 10:
if (epoch == 8):
logger.info("*"*77)
logger.info("Best Acc: "+str(eval_best_acc)+", Best Loss: "+str(eval_loss_min))
if task_name == 'mrpc':
logger.info("Best acc and f1: "+str(eval_best_acc_and_f1))
logger.info("*"*77)
raise ValueError("Skipping the rest.")
elif args.num_train_epochs == 4:
if (epoch == 4):
logger.info("*"*77)
logger.info("Best Acc: "+str(eval_best_acc)+", Best Loss: "+str(eval_loss_min))
if task_name == 'mrpc':
logger.info("Best acc and f1: "+str(eval_best_acc_and_f1))
logger.info("*"*77)
raise ValueError("Skipping the rest.")
#Validate the model on dev set every log_per_step and save the model if criterion is met.
if (global_step % log_per_step == 0) & (epoch > 0):
if 'race' in task_name:
result = eval_model_dataloader_nli(student_encoder, student_classifier, eval_dataloader, device, False)
else:
test_res = eval_model_dataloader_nli(args.task_name.lower(), eval_label_ids, student_encoder, student_classifier, eval_dataloader, args.kd_model, num_labels, device, args.weights, args.fc_layer_idx, output_mode)
# Saving checkpoints when the conditions below are met.
if task_name == 'cola':
if test_res['mcc'] > eval_best_acc:
logger.info("")
logger.info('='*77)
logger.info("Validation mcc improved! "+str(eval_best_acc)+" -> "+str(test_res['mcc']))
logger.info('='*77)
eval_best_acc = test_res['mcc']
if eval_best_acc > args.saving_criterion_acc:
if args.n_gpu > 1:
torch.save(student_encoder.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_acc.pkl'))
torch.save(student_classifier.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_acc.pkl'))
else:
torch.save(student_encoder.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_acc.pkl'))
torch.save(student_classifier.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_acc.pkl'))
logger.info("Saving the model...")
if test_res['eval_loss']< eval_loss_min:
logger.info("")
logger.info('='*77)
logger.info("Validation Loss improved! "+str(eval_loss_min)+" -> "+str(test_res['eval_loss']))
logger.info('='*77)
eval_loss_min = test_res['eval_loss']
if eval_loss_min < args.saving_criterion_loss:
if args.n_gpu > 1:
torch.save(student_encoder.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_loss.pkl'))
torch.save(student_classifier.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_loss.pkl'))
else:
torch.save(student_encoder.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_loss.pkl'))
torch.save(student_classifier.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_loss.pkl'))
logger.info("Saving the model...")
elif task_name == 'mrpc':
if test_res['f1'] > eval_best_acc:
logger.info("")
logger.info('='*77)
logger.info("Validation f1 improved! "+str(eval_best_acc)+" -> "+str(test_res['f1']))
logger.info('='*77)
eval_best_acc = test_res['f1']
print("ACC= "+str(test_res['acc']))
if eval_best_acc > args.saving_criterion_acc:
if args.n_gpu > 1:
torch.save(student_encoder.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_acc.pkl'))
torch.save(student_classifier.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_acc.pkl'))
else:
torch.save(student_encoder.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_acc.pkl'))
torch.save(student_classifier.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_acc.pkl'))
logger.info("Saving the model...")
if test_res['acc_and_f1'] > eval_best_acc_and_f1:
logger.info("")
logger.info('='*77)
logger.info("Validation acc_and_f1 improved! "+str(eval_best_acc_and_f1)+" -> "+str(test_res['acc_and_f1']))
logger.info('='*77)
eval_best_acc_and_f1 = test_res['acc_and_f1']
logger.info("ACC= "+str(test_res['acc']))
logger.info("f1= "+str(test_res['f1']))
if eval_best_acc_and_f1 > args.saving_criterion_acc:
if args.n_gpu > 1:
torch.save(student_encoder.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_acc_and_f1.pkl'))
torch.save(student_classifier.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_acc_and_f1.pkl'))
else:
torch.save(student_encoder.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_acc_and_f1.pkl'))
torch.save(student_classifier.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_acc_and_f1.pkl'))
logger.info("Saving the model...")
if test_res['eval_loss']< eval_loss_min:
logger.info("")
logger.info('='*77)
logger.info("Validation Loss improved! "+str(eval_loss_min)+" -> "+str(test_res['eval_loss']))
logger.info('='*77)
eval_loss_min = test_res['eval_loss']
print("ACC= "+str(test_res['acc']))
if eval_loss_min < args.saving_criterion_loss:
if args.n_gpu > 1:
torch.save(student_encoder.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_loss.pkl'))
torch.save(student_classifier.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_loss.pkl'))
else:
torch.save(student_encoder.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_loss.pkl'))
torch.save(student_classifier.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_loss.pkl'))
logger.info("Saving the model...")
else:
if test_res['acc'] > eval_best_acc:
logger.info("")
logger.info('='*77)
logger.info("Validation acc improved! "+str(eval_best_acc)+" -> "+str(test_res['acc']))
logger.info('='*77)
eval_best_acc = test_res['acc']
if eval_best_acc > args.saving_criterion_acc:
if args.n_gpu > 1:
torch.save(student_encoder.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_acc.pkl'))
torch.save(student_classifier.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_acc.pkl'))
else:
torch.save(student_encoder.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_acc.pkl'))
torch.save(student_classifier.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_acc.pkl'))
logger.info("Saving the model...")
if test_res['eval_loss']< eval_loss_min:
logger.info("")
logger.info('='*77)
logger.info("Validation Loss improved! "+str(eval_loss_min)+" -> "+str(test_res['eval_loss']))
logger.info('='*77)
eval_loss_min = test_res['eval_loss']
if eval_loss_min < args.saving_criterion_loss:
if args.n_gpu > 1:
torch.save(student_encoder.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_loss.pkl'))
torch.save(student_classifier.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_loss.pkl'))
else:
torch.save(student_encoder.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_loss.pkl'))
torch.save(student_classifier.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_loss.pkl'))
logger.info("Saving the model...")
logger.info("")
logger.info('='*77)
logger.info("Validation Accuracy : "+str(eval_best_acc)+" Validation Loss : "+str(eval_loss_min))
logger.info("The seed is : "+str(args.seed))
logger.info('='*77)
|
##############################################################################
from pandas import DataFrame
import numpy as np
class Analyzer:
"""This class create reports from the datasets"""
@classmethod
def _analyze_header(cls, header_frame, header, data_type):
"""Create an analysis report for the given header."""
report = {}
report['header'] = header
report['expected_type'] = str(data_type)
type_stats = {}
value_stats = {}
for value in header_frame:
cls._update_stats(type_stats, str(type(value)))
cls._update_stats(value_stats, value)
report['type_stats'] = type_stats
report['value_stats'] = value_stats
return report
@staticmethod
def _update_stats(stats, val):
"""Update the count of the value val in the given stats dict"""
if val in stats:
stats[val]+=1
else:
stats[val]=1
@staticmethod
def create_general_report(dataframe):
"""Returns a general report of all dataframe's numeric headers"""
return dataframe.describe()
@classmethod
def create_header_reports(cls, dataframe, hashmap):
"""Create and return reports for each header of the given dataframe
using the hashmap param. The hashmap is a dict whose keys are strings
(representing header names) and values are data types.
"""
headers = dataframe.columns
analysis = []
        for header, data_type in hashmap.items():
if header in headers:
header_frame = dataframe[header]
header_analysis = cls._analyze_header(header_frame, header,
data_type)
analysis.append(header_analysis)
return analysis
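# Minimal usage sketch (hypothetical column names and types; `Analyzer` is the
# class defined above):
if __name__ == '__main__':
    sample = DataFrame({'age': [21, 35, 'n/a'], 'name': ['ada', 'bob', 'cat']})
    expected_types = {'age': int, 'name': str}  # header name -> expected data type
    for header_report in Analyzer.create_header_reports(sample, expected_types):
        print(header_report['header'], header_report['type_stats'])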
|
from ._paths import *
from ._pscmanipulate import *
|
# -*- coding: utf-8 -*-
"""
Django settings for MAD Web project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import json
import os
import datetime
import environ
from django.core.exceptions import ImproperlyConfigured
ROOT_DIR = environ.Path(__file__) - 3 # (texaslan/config/settings/common.py - 3 = texaslan/)
APPS_DIR = ROOT_DIR.path('texaslan')
# JSON-based config file
# ------------------------------------------------------------------------------
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(os.path.join(__location__, 'config.json')) as file:
config = json.loads(file.read())
def get_config(key, config=config):
try:
return config[key]
except KeyError:
error_message = "Set the {0} config variable".format(key)
raise ImproperlyConfigured(error_message)
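# Illustrative shape of the expected config.json (values are placeholders; the keys
# mirror the get_config() calls made elsewhere in this module):
#
# {
#     "DEBUG": true,
#     "DATABASE": {"ENGINE": "django.db.backends.postgresql", "NAME": "mad_web"},
#     "SENDGRID_API_KEY": "...",
#     "SENDGRID_MAILING_LIST_ID": "...",
#     "SLACK_CLIENT_ID": "...",
#     "SLACK_CLIENT_SECRET": "...",
#     "PHOTOS_DRIVE_FOLDER_URL": "https://..."
# }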
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'oauth2_provider', # OAuth Provider
'django_slack_oauth', # Slack
'rest_framework', # Django REST framework
)
# Apps specific for this project go here.
LOCAL_APPS = (
'texaslan',
# custom users app
'texaslan.users.apps.UsersConfig',
# Your stuff: custom apps go here
'texaslan.events.apps.EventsConfig',
'texaslan.go.apps.GoConfig',
'texaslan.notify.apps.NotifyConfig',
'texaslan.comments.apps.CommentsConfig',
'texaslan.applications.apps.ApplicationsConfig',
'texaslan.voting.apps.VotingConfig',
'texaslan.site_settings.apps.SiteSettingsConfig'
)
CLEANUP_APP = (
# has to go last in order to work
'django_cleanup',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS + CLEANUP_APP
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'oauth2_provider.middleware.OAuth2TokenMiddleware',
'texaslan.api.middleware.JWTAuthMiddleware',
]
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'texaslan.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = get_config('DEBUG')
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = ()
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_WEBMASTER = 'drewiswaycool@gmail.com'
DEFAULT_FROM_EMAIL = 'LAN <noreply@www.texaslan.org>'
EMAIL_SUBJECT_PREFIX = '[LAN] '
SERVER_EMAIL = DEFAULT_FROM_EMAIL
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Drew Romanyk""", 'drew.romanyk@utexas.edu'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': get_config("DATABASE")
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
'texaslan.photos.context_processor.photos_url'
],
},
},
]
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'oauth2_provider.backends.OAuth2Backend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_SIGNUP_FORM_CLASS = 'texaslan.users.forms.UserSignupForm'
ACCOUNT_ADAPTER = 'texaslan.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'texaslan.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# django-compressor
# ------------------------------------------------------------------------------
# Sendgrid
# ------------------------------------------------------------------------------
SENDGRID_API_KEY = get_config("SENDGRID_API_KEY")
SENDGRID_MAILING_LIST_ID = get_config("SENDGRID_MAILING_LIST_ID")
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# Slack
SLACK_CLIENT_ID = get_config("SLACK_CLIENT_ID")
SLACK_CLIENT_SECRET = get_config("SLACK_CLIENT_SECRET")
SLACK_SCOPE = 'files:read,files:write:user,users:read'
SLACK_SUCCESS_REDIRECT_URL = '/users/~update/'
SLACK_PIPELINES = [
'texaslan.slack.pipelines.on_success.register_token',
]
# Photos
PHOTOS_DRIVE_FOLDER_URL = get_config("PHOTOS_DRIVE_FOLDER_URL")
# Django Rest JWT
JWT_AUTH = {
'JWT_RESPONSE_PAYLOAD_HANDLER': 'texaslan.utils.utils.jwt_response_payload_format',
'JWT_ALLOW_REFRESH': True,
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=90),
}
|
import re
import jsonschema
import jwt
from config import db, vuln_app
from api_views.json_schemas import *
from flask import jsonify, Response, request, json
from models.user_model import User
# from app import vuln
def error_message_helper(msg):
return '{ "status": "fail", "message": "' + msg + '"}'
def get_all_users():
return_value = jsonify({'users': User.get_all_users()})
return return_value
def debug():
return_value = jsonify({'users': User.get_all_users_debug()})
return return_value
def get_by_username(username):
if User.get_user(username):
return Response(str(User.get_user(username)), 200, mimetype="application/json")
else:
return Response(error_message_helper("User not found"), 404, mimetype="application/json")
def register_user():
request_data = request.get_json()
# check if user already exists
user = User.query.filter_by(username=request_data.get('username')).first()
if not user:
try:
# validate the data are in the correct form
jsonschema.validate(request_data, register_user_schema)
            if vuln and 'admin' in request_data: # The user can choose whether to register as an admin !!
if request_data['admin'] == "True":
admin = True
else:
admin = False
user = User(username=request_data['username'], password=request_data['password'],
email=request_data['email'], admin=admin)
else:
user = User(username=request_data['username'], password=request_data['password'],
email=request_data['email'])
db.session.add(user)
db.session.commit()
responseObject = {
'status': 'success',
'message': 'Successfully registered. Login to receive an auth token.'
}
return Response(json.dumps(responseObject), 200, mimetype="application/json")
except jsonschema.exceptions.ValidationError as exc:
return Response(error_message_helper(exc.message), 400, mimetype="application/json")
else:
return Response(error_message_helper("User already exists. Please Log in."), 200, mimetype="application/json")
def login_user():
request_data = request.get_json()
try:
# validate the data are in the correct form
jsonschema.validate(request_data, login_user_schema)
# fetching user data if the user exists
user = User.query.filter_by(username=request_data.get('username')).first()
if user and request_data.get('password') == user.password:
auth_token = user.encode_auth_token(user.username)
responseObject = {
'status': 'success',
'message': 'Successfully logged in.',
'auth_token': auth_token.decode("utf-8")
}
return Response(json.dumps(responseObject), 200, mimetype="application/json")
if vuln: # Password Enumeration
if user and request_data.get('password') != user.password:
return Response(error_message_helper("Password is not correct for the given username."), 200, mimetype="application/json")
elif not user: # User enumeration
return Response(error_message_helper("Username does not exist"), 200, mimetype="application/json")
else:
if (user and request_data.get('password') != user.password) or (not user):
return Response(error_message_helper("Username or Password Incorrect!"), 200, mimetype="application/json")
except jsonschema.exceptions.ValidationError as exc:
return Response(error_message_helper(exc.message), 400, mimetype="application/json")
except:
return Response(error_message_helper("An error occurred!"), 200, mimetype="application/json")
def token_validator(auth_header):
if auth_header:
try:
auth_token = auth_header.split(" ")[1]
except:
auth_token = ""
else:
auth_token = ""
if auth_token:
# if auth_token is valid we get back the username of the user
return User.decode_auth_token(auth_token)
else:
return "Invalid token"
def update_email(username):
request_data = request.get_json()
try:
jsonschema.validate(request_data, update_email_schema)
except:
return Response(error_message_helper("Please provide a proper JSON body."), 400, mimetype="application/json")
resp = token_validator(request.headers.get('Authorization'))
if "expired" in resp:
return Response(error_message_helper(resp), 401, mimetype="application/json")
elif "Invalid token" in resp:
return Response(error_message_helper(resp), 401, mimetype="application/json")
else:
user = User.query.filter_by(username=resp).first()
if vuln: # Regex DoS
match = re.search(
r"^([0-9a-zA-Z]([-.\w]*[0-9a-zA-Z])*@{1}([0-9a-zA-Z][-\w]*[0-9a-zA-Z]\.)+[a-zA-Z]{2,9})$",
str(request_data.get('email')))
if match:
user.email = request_data.get('email')
db.session.commit()
responseObject = {
'status': 'success',
'data': {
'username': user.username,
'email': user.email
}
}
return Response(json.dumps(responseObject), 204, mimetype="application/json")
else:
return Response(error_message_helper("Please Provide a valid email address."), 400, mimetype="application/json")
else:
regex = '^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
if (re.search(regex, request_data.get('email'))):
user.email = request_data.get('email')
db.session.commit()
responseObject = {
'status': 'success',
'data': {
'username': user.username,
'email': user.email
}
}
return Response(json.dumps(responseObject), 204, mimetype="application/json")
else:
return Response(error_message_helper("Please Provide a valid email address."), 400, mimetype="application/json")
def update_password(username):
request_data = request.get_json()
resp = token_validator(request.headers.get('Authorization'))
if "expired" in resp:
return Response(error_message_helper(resp), 401, mimetype="application/json")
elif "Invalid token" in resp:
return Response(error_message_helper(resp), 401, mimetype="application/json")
else:
if request_data.get('password'):
if vuln: # Unauthorized update of password of another user
user = User.query.filter_by(username=username).first()
user.password = request_data.get('password')
db.session.commit()
else:
user = User.query.filter_by(username=resp).first()
user.password = request_data.get('password')
db.session.commit()
responseObject = {
'status': 'success',
'Password': 'Updated.'
}
return Response(json.dumps(responseObject), 204, mimetype="application/json")
else:
return Response(error_message_helper("Malformed Data"), 400, mimetype="application/json")
def delete_user(username):
resp = token_validator(request.headers.get('Authorization'))
if "expired" in resp:
return Response(error_message_helper(resp), 401, mimetype="application/json")
elif "Invalid token" in resp:
return Response(error_message_helper(resp), 401, mimetype="application/json")
else:
user = User.query.filter_by(username=resp).first()
if user.admin:
if bool(User.delete_user(username)):
responseObject = {
'status': 'success',
'message': 'User deleted.'
}
return Response(json.dumps(responseObject), 200, mimetype="application/json")
else:
return Response(error_message_helper("User not found!"), 404, mimetype="application/json")
else:
return Response(error_message_helper("Only Admins may delete users!"), 401, mimetype="application/json")
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRcurl(RPackage):
"""A wrapper for 'libcurl' <http://curl.haxx.se/libcurl/> Provides
functions to allow one to compose general HTTP requests and provides
convenient functions to fetch URIs, get & post forms, etc. and process
the results returned by the Web server. This provides a great deal of
control over the HTTP/FTP/... connection and the form of the request
while providing a higher-level interface than is available just using
R socket connections. Additionally, the underlying implementation is
robust and extensive, supporting FTP/FTPS/TFTP (uploads and downloads),
SSL/HTTPS, telnet, dict, ldap, and also supports cookies, redirects,
authentication, etc."""
homepage = "https://cran.rstudio.com/web/packages/RCurl/index.html"
url = "https://cran.rstudio.com/src/contrib/RCurl_1.95-4.8.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/RCurl"
version('1.95-4.8', '9c8aaff986eb2792c89dd3ae54d21580')
depends_on('r-bitops', type=('build', 'run'))
depends_on('curl')
|
from flask import Flask, render_template, request, url_for, redirect
from flask_mysqldb import MySQL
from twilio.twiml.messaging_response import MessagingResponse
from datetime import datetime
import functions.getter as getter
import json
app = Flask(__name__)
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = ''
app.config['MYSQL_DB'] = 'whatsapp_bot_covid'
mysql = MySQL(app)
# init mysql connection
getter.init_connection(mysql)
@app.route('/')
def base():
return 'LANJUTKAN'
def myconverter(o):
if isinstance(o, datetime):
return o.__str__()
@app.route('/nasional')
def test_nasional():
data = getter.get_nasional()
datas = json.dumps(data, indent=4, sort_keys=True, default=myconverter)
return str(datas)
@app.route('/provinsi')
def test_provinsi():
data = getter.get_prov_byname('jawa timur')
datas = json.dumps(data, indent=4, sort_keys=True, default=myconverter)
return str(datas)
@app.route('/chat', methods=['POST'])
def sms_reply():
    # Get the message body from the chat
message = request.form.get('Body').lower().strip()
    # If the message is not a command, ignore it
if message[0] != '/':
return
response = ''
chat_response = MessagingResponse()
words = message.split()
if words[0] == '/halo':
response = "Halo juga\n\n"
response += "Saya adalah bot yang didesain untuk memberikan info kasus COVID-19 baik secara nasional maupun berdasarkan provinsi.\n"
response += "Sumber data didapatkan dari API yang disediakan oleh kawalcorona - https://kawalcorona.com/api/\n"
response += "Ketikkan '/help' untuk mendapatkan informasi mengenai fitur yang diprogram untuk saya\n\n"
elif words[0] == '/nasional':
response = getter.get_nasional()
elif words[0] == '/cari':
if len(words) > 1:
nama_prov = ' '.join(words[1:])
result = getter.get_prov_byname(nama_prov)
response = result if result else 'Provinsi tidak ditemukan'
else:
response += 'Pastikan anda sudah memasukan nama provinsi'
elif words[0] == '/help':
response = 'List command:\n\n'
response += '1. /halo -> Perkenalan bot\n'
response += '2. /nasional -> Kasus COVID-19 di Indonesia\n'
response += '3. /cari [nama_provinsi] -> Mencari kasus COVID-19 berdasarkan provinsi\n'
response += 'Misal: /cari jawa timur\n\n'
response += 'Versi 1.0 - 17/05/2020 20.00'
else:
response = 'Maaf perintah tidak dikenali, ketik /help untuk mengetahui fitur'
chat_response.message(response)
return str(chat_response)
# Change to debug=False when deploying to hosting
if __name__ == "__main__":
app.run(debug=True)
|
#!/usr/bin/env python3
import argparse
import csv
from sklearn.model_selection import train_test_split
from nltk import pos_tag
from nltk.tokenize import word_tokenize
def load_csv(csv_filename):
"""Load csv file generated py ```generate_training_testing_csv.py``` and parse contents into ingredients and labels lists
Parameters
----------
csv_filename : str
Name of csv file
Returns
-------
list[str]
List of ingredient strings
list[dict]
        List of dictionaries, each containing the labels for one ingredient
"""
labels, ingredients = [], []
with open(csv_filename, 'r') as f:
reader = csv.reader(f)
next(reader) # skip first row
for row in reader:
ingredients.append(row[0])
labels.append({'quantity': row[1].strip(),
'unit': row[2].strip(),
'item': row[3].strip(),
'comment': row[4].strip()})
return ingredients, labels
def create_crf(crf_filename, ingredients, labels):
"""Create a .crf file containing CRF++ formatted data.
CRF++ expects files formatted in a particular way.
    Each row contains a token, its labels and its final label, separated by whitespace (tabs or spaces), e.g.
    token label_1 label_2 label
    There can be an arbitrary number of labels and they should be arranged in order of importance.
    There *must* be the same number of labels for each token.
    Tokens are grouped into sentences by separating sentences with a blank line.
    The following labels are written by this script:
1. Part of speech
Using nltk's pos_tagger to get the part of speech for each token
2. Position in sentence
Identified by In where n is an integer, starting from 1
    The labels for each token are set according to BIO tagging.
    The first time a token with a given label is encountered, it gets a B-* tag (B for beginning)
    Any consecutive tokens with the same label get an I-* tag (I for inside)
Any tokens without a label get an OTHER tag (O for OTHER)
Parameters
----------
crf_filename : str
Name of .crf file to write data to.
ingredients : list[str]
        List of ingredient strings
labels : list[dict]
        List of dicts of labels for each ingredient
"""
with open(crf_filename, 'w') as f:
for ingredient, label in zip(ingredients, labels):
tokens = word_tokenize(ingredient)
prev_tag = 'OTHER'
for i, (token, pos) in enumerate(pos_tag(tokens)):
if token in label['quantity']:
if prev_tag != 'B-QTY' and prev_tag != 'I-QTY':
tag = 'B-QTY'
prev_tag = 'B-QTY'
else:
tag = 'I-QTY'
prev_tag = 'I-QTY'
elif token in label['unit']:
if prev_tag != 'B-UNIT' and prev_tag != 'I-UNIT':
tag = 'B-UNIT'
prev_tag = 'B-UNIT'
else:
tag = 'I-UNIT'
prev_tag = 'I-UNIT'
elif token in label['item']:
if prev_tag != 'B-ITEM' and prev_tag != 'I-ITEM':
tag = 'B-ITEM'
prev_tag = 'B-ITEM'
else:
tag = 'I-ITEM'
prev_tag = 'I-ITEM'
elif token in label['comment']:
if prev_tag != 'B-COMMENT' and prev_tag != 'I-COMMENT':
tag = 'B-COMMENT'
prev_tag = 'B-COMMENT'
else:
tag = 'I-COMMENT'
prev_tag = 'I-COMMENT'
else:
tag = 'OTHER'
prev_tag = 'OTHER'
f.write(f'{token}\t{pos}\tI{i+1}\t{tag}\n')
f.write('\n')
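# Illustrative CRF++ output for the ingredient "1 cup flour, sifted" with labels
# {'quantity': '1', 'unit': 'cup', 'item': 'flour', 'comment': 'sifted'}. Columns are
# tab-separated in the real file, and the POS column depends on nltk's tagger, so the
# exact tags shown here are only indicative:
#
#   1        CD    I1    B-QTY
#   cup      NN    I2    B-UNIT
#   flour    NN    I3    B-ITEM
#   ,        ,     I4    OTHER
#   sifted   VBN   I5    B-COMMENT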
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate CRF++ compatible training and testing data files from csv')
parser.add_argument('-i', '--input', help='Path to input csv file')
parser.add_argument('-o', '--train', help='Path to training output crf file')
parser.add_argument('-t', '--test', help='Path to testing output crf file')
parser.add_argument('-s', '--split', default=0.25, type=float, help='Fraction of data to be used for testing')
args = parser.parse_args()
ingredients, labels = load_csv(args.input)
ingredients_train, ingredients_test, labels_train, labels_test = train_test_split(ingredients, labels, test_size=args.split)
create_crf(args.train, ingredients_train, labels_train)
create_crf(args.test, ingredients_test, labels_test)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=3
# total number=4
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.Y.on(input_qubit[0])) # number=2
c.append(cirq.Y.on(input_qubit[0])) # number=3
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq9.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import logging
logger = logging.getLogger()
def reverse(filename, output=None, *, engine='pdftk'):
"""
reverse a PDF
:param filenames: PDF filepath
:param output: PDF output
"""
if output is None:
output = 'REVERSED-' + filename
if engine == 'pdfjam':
command = ['pdfjam',
filename,
'last-1',
'--outfile',
output,
]
elif engine == 'pdftk':
command = ['pdftk', ]
command.append(filename)
command.extend(['cat', 'end-1', 'output'])
command.append(output)
else:
raise ValueError(f'Wrong engine name: {engine}')
logger.debug(f'Executed command: {command}')
process = subprocess.Popen(command, stdout=subprocess.PIPE)
stdout, stderr = process.communicate()
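# Usage sketch (assumes the chosen engine's binary is installed and the input file
# exists; file names are placeholders):
#
#   reverse('input.pdf')                                   # writes REVERSED-input.pdf via pdftk
#   reverse('input.pdf', 'reversed.pdf', engine='pdfjam')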
|
from skimage.filters import threshold_otsu
from skimage.filters import sobel
from skimage.morphology import disk, remove_small_objects, binary_closing
from scipy.ndimage import binary_fill_holes
__all__ = [ 'segment_all_channels',
'bright_field_segmentation',
'fluorescent_segmentation']
def segment_all_channels(image, min_size=100, selem = disk(5)):
"""calculates the segmentation per channel
    It assumes the first channel is brightfield and the rest are fluorescent
Parameters
----------
image : 3D array, shape (M, N,C)
        The input image with the first channel as brightfield and the rest as fluorescent
Returns
-------
segmented_image : 3D array, shape (M, N,C)
The binary mask per channel
Raises
-------
None
"""
segmented_image = image.copy()*0
for ch in range(image.shape[2]):
if ch == 0:
segmented_image[:,:,ch] = bright_field_segmentation(image[:,:,ch],
min_size=min_size,
selem = selem)
else:
segmented_image[:,:,ch] = fluorescent_segmentation(image[:,:,ch],
min_size=min_size,
selem = selem)
return segmented_image
def bright_field_segmentation(image, min_size=100, selem = disk(5)):
"""calculates the segmentation per channel using edge detection
It first calculates the sobel filtered image to calculate the edges.
Then removes the small objects, closes the binary shapes and
finally fills the shapes.
Parameters
----------
image : 2D array, shape (M, N)
The input image only one channel
Returns
-------
segmented_image : 2D array, shape (M, N)
The binary mask
Raises
-------
None
References
-------
.. [1] http://jkimmel.net/so-you-want-to-segment-a-cell/
Notes
-----
1. It works best for brightfield channels in Imaging Flow Cytometry (IFC)
2. We use otsu thresholding in the background
"""
segmented_image = image.copy()*0
# calculate edges
edges = sobel(image)
# segmentation
threshold_level = threshold_otsu(edges)
bw = edges > threshold_level # bw is a standard variable name for binary images
# postprocessing
bw_cleared = remove_small_objects(bw, min_size) # clear objects <100 px
# close the edges of the outline with morphological closing
bw_close = binary_closing(bw_cleared, selem=selem)
segmented_image = binary_fill_holes(bw_close)
return segmented_image.astype(int)
def fluorescent_segmentation(image, min_size=100, selem = disk(5)):
"""calculates the segmentation using direct thresholding
    It calculates the threshold using otsu thresholding.
Then removes the small objects, closes the binary shapes and
finally fills the shapes.
Parameters
----------
image : 2D array, shape (M, N)
        The input image, a single channel.
Returns
-------
segmented_image : 2D array (int), shape (M, N)
Segmentation of the input image.
Raises
-------
None
References
-------
.. [1] https://scikit-image.org/docs/dev/auto_examples/applications/plot_human_mitosis.html
Notes
-----
    1. It works best for fluorescent channels in Imaging Flow Cytometry (IFC).
2. We use otsu thresholding in the background
"""
segmented_image = image.copy()*0
# segmentation
threshold_level = threshold_otsu(image)
bw = image > threshold_level # bw is a standard variable name for binary images
# postprocessing
bw_cleared = remove_small_objects(bw, min_size) # clear objects
# close the edges of the outline with morphological closing
bw_close = binary_closing(bw_cleared, selem=selem)
segmented_image = binary_fill_holes(bw_close)
return segmented_image.astype(int)
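# --- Usage sketch (not part of the original module) ---
# A minimal example, assuming a synthetic two-channel image where channel 0
# plays the role of brightfield and channel 1 a fluorescent marker; the shapes,
# intensities and structuring-element size are illustrative choices only, and
# it assumes a scikit-image version that still accepts the `selem` keyword.
def _demo_segment_all_channels():
    import numpy as np
    rng = np.random.default_rng(0)
    image = np.zeros((64, 64, 2), dtype=float)
    image[:, :, 0] = rng.normal(0.5, 0.05, size=(64, 64))   # noisy "brightfield"
    image[:, :, 1] = rng.normal(0.1, 0.02, size=(64, 64))   # dim fluorescent background
    image[20:40, 20:40, 0] += 0.4                            # a square "cell"
    image[20:40, 20:40, 1] += 0.8                            # lit up in fluorescence
    masks = segment_all_channels(image, min_size=50, selem=disk(3))
    print(masks.shape, int(masks[:, :, 1].sum()))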
|
#Samuel Low-Chappell
import difflib
import pickle
import sys
import os
#error checking:
#ensures that arguments were entered properly
#and that the file exists
if len(sys.argv)!=2:
print "Error: Improper number of arguments. Should be 2."
else:
pickle_name=sys.argv[1]
if not os.path.exists(pickle_name):
print "Error: File not found"
else:
pairs_dict=pickle.load(open(pickle_name, "r"))
#infinite loop so you can keep spell checking
while 1:
key_words=""
keys=[]
fix=""
suggestions_list=[]
#get input from the user
key_words=raw_input("Enter a pair of words: ")
key_list=key_words.split()
#checks for valid input
while len(key_list)!=2:
print "Error: Invalid input, try again."
key_words=raw_input("Enter a pair of words: ")
key_list=key_words.split()
word_1=key_list[0]
word_2=key_list[1]
#puts the input in the same form as the
#elements of the dictionary
key_words=str(key_list)
#checks to see if the words are in the dictionary, if so, returns a positive response
if key_words in pairs_dict:
print "Well spelled!"
#if they are not in the dictionary, it gives the closest fix
else:
#goes through the close matches of the user input
#in the dictionary
for i in difflib.get_close_matches(key_words, pairs_dict, 10):
foo=i.split()
#iterates through the close matches making comparisons to check frequency
#of use.
#sets the fix to be the pair with the highest frequency
#Then, prints the first word in fix with the
#second word of user input, as indicated in the a3 PDF
count=pairs_dict.get(i)
if fix=="" or count>pairs_dict.get(fix, 0):
fix=i
final_form=fix.split()
print "Fix: ", final_form[0].replace("['", "").replace("',", "").replace("'", "").replace("]", ""), word_2
|
from .message_pb2 import (
StartTimeFilter,
StatusFilter,
WorkflowExecutionFilter,
WorkflowTypeFilter,
)
__all__ = [
"StartTimeFilter",
"StatusFilter",
"WorkflowExecutionFilter",
"WorkflowTypeFilter",
]
|
from random import randint
import datetime
lvl = 10
base_rounds = 10
rounds = lvl * base_rounds
print("You have", rounds, "rounds to try to get through.")
for i in range(rounds):
r = randint(1, 100)
print(r)
if r >= 96:
break
print("Number of rounds:", i)
if i == rounds - 1:
print("Nothing got through")
else:
print("It took", str(datetime.timedelta(seconds=i*6)))
|
#!/usr/bin/env python3
import os
import sys
import subprocess
import argparse
def main():
parser = argparse.ArgumentParser(description='GATK MergeMutectStats')
parser.add_argument('-j', '--jvm-mem', dest='jvm_mem', type=int,
help='JVM max memory in MB', default=1000)
parser.add_argument('-I', dest='input_stats', type=str,
help='Input mutect stats file(s)', nargs="+", required=True)
parser.add_argument('-O', dest='output_name', type=str,
help='Output file name', required=True)
args = parser.parse_args()
if not args.output_name.endswith('.stats'):
sys.exit('Usage: -O output file name must end with ".stats"')
cmd = 'gatk --java-options "-Xmx%sm" MergeMutectStats -O %s' % (
args.jvm_mem, args.output_name
)
for i in args.input_stats:
cmd = cmd + ' -stats %s' % i
try:
p = subprocess.run([cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
print(p.stdout.decode("utf-8"))
if p.returncode != 0:
print('Error occurred: %s' % p.stderr.decode("utf-8"), file=sys.stderr)
sys.exit(p.returncode)
except Exception as e:
sys.exit('Execution failed: %s' % e)
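# A quick illustrative sketch (not part of the wrapper above): it rebuilds the
# command string the same way main() does, for hypothetical input files
# "tumourA.stats" and "tumourB.stats", so you can see what gets handed to the
# shell. The values and filenames are assumptions for demonstration only.
def _demo_merge_mutect_stats_command(jvm_mem=1000,
                                     input_stats=("tumourA.stats", "tumourB.stats"),
                                     output_name="merged.stats"):
    cmd = 'gatk --java-options "-Xmx%sm" MergeMutectStats -O %s' % (jvm_mem, output_name)
    for stats_file in input_stats:
        cmd = cmd + ' -stats %s' % stats_file
    return cmd
# _demo_merge_mutect_stats_command() ->
#   'gatk --java-options "-Xmx1000m" MergeMutectStats -O merged.stats -stats tumourA.stats -stats tumourB.stats'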
if __name__ == "__main__":
main()
|
import unittest, json
from etk.etk import ETK
from etk.extractors.glossary_extractor import GlossaryExtractor
from etk.knowledge_graph_schema import KGSchema
sample_input = {
"projects": [
{
"name": "etk",
"description": "version 2 of etk, implemented by Runqi, Dongyu, Sylvia, Amandeep and others."
},
{
"name": "rltk",
"description": "record linkage toolkit, implemented by Pedro, Mayank, Yixiang and several students."
}
]
}
class TestProvenance(unittest.TestCase):
def test_Provenance(self) -> None:
kg_schema = KGSchema(json.load(open('etk/unit_tests/ground_truth/test_config.json')))
self.etk = ETK(kg_schema=kg_schema, use_spacy_tokenizer=True)
g = ['runqi', 'sylvia', 'dongyu', 'mayank', 'pedro', 'amandeep', 'yixiang']
self.name_extractor = GlossaryExtractor(g, "name_extractor",
self.etk.default_tokenizer,
case_sensitive=False, ngrams=1)
doc = self.etk.create_document(sample_input)
descriptions = doc.select_segments("projects[*].description")
projects = doc.select_segments("projects[*]")
for d, p in zip(descriptions, projects):
names = doc.extract(self.name_extractor, d)
p.store(names, "members")
expected_provenances = [
{
"@id": 0,
"@type": "extraction_provenance_record",
"method": "name_extractor",
"confidence": 1.0,
"origin_record": {
"path": "projects.[0].description",
"start_char": 33,
"end_char": 38
}
},
{
"@id": 1,
"@type": "extraction_provenance_record",
"method": "name_extractor",
"confidence": 1.0,
"origin_record": {
"path": "projects.[0].description",
"start_char": 40,
"end_char": 46
}
},
{
"@id": 2,
"@type": "extraction_provenance_record",
"method": "name_extractor",
"confidence": 1.0,
"origin_record": {
"path": "projects.[0].description",
"start_char": 48,
"end_char": 54
}
},
{
"@id": 3,
"@type": "extraction_provenance_record",
"method": "name_extractor",
"confidence": 1.0,
"origin_record": {
"path": "projects.[0].description",
"start_char": 56,
"end_char": 64
}
},
{
"@id": 4,
"@type": "storage_provenance_record",
"doc_id": None,
"field": None,
"destination": "projects.[0].members",
"parent_provenances": {
"Runqi": 0,
"Dongyu": 1,
"Sylvia": 2,
"Amandeep": 3
}
},
{
"@id": 5,
"@type": "extraction_provenance_record",
"method": "name_extractor",
"confidence": 1.0,
"origin_record": {
"path": "projects.[1].description",
"start_char": 39,
"end_char": 44
}
},
{
"@id": 6,
"@type": "extraction_provenance_record",
"method": "name_extractor",
"confidence": 1.0,
"origin_record": {
"path": "projects.[1].description",
"start_char": 46,
"end_char": 52
}
},
{
"@id": 7,
"@type": "extraction_provenance_record",
"method": "name_extractor",
"confidence": 1.0,
"origin_record": {
"path": "projects.[1].description",
"start_char": 54,
"end_char": 61
}
},
{
"@id": 8,
"@type": "storage_provenance_record",
"doc_id": None,
"field": None,
"destination": "projects.[1].members",
"parent_provenances": {
"Pedro": 5,
"Mayank": 6,
"Yixiang": 7
}
}
]
expected_projects = [
{
"name": "etk",
"description": "version 2 of etk, implemented by Runqi, Dongyu, Sylvia, Amandeep and others.",
"members": [
"Runqi",
"Dongyu",
"Sylvia",
"Amandeep"
]
},
{
"name": "rltk",
"description": "record linkage toolkit, implemented by Pedro, Mayank, Yixiang and several students.",
"members": [
"Pedro",
"Mayank",
"Yixiang"
]
}
]
self.assertEqual(expected_projects, doc.value["projects"])
self.assertEqual(expected_provenances, doc.value["provenances"])
if __name__ == '__main__':
unittest.main()
|
#
# PySNMP MIB module PANDATEL-BMZ-MODEM-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/PANDATEL-BMZ-MODEM-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:37:09 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint")
device_id, mdmSpecifics = mibBuilder.importSymbols("PANDATEL-MODEM-MIB", "device-id", "mdmSpecifics")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibIdentifier, enterprises, TimeTicks, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Unsigned32, Bits, Counter64, NotificationType, iso, IpAddress, Gauge32, ModuleIdentity, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "enterprises", "TimeTicks", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Unsigned32", "Bits", "Counter64", "NotificationType", "iso", "IpAddress", "Gauge32", "ModuleIdentity", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
bmz_modem = MibIdentifier((1, 3, 6, 1, 4, 1, 760, 1, 1, 2, 1, 10, 10000, 2, 302)).setLabel("bmz-modem")
bmz = MibIdentifier((1, 3, 6, 1, 4, 1, 760, 1, 1, 2, 1, 10, 302))
bmzModemTable = MibTable((1, 3, 6, 1, 4, 1, 760, 1, 1, 2, 1, 10, 302, 1), )
if mibBuilder.loadTexts: bmzModemTable.setStatus('mandatory')
if mibBuilder.loadTexts: bmzModemTable.setDescription('This table contains information about all BM-Z converters in all racks.')
bmzTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 760, 1, 1, 2, 1, 10, 302, 1, 1), ).setIndexNames((0, "PANDATEL-BMZ-MODEM-MIB", "mdmRack"), (0, "PANDATEL-BMZ-MODEM-MIB", "mdmModem"), (0, "PANDATEL-BMZ-MODEM-MIB", "mdmPosition"))
if mibBuilder.loadTexts: bmzTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: bmzTableEntry.setDescription('The index of the table.')
mdmRack = MibTableColumn((1, 3, 6, 1, 4, 1, 760, 1, 1, 2, 1, 10, 302, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmRack.setStatus('mandatory')
if mibBuilder.loadTexts: mdmRack.setDescription('The index of the rack where the converter is installed.')
mdmModem = MibTableColumn((1, 3, 6, 1, 4, 1, 760, 1, 1, 2, 1, 10, 302, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmModem.setStatus('mandatory')
if mibBuilder.loadTexts: mdmModem.setDescription('This entry displays the slot number where the corresponding converter is installed in the rack.')
mdmPosition = MibTableColumn((1, 3, 6, 1, 4, 1, 760, 1, 1, 2, 1, 10, 302, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("local", 1), ("remote", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmPosition.setStatus('mandatory')
if mibBuilder.loadTexts: mdmPosition.setDescription("This entry displays the location of the corresponding converter: 'local' or 'remote'. The converter which is plugged into a managed rack is 'local', the counterpart is 'remote'.")
mdmModemName = MibTableColumn((1, 3, 6, 1, 4, 1, 760, 1, 1, 2, 1, 10, 302, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmModemName.setStatus('mandatory')
if mibBuilder.loadTexts: mdmModemName.setDescription('The verbal name of this converter.')
mdmInterfaceEmulationMode = MibTableColumn((1, 3, 6, 1, 4, 1, 760, 1, 1, 2, 1, 10, 302, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 99))).clone(namedValues=NamedValues(("other", 1), ("dte", 2), ("dce", 3), ("te", 4), ("nt", 5), ("unknown", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmInterfaceEmulationMode.setStatus('mandatory')
if mibBuilder.loadTexts: mdmInterfaceEmulationMode.setDescription("Interface mode of the unit: 'dte', 'dce', 'te' or 'nt'.")
mdmModemProperty = MibTableColumn((1, 3, 6, 1, 4, 1, 760, 1, 1, 2, 1, 10, 302, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 99))).clone(namedValues=NamedValues(("other", 1), ("e1", 2), ("t1", 3), ("unknown", 99)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmModemProperty.setStatus('mandatory')
if mibBuilder.loadTexts: mdmModemProperty.setDescription("This entry displays the speed class of the BM-Z: 'e1'.")
mdmHDSLUnit = MibTableColumn((1, 3, 6, 1, 4, 1, 760, 1, 1, 2, 1, 10, 302, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("ntu", 2), ("ltu", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmHDSLUnit.setStatus('mandatory')
if mibBuilder.loadTexts: mdmHDSLUnit.setDescription('HDSL unit type: Line Termination Unit (LTU) or Network Termination Unit (NTU)')
mdmClockSystem = MibTableColumn((1, 3, 6, 1, 4, 1, 760, 1, 1, 2, 1, 10, 302, 1, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("dual", 2), ("single", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmClockSystem.setStatus('mandatory')
if mibBuilder.loadTexts: mdmClockSystem.setDescription("The clock system of the link: 'dual' or 'single'.")
mdmClockSource = MibTableColumn((1, 3, 6, 1, 4, 1, 760, 1, 1, 2, 1, 10, 302, 1, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("internal", 2), ("remote", 3), ("external", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmClockSource.setStatus('mandatory')
if mibBuilder.loadTexts: mdmClockSource.setDescription("The clock source of the link: 'internal', 'external', or 'remote'.")
mdmDataRate = MibTableColumn((1, 3, 6, 1, 4, 1, 760, 1, 1, 2, 1, 10, 302, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("other", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mdmDataRate.setStatus('mandatory')
if mibBuilder.loadTexts: mdmDataRate.setDescription('The data rate at the interface in bits per seconds. The data rate at the remote counterpart changes simultaneously.')
mdmRemoteAccessMode = MibTableColumn((1, 3, 6, 1, 4, 1, 760, 1, 1, 2, 1, 10, 302, 1, 1, 64), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 7))).clone(namedValues=NamedValues(("other", 1), ("disable", 2), ("via-hdsl-channel", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mdmRemoteAccessMode.setStatus('mandatory')
if mibBuilder.loadTexts: mdmRemoteAccessMode.setDescription("The remote access mode: 'via-hdsl-channel'.")
mibBuilder.exportSymbols("PANDATEL-BMZ-MODEM-MIB", mdmModemName=mdmModemName, mdmRemoteAccessMode=mdmRemoteAccessMode, bmzModemTable=bmzModemTable, bmzTableEntry=bmzTableEntry, mdmClockSystem=mdmClockSystem, mdmModem=mdmModem, mdmDataRate=mdmDataRate, mdmClockSource=mdmClockSource, bmz=bmz, mdmRack=mdmRack, mdmModemProperty=mdmModemProperty, bmz_modem=bmz_modem, mdmHDSLUnit=mdmHDSLUnit, mdmInterfaceEmulationMode=mdmInterfaceEmulationMode, mdmPosition=mdmPosition)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default glyphmap writer. Writes rows like:
picosvg/clipped/emoji_u270d_1f3fb.svg, bitmap/emoji_u270d_1f3fb.png, g_270d_1f3fb, 270d, 1f3fb
The first two columns represent respectively the SVG and bitmap filenames; either can be
left empty if the font is vector- or bitmap-only.
The third column is the UFO/PostScript glyph name, and it's required.
The fourth and any remaining columns are optional and contain the Unicode codepoints
as hexadecimal digits; a single codepoint gets added to the cmap, more than one produces
a GSUB ligature, and no codepoint at all leaves the glyph unmapped.
"""
import enum
from absl import app
from absl import flags
from nanoemoji.glyph import glyph_name
from nanoemoji.glyphmap import GlyphMapping
from nanoemoji import codepoints
from nanoemoji import util
from pathlib import Path
from typing import Iterator, Sequence, Tuple
FLAGS = flags.FLAGS
flags.DEFINE_string("output_file", "-", "Output filename ('-' means stdout)")
class InputFileSuffix(enum.Enum):
SVG = ".svg"
PNG = ".png"
def _glyphmappings(input_files: Sequence[str]) -> Iterator[GlyphMapping]:
# group .svg and/or .png files with the same filename stem
sources_by_stem = {}
suffix_index = {InputFileSuffix.SVG: 0, InputFileSuffix.PNG: 1}
for filename in input_files:
input_file = Path(filename)
i = suffix_index[InputFileSuffix(input_file.suffix)]
sources_by_stem.setdefault(input_file.stem, [None, None])[i] = input_file
for source_stem, files in sources_by_stem.items():
cps = tuple(codepoints.from_filename(source_stem))
yield GlyphMapping(*files, cps, glyph_name(cps))
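# A small illustrative sketch (not part of nanoemoji): it splits one glyphmap
# row of the format described in the module docstring into its parts. The row
# used here is the docstring's own example; the helper name is an assumption.
def _demo_parse_glyphmap_row(row=None):
    if row is None:
        row = ("picosvg/clipped/emoji_u270d_1f3fb.svg, "
               "bitmap/emoji_u270d_1f3fb.png, g_270d_1f3fb, 270d, 1f3fb")
    fields = [field.strip() for field in row.split(",")]
    svg_file = fields[0] or None          # empty first column means no SVG source
    bitmap_file = fields[1] or None       # empty second column means no bitmap source
    ufo_glyph_name = fields[2]            # required glyph name
    codepoints = tuple(int(cp, 16) for cp in fields[3:])
    return svg_file, bitmap_file, ufo_glyph_name, codepoints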
def main(argv):
input_files = util.expand_ninja_response_files(argv[1:])
with util.file_printer(FLAGS.output_file) as print:
for gm in _glyphmappings(input_files):
# filename(s), glyph_name, codepoint(s)
print(gm.csv_line())
if __name__ == "__main__":
app.run(main)
|
#!/usr/bin/env python
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
# See https://packaging.python.org/tutorials/packaging-projects/ for details
setuptools.setup(
name="fuzzingbook",
version="0.0.1",
author="Andreas Zeller et al.",
author_email="zeller@cs.uni-saarland.de",
description="Code for 'Generating Software Tests' (https://www.fuzzingbook.org/)",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://www.fuzzingbook.org/",
packages=setuptools.find_packages(),
# See https://pypi.org/classifiers/
classifiers=[
"Development Status :: 3 - Alpha",
"Framework :: Jupyter",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Testing",
"Topic :: Education :: Testing",
"Topic :: Software Development :: Quality Assurance",
"Topic :: Security"
],
)
|
from ._version import get_versions
from .contexts import cd
from .prompting import error, prompt, status, success
from .unix import cp, ln_s
__all__ = ["prompt", "status", "success", "error", "cp", "cd", "ln_s"]
__version__ = get_versions()["version"]
del get_versions
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AutoAugment util file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import math
import tensorflow as tf
import numpy as np
from PIL import Image, ImageEnhance, ImageOps
import random
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
# Represents an invalid bounding box that is used for checking for padding
# lists of bounding box coordinates for a few augmentation operations
_INVALID_BOX = [[-1.0, -1.0, -1.0, -1.0]]
def policy_v0():
"""Autoaugment policy that was used in AutoAugment Detection Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)],
[('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)],
[('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)],
[('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)],
]
return policy
def policy_v1():
"""Autoaugment policy that was used in AutoAugment Detection Paper."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('TranslateX_BBox', 0.6, 4), ('Equalize', 0.8, 10)], # 0
[('TranslateY_Only_BBoxes', 0.2, 2), ('Cutout', 0.8, 8)], # 1
[('Sharpness', 0.0, 8), ('ShearX_BBox', 0.4, 0)], # 2
[('ShearY_BBox', 1.0, 2), ('TranslateY_Only_BBoxes', 0.6, 6)], # 3
[('Rotate_BBox', 0.6, 10), ('Color', 1.0, 6)], # 4
[('Color', 0.0, 0), ('ShearX_Only_BBoxes', 0.8, 4)], # 5
[('ShearY_Only_BBoxes', 0.8, 2), ('Flip_Only_BBoxes', 1.0, 10)], # 6
[('Equalize', 0.6, 10), ('TranslateX_BBox', 0.2, 2)],
[('Color', 1.0, 10), ('TranslateY_Only_BBoxes', 0.4, 6)],
[('Rotate_BBox', 0.8, 10), ('Contrast', 0.0, 10)], # 9
[('Cutout', 0.2, 2), ('Brightness', 0.8, 10)], # 10
[('Color', 1.0, 6), ('Equalize', 1.0, 2)],
[('Cutout_Only_BBoxes', 1.0, 6), ('TranslateY_Only_BBoxes', 0.8, 2)],
[('Color', 0.2, 8), ('Rotate_BBox', 0.8, 10)],
[('Sharpness', 0.4, 4), ('TranslateY_Only_BBoxes', 0.0, 4)],
[('Sharpness', 1.0, 4), ('SolarizeAdd', 0.4, 4)], # 15
[('Rotate_BBox', 1.0, 8), ('Sharpness', 0.2, 8)],
[('ShearY_BBox', 0.6, 10), ('Equalize_Only_BBoxes', 0.6, 8)],
[('ShearX_BBox', 0.2, 6), ('TranslateY_Only_BBoxes', 0.2, 10)],
[('SolarizeAdd', 0.6, 8), ('Brightness', 0.8, 5)], # 19
]
return policy
def policy_vtest():
"""Autoaugment test policy for debugging."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Brightness', 0.5, 6)],
[('SolarizeAdd', 0.5, 2)],
[('Posterize', 0.5, 10)],
[('Equalize', 0.5, 10)],
[('Contrast', 0.5, 7)],
[('Color', 0.5, 10)],
[('Sharpness', 0.5, 10)]
]
return policy
def policy_v2():
"""Additional policy that performs well on object detection."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Color', 0.0, 6), ('Cutout', 0.6, 8), ('Sharpness', 0.4, 8)],
[('Rotate_BBox', 0.4, 8), ('Sharpness', 0.4, 2),
('Rotate_BBox', 0.8, 10)],
[('TranslateY_BBox', 1.0, 8), ('AutoContrast', 0.8, 2)],
[('AutoContrast', 0.4, 6), ('ShearX_BBox', 0.8, 8),
('Brightness', 0.0, 10)],
[('SolarizeAdd', 0.2, 6), ('Contrast', 0.0, 10),
('AutoContrast', 0.6, 0)],
[('Cutout', 0.2, 0), ('Solarize', 0.8, 8), ('Color', 1.0, 4)],
[('TranslateY_BBox', 0.0, 4), ('Equalize', 0.6, 8),
('Solarize', 0.0, 10)],
[('TranslateY_BBox', 0.2, 2), ('ShearY_BBox', 0.8, 8),
('Rotate_BBox', 0.8, 8)],
[('Cutout', 0.8, 8), ('Brightness', 0.8, 8), ('Cutout', 0.2, 2)],
[('Color', 0.8, 4), ('TranslateY_BBox', 1.0, 6), ('Rotate_BBox', 0.6, 6)],
[('Rotate_BBox', 0.6, 10), ('BBox_Cutout', 1.0, 4), ('Cutout', 0.2, 8)],
[('Rotate_BBox', 0.0, 0), ('Equalize', 0.6, 6), ('ShearY_BBox', 0.6, 8)],
[('Brightness', 0.8, 8), ('AutoContrast', 0.4, 2),
('Brightness', 0.2, 2)],
[('TranslateY_BBox', 0.4, 8), ('Solarize', 0.4, 6),
('SolarizeAdd', 0.2, 10)],
[('Contrast', 1.0, 10), ('SolarizeAdd', 0.2, 8), ('Equalize', 0.2, 4)],
]
return policy
def policy_v3():
""""Additional policy that performs well on object detection."""
# Each tuple is an augmentation operation of the form
# (operation, probability, magnitude). Each element in policy is a
# sub-policy that will be applied sequentially on the image.
policy = [
[('Posterize', 0.8, 2), ('TranslateX_BBox', 1.0, 8)],
[('BBox_Cutout', 0.2, 10), ('Sharpness', 1.0, 8)],
[('Rotate_BBox', 0.6, 8), ('Rotate_BBox', 0.8, 10)],
[('Equalize', 0.8, 10), ('AutoContrast', 0.2, 10)],
[('SolarizeAdd', 0.2, 2), ('TranslateY_BBox', 0.2, 8)],
[('Sharpness', 0.0, 2), ('Color', 0.4, 8)],
[('Equalize', 1.0, 8), ('TranslateY_BBox', 1.0, 8)],
[('Posterize', 0.6, 2), ('Rotate_BBox', 0.0, 10)],
[('AutoContrast', 0.6, 0), ('Rotate_BBox', 1.0, 6)],
[('Equalize', 0.0, 4), ('Cutout', 0.8, 10)],
[('Brightness', 1.0, 2), ('TranslateY_BBox', 1.0, 6)],
[('Contrast', 0.0, 2), ('ShearY_BBox', 0.8, 0)],
[('AutoContrast', 0.8, 10), ('Contrast', 0.2, 10)],
[('Rotate_BBox', 1.0, 10), ('Cutout', 1.0, 10)],
[('SolarizeAdd', 0.8, 6), ('Equalize', 0.8, 8)],
]
return policy
#
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor of type uint8.
"""
if isinstance(image1, np.ndarray):
if factor == 0.0:
return image1
if factor == 1.0:
return image2
image1 = image1.astype(np.float32)
image2 = image2.astype(np.float32)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = image1 + scaled
# Interpolate
if factor > 0.0 and factor < 1.0:
# Interpolation means we always stay within 0 and 255.
return temp.astype(np.uint8)
# Extrapolate:
#
# We need to clip and then cast.
return np.clip(temp, 0.0, 255.0).astype(np.uint8)
else:
if factor == 0.0:
return tf.convert_to_tensor(image1)
if factor == 1.0:
return tf.convert_to_tensor(image2)
image1 = tf.to_float(image1)
image2 = tf.to_float(image2)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.to_float(image1) + scaled
# Interpolate
if factor > 0.0 and factor < 1.0:
# Interpolation means we always stay within 0 and 255.
return tf.cast(temp, tf.uint8)
# Extrapolate:
#
# We need to clip and then cast.
return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
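# A tiny numpy-only sanity sketch (not part of the original file): blending a
# black and a white uint8 image with factor 0.5 lands near mid-grey, while
# factor 1.5 extrapolates and gets clipped to 255. The shapes are arbitrary.
def _demo_blend():
  black = np.zeros((2, 2, 3), dtype=np.uint8)
  white = np.full((2, 2, 3), 255, dtype=np.uint8)
  assert blend(black, white, 0.5)[0, 0, 0] == 127
  assert blend(black, white, 1.5)[0, 0, 0] == 255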
# Cutout: paste a constant-value (black by default) patch onto the image
def cutout(image, pad_size, replace=0):
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
a random location within `img`. The pixel values filled in will be of the
value `replace`. The location where the mask will be applied is chosen
uniformly at random over the whole image.
Args:
image: An image Tensor of type uint8.
pad_size: Specifies how big the zero mask that will be generated is that
is applied to the image. The mask will be of size
(2*pad_size x 2*pad_size).
replace: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
Returns:
An image Tensor that is of type uint8.
"""
if isinstance(image, np.ndarray):
image_height = image.shape[0]
image_width = image.shape[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = np.random.randint(low=0, high=image_height)
cutout_center_width = np.random.randint(low=0, high=image_width)
lower_pad = max(0, cutout_center_height - pad_size)
upper_pad = max(0, image_height - cutout_center_height - pad_size)
left_pad = max(0, cutout_center_width - pad_size)
right_pad = max(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad), image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = np.pad(np.zeros(cutout_shape, dtype=image.dtype), padding_dims, 'constant', constant_values=1)
mask = mask[..., np.newaxis]
mask = np.tile(mask, [1, 1, 3])
image = np.where(mask == 0, np.ones_like(image, dtype=image.dtype) * replace, image)
return image.astype(np.uint8)
else:
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=0, maxval=image_height,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=0, maxval=image_width,
dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(mask, 0),
tf.ones_like(image, dtype=image.dtype) * replace,
image)
return image
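# A small numpy-only sketch (not in the original file): apply cutout to a
# white RGB image and check that some pixels were set to the replace value
# while the rest stay untouched. Image size and pad_size are arbitrary.
def _demo_cutout():
  image = np.full((32, 32, 3), 255, dtype=np.uint8)
  cut = cutout(image, pad_size=4, replace=0)
  assert cut.shape == image.shape
  assert (cut == 0).any()          # the zero mask was applied somewhere
  assert (cut == 255).any()        # the rest of the image is untouched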
#
def solarize(image, threshold=128):
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract 255 from the pixel.
if isinstance(image, np.ndarray):
return np.where(image < threshold, image, 255 - image)
else:
return tf.where(image < threshold, image, 255 - image)
#
def solarize_add(image, addition=0, threshold=128):
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128.
if isinstance(image, np.ndarray):
added_image = image.astype(np.int64) + addition
added_image = np.clip(added_image, 0, 255).astype(np.uint8)
return np.where(image < threshold, added_image, image)
else:
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
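# A quick numeric sketch (not in the original file) of what solarize and
# solarize_add do on numpy inputs: values below the 128 threshold pass through
# (solarize) or get shifted (solarize_add); values at or above it are inverted
# or left alone, respectively.
def _demo_solarize():
  pixels = np.array([0, 100, 128, 200, 255], dtype=np.uint8)
  assert solarize(pixels).tolist() == [0, 100, 127, 55, 0]
  assert solarize_add(pixels, addition=10).tolist() == [10, 110, 128, 200, 255]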
#
def color(image, factor):
"""Equivalent of PIL Color."""
if isinstance(image, np.ndarray):
img = Image.fromarray(image)
img = ImageEnhance.Color(img).enhance(factor)
return np.array(img)
else:
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
# Contrast
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
if isinstance(image, np.ndarray):
img = Image.fromarray(image)
img = ImageEnhance.Contrast(img).enhance(factor)
return np.array(img)
else:
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
# Brightness
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
if isinstance(image, np.ndarray):
degenerate = np.zeros_like(image)
else:
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
#
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = 8 - bits
if isinstance(image, np.ndarray):
img = Image.fromarray(image)
img = ImageOps.posterize(img, bits)
return np.array(img)
else:
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
#
def rotate(image, degrees, replace):
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels caused by
the rotate operation.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
if isinstance(image, np.ndarray):
img = Image.fromarray(image)
rot = img.convert("RGBA").rotate(degrees)
img = Image.composite(rot, Image.new("RGBA", rot.size, (replace[0],) * 4), rot).convert(img.mode)
return np.array(img)
else:
image = tf.contrib.image.rotate(wrap(image), radians)
return unwrap(image, replace)
#
def random_shift_bbox(image, bbox, pixel_scaling, replace,
new_min_bbox_coords=None):
"""Move the bbox and the image content to a slightly new random location.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
The potential values for the new min corner of the bbox will be between
[old_min - pixel_scaling * bbox_height/2,
old_min - pixel_scaling * bbox_height/2].
pixel_scaling: A float between 0 and 1 that specifies the pixel range
that the new bbox location will be sampled from.
replace: A one or three value 1D tensor to fill empty pixels.
new_min_bbox_coords: If not None, then this is a tuple that specifies the
(min_y, min_x) coordinates of the new bbox. Normally this is randomly
specified, but this allows it to be manually set. The coordinates are
the absolute coordinates between 0 and image height/width and are int32.
Returns:
The new image that will have the shifted bbox location in it along with
the new bbox that contains the new coordinates.
"""
if isinstance(image, np.ndarray):
# Obtains image height and width and create helper clip functions.
image_height = float(image.shape[0])
image_width = float(image.shape[1])
def clip_y(val):
return np.clip(val, 0, int(image_height) - 1)
def clip_x(val):
return np.clip(val, 0, int(image_width) - 1)
# Convert bbox to pixel coordinates.
min_y = int(image_height * bbox[0])
min_x = int(image_width * bbox[1])
max_y = int(clip_y(image_height * bbox[2]))
max_x = int(clip_x(image_width * bbox[3]))
bbox_height, bbox_width = (max_y - min_y + 1, max_x - min_x + 1)
image_height = int(image_height)
image_width = int(image_width)
# Select the new min/max bbox ranges that are used for sampling the
# new min x/y coordinates of the shifted bbox.
minval_y = clip_y(
min_y - int(pixel_scaling * float(bbox_height) / 2.0))
maxval_y = clip_y(
min_y + int(pixel_scaling * float(bbox_height) / 2.0))
minval_x = clip_x(
min_x - int(pixel_scaling * float(bbox_width) / 2.0))
maxval_x = clip_x(
min_x + int(pixel_scaling * float(bbox_width) / 2.0))
# Sample and calculate the new unclipped min/max coordinates of the new bbox.
if new_min_bbox_coords is None:
unclipped_new_min_y = np.random.randint(low=minval_y, high=maxval_y)
unclipped_new_min_x = np.random.randint(low=minval_x, high=maxval_x)
else:
unclipped_new_min_y, unclipped_new_min_x = (clip_y(new_min_bbox_coords[0]), clip_x(new_min_bbox_coords[1]))
unclipped_new_max_y = unclipped_new_min_y + bbox_height - 1
unclipped_new_max_x = unclipped_new_min_x + bbox_width - 1
# Determine if any of the new bbox was shifted outside the current image.
# This is used for determining if any of the original bbox content should be
# discarded.
new_min_y, new_min_x, new_max_y, new_max_x = (
clip_y(unclipped_new_min_y), clip_x(unclipped_new_min_x), clip_y(unclipped_new_max_y),
clip_x(unclipped_new_max_x))
shifted_min_y = (new_min_y - unclipped_new_min_y) + min_y
shifted_max_y = max_y - (unclipped_new_max_y - new_max_y)
shifted_min_x = (new_min_x - unclipped_new_min_x) + min_x
shifted_max_x = max_x - (unclipped_new_max_x - new_max_x)
# Create the new bbox tensor by converting pixel integer values to floats.
new_bbox = np.stack([
float(new_min_y) / float(image_height),
float(new_min_x) / float(image_width),
float(new_max_y) / float(image_height),
float(new_max_x) / float(image_width)])
# Copy the contents in the bbox and fill the old bbox location
# with gray (128).
bbox_content = image[shifted_min_y:shifted_max_y + 1,
shifted_min_x:shifted_max_x + 1, :]
def mask_and_add_image(
min_y_, min_x_, max_y_, max_x_, mask, content_tensor, image_):
"""Applies mask to bbox region in image then adds content_tensor to it."""
mask = np.pad(mask,
[[min_y_, (image_height - 1) - max_y_],
[min_x_, (image_width - 1) - max_x_],
[0, 0]], 'constant', constant_values=1)
content_tensor = np.pad(content_tensor,
[[min_y_, (image_height - 1) - max_y_],
[min_x_, (image_width - 1) - max_x_],
[0, 0]], 'constant', constant_values=0)
return image_ * mask + content_tensor
# Zero out original bbox location.
mask = np.zeros_like(image)[min_y:max_y + 1, min_x:max_x + 1, :]
grey_tensor = np.zeros_like(mask) + replace[0]
image = mask_and_add_image(min_y, min_x, max_y, max_x, mask,
grey_tensor, image)
# Fill in bbox content to new bbox location.
mask = np.zeros_like(bbox_content)
image = mask_and_add_image(new_min_y, new_min_x, new_max_y, new_max_x, mask,
bbox_content, image)
else:
# Obtains image height and width and create helper clip functions.
image_height = tf.to_float(tf.shape(image)[0])
image_width = tf.to_float(tf.shape(image)[1])
def clip_y(val):
return tf.clip_by_value(val, 0, tf.to_int32(image_height) - 1)
def clip_x(val):
return tf.clip_by_value(val, 0, tf.to_int32(image_width) - 1)
# Convert bbox to pixel coordinates.
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = clip_y(tf.to_int32(image_height * bbox[2]))
max_x = clip_x(tf.to_int32(image_width * bbox[3]))
bbox_height, bbox_width = (max_y - min_y + 1, max_x - min_x + 1)
image_height = tf.to_int32(image_height)
image_width = tf.to_int32(image_width)
# Select the new min/max bbox ranges that are used for sampling the
# new min x/y coordinates of the shifted bbox.
minval_y = clip_y(
min_y - tf.to_int32(pixel_scaling * tf.to_float(bbox_height) / 2.0))
maxval_y = clip_y(
min_y + tf.to_int32(pixel_scaling * tf.to_float(bbox_height) / 2.0))
minval_x = clip_x(
min_x - tf.to_int32(pixel_scaling * tf.to_float(bbox_width) / 2.0))
maxval_x = clip_x(
min_x + tf.to_int32(pixel_scaling * tf.to_float(bbox_width) / 2.0))
# Sample and calculate the new unclipped min/max coordinates of the new bbox.
if new_min_bbox_coords is None:
unclipped_new_min_y = tf.random_uniform(
shape=[], minval=minval_y, maxval=maxval_y,
dtype=tf.int32)
unclipped_new_min_x = tf.random_uniform(
shape=[], minval=minval_x, maxval=maxval_x,
dtype=tf.int32)
else:
unclipped_new_min_y, unclipped_new_min_x = (
clip_y(new_min_bbox_coords[0]), clip_x(new_min_bbox_coords[1]))
unclipped_new_max_y = unclipped_new_min_y + bbox_height - 1
unclipped_new_max_x = unclipped_new_min_x + bbox_width - 1
# Determine if any of the new bbox was shifted outside the current image.
# This is used for determining if any of the original bbox content should be
# discarded.
new_min_y, new_min_x, new_max_y, new_max_x = (
clip_y(unclipped_new_min_y), clip_x(unclipped_new_min_x),
clip_y(unclipped_new_max_y), clip_x(unclipped_new_max_x))
shifted_min_y = (new_min_y - unclipped_new_min_y) + min_y
shifted_max_y = max_y - (unclipped_new_max_y - new_max_y)
shifted_min_x = (new_min_x - unclipped_new_min_x) + min_x
shifted_max_x = max_x - (unclipped_new_max_x - new_max_x)
# Create the new bbox tensor by converting pixel integer values to floats.
new_bbox = tf.stack([
tf.to_float(new_min_y) / tf.to_float(image_height),
tf.to_float(new_min_x) / tf.to_float(image_width),
tf.to_float(new_max_y) / tf.to_float(image_height),
tf.to_float(new_max_x) / tf.to_float(image_width)])
# Copy the contents in the bbox and fill the old bbox location
# with gray (128).
bbox_content = image[shifted_min_y:shifted_max_y + 1,
shifted_min_x:shifted_max_x + 1, :]
def mask_and_add_image(
min_y_, min_x_, max_y_, max_x_, mask, content_tensor, image_):
"""Applies mask to bbox region in image then adds content_tensor to it."""
mask = tf.pad(mask,
[[min_y_, (image_height - 1) - max_y_],
[min_x_, (image_width - 1) - max_x_],
[0, 0]], constant_values=1)
content_tensor = tf.pad(content_tensor,
[[min_y_, (image_height - 1) - max_y_],
[min_x_, (image_width - 1) - max_x_],
[0, 0]], constant_values=0)
return image_ * mask + content_tensor
# Zero out original bbox location.
mask = tf.zeros_like(image)[min_y:max_y + 1, min_x:max_x + 1, :]
grey_tensor = tf.zeros_like(mask) + replace[0]
image = mask_and_add_image(min_y, min_x, max_y, max_x, mask,
grey_tensor, image)
# Fill in bbox content to new bbox location.
mask = tf.zeros_like(bbox_content)
image = mask_and_add_image(new_min_y, new_min_x, new_max_y, new_max_x, mask,
bbox_content, image)
return image, new_bbox
#
def _clip_bbox(min_y, min_x, max_y, max_x):
"""Clip bounding box coordinates between 0 and 1.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
Returns:
Clipped coordinate values between 0 and 1.
"""
if isinstance(min_y, float):
min_y = np.clip(min_y, 0.0, 1.0)
min_x = np.clip(min_x, 0.0, 1.0)
max_y = np.clip(max_y, 0.0, 1.0)
max_x = np.clip(max_x, 0.0, 1.0)
else:
min_y = tf.clip_by_value(min_y, 0.0, 1.0)
min_x = tf.clip_by_value(min_x, 0.0, 1.0)
max_y = tf.clip_by_value(max_y, 0.0, 1.0)
max_x = tf.clip_by_value(max_x, 0.0, 1.0)
return min_y, min_x, max_y, max_x
#
def _check_bbox_area(min_y, min_x, max_y, max_x, delta=0.05):
"""Adjusts bbox coordinates to make sure the area is > 0.
Args:
min_y: Normalized bbox coordinate of type float between 0 and 1.
min_x: Normalized bbox coordinate of type float between 0 and 1.
max_y: Normalized bbox coordinate of type float between 0 and 1.
max_x: Normalized bbox coordinate of type float between 0 and 1.
delta: Float, this is used to create a gap of size 2 * delta between
bbox min/max coordinates that are the same on the boundary.
This prevents the bbox from having an area of zero.
Returns:
Tuple of new bbox coordinates between 0 and 1 that will now have a
guaranteed area > 0.
"""
height = max_y - min_y
width = max_x - min_x
def _adjust_bbox_boundaries(min_coord, max_coord):
# Make sure max is never 0 and min is never 1.
if isinstance(min_coord, float):
max_coord = max(max_coord, 0.0 + delta)
min_coord = min(min_coord, 1.0 - delta)
else:
max_coord = tf.maximum(max_coord, 0.0 + delta)
min_coord = tf.minimum(min_coord, 1.0 - delta)
return min_coord, max_coord
if isinstance(min_y, float):
if height == 0:
min_y, max_y = _adjust_bbox_boundaries(min_y, max_y)
if width == 0:
min_x, max_x = _adjust_bbox_boundaries(min_x, max_x)
else:
min_y, max_y = tf.cond(tf.equal(height, 0.0),
lambda: _adjust_bbox_boundaries(min_y, max_y),
lambda: (min_y, max_y))
min_x, max_x = tf.cond(tf.equal(width, 0.0),
lambda: _adjust_bbox_boundaries(min_x, max_x),
lambda: (min_x, max_x))
return min_y, min_x, max_y, max_x
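# A short worked example (not in the original file): a degenerate bbox sitting
# on the upper boundary (min_y == max_y == 1.0) gets pushed apart by `delta`
# so its area is no longer zero; the non-degenerate x range is left untouched.
def _demo_check_bbox_area():
  min_y, min_x, max_y, max_x = _check_bbox_area(1.0, 0.2, 1.0, 0.4, delta=0.05)
  assert abs(min_y - 0.95) < 1e-9 and max_y == 1.0
  assert (min_x, max_x) == (0.2, 0.4)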
def _scale_bbox_only_op_probability(prob):
"""Reduce the probability of the bbox-only operation.
Probability is reduced so that we do not distort the content of too many
bounding boxes that are close to each other. The value of 3.0 was a chosen
hyper parameter when designing the autoaugment algorithm that we found
empirically to work well.
Args:
prob: Float that is the probability of applying the bbox-only operation.
Returns:
Reduced probability.
"""
# ===================
# return prob
return prob / 3.0
#
def _apply_bbox_augmentation(image, bbox, augmentation_func, *args):
"""Applies augmentation_func to the subsection of image indicated by bbox.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
augmentation_func: Augmentation function that will be applied to the
subsection of image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
A modified version of image, where the bbox location in the image will
have `augmentation_func` applied to it.
"""
if isinstance(image, np.ndarray):
image_height = float(image.shape[0])
image_width = float(image.shape[1])
min_y = (image_height * bbox[0]).astype(np.int32)
min_x = (image_width * bbox[1]).astype(np.int32)
max_y = (image_height * bbox[2]).astype(np.int32)
max_x = (image_width * bbox[3]).astype(np.int32)
image_height = int(image_height)
image_width = int(image_width)
# Clip to be sure the max values do not fall out of range.
max_y = np.minimum(max_y, image_height - 1)
max_x = np.minimum(max_x, image_width - 1)
# Get the sub-tensor that is the image within the bounding box region.
bbox_content = image[min_y:max_y + 1, min_x:max_x + 1, :]
if bbox_content.shape[0] == 0 or bbox_content.shape[1] == 0:
return image
# Apply the augmentation function to the bbox portion of the image.
augmented_bbox_content = augmentation_func(bbox_content, *args)
# Pad the augmented_bbox_content and the mask to match the shape of original
# image.
augmented_bbox_content = np.pad(augmented_bbox_content,
[[min_y, (image_height - 1) - max_y], [min_x, (image_width - 1) - max_x],
[0, 0]], 'constant')
# Create a mask that will be used to zero out a part of the original image.
mask_tensor = np.zeros_like(bbox_content)
mask_tensor = np.pad(mask_tensor,
[[min_y, (image_height - 1) - max_y],
[min_x, (image_width - 1) - max_x],
[0, 0]],
'constant',
constant_values=1)
else:
image_height = tf.to_float(tf.shape(image)[0])
image_width = tf.to_float(tf.shape(image)[1])
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = tf.to_int32(image_height * bbox[2])
max_x = tf.to_int32(image_width * bbox[3])
image_height = tf.to_int32(image_height)
image_width = tf.to_int32(image_width)
# Clip to be sure the max values do not fall out of range.
max_y = tf.minimum(max_y, image_height - 1)
max_x = tf.minimum(max_x, image_width - 1)
# Get the sub-tensor that is the image within the bounding box region.
bbox_content = image[min_y:max_y + 1, min_x:max_x + 1, :]
# Apply the augmentation function to the bbox portion of the image.
augmented_bbox_content = augmentation_func(bbox_content, *args)
# Pad the augmented_bbox_content and the mask to match the shape of original
# image.
augmented_bbox_content = tf.pad(augmented_bbox_content,
[[min_y, (image_height - 1) - max_y],
[min_x, (image_width - 1) - max_x],
[0, 0]])
# Create a mask that will be used to zero out a part of the original image.
mask_tensor = tf.zeros_like(bbox_content)
mask_tensor = tf.pad(mask_tensor,
[[min_y, (image_height - 1) - max_y],
[min_x, (image_width - 1) - max_x],
[0, 0]],
constant_values=1)
# Replace the old bbox content with the new augmented content.
image = image * mask_tensor + augmented_bbox_content
return image
#
def _concat_bbox(bbox, bboxes):
"""Helper function that concates bbox to bboxes along the first dimension."""
# Note if all elements in bboxes are -1 (_INVALID_BOX), then this means
# we discard bboxes and start the bboxes Tensor with the current bbox.
if isinstance(bbox, np.ndarray):
bboxes_sum_check = np.sum(bboxes, axis=-1)
bbox = bbox[np.newaxis, ...]
# This check will be true when it is an _INVALID_BOX
if np.any(bboxes_sum_check == -4.0):
bboxes = bbox
else:
bboxes = np.concatenate([bboxes, bbox], axis=0)
else:
bboxes_sum_check = tf.reduce_sum(bboxes)
bbox = tf.expand_dims(bbox, 0)
# This check will be true when it is an _INVALID_BOX
bboxes = tf.cond(tf.equal(bboxes_sum_check, -4.0),
lambda: bbox,
lambda: tf.concat([bboxes, bbox], 0))
return bboxes
#
def _apply_bbox_augmentation_wrapper(image, bbox, new_bboxes, prob,
augmentation_func, func_changes_bbox,
*args):
"""Applies _apply_bbox_augmentation with probability prob.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
new_bboxes: 2D Tensor that is a list of the bboxes in the image after they
have been altered by aug_func. These will only be changed when
func_changes_bbox is set to true. Each bbox has 4 elements
(min_y, min_x, max_y, max_x) of type float that are the normalized
bbox coordinates between 0 and 1.
prob: Float that is the probability of applying _apply_bbox_augmentation.
augmentation_func: Augmentation function that will be applied to the
subsection of image.
func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
to image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
A tuple. Fist element is a modified version of image, where the bbox
location in the image will have augmentation_func applied to it if it is
chosen to be called with probability `prob`. The second element is a
Tensor of Tensors of length 4 that will contain the altered bbox after
applying augmentation_func.
"""
if isinstance(image, np.ndarray):
should_apply_op = np.floor(np.random.uniform() + prob).astype(np.bool_)
if func_changes_bbox:
if should_apply_op:
augmented_image, bbox = augmentation_func(image, bbox, *args)
else:
augmented_image, bbox = image, bbox
else:
if should_apply_op:
augmented_image = _apply_bbox_augmentation(image, bbox, augmentation_func, *args)
else:
augmented_image = image
new_bboxes = _concat_bbox(bbox, new_bboxes)
else:
should_apply_op = tf.cast(
tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
if func_changes_bbox:
augmented_image, bbox = tf.cond(
should_apply_op,
lambda: augmentation_func(image, bbox, *args),
lambda: (image, bbox))
else:
augmented_image = tf.cond(
should_apply_op,
lambda: _apply_bbox_augmentation(image, bbox, augmentation_func, *args),
lambda: image)
new_bboxes = _concat_bbox(bbox, new_bboxes)
return augmented_image, new_bboxes
#
def _apply_multi_bbox_augmentation(image, bboxes, prob, aug_func,
func_changes_bbox, *args):
"""Applies aug_func to the image for each bbox in bboxes.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float.
prob: Float that is the probability of applying aug_func to a specific
bounding box within the image.
aug_func: Augmentation function that will be applied to the
subsections of image indicated by the bbox values in bboxes.
func_changes_bbox: Boolean. Does augmentation_func return bbox in addition
to image.
*args: Additional parameters that will be passed into augmentation_func
when it is called.
Returns:
A modified version of image, where each bbox location in the image will
have augmentation_func applied to it if it is chosen to be called with
probability prob independently across all bboxes. Also the final
bboxes are returned that will be unchanged if func_changes_bbox is set to
false and if true, the new altered ones will be returned.
"""
# Will keep track of the new altered bboxes after aug_func is repeatedly
# applied. The -1 values are a dummy value and this first Tensor will be
# removed upon appending the first real bbox.
if isinstance(image, np.ndarray):
new_bboxes = np.array(_INVALID_BOX)
if bboxes.shape[0] == 0:
bboxes = np.array(_INVALID_BOX)
bboxes = bboxes.reshape(-1, 4)
# pylint:disable=g-long-lambda
# pylint:disable=line-too-long
wrapped_aug_func = lambda _image, bbox, _new_bboxes: _apply_bbox_augmentation_wrapper(
_image, bbox, _new_bboxes, prob, aug_func, func_changes_bbox, *args)
# pylint:enable=g-long-lambda
# pylint:enable=line-too-long
# Setup the while_loop.
num_bboxes = bboxes.shape[0] # We loop until we go over all bboxes.
idx = 0 # Counter for the while loop.
# Conditional function when to end the loop once we go over all bboxes
# images_and_bboxes contain (_image, _new_bboxes)
cond = lambda _idx, _images_and_bboxes: _idx < num_bboxes
# Shuffle the bboxes so that the augmentation order is not deterministic if
# we are not changing the bboxes with aug_func.
if not func_changes_bbox:
np.random.shuffle(bboxes)
loop_bboxes = bboxes
else:
loop_bboxes = bboxes
# Main function of while_loop where we repeatedly apply augmentation on the
# bboxes in the image.
# pylint:disable=g-long-lambda
body = lambda _idx, _images_and_bboxes: [
_idx + 1, wrapped_aug_func(_images_and_bboxes[0],
loop_bboxes[_idx],
_images_and_bboxes[1])]
# pylint:enable=g-long-lambda
while cond(idx, (image, new_bboxes)):
idx, (image, new_bboxes) = body(idx, (image, new_bboxes))
# Either return the altered bboxes or the original ones depending on if
# we altered them in anyway.
if func_changes_bbox:
final_bboxes = new_bboxes
else:
final_bboxes = bboxes
else:
new_bboxes = tf.constant(_INVALID_BOX)
# If the bboxes are empty, then just give it _INVALID_BOX. The result
# will be thrown away.
bboxes = tf.cond(tf.equal(tf.size(bboxes), 0),
lambda: tf.constant(_INVALID_BOX),
lambda: bboxes)
# Make sure the bboxes tensor has shape (None, 4).
bboxes = tf.ensure_shape(bboxes, (None, 4))
# pylint:disable=g-long-lambda
# pylint:disable=line-too-long
wrapped_aug_func = lambda _image, bbox, _new_bboxes: _apply_bbox_augmentation_wrapper(
_image, bbox, _new_bboxes, prob, aug_func, func_changes_bbox, *args)
# pylint:enable=g-long-lambda
# pylint:enable=line-too-long
# Setup the while_loop.
num_bboxes = tf.shape(bboxes)[0] # We loop until we go over all bboxes.
idx = tf.constant(0) # Counter for the while loop.
# Conditional function when to end the loop once we go over all bboxes
# images_and_bboxes contain (_image, _new_bboxes)
cond = lambda _idx, _images_and_bboxes: tf.less(_idx, num_bboxes)
# Shuffle the bboxes so that the augmentation order is not deterministic if
# we are not changing the bboxes with aug_func.
if not func_changes_bbox:
loop_bboxes = tf.random.shuffle(bboxes)
# ===========================
# from tensorflow.python.ops import gen_random_ops
# from tensorflow.python.framework import random_seed
# seed1, seed2 = random_seed.get_seed(None)
# loop_bboxes = gen_random_ops.random_shuffle(
# bboxes, seed=seed1, seed2=seed2, name=None)
# ===========================
else:
loop_bboxes = bboxes
# Main function of while_loop where we repeatedly apply augmentation on the
# bboxes in the image.
# pylint:disable=g-long-lambda
body = lambda _idx, _images_and_bboxes: [
_idx + 1, wrapped_aug_func(_images_and_bboxes[0],
loop_bboxes[_idx],
_images_and_bboxes[1])]
# pylint:enable=g-long-lambda
_, (image, new_bboxes) = tf.while_loop(
cond, body, [idx, (image, new_bboxes)],
shape_invariants=[idx.get_shape(),
(image.get_shape(), tf.TensorShape([None, 4]))])
# Either return the altered bboxes or the original ones depending on if
# we altered them in anyway.
if func_changes_bbox:
final_bboxes = new_bboxes
else:
final_bboxes = bboxes
return image, final_bboxes
#
def _apply_multi_bbox_augmentation_wrapper(image, bboxes, prob, aug_func,
func_changes_bbox, *args):
"""Checks to be sure num bboxes > 0 before calling inner function."""
if isinstance(image, np.ndarray):
num_bboxes = bboxes.shape[0]
if num_bboxes == 0:
image, bboxes = image, bboxes
else:
image, bboxes = _apply_multi_bbox_augmentation(
image, bboxes, prob, aug_func, func_changes_bbox, *args)
else:
num_bboxes = tf.shape(bboxes)[0]
image, bboxes = tf.cond(
tf.equal(num_bboxes, 0),
lambda: (image, bboxes),
# pylint:disable=g-long-lambda
lambda: _apply_multi_bbox_augmentation(
image, bboxes, prob, aug_func, func_changes_bbox, *args))
# pylint:enable=g-long-lambda
return image, bboxes
def rotate_only_bboxes(image, bboxes, prob, degrees, replace):
"""Apply rotate to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, rotate, func_changes_bbox, degrees, replace)
def shear_x_only_bboxes(image, bboxes, prob, level, replace):
"""Apply shear_x to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, shear_x, func_changes_bbox, level, replace)
def shear_y_only_bboxes(image, bboxes, prob, level, replace):
"""Apply shear_y to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, shear_y, func_changes_bbox, level, replace)
# Bounding-box-only translation (vacated pixels filled with `replace`)
def translate_x_only_bboxes(image, bboxes, prob, pixels, replace):
"""Apply translate_x to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, translate_x, func_changes_bbox, pixels, replace)
# Bounding-box-only translation (vacated pixels filled with `replace`)
def translate_y_only_bboxes(image, bboxes, prob, pixels, replace):
"""Apply translate_y to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, translate_y, func_changes_bbox, pixels, replace)
#
def flip_only_bboxes(image, bboxes, prob):
"""Apply flip_lr to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
if isinstance(image, np.ndarray):
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, np.fliplr, func_changes_bbox)
else:
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, tf.image.flip_left_right, func_changes_bbox)
def solarize_only_bboxes(image, bboxes, prob, threshold):
"""Apply solarize to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, solarize, func_changes_bbox, threshold)
def equalize_only_bboxes(image, bboxes, prob):
"""Apply equalize to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, equalize, func_changes_bbox)
def cutout_only_bboxes(image, bboxes, prob, pad_size, replace):
"""Apply cutout to each bbox in the image with probability prob."""
func_changes_bbox = False
prob = _scale_bbox_only_op_probability(prob)
return _apply_multi_bbox_augmentation_wrapper(
image, bboxes, prob, cutout, func_changes_bbox, pad_size, replace)
#
def _rotate_bbox(bbox, image_height, image_width, degrees):
"""Rotates the bbox coordinated by degrees.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
    image_width: Int, width of the image.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
Returns:
A tensor of the same shape as bbox, but now with the rotated coordinates.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
if isinstance(bbox, np.ndarray):
image_height, image_width = (
float(image_height), float(image_width))
# Translate the bbox to the center of the image and turn the normalized 0-1
# coordinates to absolute pixel locations.
# Y coordinates are made negative as the y axis of images goes down with
# increasing pixel values, so we negate to make sure x axis and y axis points
# are in the traditionally positive direction.
min_y = -(image_height * (bbox[0] - 0.5)).astype(np.int32)
min_x = (image_width * (bbox[1] - 0.5)).astype(np.int32)
max_y = -(image_height * (bbox[2] - 0.5)).astype(np.int32)
max_x = (image_width * (bbox[3] - 0.5)).astype(np.int32)
coordinates = np.stack(
[[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
coordinates = coordinates.astype(np.float32)
# Rotate the coordinates according to the rotation matrix clockwise if
# radians is positive, else negative
rotation_matrix = np.stack(
[[np.cos(radians), np.sin(radians)],
[-np.sin(radians), np.cos(radians)]])
new_coords = np.dot(rotation_matrix, np.transpose(coordinates)).astype(np.int32)
# Find min/max values and convert them back to normalized 0-1 floats.
min_y = -(float(np.max(new_coords[0, :])) / image_height - 0.5)
min_x = float(np.min(new_coords[1, :])) / image_width + 0.5
max_y = -(float(np.min(new_coords[0, :])) / image_height - 0.5)
max_x = float(np.max(new_coords[1, :])) / image_width + 0.5
if max_x < 0. or min_x > 1.0 or max_y < 0. or min_y > 1.0:
return None
    # Clip the bboxes to be sure they fall within [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return np.stack([min_y, min_x, max_y, max_x])
else:
image_height, image_width = (
tf.to_float(image_height), tf.to_float(image_width))
# Translate the bbox to the center of the image and turn the normalized 0-1
# coordinates to absolute pixel locations.
# Y coordinates are made negative as the y axis of images goes down with
# increasing pixel values, so we negate to make sure x axis and y axis points
# are in the traditionally positive direction.
min_y = -tf.to_int32(image_height * (bbox[0] - 0.5))
min_x = tf.to_int32(image_width * (bbox[1] - 0.5))
max_y = -tf.to_int32(image_height * (bbox[2] - 0.5))
max_x = tf.to_int32(image_width * (bbox[3] - 0.5))
coordinates = tf.stack(
[[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
coordinates = tf.cast(coordinates, tf.float32)
# Rotate the coordinates according to the rotation matrix clockwise if
# radians is positive, else negative
rotation_matrix = tf.stack(
[[tf.cos(radians), tf.sin(radians)],
[-tf.sin(radians), tf.cos(radians)]])
new_coords = tf.cast(
tf.matmul(rotation_matrix, tf.transpose(coordinates)), tf.int32)
# Find min/max values and convert them back to normalized 0-1 floats.
min_y = -(tf.to_float(tf.reduce_max(new_coords[0, :])) / image_height - 0.5)
min_x = tf.to_float(tf.reduce_min(new_coords[1, :])) / image_width + 0.5
max_y = -(tf.to_float(tf.reduce_min(new_coords[0, :])) / image_height - 0.5)
max_x = tf.to_float(tf.reduce_max(new_coords[1, :])) / image_width + 0.5
    # Clip the bboxes to be sure they fall within [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
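# --- Illustrative sketch (not part of the original pipeline) ----------------
# Exercises the NumPy branch of `_rotate_bbox` on a single normalized box so
# the expected input/output shapes are visible. The image size, angle and box
# coordinates below are hypothetical example values.
def _demo_rotate_bbox():
  bbox = np.array([0.25, 0.25, 0.75, 0.75], dtype=np.float32)
  rotated = _rotate_bbox(bbox, image_height=100, image_width=200, degrees=30.0)
  # `rotated` is either a 4-element (min_y, min_x, max_y, max_x) array with
  # values in [0, 1], or None if the rotated box left the image entirely.
  return rotated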
# Rotate the image.
def rotate_with_bboxes(image, bboxes, degrees, replace):
"""Equivalent of PIL Rotate that rotates the image and bbox.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of rotating
image by degrees. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the rotated image.
"""
# Rotate the image.
image = rotate(image, degrees, replace)
# Convert bbox coordinates to pixel values.
if isinstance(image, np.ndarray):
image_height = image.shape[0]
image_width = image.shape[1]
else:
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_rotate_bbox = lambda bbox: _rotate_bbox(
bbox, image_height, image_width, degrees)
# pylint:enable=g-long-lambda
if isinstance(image, np.ndarray):
bboxes = np.array([box for box in list(map(wrapped_rotate_bbox, bboxes)) if box is not None])
else:
bboxes = tf.map_fn(wrapped_rotate_bbox, bboxes)
return image, bboxes
#
def translate_x(image, pixels, replace):
"""Equivalent of PIL Translate in X dimension."""
if isinstance(image, np.ndarray):
replace = tuple(replace)
image = Image.fromarray(image)
image = image.transform(image.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), fillcolor=replace)
return np.array(image)
else:
image = tf.contrib.image.translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
#
def translate_y(image, pixels, replace):
"""Equivalent of PIL Translate in Y dimension."""
if isinstance(image, np.ndarray):
replace = tuple(replace)
image = Image.fromarray(image)
image = image.transform(image.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), fillcolor=replace)
return np.array(image)
else:
image = tf.contrib.image.translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
#
def _shift_bbox(bbox, image_height, image_width, pixels, shift_horizontal):
"""Shifts the bbox coordinates by pixels.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
image_width: Int, width of the image.
pixels: An int. How many pixels to shift the bbox.
shift_horizontal: Boolean. If true then shift in X dimension else shift in
Y dimension.
Returns:
A tensor of the same shape as bbox, but now with the shifted coordinates.
"""
if isinstance(bbox, np.ndarray):
# Convert bbox to integer pixel locations.
min_y = int(image_height * bbox[0])
min_x = int(image_width * bbox[1])
max_y = int(image_height * bbox[2])
max_x = int(image_width * bbox[3])
if shift_horizontal:
min_x = np.maximum(0, min_x - pixels)
max_x = np.minimum(image_width, max_x - pixels)
else:
min_y = np.maximum(0, min_y - pixels)
max_y = np.minimum(image_height, max_y - pixels)
# Convert bbox back to floats.
min_y = float(min_y) / float(image_height)
min_x = float(min_x) / float(image_width)
max_y = float(max_y) / float(image_height)
max_x = float(max_x) / float(image_width)
    # Clip the bboxes to be sure they fall within [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return np.stack([min_y, min_x, max_y, max_x])#, bbox[4], bbox[5], bbox[6]])
else:
pixels = tf.to_int32(pixels)
# Convert bbox to integer pixel locations.
min_y = tf.to_int32(tf.to_float(image_height) * bbox[0])
min_x = tf.to_int32(tf.to_float(image_width) * bbox[1])
max_y = tf.to_int32(tf.to_float(image_height) * bbox[2])
max_x = tf.to_int32(tf.to_float(image_width) * bbox[3])
if shift_horizontal:
min_x = tf.maximum(0, min_x - pixels)
max_x = tf.minimum(image_width, max_x - pixels)
else:
min_y = tf.maximum(0, min_y - pixels)
max_y = tf.minimum(image_height, max_y - pixels)
# Convert bbox back to floats.
min_y = tf.to_float(min_y) / tf.to_float(image_height)
min_x = tf.to_float(min_x) / tf.to_float(image_width)
max_y = tf.to_float(max_y) / tf.to_float(image_height)
max_x = tf.to_float(max_x) / tf.to_float(image_width)
    # Clip the bboxes to be sure they fall within [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
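# --- Illustrative sketch (not part of the original pipeline) ----------------
# Shows the NumPy branch of `_shift_bbox` on one normalized box. The image
# size, shift amount and box coordinates are hypothetical example values.
def _demo_shift_bbox():
  bbox = np.array([0.1, 0.4, 0.5, 0.8], dtype=np.float32)
  # shift_horizontal=True moves min_x/max_x by -pixels, clamped to the image.
  return _shift_bbox(bbox, image_height=100, image_width=200,
                     pixels=20, shift_horizontal=True)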
# Translate the image.
def translate_bbox(image, bboxes, pixels, replace, shift_horizontal):
"""Equivalent of PIL Translate in X/Y dimension that shifts image and bbox.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
pixels: An int. How many pixels to shift the image and bboxes
replace: A one or three value 1D tensor to fill empty pixels.
shift_horizontal: Boolean. If true then shift in X dimension else shift in
Y dimension.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of translating
image by pixels. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the shifted image.
"""
# ===========================
  # Randomly flip the sign of the shift with 50% probability.
  import random
  sign = random.randint(0, 9)
  if sign >= 5:
    if isinstance(image, np.ndarray):
      pixels = pixels
    else:
      pixels = tf.to_float(pixels)
  else:
    if isinstance(image, np.ndarray):
      pixels = -pixels
    else:
      pixels = -tf.to_float(pixels)
# ===========================
if shift_horizontal:
image = translate_x(image, pixels, replace)
else:
image = translate_y(image, pixels, replace)
# Convert bbox coordinates to pixel values.
if isinstance(image, np.ndarray):
image_height = image.shape[0]
image_width = image.shape[1]
else:
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_shift_bbox = lambda bbox: _shift_bbox(
bbox, image_height, image_width, pixels, shift_horizontal)
# pylint:enable=g-long-lambda
if isinstance(image, np.ndarray):
bboxes = np.array([box for box in list(map(wrapped_shift_bbox, bboxes)) if box is not None])
else:
bboxes = tf.map_fn(wrapped_shift_bbox, bboxes)
return image, bboxes
# Horizontal shear.
def shear_x(image, level, replace):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
if isinstance(image, np.ndarray):
replace = tuple(replace)
img = Image.fromarray(image)
img = img.transform(
img.size, Image.AFFINE, (1, level, 0, 0, 1, 0), Image.BICUBIC, fillcolor=replace)
return np.array(img)
else:
image = tf.contrib.image.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
# Vertical shear.
def shear_y(image, level, replace):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
if isinstance(image, np.ndarray):
replace = tuple(replace)
img = Image.fromarray(image)
img = img.transform(
img.size, Image.AFFINE, (1, 0, 0, level, 1, 0), Image.BICUBIC, fillcolor=replace)
return np.array(img)
else:
image = tf.contrib.image.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image, replace)
def _shear_bbox(bbox, image_height, image_width, level, shear_horizontal):
"""Shifts the bbox according to how the image was sheared.
Args:
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
image_height: Int, height of the image.
    image_width: Int, width of the image.
level: Float. How much to shear the image.
shear_horizontal: If true then shear in X dimension else shear in
the Y dimension.
Returns:
A tensor of the same shape as bbox, but now with the shifted coordinates.
"""
if isinstance(bbox, np.ndarray):
image_height, image_width = (
float(image_height), float(image_width))
# Change bbox coordinates to be pixels.
min_y = (image_height * bbox[0]).astype(np.int32)
min_x = (image_width * bbox[1]).astype(np.int32)
max_y = (image_height * bbox[2]).astype(np.int32)
max_x = (image_width * bbox[3]).astype(np.int32)
coordinates = np.stack([[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
coordinates = coordinates.astype(np.float32)
# Shear the coordinates according to the translation matrix.
if shear_horizontal:
translation_matrix = np.stack(
[[1, 0], [-level, 1]])
else:
translation_matrix = np.stack(
[[1, -level], [0, 1]])
translation_matrix = translation_matrix.astype(np.float32)
new_coords = np.dot(translation_matrix, np.transpose(coordinates)).astype(np.int32)
# Find min/max values and convert them back to floats.
min_y = float(np.min(new_coords[0, :])) / image_height
min_x = float(np.min(new_coords[1, :])) / image_width
max_y = float(np.max(new_coords[0, :])) / image_height
max_x = float(np.max(new_coords[1, :])) / image_width
if max_x < 0. or min_x > 1.0 or max_y < 0. or min_y > 1.0:
return None
    # Clip the bboxes to be sure they fall within [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return np.stack([min_y, min_x, max_y, max_x])
else:
image_height, image_width = (
tf.to_float(image_height), tf.to_float(image_width))
# Change bbox coordinates to be pixels.
min_y = tf.to_int32(image_height * bbox[0])
min_x = tf.to_int32(image_width * bbox[1])
max_y = tf.to_int32(image_height * bbox[2])
max_x = tf.to_int32(image_width * bbox[3])
coordinates = tf.stack(
[[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])
coordinates = tf.cast(coordinates, tf.float32)
# Shear the coordinates according to the translation matrix.
if shear_horizontal:
translation_matrix = tf.stack(
[[1, 0], [-level, 1]])
else:
translation_matrix = tf.stack(
[[1, -level], [0, 1]])
translation_matrix = tf.cast(translation_matrix, tf.float32)
new_coords = tf.cast(
tf.matmul(translation_matrix, tf.transpose(coordinates)), tf.int32)
# Find min/max values and convert them back to floats.
min_y = tf.to_float(tf.reduce_min(new_coords[0, :])) / image_height
min_x = tf.to_float(tf.reduce_min(new_coords[1, :])) / image_width
max_y = tf.to_float(tf.reduce_max(new_coords[0, :])) / image_height
max_x = tf.to_float(tf.reduce_max(new_coords[1, :])) / image_width
    # Clip the bboxes to be sure they fall within [0, 1].
min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)
return tf.stack([min_y, min_x, max_y, max_x])
# Shear.
def shear_with_bboxes(image, bboxes, level, replace, shear_horizontal):
"""Applies Shear Transformation to the image and shifts the bboxes.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
level: Float. How much to shear the image. This value will be between
-0.3 to 0.3.
replace: A one or three value 1D tensor to fill empty pixels.
shear_horizontal: Boolean. If true then shear in X dimension else shear in
the Y dimension.
Returns:
A tuple containing a 3D uint8 Tensor that will be the result of shearing
image by level. The second element of the tuple is bboxes, where now
the coordinates will be shifted to reflect the sheared image.
"""
if shear_horizontal:
image = shear_x(image, level, replace)
else:
image = shear_y(image, level, replace)
# Convert bbox coordinates to pixel values.
if isinstance(image, np.ndarray):
image_height = image.shape[0]
image_width = image.shape[1]
# pylint:disable=g-long-lambda
wrapped_shear_bbox = lambda bbox: _shear_bbox(
bbox, image_height, image_width, level, shear_horizontal)
# pylint:enable=g-long-lambda
bboxes = np.array([box for box in list(map(wrapped_shear_bbox, bboxes)) if box is not None])
else:
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# pylint:disable=g-long-lambda
wrapped_shear_bbox = lambda bbox: _shear_bbox(
bbox, image_height, image_width, level, shear_horizontal)
# pylint:enable=g-long-lambda
bboxes = tf.map_fn(wrapped_shear_bbox, bboxes)
return image, bboxes
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image):
"""Scale the 2D image using the autocontrast rule."""
# A possibly cheaper version can be done using cumsum/unique_with_counts
    # over the histogram values, rather than iterating over the entire image
# to compute mins and maxes.
lo = tf.to_float(tf.reduce_min(image))
hi = tf.to_float(tf.reduce_max(image))
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.to_float(im) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
# Sharpen.
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
  if isinstance(image, np.ndarray):
    import random  # local import, consistent with the NumPy-path style used elsewhere in this file
    if image.shape[0] == 0 or image.shape[1] == 0:
return image
img = Image.fromarray(image)
img = ImageEnhance.Sharpness(img).enhance(1 + factor * random.choice([-1, 1]))
return np.array(img)
else:
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant(
[[1, 1, 1], [1, 5, 1], [1, 1, 1]], dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', rate=[1, 1])
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
# Color (histogram equalization).
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
if isinstance(image, np.ndarray):
img = Image.fromarray(image)
img = ImageOps.equalize(img)
return np.array(img)
else:
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
#
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
if isinstance(image, np.ndarray):
shape = image.shape
extended_channel = np.ones([shape[0], shape[1], 1], image.dtype)
    extended = np.concatenate([image, extended_channel], axis=2)
else:
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
#
def unwrap(image, replace):
"""Unwraps an image produced by wrap.
Where there is a 0 in the last channel for every spatial position,
the rest of the three channels in that spatial dimension are grayed
(set to 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
if isinstance(image, np.ndarray):
image_shape = image.shape
# Flatten the spatial dimensions.
flattened_image = image.reshape(-1, image_shape[2])
# Find all pixels where the last channel is zero.
alpha_channel = flattened_image[:, 3]
    replace = np.concatenate([replace, np.ones([1], image.dtype)], 0).astype(image.dtype)
    # Where the alpha channel is zero, fill the whole pixel with 'replace'.
    # The mask is expanded so the row-wise selection broadcasts over channels.
    flattened_image = np.where(
        np.equal(alpha_channel, 0)[:, None],
        np.ones_like(flattened_image, dtype=image.dtype) * replace,
        flattened_image)
image = flattened_image.reshape(image_shape)
image = image[0:image_shape[0], 0:image_shape[1], 0:3]
else:
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = flattened_image[:, 3]
replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
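# --- Illustrative sketch (not part of the original pipeline) ----------------
# Round-trips a random uint8 image through wrap()/unwrap() using the NumPy
# branches above. The gray fill value is a hypothetical example.
def _demo_wrap_unwrap():
  image = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
  wrapped = wrap(image)                # (32, 32, 4): extra all-ones channel
  replace = np.array([128, 128, 128], dtype=np.uint8)
  restored = unwrap(wrapped, replace)  # back to (32, 32, 3)
  return restored.shape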
#
def _cutout_inside_bbox(image, bbox, pad_fraction):
"""Generates cutout mask and the mean pixel value of the bbox.
First a location is randomly chosen within the image as the center where the
cutout mask will be applied. Note this can be towards the boundaries of the
image, so the full cutout mask may not be applied.
Args:
image: 3D uint8 Tensor.
bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
of type float that represents the normalized coordinates between 0 and 1.
    pad_fraction: Float that specifies how large the cutout mask should be
in reference to the size of the original bbox. If pad_fraction is 0.25,
then the cutout mask will be of shape
(0.25 * bbox height, 0.25 * bbox width).
Returns:
    A tuple. First element is a tensor of the same shape as image where each
element is either a 1 or 0 that is used to determine where the image
will have cutout applied. The second element is the mean of the pixels
in the image where the bbox is located.
"""
if isinstance(image, np.ndarray):
image_height = image.shape[0]
image_width = image.shape[1]
# Transform from shape [1, 4] to [4].
bbox = np.squeeze(bbox)
min_y = (float(image_height) * bbox[0]).astype(np.int32)
min_x = (float(image_width) * bbox[1]).astype(np.int32)
max_y = (float(image_height) * bbox[2]).astype(np.int32)
max_x = (float(image_width) * bbox[3]).astype(np.int32)
# Calculate the mean pixel values in the bounding box, which will be used
# to fill the cutout region.
mean = np.mean(image[min_y:max_y + 1, min_x:max_x + 1], axis=(0, 1))
    # Cutout mask will be size pad_size_height * 2 by pad_size_width * 2 if the
# region lies entirely within the bbox.
box_height = max_y - min_y + 1
box_width = max_x - min_x + 1
pad_size_height = (pad_fraction * (box_height / 2)).astype(np.int32)
pad_size_width = (pad_fraction * (box_width / 2)).astype(np.int32)
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = np.random.randint(low=min_y, high=max_y + 1)
cutout_center_width = np.random.randint(low=min_x, high=max_x + 1)
lower_pad = np.maximum(
0, cutout_center_height - pad_size_height)
upper_pad = np.maximum(
0, image_height - cutout_center_height - pad_size_height)
left_pad = np.maximum(
0, cutout_center_width - pad_size_width)
right_pad = np.maximum(
0, image_width - cutout_center_width - pad_size_width)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = np.pad(
np.zeros(cutout_shape, dtype=image.dtype),
padding_dims, 'constant', constant_values=1)
mask = mask[..., np.newaxis]
mask = np.tile(mask, [1, 1, 3])
else:
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Transform from shape [1, 4] to [4].
bbox = tf.squeeze(bbox)
min_y = tf.to_int32(tf.to_float(image_height) * bbox[0])
min_x = tf.to_int32(tf.to_float(image_width) * bbox[1])
max_y = tf.to_int32(tf.to_float(image_height) * bbox[2])
max_x = tf.to_int32(tf.to_float(image_width) * bbox[3])
# Calculate the mean pixel values in the bounding box, which will be used
# to fill the cutout region.
mean = tf.reduce_mean(image[min_y:max_y + 1, min_x:max_x + 1],
reduction_indices=[0, 1])
    # Cutout mask will be size pad_size_height * 2 by pad_size_width * 2 if the
# region lies entirely within the bbox.
box_height = max_y - min_y + 1
box_width = max_x - min_x + 1
pad_size_height = tf.to_int32(pad_fraction * (box_height / 2))
pad_size_width = tf.to_int32(pad_fraction * (box_width / 2))
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random_uniform(
shape=[], minval=min_y, maxval=max_y + 1,
dtype=tf.int32)
cutout_center_width = tf.random_uniform(
shape=[], minval=min_x, maxval=max_x + 1,
dtype=tf.int32)
lower_pad = tf.maximum(
0, cutout_center_height - pad_size_height)
upper_pad = tf.maximum(
0, image_height - cutout_center_height - pad_size_height)
left_pad = tf.maximum(
0, cutout_center_width - pad_size_width)
right_pad = tf.maximum(
0, image_width - cutout_center_width - pad_size_width)
cutout_shape = [image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad)]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims, constant_values=1)
mask = tf.expand_dims(mask, 2)
mask = tf.tile(mask, [1, 1, 3])
return mask, mean
#
def bbox_cutout(image, bboxes, pad_fraction, replace_with_mean):
"""Applies cutout to the image according to bbox information.
  This is a cutout variant that uses bbox information to make more informed
decisions on where to place the cutout mask.
Args:
image: 3D uint8 Tensor.
bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
has 4 elements (min_y, min_x, max_y, max_x) of type float with values
between [0, 1].
    pad_fraction: Float that specifies how large the cutout mask should be
in reference to the size of the original bbox. If pad_fraction is 0.25,
then the cutout mask will be of shape
(0.25 * bbox height, 0.25 * bbox width).
    replace_with_mean: Boolean that specifies what value should be filled in
where the cutout mask is applied. Since the incoming image will be of
uint8 and will not have had any mean normalization applied, by default
we set the value to be 128. If replace_with_mean is True then we find
the mean pixel values across the channel dimension and use those to fill
in where the cutout mask is applied.
Returns:
A tuple. First element is a tensor of the same shape as image that has
cutout applied to it. Second element is the bboxes that were passed in
that will be unchanged.
"""
def apply_bbox_cutout(image, bboxes, pad_fraction):
"""Applies cutout to a single bounding box within image."""
# Choose a single bounding box to apply cutout to.
if isinstance(image, np.ndarray):
random_index = np.random.randint(low=0, high=bboxes.shape[0])
# Select the corresponding bbox and apply cutout.
chosen_bbox = bboxes[random_index]
mask, mean = _cutout_inside_bbox(image, chosen_bbox, pad_fraction)
# When applying cutout we either set the pixel value to 128 or to the mean
# value inside the bbox.
replace = mean if replace_with_mean else 128
# Apply the cutout mask to the image. Where the mask is 0 we fill it with
# `replace`.
image = np.where(
np.equal(mask, 0),
(np.ones_like(image, dtype=image.dtype) * replace).astype(image.dtype),
image)
else:
random_index = tf.random_uniform(
shape=[], maxval=tf.shape(bboxes)[0], dtype=tf.int32)
# Select the corresponding bbox and apply cutout.
chosen_bbox = tf.gather(bboxes, random_index)
mask, mean = _cutout_inside_bbox(image, chosen_bbox, pad_fraction)
# When applying cutout we either set the pixel value to 128 or to the mean
# value inside the bbox.
replace = mean if replace_with_mean else 128
# Apply the cutout mask to the image. Where the mask is 0 we fill it with
# `replace`.
image = tf.where(
tf.equal(mask, 0),
tf.cast(tf.ones_like(image, dtype=image.dtype) * replace,
dtype=image.dtype),
image)
return image
  # Check to see if there are boxes; if so, apply bbox cutout.
if isinstance(image, np.ndarray):
if bboxes.shape[0] == 0:
image = image
else:
image = apply_bbox_cutout(image, bboxes, pad_fraction)
else:
image = tf.cond(tf.equal(tf.size(bboxes), 0), lambda: image,
lambda: apply_bbox_cutout(image, bboxes, pad_fraction))
return image, bboxes
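# --- Illustrative sketch (not part of the original pipeline) ----------------
# Applies the NumPy branch of bbox_cutout to random data with one normalized
# box. The pad fraction and image size are hypothetical example values.
def _demo_bbox_cutout():
  image = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
  bboxes = np.array([[0.2, 0.2, 0.8, 0.8]], dtype=np.float32)
  cut_image, same_bboxes = bbox_cutout(image, bboxes,
                                       pad_fraction=0.25,
                                       replace_with_mean=False)
  return cut_image.shape, same_bboxes  # (64, 64, 3); bboxes are unchanged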
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'Cutout': cutout,
'BBox_Cutout': bbox_cutout,
'Rotate_BBox': rotate_with_bboxes,
# pylint:disable=g-long-lambda
'TranslateX_BBox': lambda image, bboxes, pixels, replace: translate_bbox(
image, bboxes, pixels, replace, shift_horizontal=True),
'TranslateY_BBox': lambda image, bboxes, pixels, replace: translate_bbox(
image, bboxes, pixels, replace, shift_horizontal=False),
'ShearX_BBox': lambda image, bboxes, level, replace: shear_with_bboxes(
image, bboxes, level, replace, shear_horizontal=True),
'ShearY_BBox': lambda image, bboxes, level, replace: shear_with_bboxes(
image, bboxes, level, replace, shear_horizontal=False),
# pylint:enable=g-long-lambda
'Rotate_Only_BBoxes': rotate_only_bboxes,
'ShearX_Only_BBoxes': shear_x_only_bboxes,
'ShearY_Only_BBoxes': shear_y_only_bboxes,
'TranslateX_Only_BBoxes': translate_x_only_bboxes,
'TranslateY_Only_BBoxes': translate_y_only_bboxes,
'Flip_Only_BBoxes': flip_only_bboxes,
'Solarize_Only_BBoxes': solarize_only_bboxes,
'Equalize_Only_BBoxes': equalize_only_bboxes,
'Cutout_Only_BBoxes': cutout_only_bboxes,
}
# ===========================
# def _randomly_negate_tensor(tensor):
# """With 50% prob turn the tensor negative."""
# should_flip = tf.cast(tf.floor(tf.random_uniform([]) + 0.5), tf.bool)
# final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
# return final_tensor
# ===========================
def _rotate_level_to_arg(level):
level = (level / _MAX_LEVEL) * 30.
# ===========================
# level = _randomly_negate_tensor(level)
# ===========================
return (level,)
def _shrink_level_to_arg(level):
"""Converts level to ratio by which we shrink the image content."""
if level == 0:
return (1.0,) # if level is zero, do not shrink the image
# Maximum shrinking ratio is 2.9.
level = 2. / (_MAX_LEVEL / level) + 0.9
return (level,)
def _enhance_level_to_arg(level):
return ((level / _MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level):
level = (level / _MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
# ===========================
# level = _randomly_negate_tensor(level)
# ===========================
return (level,)
# Translation.
def _translate_level_to_arg(level, translate_const):
level = (level / _MAX_LEVEL) * float(translate_const)
# Flip level to negative with 50% chance.
# ===========================
  # level = _randomly_negate_tensor(level)  # buggy on Windows?
# ===========================
return (level,)
def _bbox_cutout_level_to_arg(level, hparams):
cutout_pad_fraction = (level / _MAX_LEVEL) * hparams.cutout_max_pad_fraction
return (cutout_pad_fraction,
hparams.cutout_bbox_replace_with_mean)
def level_to_arg(hparams):
return {
'AutoContrast': lambda level: (),
'Equalize': lambda level: (),
'Posterize': lambda level: (int((level / _MAX_LEVEL) * 4),),
'Solarize': lambda level: (int((level / _MAX_LEVEL) * 256),),
'SolarizeAdd': lambda level: (int((level / _MAX_LEVEL) * 110),),
'Color': _enhance_level_to_arg,
'Contrast': _enhance_level_to_arg,
'Brightness': _enhance_level_to_arg,
'Sharpness': _enhance_level_to_arg,
'Cutout': lambda level: (int((level / _MAX_LEVEL) * hparams.cutout_const),),
# pylint:disable=g-long-lambda
'BBox_Cutout': lambda level: _bbox_cutout_level_to_arg(
level, hparams),
'TranslateX_BBox': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
'TranslateY_BBox': lambda level: _translate_level_to_arg(
level, hparams.translate_const),
# pylint:enable=g-long-lambda
'ShearX_BBox': _shear_level_to_arg,
'ShearY_BBox': _shear_level_to_arg,
'Rotate_BBox': _rotate_level_to_arg,
'Rotate_Only_BBoxes': _rotate_level_to_arg,
'ShearX_Only_BBoxes': _shear_level_to_arg,
'ShearY_Only_BBoxes': _shear_level_to_arg,
# pylint:disable=g-long-lambda
'TranslateX_Only_BBoxes': lambda level: _translate_level_to_arg(
level, hparams.translate_bbox_const),
'TranslateY_Only_BBoxes': lambda level: _translate_level_to_arg(
level, hparams.translate_bbox_const),
# pylint:enable=g-long-lambda
'Flip_Only_BBoxes': lambda level: (),
'Solarize_Only_BBoxes': lambda level: (int((level / _MAX_LEVEL) * 256),),
'Equalize_Only_BBoxes': lambda level: (),
# pylint:disable=g-long-lambda
'Cutout_Only_BBoxes': lambda level: (
int((level / _MAX_LEVEL) * hparams.cutout_bbox_const),),
# pylint:enable=g-long-lambda
}
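# --- Illustrative sketch (not part of the original pipeline) ----------------
# Shows how a policy `level` is mapped to concrete op arguments. The hparams
# values are hypothetical, and `_MAX_LEVEL` is assumed to be defined earlier
# in this file (10 in the reference implementation).
def _demo_level_to_arg(level=5):
  import types
  hparams = types.SimpleNamespace(
      cutout_max_pad_fraction=0.75, cutout_bbox_replace_with_mean=False,
      cutout_const=100, translate_const=250, cutout_bbox_const=50,
      translate_bbox_const=120)
  table = level_to_arg(hparams)
  # e.g. a rotation angle in degrees and a shear level:
  return table['Rotate_BBox'](level), table['ShearX_BBox'](level)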
def bbox_wrapper(func):
"""Adds a bboxes function argument to func and returns unchanged bboxes."""
def wrapper(images, bboxes, *args, **kwargs):
return (func(images, *args, **kwargs), bboxes)
return wrapper
def _parse_policy_info(name, prob, level, replace_value, augmentation_hparams):
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(augmentation_hparams)[name](level)
# Check to see if prob is passed into function. This is used for operations
# where we alter bboxes independently.
# pytype:disable=wrong-arg-types
if 'prob' in inspect.getargspec(func)[0]:
args = tuple([prob] + list(args))
# pytype:enable=wrong-arg-types
# Add in replace arg if it is required for the function that is being called.
if 'replace' in inspect.getargspec(func)[0]:
# Make sure replace is the final argument
assert 'replace' == inspect.getargspec(func)[0][-1]
args = tuple(list(args) + [replace_value])
# Add bboxes as the second positional argument for the function if it does
# not already exist.
if 'bboxes' not in inspect.getargspec(func)[0]:
func = bbox_wrapper(func)
return (func, prob, args)
def _apply_func_with_prob(func, image, args, prob, bboxes):
"""Apply `func` to image w/ `args` as input with probability `prob`."""
assert isinstance(args, tuple)
assert 'bboxes' == inspect.getargspec(func)[0][1]
# If prob is a function argument, then this randomness is being handled
# inside the function, so make sure it is always called.
if 'prob' in inspect.getargspec(func)[0]:
prob = 1.0
# Apply the function with probability `prob`.
# ===========================
import random
sign = random.uniform(0, 1)
if sign <= prob:
augmented_image, augmented_bboxes = func(image, bboxes, *args)
else:
augmented_image, augmented_bboxes = image, bboxes
# ===========================
# should_apply_op = tf.cast(
# tf.floor(tf.random_uniform([], dtype=tf.float32) + prob), tf.bool)
# augmented_image, augmented_bboxes = tf.cond(
# should_apply_op,
# lambda: func(image, bboxes, *args),
# lambda: (image, bboxes))
return augmented_image, augmented_bboxes
def select_and_apply_random_policy(policies, image, bboxes):
"""Select a random policy from `policies` and apply it to `image`."""
# ===========================
import random
sign = random.randint(0, len(policies)-1)
for (i, policy) in enumerate(policies):
if i == sign:
image, bboxes = policy(image, bboxes)
else:
image, bboxes = image, bboxes
# ===========================
# policy_to_select = tf.random_uniform([], maxval=len(policies), dtype=tf.int32)
# # Note that using tf.case instead of tf.conds would result in significantly
# # larger graphs and would even break export for some larger policies.
# for (i, policy) in enumerate(policies):
# image, bboxes = tf.cond(
# tf.equal(i, policy_to_select),
# lambda selected_policy=policy: selected_policy(image, bboxes),
# lambda: (image, bboxes))
return (image, bboxes)
def build_and_apply_nas_policy(policies, image, bboxes,
augmentation_hparams):
"""Build a policy from the given policies passed in and apply to image.
Args:
policies: list of lists of tuples in the form `(func, prob, level)`, `func`
is a string name of the augmentation function, `prob` is the probability
of applying the `func` operation, `level` is the input argument for
`func`.
image: tf.Tensor that the resulting policy will be applied to.
    bboxes: 2D Tensor of normalized bboxes with shape [N, 4]; each bbox has
      (min_y, min_x, max_y, max_x) coordinates in [0, 1].
augmentation_hparams: Hparams associated with the NAS learned policy.
Returns:
A version of image that now has data augmentation applied to it based on
    the `policies` passed into the function. Additionally, returns bboxes if
    a value for them is passed in that is not None.
"""
replace_value = [128, 128, 128]
# func is the string name of the augmentation function, prob is the
# probability of applying the operation and level is the parameter associated
# with the tf op.
# tf_policies are functions that take in an image and return an augmented
# image.
tf_policies = []
for policy in policies:
tf_policy = []
# Link string name to the correct python function and make sure the correct
# argument is passed into that function.
for policy_info in policy:
policy_info = list(policy_info) + [replace_value, augmentation_hparams]
tf_policy.append(_parse_policy_info(*policy_info))
    # Now build the tf policy that will apply the augmentation procedure
# on image.
def make_final_policy(tf_policy_):
def final_policy(image_, bboxes_):
for func, prob, args in tf_policy_:
image_, bboxes_ = _apply_func_with_prob(
func, image_, args, prob, bboxes_)
return image_, bboxes_
return final_policy
tf_policies.append(make_final_policy(tf_policy))
augmented_images, augmented_bboxes = select_and_apply_random_policy(
tf_policies, image, bboxes)
# If no bounding boxes were specified, then just return the images.
return (augmented_images, augmented_bboxes)
# TODO(barretzoph): Add in ArXiv link once paper is out.
def distort_image_with_autoaugment(image, bboxes, augmentation_name):
"""Applies the AutoAugment policy to `image` and `bboxes`.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
bboxes: `Tensor` of shape [N, 4] representing ground truth boxes that are
normalized between [0, 1].
augmentation_name: The name of the AutoAugment policy to use. The available
options are `v0`, `v1`, `v2`, `v3` and `test`. `v0` is the policy used for
all of the results in the paper and was found to achieve the best results
on the COCO dataset. `v1`, `v2` and `v3` are additional good policies
found on the COCO dataset that have slight variation in what operations
were used during the search procedure along with how many operations are
applied in parallel to a single image (2 vs 3).
Returns:
A tuple containing the augmented versions of `image` and `bboxes`.
"""
available_policies = {'v0': policy_v0, 'v1': policy_v1, 'v2': policy_v2,
'v3': policy_v3, 'test': policy_vtest}
if augmentation_name not in available_policies:
raise ValueError('Invalid augmentation_name: {}'.format(augmentation_name))
policy = available_policies[augmentation_name]()
# Hparams that will be used for AutoAugment.
augmentation_hparams = tf.contrib.training.HParams(
cutout_max_pad_fraction=0.75, cutout_bbox_replace_with_mean=False,
cutout_const=100, translate_const=250, cutout_bbox_const=50,
translate_bbox_const=120)
assert type(image) == type(bboxes)
return build_and_apply_nas_policy(policy, image, bboxes, augmentation_hparams)
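# --- Illustrative sketch (not part of the original pipeline) ----------------
# End-to-end usage on random NumPy data. This assumes a TF 1.x environment
# (tf.contrib is used for the HParams above) and that the NumPy branches of
# the ops selected by the 'test' policy behave as intended; shapes and values
# here are hypothetical.
def _demo_distort_image_with_autoaugment():
  image = np.random.randint(0, 256, size=(128, 128, 3), dtype=np.uint8)
  bboxes = np.array([[0.1, 0.1, 0.6, 0.6],
                     [0.3, 0.4, 0.9, 0.9]], dtype=np.float32)
  aug_image, aug_bboxes = distort_image_with_autoaugment(image, bboxes, 'test')
  return aug_image.shape, aug_bboxes.shape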
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
def mkLed():
m = Module('blinkled')
clk = m.Input('CLK')
rst = m.Input('RST')
datawidth = 32
addrwidth = 10
myaxi = vthread.AXIM(m, 'myaxi', clk, rst, datawidth)
ram_a = vthread.RAM(m, 'ram_a', clk, rst, datawidth, addrwidth)
ram_b = vthread.RAM(m, 'ram_b', clk, rst, datawidth, addrwidth)
ram_c = vthread.RAM(m, 'ram_c', clk, rst, datawidth, addrwidth)
strm = vthread.Stream(m, 'mystream', clk, rst)
a = strm.source('a') # 32-bit
b = strm.source('b') # 32-bit
a_list = strm.Split(a, width=10, point=0, signed=True) # 10-bit, (ceil(32/10)=4)-word
    # a_list : [{{8{a[31]}}, a[31:30]}, a[29:20], a[19:10], a[9:0]]
b_list = strm.Split(b, width=10, point=0, signed=True) # 10-bit, (ceil(32/10)=4)-word
    # b_list : [{{8{b[31]}}, b[31:30]}, b[29:20], b[19:10], b[9:0]]
c_list = [ai + bi for ai, bi in zip(a_list, b_list)] # SIMD-like addition
c = strm.Cat(*c_list) # 32-bit
strm.sink(c, 'c')
def comp_stream(size, offset):
strm.set_source('a', ram_a, offset, size)
strm.set_source('b', ram_b, offset, size)
strm.set_sink('c', ram_c, offset, size)
strm.run()
strm.join()
def comp_sequential(size, offset):
for i in range(size):
a = ram_a.read(i + offset)
b = ram_b.read(i + offset)
sum = 0
for j in range(0, 32, 10):
aj = (a >> j) & (2 ** 10 - 1)
bj = (b >> j) & (2 ** 10 - 1)
if j + 10 > 32:
pos = 9 - ((j + 10 - 32) % 10)
for k in range(j + 10 - 32):
aj = aj | ((aj & (2 ** pos)) << (k + 1))
bj = bj | ((bj & (2 ** pos)) << (k + 1))
cj = (aj + bj) & (2 ** 10 - 1)
sum = (cj << j) | sum
ram_c.write(i + offset, sum)
def check(size, offset_stream, offset_seq):
all_ok = True
for i in range(size):
st = ram_c.read(i + offset_stream)
sq = ram_c.read(i + offset_seq)
if vthread.verilog.NotEql(st, sq):
all_ok = False
print(i, st, sq)
if all_ok:
print('# verify: PASSED')
else:
print('# verify: FAILED')
def comp(size):
# stream
offset = 0
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 512, size)
for i in range(size):
ram_a.write(i + offset, -i)
ram_b.write(i + offset, -i + 10)
comp_stream(size, offset)
myaxi.dma_write(ram_c, offset, 1024, size)
# sequential
offset = size
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 512, size)
for i in range(size):
ram_a.write(i + offset, -i)
ram_b.write(i + offset, -i + 10)
comp_sequential(size, offset)
myaxi.dma_write(ram_c, offset, 1024 * 2, size)
# verification
check(size, 0, offset)
vthread.finish()
th = vthread.Thread(m, 'th_comp', clk, rst, comp)
fsm = th.start(32)
return m
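# Illustrative sketch (not part of the generated hardware): a plain-Python
# model of the 10-bit lane-wise addition that comp_stream/comp_sequential
# above implement. The lane width and example operands are hypothetical, and
# the partial top lane is simply truncated here rather than sign-extended as
# in comp_sequential.
def lanewise_add(a, b, word_width=32, lane_width=10):
    mask = (1 << lane_width) - 1
    result = 0
    for j in range(0, word_width, lane_width):
        aj = (a >> j) & mask
        bj = (b >> j) & mask
        cj = (aj + bj) & mask  # each lane wraps around independently
        result |= cj << j
    return result & ((1 << word_width) - 1)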
def mkTest(memimg_name=None):
m = Module('test')
# target instance
led = mkLed()
    # copy params and ports
params = m.copy_params(led)
ports = m.copy_sim_ports(led)
clk = ports['CLK']
rst = ports['RST']
memory = axi.AxiMemoryModel(m, 'memory', clk, rst, memimg_name=memimg_name)
memory.connect(ports, 'myaxi')
uut = m.Instance(led, 'uut',
params=m.connect_params(led),
ports=m.connect_ports(led))
#simulation.setup_waveform(m, uut)
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, m.make_reset(), period=100)
init.add(
Delay(1000000),
Systask('finish'),
)
return m
def run(filename='tmp.v', simtype='iverilog', outputfile=None):
if outputfile is None:
outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'
memimg_name = 'memimg_' + outputfile
test = mkTest(memimg_name=memimg_name)
if filename is not None:
test.to_verilog(filename)
sim = simulation.Simulator(test, sim=simtype)
rslt = sim.run(outputfile=outputfile)
lines = rslt.splitlines()
if simtype == 'iverilog' or (simtype == 'verilator' and lines[-1].startswith('-')):
rslt = '\n'.join(lines[:-1])
return rslt
if __name__ == '__main__':
rslt = run(filename='tmp.v')
print(rslt)
|
#!/usr/bin/env python
# File: get_outliers.py
# Created on: Wed Feb 20 14:12:45 2013
# Last Change: Thu Feb 21 16:02:23 2013
# Purpose of script: <+INSERT+>
# Author: Steven Boada
from mk_galaxy_struc import mk_galaxy_struc
from random import choice
galaxies = mk_galaxy_struc()
galaxies = list(filter(lambda galaxy: galaxy.ston_I > 30., galaxies))
high_group = [galaxy.ID for galaxy in galaxies if galaxy.ICD_IH > 0.25]
group = [galaxy.ID for galaxy in galaxies if 0.1 > galaxy.ICD_IH > 0 and
         galaxy.field == 1]
low_group = [choice(group) for i in range(len(high_group))]
with open('outlierstamps.sh','wt') as f1:
    f1.writelines('#!/bin/bash\n')
f1.writelines('ds9 ')
base1 = './GSD_IJH_20kpc/'
base2 = './GSD_IaH_colormaps/'
base3 = './GSD_SEGS/'
for ID in low_group:
f1.writelines('-frame new -file ')
f1.writelines(base2+'GSD_'+str(int(ID))+'I_a_H.fits ')
for ID in low_group:
f1.writelines('-frame new -file ')
f1.writelines(base3+'GSD_'+str(int(ID))+'_seg.fits ')
for ID in low_group:
f1.writelines('-rgb ')
f1.writelines('-blue '+base1+'GSD_'+str(int(ID))+'_I.fits ')
f1.writelines('-green '+base1+'GSD_'+str(int(ID))+'_J.fits ')
f1.writelines('-red '+base1+'GSD_'+str(int(ID))+'_H.fits ')
for ID in high_group:
f1.writelines('-frame new -file ')
f1.writelines(base2+'GSD_'+str(int(ID))+'I_a_H.fits ')
for ID in high_group:
f1.writelines('-frame new -file ')
f1.writelines(base3+'GSD_'+str(int(ID))+'_seg.fits ')
for ID in high_group:
f1.writelines('-rgb ')
f1.writelines('-blue '+base1+'GSD_'+str(int(ID))+'_I.fits ')
f1.writelines('-green '+base1+'GSD_'+str(int(ID))+'_J.fits ')
f1.writelines('-red '+base1+'GSD_'+str(int(ID))+'_H.fits ')
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import django
import djongo
from django.conf import settings
settings.configure(DEBUG=True, STATIC_URL='/')
django.setup()
# -- Project information -----------------------------------------------------
project = 'djongo'
copyright = '2018, nesdis'
author = 'nesdis'
# The short X.Y version
version = djongo.__version__
# The full version, including alpha/beta/rc tags
release = djongo.__version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'djongodoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'djongo.tex', 'djongo Documentation',
'nesdis', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'djongo', 'djongo Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'djongo', 'djongo Documentation',
author, 'djongo', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
|
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from .registry import register
from .layers import create as create_layer
from .layers.activations import create as create_act
from .layers import SpectralConv2d
__all__ = ['ResNeXtER', 'resnexter18', 'resnexter34', 'resnexter50', 'resnexter101', 'resnexter152']
def dwconv7x7(filters):
return nn.Conv2d(filters, filters, kernel_size=7,
padding=3, groups=filters, bias=False)
def conv1x1(in_filters, out_filters):
return nn.Linear(in_filters, out_filters)
class Block(nn.Module):
expansion = 1
def __init__(self, filters, drop_path=0, norm_layer=None, scale_init=1e-5):
super().__init__()
if norm_layer is None:
norm_layer = nn.InstanceNorm2d
self.dwconv = dwconv7x7(filters)
self.norm = norm_layer(filters)
self.act = nn.GELU()
self.pwconv1 = conv1x1(filters, filters * 4)
self.pwconv2 = conv1x1(filters * 4, filters)
self.scale = create_layer("scale", init_value=scale_init)
self.balance = create_layer("balance")
self.drop = create_layer("drop_path", drop_prob=drop_path)
self.attn = create_layer("pixel_attn", f_in=filters, dropout=0.1)
def forward(self, x):
out = self.dwconv(x)
out = out.permute(0,2,3,1)
out = self.norm(out)
out = self.pwconv1(out)
out = self.act(out)
out = self.pwconv2(out)
out = self.scale(out)
out = out.permute(0,3,1,2)
out = self.balance(out, self.attn(out))
out = x + self.drop(out)
return out
@register("resnexter")
class ResNeXtER(nn.Module):
def __init__(self, num_colors=3, layers=[2,2,2], zero_init_residual=False,
num_filters=128, out_filters=64, norm_layer=None):
super().__init__()
self.out_dim = out_filters
if norm_layer is None:
norm_layer = partial(nn.InstanceNorm2d, affine=True)
else:
norm_layer = eval(norm_layer) if isinstance(norm_layer, str) else norm_layer
self._norm_layer = norm_layer
self.filters = num_filters
self.head = nn.Sequential(
nn.Conv2d(num_colors, self.filters, 3),
self._norm_layer(self.filters),
nn.GELU(),
create_layer("balanced_attn", in_planes=self.filters)
)
self.norm = norm_layer(self.filters)
self.act = nn.GELU()
self.attn = create_layer("balanced_attn", in_planes=self.filters)
self.layer1 = self._make_layer(Block, self.filters, layers[0])
self.layer2 = self._make_layer(Block, self.filters * 2, layers[1])
self.layer3 = self._make_layer(Block, self.filters * 4, layers[2])
self.fuse1 = nn.Conv2d(self.filters * 7, self.filters, kernel_size=7, padding=3)
self.fuse2 = nn.Conv2d(self.filters * 7, self.filters, kernel_size=5, padding=2)
self.fuse3 = nn.Conv2d(self.filters * 7, self.filters, kernel_size=3, padding=1)
self.balance1 = create_layer("balance")
self.balance2 = create_layer("balance")
self.balance3 = create_layer("balance")
self.gff = nn.Conv2d(self.filters, out_filters, kernel_size=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if zero_init_residual:
for m in self.modules():
if isinstance(m, Block):
nn.init.constant_(m.norm.weight, 0)
def _make_layer(self, block, filters, blocks,
drop_path=0.1, dropout=0.2):
"""
:param last_relu: in metric learning paradigm, the final relu is removed (last_relu = False)
"""
norm_layer = nn.InstanceNorm2d
layers = list()
layers.append(block(filters, drop_path=drop_path, norm_layer=norm_layer))
layers.append(SpectralConv2d(filters, filters, 12, 12))
for i in range(1, blocks):
layers.append(block(filters, drop_path=drop_path, norm_layer=norm_layer))
layers.append(SpectralConv2d(filters, filters, 12, 12))
return nn.Sequential(*layers)
def forward(self, x):
x = self.head(x)
c1 = self.layer1(x)
c2 = self.layer2(torch.cat([x,c1],dim=1))
c3 = self.layer3(torch.cat([x,c1,c2],dim=1))
g = torch.cat([c1, c2, c3], dim=1)
f1 = self.fuse1(g)
f2 = self.fuse2(g)
f3 = self.fuse3(g)
f = self.balance1(self.balance2(x, f1), self.balance3(f2, f3))
out = self.gff(f)
return out
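# Illustrative sketch (not part of the original module): constructs the model
# and runs a dummy forward pass. It assumes the custom layers obtained from
# create_layer ("scale", "balance", "drop_path", "pixel_attn", "balanced_attn")
# and SpectralConv2d preserve the spatial size; the input resolution below is
# a hypothetical example.
def _demo_resnexter():
    model = ResNeXtER(num_colors=3, layers=[2, 2, 2],
                      num_filters=128, out_filters=64)
    x = torch.randn(1, 3, 64, 64)
    with torch.no_grad():
        y = model(x)
    # The stem conv uses a 3x3 kernel without padding, so the spatial size
    # shrinks by 2; the channel count equals out_filters.
    return y.shape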
|
import warnings
import numpy as np
import pandas as pd
import xarray as xr
import geopandas
from rasterio import features
from affine import Affine
from python.aux.utils import calc_area
np.seterr(divide='ignore', invalid='ignore')
"""Contains methods for the flowmodel (transport model & local model)"""
def get_mask_of_basin(da, kw_basins='Danube'):
"""Return a mask where all points outside the selected basin are False.
Parameters:
-----------
da : xr.DataArray
contains the coordinates
kw_basins : str
identifier of the basin in the basins dataset
"""
def transform_from_latlon(lat, lon):
lat = np.asarray(lat)
lon = np.asarray(lon)
trans = Affine.translation(lon[0], lat[0])
scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
return trans * scale
def rasterize(shapes, coords, fill=np.nan, **kwargs):
"""Rasterize a list of (geometry, fill_value) tuples onto the given
xray coordinates. This only works for 1d latitude and longitude
arrays.
"""
transform = transform_from_latlon(coords['latitude'], coords['longitude'])
out_shape = (len(coords['latitude']), len(coords['longitude']))
raster = features.rasterize(shapes, out_shape=out_shape,
fill=fill, transform=transform,
dtype=float, **kwargs)
return xr.DataArray(raster, coords=coords, dims=('latitude', 'longitude'))
# this shapefile is from natural earth data
# http://www.naturalearthdata.com/downloads/10m-cultural-vectors/10m-admin-1-states-provinces/
shp2 = '/raid/home/srvx7/lehre/users/a1303583/ipython/ml_flood/data/' \
+ 'drainage_basins/Major_Basins_of_the_World.shp'
basins = geopandas.read_file(shp2)
single_basin = basins.query("NAME == '"+kw_basins+"'").reset_index(drop=True)
shapes = [(shape, n) for n, shape in enumerate(single_basin.geometry)]
da['basins'] = rasterize(shapes, da.coords)
da = da.basins == 0
return da.drop('basins') # the basins coordinate is not used anymore from here on
def select_upstream(mask_river_in_catchment, lat, lon, basin='Danube'):
"""Return a mask containing upstream river gridpoints.
Parameters
----------
mask_river_in_catchment : xr.DataArray
array that is True only for river gridpoints within a certain catchment
        coords: only latitude and longitude
lat, lon : float
latitude and longitude of the considered point
basin : str
identifier of the basin in the basins dataset
Returns
-------
xr.DataArray
0/1 mask array with (latitude, longitude) as coordinates
"""
# this condition should be replaced with a terrain dependent mask
# but generally speaking, there will always be some points returned that
# do not influence the downstream point;
# the statistical model should ignore those points as learned from the dataset
da = mask_river_in_catchment.load()
is_west = (~np.isnan(da.where(da.longitude <= lon))).astype(bool)
mask_basin = get_mask_of_basin(da, kw_basins=basin)
nearby_mask = da*0.
nearby_mask.loc[dict(latitude=slice(lat+1.5, lat-1.5),
longitude=slice(lon-1.5, lon+1.5))] = 1.
nearby_mask = nearby_mask.astype(bool)
mask = mask_basin & nearby_mask & is_west & mask_river_in_catchment
if 'basins' in mask.coords:
mask = mask.drop('basins')
if 'time' in mask.coords:
mask = mask.drop('time') # time and basins dimension make no sense here
return mask
def add_shifted_variables(ds, shifts, variables='all'):
"""Adds additional variables to an array which are shifted in time.
Parameters
----------
ds : xr.Dataset
shifts : list(int, )
e.g. range(1,4); shift=1 means having the value x(t=0) at t=1
variables : str or list
e.g. ['lsp', 'cp']
Returns
-------
xr.Dataset
the input Dataset with the shifted timeseries added as additional variable
"""
if isinstance(ds, xr.DataArray):
ds = ds.to_dataset() # enforce input type
if variables == 'all':
variables = ds.data_vars
for var in variables:
for i in shifts:
if i == 0:
continue # zero-shift is the original timeseries
if i > 0:
sign = '-'
else:
sign = '+'
newvar = var+sign+str(i)
ds[newvar] = ds[var].shift(time=i)
return ds
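# Illustrative usage sketch (not part of the original module; toy data for
# demonstration only): shift=1 stores the value x(t-1) under the new name 'lsp-1'.
def _example_add_shifted_variables():
    times = pd.date_range('2013-01-01', periods=5)
    ds = xr.Dataset({'lsp': ('time', np.arange(5.))}, coords={'time': times})
    ds = add_shifted_variables(ds, shifts=[1, 2], variables=['lsp'])
    return ds  # now contains 'lsp', 'lsp-1' and 'lsp-2'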
def shift_and_aggregate(df, shift, aggregate):
"""
To get a predictor from [lsp(t-3), ..., lsp(t-6)],
use shift = 3 and aggregate = 3
Parameters
----------
shift : int
aggregate : int
"""
return df.shift(time=shift).rolling(time=aggregate).sum()/aggregate
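# Illustrative usage sketch (toy data, not part of the original module): with
# shift=3 and aggregate=3 the value at time t is the mean of lsp(t-5), lsp(t-4), lsp(t-3).
def _example_shift_and_aggregate():
    times = pd.date_range('2013-01-01', periods=10)
    lsp = xr.DataArray(np.arange(10.), dims='time', coords={'time': times})
    return shift_and_aggregate(lsp, shift=3, aggregate=3)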
def aggregate_clustersum(ds, cluster, clusterdim):
"""Aggregate a 3-dimensional array over certain points (latitude, longitude).
Parameters
----------
ds : xr.Dataset
the array to aggregate (collapse) spatially
cluster : xr.DataArray
3-dimensional array (clusterdim, latitude, longitude),
`clusterdim` contains the True/False mask of points to aggregate over
e.g. len(clusterdim)=4 means you have 4 clusters
clusterdim : str
dimension name to access the different True/False masks
Returns
-------
xr.DataArray
1-dimensional
"""
out = xr.Dataset()
# enforce same coordinates
interp = True
if (len(ds.latitude.values) == len(cluster.latitude.values) and
len(ds.longitude.values) == len(cluster.longitude.values)):
if (np.allclose(ds.latitude.values, cluster.latitude.values) and
np.allclose(ds.longitude.values, cluster.longitude.values)):
interp = False
if interp:
ds = ds.interp(latitude=cluster.latitude, longitude=cluster.longitude)
area_per_gridpoint = calc_area(ds.isel(time=0))
if isinstance(ds, xr.DataArray):
ds = ds.to_dataset()
for var in ds:
for cl in cluster.coords[clusterdim]:
newname = var+'_cluster'+str(cl.values)
this_cluster = cluster.sel({clusterdim: cl})
da = ds[var].where(this_cluster, 0.) # no contribution from outside cluster
out[newname] = xr.dot(da, area_per_gridpoint)
return out.drop(clusterdim)
def cluster_by_discharge(dis_2d, bin_edges):
"""Custom clustering by discharge.
"""
cluster = dict()
for i in range(len(bin_edges)-1):
cluster[str(i)] = (dis_2d >= bin_edges[i]) & (dis_2d < bin_edges[i+1])
cluster[str(i)].attrs['units'] = None
return xr.Dataset(cluster,
                      coords=dict(clusterId=('clusterId', range(len(bin_edges)-1)),
latitude=('latitude', dis_2d.latitude),
longitude=('longitude', dis_2d.longitude)))
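# Illustrative usage sketch (toy 2-d discharge field, not part of the original
# module): four bin edges define three discharge clusters, returned as boolean masks.
def _example_cluster_by_discharge():
    dis_2d = xr.DataArray(np.random.rand(2, 3) * 100.,
                          dims=('latitude', 'longitude'),
                          coords={'latitude': [48.0, 48.5],
                                  'longitude': [13.0, 13.5, 14.0]})
    return cluster_by_discharge(dis_2d, bin_edges=[0., 10., 50., 100.])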
def reshape_scalar_predictand(X_dis, y):
"""Reshape, merge predictor/predictand in time, drop nans.
Parameters
----------
X_dis : xr.Dataset
variables: time shifted predictors (name irrelevant)
coords: time, latitude, longitude
y : xr.DataArray
coords: time
"""
if isinstance(X_dis, xr.Dataset):
X_dis = X_dis.to_array(dim='var_dimension')
# stack -> seen as one dimension for the model
stack_dims = [a for a in X_dis.dims if a != 'time'] # all except time
X_dis = X_dis.stack(features=stack_dims)
Xar = X_dis.dropna('features', how='all') # drop features that only contain NaN
if isinstance(y, xr.Dataset):
if len(y.data_vars) > 1:
warnings.warn('Supplied `y` with more than one variable.'
'Which is the predictand? Supply only one!')
for v in y:
y = y[v] # use the first
break
yar = y
if len(yar.dims) > 1:
raise NotImplementedError('y.dims: '+str(yar.dims) +
' Supply only one predictand dimension, e.g. `time`!')
# to be sure that these dims are not in the output
for coord in ['latitude', 'longitude']:
if coord in yar.coords:
yar = yar.drop(coord)
# merge times
yar.coords['features'] = 'predictand'
Xy = xr.concat([Xar, yar], dim='features') # maybe merge instead concat?
Xyt = Xy.dropna('time', how='any') # drop rows with nan values
Xda = Xyt[:, :-1] # last column is predictand
yda = Xyt[:, -1].drop('features') # features was only needed in merge
return Xda, yda
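# Illustrative usage sketch (toy data, not part of the original module): spatial
# predictor fields are stacked into a single 'features' dimension and aligned in
# time with a scalar predictand.
def _example_reshape_scalar_predictand():
    times = pd.date_range('2013-01-01', periods=8)
    X_dis = xr.Dataset({'lsp': (('time', 'latitude', 'longitude'),
                                np.random.rand(8, 2, 3))},
                       coords={'time': times,
                               'latitude': [48.0, 48.5],
                               'longitude': [13.0, 13.5, 14.0]})
    y = xr.DataArray(np.random.rand(8), dims='time', coords={'time': times}, name='dis')
    return reshape_scalar_predictand(X_dis, y)  # -> (2-d X, 1-d y), NaN rows dropped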
def multiday_prediction_to_timeseries(prediction):
"""Convert a 2-dimensional xarray to 1-dimensional with nonunique time-index.
Parameters
----------
xar : xr.DataArray
2-dimensional xarray (init_time, forecast_day)
Returns
-------
xr.DataArray
1-dimensional (time) array with nonunique time index
"""
forecast_days = len(prediction.forecast_day)
inits = np.array(prediction.init_time)[:, np.newaxis]
# repeat the initial time for every forecast day in a column
times = np.repeat(inits, forecast_days, axis=1)
# add the forecast day to each column
for i, day in enumerate(prediction.forecast_day.values):
times[:, i] += np.timedelta64(day, 'D')
times = times.ravel()
data = prediction.values.ravel()
return pd.Series(data, index=times)
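# Illustrative usage sketch (toy data, not part of the original module): two
# forecasts of three days each are flattened into one series indexed by their
# (non-unique) valid times.
def _example_multiday_prediction_to_timeseries():
    prediction = xr.DataArray(np.random.rand(2, 3),
                              dims=('init_time', 'forecast_day'),
                              coords={'init_time': pd.date_range('2013-05-18', periods=2),
                                      'forecast_day': [0, 1, 2]})
    return multiday_prediction_to_timeseries(prediction)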
def reshape_multiday_predictand(X_dis, y):
"""Reshape, merge predictor/predictand in time, drop nans.
Parameters
----------
X_dis : xr.Dataset
variables: time shifted predictors (name irrelevant)
coords: time, latitude, longitude
y : xr.DataArray (multiple variables, multiple timesteps)
coords: time, forecast_day
"""
if isinstance(X_dis, xr.Dataset):
X_dis = X_dis.to_array(dim='var_dimension')
# stack -> seen as one dimension for the model
stack_dims = [a for a in X_dis.dims if a != 'time'] # all except time
X_dis = X_dis.stack(features=stack_dims)
Xar = X_dis.dropna('features', how='all') # drop features that only contain NaN
if not isinstance(y, xr.DataArray):
raise TypeError('Supply `y` as xr.DataArray.'
'with coords (time, forecast_day)!')
# to be sure that these dims are not in the output
for coord in ['latitude', 'longitude']:
if coord in y.coords:
y = y.drop(coord)
out_dim = len(y.forecast_day)
y = y.rename(dict(forecast_day='features')) # rename temporarily
Xy = xr.concat([Xar, y], dim='features') # maybe merge instead concat?
Xyt = Xy.dropna('time', how='any') # drop rows with nan values
Xda = Xyt[:, :-out_dim] # last column is predictand
yda = Xyt[:, -out_dim:] # features was only needed in merge
yda = yda.rename(dict(features='forecast_day')) # change renaming back to original
return Xda, yda
def add_valid_time(pred):
"""Add a another time coordinate giving the valid time of a forecast.
Parameters
----------
pred : xr.DataArray
2-dimensional (init_time, forecast_day)
Returns
-------
xr.DataArray
with an additional 'time' coordinate of forecast validity.
"""
validtime = np.zeros((len(pred.init_time), len(pred.forecast_day)))
fcst_days = pred.forecast_day.values
# iterate over columns and add the respective number of days
for i, fcst_day in enumerate(fcst_days):
validtime[:, i] = pred.init_time.values + np.timedelta64(fcst_day, 'D')
pred.coords['time'] = (('init_time', 'forecast_day'),
validtime.astype(np.datetime64))
return pred
def add_future_precip(X, future_days=13):
"""Add shifted precipitation variables.
Parameters
----------
X : xr.Dataset
containing 'lsp' and 'cp' variables
future_days : int
create variables that are shifted by 1 up to `future_days`-days
Returns
-------
xr.Dataset
with additional shifted variables
"""
for var in ['lsp', 'cp']:
for i in range(1, future_days+1):
newvar = var+'+'+str(i)
            X[newvar] = X[var].shift(time=-i)  # future precip as current day variable
    return X
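# Illustrative usage sketch (toy data, not part of the original module): adds
# 'lsp+1', 'lsp+2', 'cp+1', 'cp+2' holding future precipitation as if it were
# known on the current day.
def _example_add_future_precip():
    times = pd.date_range('2013-01-01', periods=6)
    X = xr.Dataset({'lsp': ('time', np.random.rand(6)),
                    'cp': ('time', np.random.rand(6))},
                   coords={'time': times})
    return add_future_precip(X, future_days=2)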
def add_future_vars(X, future_days=13):
"""Add shifted variables (from future time points) to the dataset
for multi-day forecasts.
Parameters
----------
X : xr.Dataset
variables: time shifted features
coords: time
future_days : int
"""
if isinstance(X, xr.Dataset):
for var in X.variables:
            if var != 'time':
for i in range(1, future_days+1):
newvar = var+'+'+str(i)
# future precip as current day variable
X[newvar] = X[var].shift(time=-i)
else:
raise TypeError('Input type has to be a xr.Dataset!')
return X
def add_time(vector, time, name=None):
"""Converts input vector to xarray.DataArray with the corresponding input time coordinate.
Parameters
----------
vector : numpy.array
time : xr.DataArray
name : str
"""
return xr.DataArray(vector, dims=('time'), coords={'time': time}, name=name)
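# Illustrative usage sketch (not part of the original module): wrap a bare
# prediction vector in a DataArray carrying the time coordinate of the predictor
# matrix it was computed from.
def _example_add_time():
    times = pd.date_range('2013-01-01', periods=4)
    y_pred = np.random.rand(4)
    return add_time(y_pred, times, name='forecast')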
def generate_prediction_array(y_pred, y_reana, forecast_range=14):
"""Convenience function to generate a [number of forecasts, forecast range] shaped xr.DataArray from the one
dimensional xr.DataArray input prediction and converts the predicted discharge change into absolute values,
starting from t=t0 with the reanalysis value for each forecast.
Parameters
----------
y_pred : xr.DataArray
y_reana : xr.DataArray
forecast_range : int
"""
# reorganize data into the shape [forecast_range, number_of_forecasts]
# add +1 to forecast range to include the init state in the length
num_forecasts = int(np.floor(y_pred.shape[0]/(forecast_range+1)))
full_forecast_len = num_forecasts*(forecast_range+1)
new_pred = y_pred[:full_forecast_len].copy()
time_new = y_pred.time[:full_forecast_len].copy()
time_new_data = time_new.values.reshape([num_forecasts, (forecast_range+1)])
pred_multif_data = new_pred.values.reshape([num_forecasts, (forecast_range+1)])
# set init to reanalysis value
pred_multif_data[:, 0] = y_reana.where(new_pred)[0::(forecast_range+1)].values
# cumulative sum to accumulate the forecasted change
pred_multif_data_fin = np.cumsum(pred_multif_data, axis=1)
pred_multif = xr.DataArray(pred_multif_data_fin,
coords={'num_of_forecast': range(1, num_forecasts+1),
'forecast_day': range(0, forecast_range+1),
'time': (('num_of_forecast', 'forecast_day'), time_new_data)},
dims=['num_of_forecast', 'forecast_day'],
name='prediction')
return pred_multif
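# Illustrative usage sketch (toy data, not part of the original module): reshape a
# daily 1-d prediction of discharge *change* into [forecast, forecast_day] blocks,
# each anchored at the reanalysis value on its init day and accumulated forward.
def _example_generate_prediction_array():
    times = pd.date_range('2013-05-18', periods=6)
    y_pred = xr.DataArray(np.random.rand(6) + 0.1, dims='time', coords={'time': times})
    y_reana = xr.DataArray(np.random.rand(6) + 1.0, dims='time', coords={'time': times})
    return generate_prediction_array(y_pred, y_reana, forecast_range=2)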
def remove_outlier(x):
"""Removes outliers under, over 1th, 99th percentile of the input pandas series.
Parameters
----------
x : pd.Series
"""
x99 = x.quantile(0.99)
x01 = x.quantile(0.01)
x = x.where(x > x01).dropna()
x = x.where(x < x99).dropna()
return x
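# Illustrative usage sketch (not part of the original module): drop the most
# extreme 2 % of a series before further analysis.
def _example_remove_outlier():
    x = pd.Series(np.random.randn(1000))
    return remove_outlier(x)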
def multi_forecast_case_study(pipe_case, x, y):
"""
Convenience function for predicting discharge via the pre-trained input pipe.
    Loads glofas forecast-rerun data from a path set inside the function, used to evaluate
the model predictions.
Outputs are 3 xr.DataArrays: One for the model forecast, one for the forecast reruns,
one for the reanalysis.
Parameters
----------
    pipe_case : trained ML pipeline, ready for prediction
x : xr.DataArray
y : xr.DataArray
Returns
-------
xr.DataArray (3 times)
"""
y_2013 = y
X_2013 = x
multif_list = []
multifrerun_list = []
for forecast in range(1, 5):
if forecast == 1:
date_init = '2013-05-18'
date_end = '2013-06-17'
fr_dir = '2013051800'
elif forecast == 2:
date_init = '2013-05-22'
date_end = '2013-06-21'
fr_dir = '2013052200'
elif forecast == 3:
date_init = '2013-05-25'
date_end = '2013-06-24'
fr_dir = '2013052500'
elif forecast == 4:
date_init = '2013-05-29'
date_end = '2013-06-28'
fr_dir = '2013052900'
X_case = X_2013.sel(time=slice(date_init, date_end)).copy()
# not needed with the new dataset containing 1981-2016
# X_case = X_case.drop(dim='features', labels='lsp-56-180')
# y_case = y_2013.sel(time=slice(date_init, date_end)).copy()
# prediction start from every nth day
# if in doubt, leave n = 1 !!!
n = 1
X_pred = X_case[::n].copy()
y_pred = pipe_case.predict(X_pred)
y_pred = add_time(y_pred, X_pred.time, name='forecast')
multif_case = generate_prediction_array(y_pred, y_2013, forecast_range=30)
multif_case.num_of_forecast.values = [forecast]
multif_list.append(multif_case)
        # add glofas forecast rerun data
frerun = xr.open_mfdataset(f'../../data/glofas-freruns/{fr_dir}/glof*', combine='by_coords')
poi = dict(lat=48.35, lon=13.95)
fr = frerun['dis'].sel(lon=slice(13.9, 14.), lat=slice(48.4, 48.3)).compute()
fr = fr.where(~np.isnan(fr), 0).drop(labels=['lat', 'lon']).squeeze()
multifrerun_list.append(fr)
# merge forecasts into one big array
date_init = '2013-05-18'
date_end = '2013-06-28'
y_case_fin = y_2013.sel(time=slice(date_init, date_end)).copy()
X_case_multi_core = X_2013.sel(time=slice(date_init, date_end)
).isel(features=1).copy().drop('features')*np.nan
X_list = []
for fc in multif_list:
X_iter = X_case_multi_core.copy()
X_iter.loc[{'time': fc.time.values.ravel()}] = fc.values[0]
X_list.append(X_iter)
X_multif_fin = xr.concat(X_list, dim='num_of_forecast')
X_multif_fin.name = 'prediction'
X_list = []
for frr in multifrerun_list:
X_iter = X_case_multi_core.copy()
ens_list = []
for fr_num in frr.ensemble:
fr_iter = frr.sel(ensemble=fr_num)
X_ens_iter = X_iter.copy()
X_ens_iter.loc[{'time': frr.time.values}] = fr_iter.values
ens_list.append(X_ens_iter)
ens_da = xr.concat(ens_list, dim='ensemble')
X_list.append(ens_da)
X_multifr_fin = xr.concat(X_list, dim='num_of_forecast')
X_multifr_fin.name = 'forecast rerun'
return X_multif_fin, X_multifr_fin, y_case_fin
def multi_forecast_case_study_tdnn(pipe_case):
"""
Convenience function for predicting discharge via the pre-trained input pipe.
    Loads glofas forecast-rerun data from a path set inside the function, used to evaluate
the model predictions.
Outputs are 3 xr.DataArrays: One for the model forecast, one for the forecast reruns,
one for the truth/reanalysis.
Parameters
----------
    pipe_case : trained ML pipeline, ready for prediction
Returns
-------
xr.DataArray (3 times)
"""
features_2013 = xr.open_dataset('../../data/features_xy.nc')
y = features_2013['dis']
X = features_2013.drop(['dis', 'dis_diff'])
X, y = reshape_scalar_predictand(X, y)
multif_list = []
multifrerun_list = []
for forecast in range(1, 5):
if forecast == 1:
date_init = '2013-05-18'
date_end = '2013-06-17'
fr_dir = '2013051800'
elif forecast == 2:
date_init = '2013-05-22'
date_end = '2013-06-21'
fr_dir = '2013052200'
elif forecast == 3:
date_init = '2013-05-25'
date_end = '2013-06-24'
fr_dir = '2013052500'
elif forecast == 4:
date_init = '2013-05-29'
date_end = '2013-06-28'
fr_dir = '2013052900'
X_case = X.sel(time=slice(date_init, date_end)).copy()
# prediction start from every nth day
# if in doubt, leave n = 1 !!!
n = 1
X_pred = X_case[::n].copy()
y_pred = pipe_case.predict(X_pred)
multif_case = generate_prediction_array(y_pred, y, forecast_range=30)
multif_case.num_of_forecast.values = [forecast]
multif_list.append(multif_case)
        # add glofas forecast rerun data
frerun = xr.open_mfdataset(f'../../data/glofas-freruns/{fr_dir}/glof*',
combine='by_coords')
fr = frerun['dis'].sel(lon=slice(13.9, 14.), lat=slice(48.4, 48.3))
fr = fr.drop(labels=['lat', 'lon']).squeeze()
multifrerun_list.append(fr)
# merge forecasts into one big array
date_init = '2013-05-18'
date_end = '2013-06-28'
y_case_fin = y.sel(time=slice(date_init, date_end)).copy()
X_case_multi_core = X.sel(time=slice(date_init, date_end)
).isel(features=1).copy().drop('features')*np.nan
X_list = []
for fc in multif_list:
X_iter = X_case_multi_core.copy()
X_iter.loc[{'time': fc.time.values.ravel()}] = fc.values[0]
X_list.append(X_iter)
X_multif_fin = xr.concat(X_list, dim='num_of_forecast')
X_multif_fin.name = 'prediction'
X_list = []
for frr in multifrerun_list:
X_iter = X_case_multi_core.copy()
ens_list = []
for fr_num in frr.ensemble:
fr_iter = frr.sel(ensemble=fr_num)
X_ens_iter = X_iter.copy()
X_ens_iter.loc[{'time': frr.time.values}] = fr_iter.values
ens_list.append(X_ens_iter)
ens_da = xr.concat(ens_list, dim='ensemble')
X_list.append(ens_da)
X_multifr_fin = xr.concat(X_list, dim='num_of_forecast')
X_multifr_fin.name = 'forecast rerun'
return X_multif_fin, X_multifr_fin, y_case_fin
|
# Copyright 2018 Calculation Consulting [calculationconsulting.com]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys, os
import logging
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import powerlaw
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import load_model
import torch
import torch.nn as nn
import onnx
from onnx import numpy_helper
import sklearn
from sklearn.decomposition import TruncatedSVD
#
# this is used to allow editing in Eclipse but also
# building on the command line
# see: https://stackoverflow.com/questions/14132789/relative-imports-for-the-billionth-time
#
from .RMT_Util import *
from .constants import *
WW_NAME = 'weightwatcher'
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(WW_NAME)
logger.setLevel(logging.INFO)
mpl_logger = logging.getLogger("matplotlib")
mpl_logger.setLevel(logging.WARNING)
MAX_NUM_EVALS = 50000
DEFAULT_PARAMS = {'glorot_fix': False, 'normalize':False, 'conv2d_norm':True, 'randomize': True, 'savefig':False,
'rescale':True , 'deltaEs':False, 'intra':False, 'channels':None, 'conv2d_fft':False}
TPL = 'truncated_power_law'
POWER_LAW = 'power_law'
LOG_NORMAL = 'lognormal'
EXPONENTIAL = 'exponential'
def main():
"""
Weight Watcher
"""
print("WeightWatcher command line support coming later. https://calculationconsulting.com")
class ONNXLayer:
"""Helper class to support ONNX layers
Turns out the op_type is option, so we have to
infers the layer_ type from the dimension of the weights
[a,b,c,d] -> CONV2D
[a,b] -> DENSE
"""
def __init__(self, model, inode, node):
self.model = model
self.node = node
self.layer_id = inode
self.name = node.name
self.dims = node.dims
self.the_type = LAYER_TYPE.UNKNOWN
if len(self.dims) == 4:
self.the_type = LAYER_TYPE.CONV2D
elif len(self.dims) == 2:
self.the_type = LAYER_TYPE.DENSE
else:
logger.debug("Unsupported ONNX Layer, dims = {}".format(self.dims))
def get_weights(self):
return numpy_helper.to_array(self.node)
def set_weights(self, idx, W):
T = numpy_helper.from_array(W)
self.model.graph.initializer[idx].CopyFrom(T)
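# Illustrative usage sketch (not part of the original module; the file name is a
# placeholder): wrap every initializer node of an ONNX graph in an ONNXLayer and
# log the inferred layer type.
def _example_onnx_layer_types(onnx_path="model.onnx"):
    model = onnx.load(onnx_path)
    for inode, node in enumerate(model.graph.initializer):
        layer = ONNXLayer(model, inode, node)
        logger.info("layer %s dims=%s type=%s", layer.name, layer.dims, layer.the_type)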
class WWLayer:
"""WW wrapper layer to Keras and PyTorch Layer layer objects
Uses pythong metaprogramming to add result columns for the final details dataframe"""
def __init__(self, layer, layer_id=-1, name=None,
the_type=LAYER_TYPE.UNKNOWN,
framework=FRAMEWORK.UNKNOWN,
channels=CHANNELS.UNKNOWN,
skipped=False, make_weights=True, params=DEFAULT_PARAMS):
self.layer = layer
self.layer_id = layer_id
self.name = name
self.skipped = skipped
self.the_type = the_type
self.framework = framework
self.channels = channels
# get the LAYER_TYPE
self.the_type = self.layer_type(self.layer)
if self.name is None and hasattr(self.layer, 'name'):
            self.name = self.layer.name
# original weights (tensor) and biases
self.has_weights = False
self.weights = None
# extracted weight matrices
self.num_W = 0
self.Wmats = []
self.N = 0
self.M = 0
self.num_components = self.M # default for full SVD, not used yet
self.rf = 1 # receptive field size, default for dense layer
        self.conv2d_count = 1  # reset by slice iterator for backward compatibility with ww2x
self.w_norm = 1 # reset if normalize, conv2D_norm, or glorot_fix used
# to be used for conv2d_fft approach
self.inputs_shape = []
self.outputs_shape = []
# evals
self.evals = None
self.rand_evals = None
# details, set by metaprogramming in apply_xxx() methods
self.columns = []
# conv2d_fft
# only applies to Conv2D layers
# layer, this would be some kind of layer weight options
self.params = params
# don't make if we set the weights externally
if make_weights:
self.make_weights()
def add_column(self, name, value):
"""Add column to the details dataframe"""
self.columns.append(name)
self.__dict__[name] = value
def get_row(self):
"""get a details dataframe row from the columns and metadata"""
data = {}
data['layer_id'] = self.layer_id
data['name'] = self.name
data['layer_type'] = str(self.the_type)
data['N'] = self.N
data['M'] = self.M
data['rf'] = self.rf
for col in self.columns:
data[col] = self.__dict__[col]
return data
def __repr__(self):
return "WWLayer()"
def __str__(self):
return "WWLayer {} {} {} {} skipped {}".format(self.layer_id, self.name,
self.framework.name, self.the_type.name, self.skipped)
def layer_type(self, layer):
"""Given a framework layer, determine the weightwatcher LAYER_TYPE
This can detect basic Keras and PyTorch classes by type, and will try to infer the type otherwise. """
the_type = LAYER_TYPE.UNKNOWN
typestr = (str(type(layer))).lower()
# Keras TF 2.x types
if isinstance(layer, keras.layers.Dense):
the_type = LAYER_TYPE.DENSE
elif isinstance(layer, keras.layers.Conv1D):
the_type = LAYER_TYPE.CONV1D
elif isinstance(layer, keras.layers.Conv2D):
the_type = LAYER_TYPE.CONV2D
elif isinstance(layer, keras.layers.Flatten):
the_type = LAYER_TYPE.FLATTENED
elif isinstance(layer, keras.layers.Embedding):
the_type = LAYER_TYPE.EMBEDDING
elif isinstance(layer, tf.keras.layers.LayerNormalization):
the_type = LAYER_TYPE.NORM
# PyTorch
elif isinstance(layer, nn.Linear):
the_type = LAYER_TYPE.DENSE
elif isinstance(layer, nn.Conv1d):
the_type = LAYER_TYPE.CONV1D
elif isinstance(layer, nn.Conv2d):
the_type = LAYER_TYPE.CONV2D
elif isinstance(layer, nn.Embedding):
the_type = LAYER_TYPE.EMBEDDING
elif isinstance(layer, nn.LayerNorm):
the_type = LAYER_TYPE.NORM
# ONNX
elif isinstance(layer,ONNXLayer):
the_type = layer.the_type
# allow user to specify model type with file mapping
# try to infer type (i.e for huggingface)
elif typestr.endswith(".linear'>"):
the_type = LAYER_TYPE.DENSE
elif typestr.endswith(".dense'>"):
the_type = LAYER_TYPE.DENSE
elif typestr.endswith(".conv1d'>"):
the_type = LAYER_TYPE.CONV1D
elif typestr.endswith(".conv2d'>"):
the_type = LAYER_TYPE.CONV2D
return the_type
def make_weights(self):
""" Constructor for WWLayer class. Make a ww (wrapper)_layer from a framework layer, or return None if layer is skipped.
In particular , late uses specify filter on layer ids and names """
has_weights = False;
if not self.skipped:
has_weights, weights, has_biases, biases = self.get_weights_and_biases()
self.has_weights = has_weights
self.has_biases = has_biases
if has_biases:
self.biases = biases
if has_weights:
self.weights = weights
self.set_weight_matrices(weights)
return self
def get_weights_and_biases(self):
"""extract the original weights (as a tensor) for the layer, and biases for the layer, if present
"""
has_weights, has_biases = False, False
weights, biases = None, None
if self.framework == FRAMEWORK.PYTORCH:
if hasattr(self.layer, 'weight'):
w = [np.array(self.layer.weight.data.clone().cpu())]
if self.the_type==LAYER_TYPE.CONV2D:
weights = w[0]
biases = None
has_weights = True
elif self.the_type==LAYER_TYPE.CONV1D:
weights = w[0]
biases = None
has_weights = True
elif self.the_type==LAYER_TYPE.EMBEDDING:
weights = w[0]
biases = None
has_weights = True
elif self.the_type==LAYER_TYPE.DENSE:
weights = w[0]
#biases = w[1]
has_weights = True
#has_biases = True
else:
logger.warn("pytorch layer: {} type {} not found ".format(str(self.layer),str(self.the_type)))
elif self.framework == FRAMEWORK.KERAS:
w = self.layer.get_weights()
if self.the_type==LAYER_TYPE.CONV2D:
weights = w[0]
biases = None
has_weights = True
elif self.the_type==LAYER_TYPE.CONV1D:
weights = w[0]
biases = None
has_weights = True
elif self.the_type==LAYER_TYPE.EMBEDDING:
weights = w[0]
biases = None
has_weights = True
elif self.the_type==LAYER_TYPE.DENSE:
weights = w[0]
biases = w[1]
has_weights = True
has_biases = True
else:
logger.warn("keras layer: {} {} type {} not found ".format(self.layer.name,str(self.layer),str(self.the_type)))
elif self.framework == FRAMEWORK.ONNX:
onnx_layer = self.layer
weights = onnx_layer.get_weights()
has_weights = True
return has_weights, weights, has_biases, biases
def set_weight_matrices(self, weights):#, conv2d_fft=False, conv2d_norm=True):
"""extract the weight matrices from the framework layer weights (tensors)
sets the weights and detailed properties on the ww (wrapper) layer
conv2d_fft not supported yet """
if not self.has_weights:
logger.info("Layer {} {} has no weights".format(self.layer_id, self.name))
return
the_type = self.the_type
conv2d_fft = self.params['conv2d_fft']
N, M, n_comp, rf = 0, 0, 0, None
Wmats = []
# this may change if we treat Conv1D differently layer
if (the_type == LAYER_TYPE.DENSE or the_type == LAYER_TYPE.CONV1D or the_type==LAYER_TYPE.EMBEDDING):
Wmats = [self.weights]
N, M = np.max(Wmats[0].shape), np.min(Wmats[0].shape)
n_comp = M
rf = 1
# this is very slow with describe
elif the_type == LAYER_TYPE.CONV2D:
if not conv2d_fft:
Wmats, N, M, rf = self.conv2D_Wmats(weights, self.channels)
n_comp = M
else:
Wmats, N, M, n_comp = self.get_conv2D_fft(weights)
elif the_type == LAYER_TYPE.NORM:
logger.info("Layer id {} Layer norm has no matrices".format(self.layer_id))
else:
logger.info("Layer id {} unknown type {} layer {}".format(self.layer_id, the_type, type(self.layer)))
self.N = N
self.M = M
self.rf = rf
self.Wmats = Wmats
self.num_components = n_comp
return
def get_conv2D_fft(self, W, n=32):
"""Compute FFT of Conv2D channels, to apply SVD later"""
logger.info("get_conv2D_fft on W {}".format(W.shape))
# is pytorch or tensor style
s = W.shape
logger.debug(" Conv2D SVD ({}): Analyzing ...".format(s))
N, M, imax, jmax = s[0], s[1], s[2], s[3]
# probably better just to check what col N is in
if N + M >= imax + jmax:
logger.debug("[2,3] tensor shape detected: {}x{} (NxM), {}x{} (i,j)".format(N, M, imax, jmax))
fft_axes = [2, 3]
else:
N, M, imax, jmax = imax, jmax, N, M
fft_axes = [0, 1]
logger.debug("[1,2] tensor shape detected: {}x{} (NxM), {}x{} (i,j)".format(N, M, imax, jmax))
# Switch N, M if in wrong order
if N < M:
M, N = N, M
# receptive_field / kernel size
rf = np.min([imax, jmax])
# aspect ratio
Q = N / M
# num non-zero eigenvalues rf is receptive field size (sorry calculated again here)
n_comp = rf * N * M
logger.info("N={} M={} n_comp {} ".format(N, M, n_comp))
# run FFT on each channel
fft_grid = [n, n]
fft_coefs = np.fft.fft2(W, fft_grid, axes=fft_axes)
return [fft_coefs], N, M, n_comp
def channel_str(self, channel):
if channel==CHANNELS.FIRST:
return "FIRST"
elif channel==CHANNELS.LAST:
return "LAST"
else:
return "UNKNOWN"
def conv2D_Wmats(self, Wtensor, channels=CHANNELS.UNKNOWN):
"""Extract W slices from a 4 layer_id conv2D tensor of shape: (N,M,i,j) or (M,N,i,j).
Return ij (N x M) matrices, with receptive field size (rf) and channels flag (first or last)"""
logger.debug("conv2D_Wmats")
# TODO: detect or use channels
# if channels specified ...
Wmats = []
s = Wtensor.shape
N, M, imax, jmax = s[0], s[1], s[2], s[3]
if N + M >= imax + jmax:
detected_channels = CHANNELS.LAST
else:
detected_channels = CHANNELS.FIRST
if channels == CHANNELS.UNKNOWN :
logger.debug("channles UNKNOWN, detected {}".format(self.channel_str(detected_channels)))
channels = detected_channels
if detected_channels == channels:
if channels == CHANNELS.LAST:
logger.debug("Channels Last tensor shape: {}x{} (NxM), {}x{} (i,j)".format(N, M, imax, jmax))
for i in range(imax):
for j in range(jmax):
W = Wtensor[:, :, i, j]
if W.shape[0] < W.shape[1]:
W = W.T
Wmats.append(W)
else: #channels == CHANNELS.FIRST # i, j, M, N
M, N, imax, jmax = imax, jmax, N, M
# check this
logger.debug("Channels First shape: {}x{} (NxM), {}x{} (i,j)".format(N, M, imax, jmax))
for i in range(imax):
for j in range(jmax):
W = Wtensor[i, j, :, :]
if W.shape[1] < W.shape[0]:
W = W.T
Wmats.append(W)
elif detected_channels != channels:
logger.warn("warning, expected channels {}, detected channels {}".format(self.channel_str(channels),self.channel_str(detected_channels)))
# flip how we extract the WMats
# reverse of above extraction
if detected_channels == CHANNELS.LAST:
logger.debug("Flipping LAST to FIRST Channel, {}x{} ()x{}".format(N, M, imax, jmax))
for i in range(N):
for j in range(M):
W = Wtensor[i, j,:,:]
if imax < jmax:
W = W.T
Wmats.append(W)
else: #detected_channels == CHANNELS.FIRST:
N, M, imax, jmax = imax, jmax, N, M
logger.debug("Flipping FIRST to LAST Channel, {}x{} ()x{}".format(N, M, imax, jmax))
# check this
for i in range(N):
for j in range(M):
W = Wtensor[:, :, i, j]
if imax < jmax:
W = W.T
Wmats.append(W)
# final flip
N, M, imax, jmax = imax, jmax, N, M
rf = imax * jmax # receptive field size
logger.debug("get_conv2D_Wmats N={} M={} rf= {} channels = {}".format(N, M, rf, channels))
return Wmats, N, M, rf
class ModelIterator:
"""Iterator that loops over ww wrapper layers, with original matrices (tensors) and biases (optional) available."""
def __init__(self, model, params=DEFAULT_PARAMS):
self.params = params
self.k = 0
logger.debug("FRAMEWORKS: KERAS = {} PYTORCH = {} ONNX = {} UNKNOWN = {} ".format(FRAMEWORK.KERAS, FRAMEWORK.PYTORCH, FRAMEWORK.ONNX, FRAMEWORK.UNKNOWN))
logger.debug("FIRST = {} LAST = {} UNKNOWN = {} ".format(CHANNELS.FIRST, CHANNELS.LAST, CHANNELS.UNKNOWN))
self.model = model
self.framework = self.set_framework()
self.channels = self.set_channels(params.get('channels'))
logger.debug("MODEL ITERATOR, framework = {}, channels = {} ".format(self.framework, self.channels))
self.model_iter = self.model_iter_(model)
self.layer_iter = self.make_layer_iter_()
def set_framework(self):
"""infer the framework """
framework = FRAMEWORK.UNKNOWN
if hasattr(self.model, 'layers'):
framework = FRAMEWORK.KERAS
elif hasattr(self.model, 'modules'):
framework = FRAMEWORK.PYTORCH
elif isinstance(self.model, onnx.onnx_ml_pb2.ModelProto):
framework = FRAMEWORK.ONNX
return framework
def __iter__(self):
return self
# Python 3 compatibility
def __next__(self):
return self.next()
def next(self):
curr_layer = next(self.layer_iter)
if curr_layer:
return curr_layer
else:
raise StopIteration()
def model_iter_(self, model):
"""Return a generator for iterating over the layers in the model.
Also detects the framework being used.
Used by base class and child classes to iterate over the framework layers """
layer_iter = None
if self.framework == FRAMEWORK.KERAS:
def layer_iter_():
def traverse_(layer):
"not recursive, just iterate over all submodules if present"
if not hasattr(layer, 'submodules') or len(layer.submodules)==0:
yield layer
else:
for sublayer in layer.submodules:
yield sublayer
for layer in model.layers:
yield from traverse_(layer)
layer_iter = layer_iter_()
elif self.framework == FRAMEWORK.PYTORCH:
def layer_iter_():
for layer in model.modules():
yield layer
layer_iter = layer_iter_()
elif self.framework == FRAMEWORK.ONNX:
def layer_iter_():
for inode, node in enumerate(model.graph.initializer):
yield ONNXLayer(model, inode, node)
layer_iter = layer_iter_()
else:
layer_iter = None
return layer_iter
def make_layer_iter_(self):
"""The layer iterator for this class / instance.
Override this method to change the type of iterator used by the child class"""
return self.model_iter
def set_channels(self, channels=None):
"""Set the channels flag for the framework, with the ability to override"""
the_channel = CHANNELS.UNKNOWN
if channels is None:
if self.framework == FRAMEWORK.KERAS:
the_channel = CHANNELS.FIRST
elif self.framework == FRAMEWORK.PYTORCH:
the_channel = CHANNELS.LAST
elif self.framework == FRAMEWORK.ONNX:
the_channel = CHANNELS.LAST
elif isinstance(channels, str):
if channels.lower()=='first':
the_channel=CHANNELS.FIRST
elif channels.lower()=='last':
the_channel=CHANNELS.LAST
return the_channel
class WWLayerIterator(ModelIterator):
"""Creates an iterator that generates WWLayer wrapper objects to the model layers"""
def __init__(self, model, params=DEFAULT_PARAMS, filters=[]):
super().__init__(model, params=params)
self.filter_ids = []
self.filter_types = []
self.filter_names = []
if type(filters) is not list:
filters = [filters]
for f in filters:
tf = type(f)
if tf is LAYER_TYPE:
logger.info("Filtering layer by type {}".format(str(f)))
self.filter_types.append(f)
elif tf is int:
logger.info("Filtering layer by id {}".format(f))
self.filter_ids.append(f)
elif tf is str:
logger.info("Filtering layer by name {}".format(f))
self.filter_names.append(f)
else:
logger.warn("unknown filter type {} detected and ignored".format(tf))
def apply_filters(self, ww_layer):
"""Apply filters. Set skipped False if filter is applied to this layer, keeping the layer (or no filters, meaning all layers kept)"""
ww_layer.skipped = False
if self.filter_types is not None and len(self.filter_types) > 0:
if ww_layer.the_type in self.filter_types:
logger.info("keeping layer {} {} with type {} ".format(ww_layer.layer_id, ww_layer.name , str(ww_layer.the_type)))
ww_layer.skipped = False
else:
logger.info("skipping layer {} {} with type {} ".format(ww_layer.layer_id, ww_layer.name , str(ww_layer.the_type)))
ww_layer.skipped = True
if self.filter_ids is not None and len(self.filter_ids) > 0:
# keep positive layer ids
if np.min(self.filter_ids) > 0:
if ww_layer.layer_id in self.filter_ids:
logger.info("keeping layer {} {} by id".format(ww_layer.layer_id, ww_layer.name))
ww_layer.skipped = False
else:
logger.info("skipping layer {} {} by id".format(ww_layer.layer_id, ww_layer.name))
ww_layer.skipped = True
# or remove negative layer ids
elif np.min(self.filter_ids) < 0:
if -(ww_layer.layer_id) in self.filter_ids:
logger.info("skipping layer {} {} by id".format(ww_layer.layer_id, ww_layer.name))
ww_layer.skipped = True
else:
logger.info("keeping layer {} {} by id".format(ww_layer.layer_id, ww_layer.name))
ww_layer.skipped = False
if self.filter_names is not None and len(self.filter_names) > 0:
if ww_layer.name in self.filter_names:
logger.info("keeping layer {} {} by name ".format(ww_layer.layer_id, ww_layer.name))
ww_layer.skipped = False
else:
logger.info("skipping layer {} {} by name ".format(ww_layer.layer_id, ww_layer.name))
ww_layer.skipped = True
return ww_layer.skipped
def ww_layer_iter_(self):
"""Create a generator for iterating over ww_layers, created lazily """
for curr_layer in self.model_iter:
curr_id, self.k = self.k, self.k + 1
ww_layer = WWLayer(curr_layer, layer_id=curr_id,
framework=self.framework,
channels=self.channels,
params=self.params)
self.apply_filters(ww_layer)
if not self.layer_supported(ww_layer):
ww_layer.skipped = True
if not ww_layer.skipped:
yield ww_layer
def make_layer_iter_(self):
return self.ww_layer_iter_()
def layer_supported(self, ww_layer):
"""Return true if this kind of layer is supported"""
supported = False
layer_id = ww_layer.layer_id
name = ww_layer.name
the_type = ww_layer.the_type
rf = ww_layer.rf
M = ww_layer.M
N = ww_layer.N
min_evals = self.params.get('min_evals')
max_evals = self.params.get('max_evals')
ww2x = self.params.get('ww2x')
logger.debug("layer_supported N {} max evals {}".format(N, max_evals))
if ww_layer.skipped:
logger.debug("Layer {} {} is skipped".format(layer_id, name))
elif not ww_layer.has_weights:
logger.debug("layer not supported: Layer {} {} has no weights".format(layer_id, name))
return False
elif the_type is LAYER_TYPE.UNKNOWN:
logger.debug("layer not supported: Layer {} {} type {} unknown".format(layer_id, name, the_type))
return False
elif the_type in [LAYER_TYPE.FLATTENED, LAYER_TYPE.NORM]:
logger.debug("layer not supported: Layer {} {} type {} not supported".format(layer_id, name, the_type))
return False
elif ww2x and min_evals and M < min_evals:
logger.debug("layer not supported: Layer {} {}: num_evals {} < min_evals {}".format(layer_id, name, M, min_evals))
return False
elif ww2x and max_evals and N > max_evals:
logger.debug("layer not supported: Layer {} {}: num_evals {} > max_evals {}".format(layer_id, name, N, max_evals))
return False
elif (not ww2x) and min_evals and M * rf < min_evals:
logger.debug("layer not supported: Layer {} {}: num_evals {} < min_evals {}".format(layer_id, name, M * rf, min_evals))
return False
elif (not ww2x) and max_evals and N * rf > max_evals:
logger.debug("layer not supported: Layer {} {}: num_evals {} > max_evals {}".format(layer_id, name, N * rf, max_evals))
return False
elif the_type in [LAYER_TYPE.DENSE, LAYER_TYPE.CONV1D, LAYER_TYPE.CONV2D, LAYER_TYPE.EMBEDDING]:
supported = True
return supported
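# Illustrative usage sketch (not part of the original module; 'model' is a
# placeholder for any Keras or PyTorch model): iterate only over Dense layers and
# log the extracted N x M matrix shapes.
def _example_iterate_dense_layers(model):
    for ww_layer in WWLayerIterator(model, filters=[LAYER_TYPE.DENSE]):
        logger.info("layer %s %s: N=%s M=%s",
                    ww_layer.layer_id, ww_layer.name, ww_layer.N, ww_layer.M)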
class WW2xSliceIterator(WWLayerIterator):
"""Iterator variant that breaks Conv2D layers into slices for back compatability"""
from copy import deepcopy
def ww_slice_iter_(self):
for ww_layer in self.ww_layer_iter_():
if ww_layer.the_type == LAYER_TYPE.CONV2D:
count = len(ww_layer.Wmats)
for iw, W in enumerate(ww_layer.Wmats):
ww_slice = deepcopy(ww_layer)
ww_slice.Wmats = [W]
ww_slice.conv2d_count = count
ww_slice.add_column("slice_id", iw)
yield ww_slice
else:
ww_layer.add_column("slice_id", 0)
yield ww_layer
def make_layer_iter_(self):
return self.ww_slice_iter_()
class WWIntraLayerIterator(WW2xSliceIterator):
"""Iterator variant that iterates over N-1 layer pairs, forms ESD for cross correlations"""
from copy import deepcopy
prev_layer = None
def ww_intralayer_iter_(self):
# TODO: detect the layer ordering and flip accordingly
# for all layers the same way
def align_mats(W0, W1):
logger.info("aligning {} {}".format(W0.shape, W1.shape))
# M x N
if W0.shape[0] > W0.shape[1]:
logger.debug("fliping W0")
W0 = np.transpose(W0)
# N x M
if W0.shape[1] != W1.shape[0]:
logger.debug("fliping W1 to match W0")
W1 = np.transpose(W1)
logger.info("aligned {} {}".format(W0.shape, W1.shape))
return W0, W1
## Need to look at all W, currently just doing 1
for ww_layer in self.ww_layer_iter_():
if self.prev_layer is None:
self.prev_layer = deepcopy(ww_layer)
else:
name = "{} X {} ".format(self.prev_layer.layer_id, ww_layer.layer_id)
logger.info("Analyzing {} ".format(name))
W0 = self.prev_layer.Wmats[0]
W1 = ww_layer.Wmats[0]
W0, W1 = align_mats(W0, W1)
self.prev_layer = deepcopy(ww_layer)
ww_intralayer = deepcopy(ww_layer)
ww_intralayer.name = name
# NEED TO LOOK AT ALL LAYERS
ww_intralayer.count = 1
if W0.shape[1]!=W1.shape[0]:
logger.info(" {} not compatible, skipping".format(name))
else:
norm12 = np.linalg.norm(W0)*np.linalg.norm(W1)
X = np.dot(W0,W1)/(norm12)
ww_intralayer.Wmats = [X]
ww_intralayer.N = np.max(X.shape)
ww_intralayer.M = np.min(X.shape)
ww_intralayer.add_column("Xflag", True)
yield ww_intralayer
def make_layer_iter_(self):
return self.ww_intralayer_iter_()
class WeightWatcher(object):
def __init__(self, model=None, log_level=None):
if log_level:
logger.setLevel(log_level)
self.model = self.load_model(model)
self.details = None
logger.info(self.banner())
def header(self):
"""WeightWatcher v0.1.dev0 by Calculation Consulting"""
# from weightwatcher import __name__, __version__, __author__, __description__, __url__
# return "{} v{} by {}\n{}\n{}".format(__name__, __version__, __author__, __description__, __url__)
return ""
def banner(self):
versions = "\npython version {}".format(sys.version)
versions += "\nnumpy version {}".format(np.__version__)
versions += "\ntensforflow version {}".format(tf.__version__)
versions += "\nkeras version {}".format(tf.keras.__version__)
return "\n{}{}".format(self.header(), versions)
def __repr__(self):
        done = self.details is not None
txt = "\nAnalysis done: {}".format(done)
return "{}{}".format(self.header(), txt)
# TODO: get rid of this or extend to be more generally useful
def load_model(self, model):
"""load the model from a file, only works for keras right now"""
res = model
if isinstance(model, str):
if os.path.isfile(model):
logger.info("Loading model from file '{}'".format(model))
res = load_model(model)
else:
logger.error("Loading model from file '{}': file not found".format(model))
return res
# TODO: implement
def same_models(self, model_1, model_2):
"""Compare models to see if the are the same architecture.
Not really impelemnted yet"""
same = True
layer_iter_1 = WWLayerIterator(model_1)
layer_iter_2 = WWLayerIterator(model_2)
same = layer_iter_1.framework == layer_iter_2.framework
return same
def distances(self, model_1, model_2):
"""Compute the distances between model_1 and model_2 for each layer.
Reports Frobenius norm of the distance between each layer weights (tensor)
< ||W_1-W_2|| >
output: avg delta W, a details dataframe
models should be the same size and from the same framework
"""
# check and throw exception if inputs incorrect
# TODO: review design here...may need something else
# need to:
# . - iterate over all layers and check
# . - inspect framework by framework
# . - check here instead
#
same = True
layer_iter_1 = WWLayerIterator(model_1)
layer_iter_2 = WWLayerIterator(model_2)
same = layer_iter_1.framework == layer_iter_2.framework
if not same:
raise Exception("Sorry, models are from different frameworks")
details = pd.DataFrame(columns=['layer_id', 'name', 'delta_W', 'delta_b', 'W_shape', 'b_shape'])
data = {}
try:
for layer_1, layer_2 in zip(layer_iter_1, layer_iter_2):
data['layer_id'] = layer_1.layer_id
data['name'] = layer_1.name
if layer_1.has_weights:
data['delta_W'] = np.linalg.norm(layer_1.weights - layer_2.weights)
data['W_shape'] = layer_1.weights.shape
if layer_1.has_biases:
data['delta_b'] = np.linalg.norm(layer_1.biases - layer_2.biases)
data['b_shape'] = layer_1.biases.shape
details = details.append(data, ignore_index=True)
except:
logger.error("Sorry, problem comparing models")
raise Exception("Sorry, problem comparing models")
        details.set_index('layer_id', inplace=True)
avg_dW = np.mean(details['delta_W'].to_numpy())
return avg_dW, details
def combined_eigenvalues(self, Wmats, N, M, n_comp, params):
"""Compute the eigenvalues for all weights of the NxM weight matrices (N >= M),
combined into a single, sorted, numpy array
Applied normalization and glorot_fix if specified
Assumes an array of weights comes from a conv2D layer and applies conv2d_norm normalization by default
Also returns max singular value and rank_loss, needed for other calculations
"""
all_evals = []
max_sv = 0.0
rank_loss = 0
# TODO: allow user to specify
normalize = params['normalize']
glorot_fix = params['glorot_fix']
conv2d_norm = params['conv2d_norm'] # True
if type(Wmats) is not list:
logger.debug("combined_eigenvalues: Wmats -> [WMmats]")
Wmats = [Wmats]
count = len(Wmats)
for W in Wmats:
Q = N / M
# SVD can be swapped out here
# svd = TruncatedSVD(n_components=M-1, n_iter=7, random_state=10)
W = W.astype(float)
logger.debug("Running full SVD: W.shape={} n_comp = {}".format(W.shape, n_comp))
sv = np.linalg.svd(W, compute_uv=False)
sv = sv.flatten()
sv = np.sort(sv)[-n_comp:]
# TODO: move to PL fit for robust estimator
# if len(sv) > max_evals:
# #logger.info("chosing {} singular values from {} ".format(max_evals, len(sv)))
# sv = np.random.choice(sv, size=max_evals)
# sv = svd.singular_values_
evals = sv * sv
if normalize:
evals = evals / N
all_evals.extend(evals)
max_sv = np.max([max_sv, np.max(sv)])
rank_loss = rank_loss + calc_rank_loss(sv, N)
return np.sort(np.array(all_evals)), max_sv, rank_loss
def apply_normalize_Wmats(self, ww_layer, params=DEFAULT_PARAMS):
"""Normalize the W matrix or Wmats """
normalize = params['normalize']
glorot_fix = params['glorot_fix']
conv2d_norm = params['conv2d_norm']
M = ww_layer.M
N = ww_layer.N
rf = ww_layer.rf
        norm = ww_layer.w_norm  # should be 1.0 unless reset for some reason
Wmats = ww_layer.Wmats
new_Wmats = []
if type(Wmats) is not list:
logger.debug("combined_eigenvalues: Wmats -> [WMmats]")
Wmats = [Wmats]
for W in Wmats:
# not really used
rf_size = ww_layer.conv2d_count
check, checkTF = self.glorot_norm_check(W, N, M, rf_size)
if glorot_fix:
norm = self.glorot_norm_fix(W, N, M, rf_size)
elif conv2d_norm and ww_layer.the_type is LAYER_TYPE.CONV2D:
# w_norm is reset in slices to fix this
norm = np.sqrt(ww_layer.conv2d_count/2.0)
if normalize and not glorot_fix:
norm = 1 / np.sqrt(N)
W = W * norm
ww_layer.w_norm = norm
new_Wmats.append(W)
ww_layer.Wmats = new_Wmats
# TODO: set np linalg norm, <AVG over layers>
# change from NORM that is computed...
return ww_layer
def apply_esd(self, ww_layer, params=DEFAULT_PARAMS):
"""run full SVD on layer weight matrices, compute ESD on combined eigenvalues, combine all, and save to layer """
layer_id = ww_layer.layer_id
name = ww_layer.name
the_type = ww_layer.the_type
M = ww_layer.M
N = ww_layer.N
rf = ww_layer.rf
logger.debug("apply ESD on Layer {} {} ".format(layer_id, name))
logger.debug("running SVD on Layer {} {} ".format(layer_id, name))
logger.debug("params {} ".format(params))
Wmats = ww_layer.Wmats
n_comp = ww_layer.num_components
evals, sv_max, rank_loss = self.combined_eigenvalues(Wmats, N, M, n_comp, params)
ww_layer.evals = evals
ww_layer.add_column("has_esd", True)
ww_layer.add_column("num_evals", len(evals))
ww_layer.add_column("sv_max", sv_max)
ww_layer.add_column("rank_loss", rank_loss)
ww_layer.add_column("lambda_max", np.max(evals))
return ww_layer
def apply_random_esd(self, ww_layer, params=DEFAULT_PARAMS):
"""Randomize the layer weight matrices, compute ESD on combined eigenvalues, combine all, and save to layer """
layer_id = ww_layer.layer_id
name = ww_layer.name
the_type = ww_layer.the_type
M = ww_layer.M
N = ww_layer.N
rf = ww_layer.rf
logger.debug("apply random ESD on Layer {} {} ".format(layer_id, name))
logger.debug("running SVD on Layer {} {} ".format(layer_id, name))
logger.debug("params {} ".format(params))
Wmats = ww_layer.Wmats
n_comp = ww_layer.num_components
num_replicas = 1
# hack to improve random estimator if we don't have that many evals
if n_comp < 100:
num_replicas = 5
rand_evals = self.random_eigenvalues(Wmats, n_comp, num_replicas , params)
ww_layer.rand_evals = rand_evals
ww_layer.add_column("max_rand_eval", np.max(rand_evals))
if params['plot']:
self.plot_random_esd(ww_layer, params)
return ww_layer
# Not used yet
def apply_plot_esd(self, ww_layer, params=DEFAULT_PARAMS):
"""Plot the ESD on regular and log scale. Only used when powerlaw fit not called"""
evals = ww_layer.evals
name = ww_layer.name
plt.title(name)
plt.hist(evals, bins=100)
plt.show()
plt.title(name)
plt.hist(np.log10(evals), bins=100)
plt.show()
return ww_layer
def apply_fit_powerlaw(self, ww_layer, params=DEFAULT_PARAMS):
"""Plot the ESD on regular and log scale. Only used when powerlaw fit not called"""
evals = ww_layer.evals
layer_id = ww_layer.layer_id
name = ww_layer.name
title = "{} {}".format(layer_id, name)
xmin = None # TODO: allow other xmin settings
xmax = np.max(evals)
plot = params['plot']
sample = False # TODO: decide if we want sampling for large evals
sample_size = None
savefig = params['savefig']
layer_name = "Layer {}".format(layer_id)
alpha, xmin, xmax, D, sigma, num_pl_spikes, best_fit = self.fit_powerlaw(evals, xmin=xmin, xmax=xmax, plot=plot, layer_name=layer_name, layer_id=layer_id, sample=sample, sample_size=sample_size, savefig=savefig)
ww_layer.add_column('alpha', alpha)
ww_layer.add_column('xmin', xmin)
ww_layer.add_column('xmax', xmax)
ww_layer.add_column('D', D)
ww_layer.add_column('sigma', sigma)
ww_layer.add_column('num_pl_spikes', num_pl_spikes)
ww_layer.add_column('best_fit', best_fit)
return ww_layer
# test with https://github.com/osmr/imgclsmob/blob/master/README.md
def analyze(self, model=None, layers=[], min_evals=0, max_evals=None,
min_size=None, max_size=None, # deprecated
normalize=False, glorot_fix=False, plot=False, randomize=False, savefig=False,
mp_fit=False, conv2d_fft=False, conv2d_norm=True, ww2x=False, rescale=True,
deltas=False, intra=False, channels=None):
"""
Analyze the weight matrices of a model.
layers:
List of layer ids. If empty, analyze all layers (default)
If layer ids < 0, then skip the layers specified
All layer ids must be > 0 or < 0
min_evals:
Minimum number of evals (M*rf)
max_evals:
Maximum number of evals (N*rf) (0 = no limit)
normalize:
Normalize the X matrix. Usually True for Keras, False for PyTorch.
            Ignored if glorot_fix is set
glorot_fix:
Adjust the norm for the Glorot Normalization.
alphas:
# deprecated
Compute the power laws (alpha) of the weight matrices.
Time consuming so disabled by default (use lognorm if you want speed)
lognorms:
# deprecated
Compute the log norms of the weight matrices.
this is always computed now
spectralnorms:
# deprecated
Compute the spectral norm (max eigenvalue) of the weight matrices.
this is always computed now
softranks:
# deprecated
Compute the soft norm (i.e. StableRank) of the weight matrices.
this is always computed now
mp_fit:
Compute the best Marchenko-Pastur fit of each weight matrix ESD
For square matrices, also applies the Quarter-Circle (QC) law
randomize:
Randomizes the W matrices, plots the ESD and fits to the MP distribution
            Attempts to find Correlation Traps by computing the number of spikes for the randomized ESD
conv2d_fft: N/A yet
For Conv2D layers, use FFT method. Otherwise, extract and combine the weight matrices for each receptive field
            Note: for conv2d_fft, the ESD is automatically subsampled to at most max_evals eigenvalues (N/A yet)
            Cannot be used with ww2x
ww2x:
Use weightwatcher version 0.2x style iterator, which slices up Conv2D layers in N=rf matrices
savefig:
Save the figures generated in png files. Default: False
if True, saves all figures to the current directory
            N/A yet: If set to a folder name, creates and saves the images to this folder (i.e. savefig="images")
rescale:
            Rescale the ESDs when computing the MP fits (experimental; should always be True)
            N/A yet: rescales the plots back to the original scale
deltaEs:
Compute and plot the deltas of the eigenvalues; only works if plot=True.
            Plots both as a sequence of deltaEs and a histogram (level statistics)
intra:
Analyze IntraLayer Correlations
Experimental option
        channels: None | 'first' | 'last'
re/set the channels from the default for the framework
evecs: N/A yet
Compute the eigenvectors and plots various metrics, including the vector entropy and localization statistics,
both as a sequence (elbow plots) and as histograms
Warning: this takes more memory
N/A yet
params: N/A yet
            a dictionary of default parameters, which can be set but will be over-written by the explicit keyword arguments above
"""
model = model or self.model
if min_size or max_size:
logger.warn("min_size and max_size options changed to min_evals, max_evals, ignored for now")
# I need to figure this out
# can not specify params on input yet
# maybe just have a different analyze() that only uses this
params=DEFAULT_PARAMS
params['min_evals'] = min_evals
params['max_evals'] = max_evals
params['plot'] = plot
params['randomize'] = randomize
params['mp_fit'] = mp_fit
params['normalize'] = normalize
params['glorot_fix'] = glorot_fix
params['conv2d_norm'] = conv2d_norm
params['conv2d_fft'] = conv2d_fft
params['ww2x'] = ww2x
params['savefig'] = savefig
params['rescale'] = rescale
params['deltaEs'] = deltas
params['intra'] = intra
params['channels'] = channels
params['layers'] = layers
logger.info("params {}".format(params))
if not self.valid_params(params):
msg = "Error, params not valid: \n {}".format(params)
logger.error(msg)
raise Exception(msg)
if intra:
logger.info("Intra layer Analysis (experimental)")
layer_iterator = WWIntraLayerIterator(model, filters=layers, params=params)
elif ww2x:
logger.info("Using weightwatcher 0.2x style layer and slice iterator")
layer_iterator = WW2xSliceIterator(model, filters=layers, params=params)
else:
layer_iterator = WWLayerIterator(model, filters=layers, params=params)
details = pd.DataFrame(columns=['layer_id', 'name'])
for ww_layer in layer_iterator:
if not ww_layer.skipped and ww_layer.has_weights:
logger.info("LAYER: {} {} : {}".format(ww_layer.layer_id, ww_layer.the_type, type(ww_layer.layer)))
self.apply_normalize_Wmats(ww_layer, params)
self.apply_esd(ww_layer, params)
if ww_layer.evals is not None:
self.apply_fit_powerlaw(ww_layer, params)
if params['mp_fit']:
logger.info("MP Fitting Layer: {} {} ".format(ww_layer.layer_id, ww_layer.name))
self.apply_mp_fit(ww_layer, random=False, params=params)
if params['deltaEs'] and params['plot']:
logger.info("Cpmputing and Plotting Deltas: {} {} ".format(ww_layer.layer_id, ww_layer.name))
self.apply_plot_deltaEs(ww_layer, random=False, params=params)
if params['randomize']:# params['mp_fit']:
logger.info("Randomizing Layer: {} {} ".format(ww_layer.layer_id, ww_layer.name))
self.apply_random_esd(ww_layer, params)
logger.info("MP Fitting Random layer: {} {} ".format(ww_layer.layer_id, ww_layer.name))
self.apply_mp_fit(ww_layer, random=True, params=params)
if params['deltaEs'] and params['plot']:
logger.info("Cpmputing and Plotting Deltas: {} {} ".format(ww_layer.layer_id, ww_layer.name))
self.apply_plot_deltaEs(ww_layer, random=True, params=params)
self.apply_norm_metrics(ww_layer, params)
#all_evals.extend(ww_layer.evals)
# TODO: add find correlation traps here
details = details.append(ww_layer.get_row(), ignore_index=True)
self.details = details
return details
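    # Illustrative usage sketch ('my_model' is a placeholder for a Keras or PyTorch model):
    #   watcher = WeightWatcher(model=my_model)
    #   details = watcher.analyze(randomize=False, plot=False)
    #   summary = watcher.get_summary(details)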
def get_details(self):
"""get the current details, created by analyze"""
return self.details
def get_summary(self, details=None):
"""Return metric averages, as dict, if available """
summary = {}
if details is None:
details = self.details
columns = []
if details is not None:
columns = details.columns
metrics = ["log_norm","alpha","alpha_weighted","log_alpha_norm", "log_spectral_norm","stable_rank","mp_softrank"]
for metric in metrics:
if metric in columns:
summary[metric]=details[metric].mean()
return summary
# test with https://github.com/osmr/imgclsmob/blob/master/README.md
def describe(self, model=None, layers=[], min_evals=0, max_evals=None,
min_size=None, max_size=None, # deprecated
normalize=False, glorot_fix=False, plot=False, randomize=False, savefig=False,
mp_fit=False, conv2d_fft=False, conv2d_norm=True, ww2x=False, rescale=True,
deltas=False, intra=False, channels=None):
"""
        Same as analyze(), but does not run the ESD or power-law fits
"""
model = model or self.model
if min_size or max_size:
logger.warn("min_size and max_size options changed to min_evals, max_evals, ignored for now")
params = DEFAULT_PARAMS
params['min_evals'] = min_evals
params['max_evals'] = max_evals
params['plot'] = plot
params['randomize'] = randomize
params['mp_fit'] = mp_fit
params['normalize'] = normalize
params['glorot_fix'] = glorot_fix
params['conv2d_norm'] = conv2d_norm
params['conv2d_fft'] = conv2d_fft
params['ww2x'] = ww2x
params['savefig'] = savefig
params['rescale'] = rescale
params['deltaEs'] = deltas
params['intra'] = intra
params['channels'] = channels
params['layers'] = layers
logger.info("params {}".format(params))
if not self.valid_params(params):
msg = "Error, params not valid: \n {}".format(params)
logger.error(msg)
raise Exception(msg)
if intra:
logger.info("Intra layer Analysis (experimental)")
layer_iterator = WWIntraLayerIterator(model, filters=layers, params=params)
elif ww2x:
logger.info("Using weightwatcher 0.2x style layer and slice iterator")
layer_iterator = WW2xSliceIterator(model, filters=layers, params=params)
else:
layer_iterator = WWLayerIterator(model, filters=layers, params=params)
details = pd.DataFrame(columns=['layer_id', 'name'])
num_all_evals = 0
for ww_layer in layer_iterator:
if not ww_layer.skipped and ww_layer.has_weights:
logger.debug("LAYER TYPE: {} {} layer type {}".format(ww_layer.layer_id, ww_layer.the_type, type(ww_layer.layer)))
logger.debug("weights shape : {} max size {}".format(ww_layer.weights.shape, params['max_evals']))
if ww2x:
num_evals = ww_layer.M
elif conv2d_fft:
num_evals = ww_layer.num_components
else:
num_evals = ww_layer.M * ww_layer.rf
num_all_evals += num_evals
ww_layer.add_column('num_evals', num_evals)
details = details.append(ww_layer.get_row(), ignore_index=True)
return details
def valid_params(self, params):
"""Validate the input parametersm, return True if valid, False otherwise"""
valid = True
xmin = params.get('xmin')
if xmin and xmin not in [XMIN.UNKNOWN, XMIN.AUTO, XMIN.PEAK]:
logger.warn("param xmin unknown, ignoring {}".format(xmin))
valid = False
xmax = params.get('xmax')
if xmax and xmax not in [XMAX.UNKNOWN, XMAX.AUTO]:
logger.warn("param xmax unknown, ignoring {}".format(xmax))
valid = False
min_evals = params.get('min_evals')
max_evals = params.get('max_evals')
if min_evals and max_evals and min_evals >= max_evals:
logger.warn("min_evals {} > max_evals {}".format(min_evals, max_evals))
valid = False
elif max_evals and max_evals < -1:
logger.warn(" max_evals {} < -1 ".format(max_evals))
valid = False
# can not specify ww2x and conv2d_fft at same time
if params.get('ww2x') and params.get('conv2d_fft'):
logger.warn("can not specify ww2x and conv2d_fft")
valid = False
# can not specify intra and conv2d_fft at same time
if params.get('intra') and params.get('conv2d_fft'):
logger.warn("can not specify intra and conv2d_fft")
valid = False
# channels must be None, 'first', or 'last'
channels = params.get('channels')
if channels is not None and isinstance(channels,str):
if channels.lower() != 'first' and channels.lower() != 'last':
logger.warn("unknown channels {}".format(channels))
valid = False
# layer ids must be all positive or all negative
filters = params.get('layers')
if filters is not None:
filter_ids = [int(f) for f in filters if type(f) is int]
if len(filter_ids) > 0:
if np.max(filter_ids) > 0 and np.min(filter_ids) < 0:
logger.warn("layer filter ids must be all > 0 or < 0: {}".format(filter_ids))
valid = False
return valid
# # @deprecated
# def print_results(self, results=None):
# self.compute_details(results=results)
#
# # @deprecated
# def get_details(self, results=None):
# """
# Deprecated: returns a pandas dataframe with details for each layer
# """
# return self.details
# not used yet
# not used
def normalize_evals(self, evals, N, M):
"""Normalizee evals matrix by 1/N"""
logger.debug(" normalzing evals, N, M {},{},{}".format(N, M))
return evals / N
def glorot_norm_fix(self, W, N, M, rf_size):
"""Apply Glorot Normalization Fix """
kappa = np.sqrt(2 / ((N + M) * rf_size))
W = W / kappa
return W , 1/kappa
def pytorch_norm_fix(self, W, N, M, rf_size):
"""Apply pytorch Channel Normalization Fix
see: https://chsasank.github.io/vision/_modules/torchvision/models/vgg.html
"""
kappa = np.sqrt(2 / (N * rf_size))
W = W / kappa
return W
def glorot_norm_check(self, W, N, M, rf_size,
lower=0.5, upper=1.5):
"""Check if this layer needs Glorot Normalization Fix"""
kappa = np.sqrt(2 / ((N + M) * rf_size))
norm = np.linalg.norm(W)
check1 = norm / np.sqrt(N * M)
check2 = norm / (kappa * np.sqrt(N * M))
if (rf_size > 1) and (check2 > lower) and (check2 < upper):
return check2, True
elif (check1 > lower) & (check1 < upper):
return check1, True
else:
if rf_size > 1:
return check2, False
else:
return check1, False
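# Illustrative sketch (not from the original library; shapes below are arbitrary
# assumptions): how the Glorot constant kappa = sqrt(2 / ((N + M) * rf_size))
# used above relates to the Frobenius norm of a Glorot-initialized matrix.
#
#   import numpy as np
#   N, M, rf_size = 128, 64, 9
#   kappa = np.sqrt(2.0 / ((N + M) * rf_size))
#   W = np.random.normal(0.0, kappa, size=(N, M))
#   check2 = np.linalg.norm(W) / (kappa * np.sqrt(N * M))
#   # check2 is close to 1.0 for a Glorot-style init, which is the condition
#   # glorot_norm_check() tests against its lower/upper bounds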
def random_eigenvalues(self, Wmats, n_comp, num_replicas=1, params=DEFAULT_PARAMS):
"""Compute the eigenvalues for all weights of the NxM skipping layer, num evals ized weight matrices (N >= M),
combined into a single, sorted, numpy array.
see: combined_eigenvalues()
"""
normalize = params['normalize']
glorot_fix = params['glorot_fix']
conv2d_norm = params['conv2d_norm'] # True
all_evals = []
logger.info("generating {} replicas for each W of the random eigenvalues".format(num_replicas))
for num in range(num_replicas):
count = len(Wmats)
for W in Wmats:
M, N = np.min(W.shape), np.max(W.shape)
Q = N / M
Wrand = W.flatten()
np.random.shuffle(Wrand)
W = Wrand.reshape(W.shape)
W = W.astype(float)
logger.debug("Running Randomized Full SVD")
sv = np.linalg.svd(W, compute_uv=False)
sv = sv.flatten()
sv = np.sort(sv)[-n_comp:]
# sv = svd.singular_values_
evals = sv * sv
all_evals.extend(evals)
return np.sort(np.array(all_evals))
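# Minimal sketch (assumption, not library code) of the randomization step used
# above for a single weight matrix: shuffle the entries of W, reshape back,
# and take the squared singular values as the randomized eigenvalues.
#
#   import numpy as np
#   W = np.random.normal(size=(100, 50))
#   Wrand = W.flatten()
#   np.random.shuffle(Wrand)
#   Wrand = Wrand.reshape(W.shape)
#   rand_evals = np.sort(np.linalg.svd(Wrand, compute_uv=False) ** 2)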
def plot_random_esd(self, ww_layer, params=DEFAULT_PARAMS):
"""Plot histogram and log histogram of ESD and randomized ESD"""
savefig = params['savefig']
layer_id = ww_layer.layer_id
evals = ww_layer.evals
rand_evals = ww_layer.rand_evals
title = "Layer {} {}: ESD & Random ESD".format(ww_layer.layer_id,ww_layer.name)
nonzero_evals = evals[evals > 0.0]
nonzero_rand_evals = rand_evals[rand_evals > 0.0]
max_rand_eval = np.max(rand_evals)
plt.hist((nonzero_evals), bins=100, density=True, color='g', label='original')
plt.hist((nonzero_rand_evals), bins=100, density=True, color='r', label='random', alpha=0.5)
plt.axvline(x=(max_rand_eval), color='orange', label='max rand')
plt.title(title)
plt.xlabel(r" Eigenvalues $(\lambda)$")
plt.legend()
if savefig:
plt.savefig("ww.layer{}.randesd.1.png".format(layer_id))
plt.show()
plt.hist(np.log10(nonzero_evals), bins=100, density=True, color='g', label='original')
plt.hist(np.log10(nonzero_rand_evals), bins=100, density=True, color='r', label='random', alpha=0.5)
plt.axvline(x=np.log10(max_rand_eval), color='orange', label='max rand')
title = "Layer {} {}: Log10 ESD & Random ESD".format(ww_layer.layer_id,ww_layer.name)
plt.title(title)
plt.xlabel(r"Log10 Eigenvalues $(log_{10}\lambda)$")
plt.legend()
if savefig:
plt.savefig("ww.layer{}.randesd.2.png".format(layer_id))
plt.show()
# Move to RMT Util; should be a static function
#def calc_rank_loss(self, singular_values, M, lambda_max):
# """compute the rank loss for these singular given the tolerances
# """
# sv = singular_values
# tolerance = lambda_max * M * np.finfo(np.max(sv)).eps
# return np.count_nonzero(sv > tolerance, axis=-1)
def fit_powerlaw(self, evals, xmin=None, xmax=None, plot=True, layer_name="", layer_id=0, sample=False, sample_size=None, savefig=False):
"""Fit eigenvalues to powerlaw
if xmin is
'auto' or None, , automatically set this with powerlaw method
'peak' , try to set by finding the peak of the ESD on a log scale
if xmax is 'auto' or None, xmax = np.max(evals)
"""
num_evals = len(evals)
logger.debug("fitting power law on {} eigenvalues".format(num_evals))
# TODO: replace this with a robust sampler / estimator
# requires a lot of refactoring below
if sample and sample_size is None:
logger.info("setting sample size to default MAX_NUM_EVALS={}".format(MAX_NUM_EVALS))
sample_size = MAX_NUM_EVALS
if sample and num_evals > sample_size:
logger.warn("samping not implemented in production yet")
logger.info("chosing {} eigenvalues from {} ".format(sample_size, len(evals)))
evals = np.random.choice(evals, size=sample_size)
if xmax == XMAX.AUTO or xmax is XMAX.UNKNOWN or xmax is None:
xmax = np.max(evals)
if xmin == XMIN.AUTO or xmin is None:
fit = powerlaw.Fit(evals, xmax=xmax, verbose=False)
elif xmin == XMIN.PEAK:
nz_evals = evals[evals > 0.0]
num_bins = 100 # np.min([100, len(nz_evals)])
h = np.histogram(np.log10(nz_evals), bins=num_bins)
ih = np.argmax(h[0])
xmin2 = 10 ** h[1][ih]
xmin_range = (0.95 * xmin2, 1.05 * xmin2)
fit = powerlaw.Fit(evals, xmin=xmin_range, xmax=xmax, verbose=False)
else:
fit = powerlaw.Fit(evals, xmin=xmin, xmax=xmax, verbose=False)
alpha = fit.alpha
D = fit.D
sigma = fit.sigma
xmin = fit.xmin
xmax = fit.xmax
num_pl_spikes = len(evals[evals>=fit.xmin])
logger.debug("finding best distribution for fit")
all_dists = [TPL, POWER_LAW, LOG_NORMAL]#, EXPONENTIAL]
Rs = [0.0]
dists = [TPL]
for dist in all_dists[1:]:
R, p = fit.distribution_compare(dist, TPL, normalized_ratio=True)
if R > 0.1 and p > 0.05:
dists.append(dist)
Rs.append(R)
logger.info("compare dist={} R={} p={}".format(dist, R, p))
best_fit = dists[np.argmax(Rs)]
if plot:
fig2 = fit.plot_pdf(color='b', linewidth=0) # invisible
plot_loghist(evals[evals>(xmin/100)], bins=100, xmin=xmin)
fig2 = fit.plot_pdf(color='r', linewidth=2)
fit.power_law.plot_pdf(color='r', linestyle='--', ax=fig2)
title = "Log-Log ESD for {}\n".format(layer_name)
title = title + r"$\alpha=${0:.3f}; ".format(alpha) + \
r'$D_{KS}=$'+"{0:.3f}; ".format(D) + \
r"$\lambda_{min}=$"+"{0:.3f}".format(xmin) + "\n"
plt.title(title)
plt.legend()
if savefig:
plt.savefig("ww.layer{}.esd.png".format(layer_id))
plt.show()
# plot eigenvalue histogram
num_bins = 100 # np.min([100,len(evals)])
plt.hist(evals, bins=num_bins, density=True)
title = "Lin-Lin ESD for {}".format(layer_name)
plt.title(title)
plt.axvline(x=fit.xmin, color='red', label=r'$\lambda_{xmin}$')
plt.legend()
if savefig:
plt.savefig("ww.layer{}.esd2.png".format(layer_id))
plt.show()
# plot log eigenvalue histogram
nonzero_evals = evals[evals > 0.0]
plt.hist(np.log10(nonzero_evals), bins=100, density=True)
title = "Log-Lin ESD for {}".format(layer_name)
plt.title(title)
plt.axvline(x=np.log10(fit.xmin), color='red', label=r'$\lambda_{xmin}$')
plt.axvline(x=np.log10(fit.xmax), color='orange', label=r'$\lambda_{xmax}$')
plt.legend()
if savefig:
plt.savefig("ww.layer{}.esd3.png".format(layer_id))
plt.show()
# plot xmins vs D
plt.plot(fit.xmins, fit.Ds, label=r'$D_{KS}$')
plt.axvline(x=fit.xmin, color='red', label=r'$\lambda_{xmin}$')
#plt.plot(fit.xmins, fit.sigmas / fit.alphas, label=r'$\sigma /\alpha$', linestyle='--')
plt.xlabel(r'$x_{min}$')
plt.ylabel(r'$D_{KS}$')
title = r'$D_{KS}$' + ' vs.' + r'$x_{min},\;\lambda_{xmin}=$'
plt.title(title+"{:0.3}".format(fit.xmin))
plt.legend()
if savefig:
plt.savefig("ww.layer{}.esd4.png".format(layer_id))
plt.show()
return alpha, xmin, xmax, D, sigma, num_pl_spikes, best_fit
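# Minimal sketch (assumption, not library code) of the underlying powerlaw
# package call used above, applied to synthetic Pareto-distributed
# "eigenvalues" with a known tail exponent:
#
#   import numpy as np
#   import powerlaw
#   evals = np.random.pareto(a=2.5, size=5000) + 1.0   # pdf ~ x**(-3.5)
#   fit = powerlaw.Fit(evals, xmax=np.max(evals), verbose=False)
#   print(fit.alpha, fit.xmin, fit.D)   # alpha should come out near 3.5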
def get_ESD(self, model=None, layer=None, random=False, params=DEFAULT_PARAMS):
"""Get the ESD (empirical spectral density) for the layer, specified by id or name)"""
model = self.model or model
details = self.describe(model=model)
layer_ids = details['layer_id'].to_numpy()
layer_names = details['name'].to_numpy()
if type(layer) is int and layer not in layer_ids:
logger.error("Can not find layer id {} in valid layer_ids {}".format(layer, layer_ids))
return []
elif type(layer) is str and layer not in layer_names:
logger.error("Can not find layer name {} in valid layer_names {}".format(layer, layer_names))
return []
layer_iter = WWLayerIterator(model=model, filters=[layer], params=params)
details = pd.DataFrame(columns=['layer_id', 'name'])
ww_layer = next(layer_iter)
assert(not ww_layer.skipped)
assert(ww_layer.has_weights)
if not random:
logger.info("Getting ESD for layer {} ".format(layer))
self.apply_esd(ww_layer, params)
esd = ww_layer.evals
else:
logger.info("Getting Randomized ESD for layer {} ".format(layer))
self.apply_random_esd(ww_layer, params)
esd = ww_layer.rand_evals
if esd is None or len(esd)==0:
logger.warn("No eigenvalues found for {} {}".format(ww_layer.layer_id, ww_layer.name))
else:
logger.info("Found {} eiganvalues for {} {}".format(len(esd), ww_layer.layer_id, ww_layer.name))
return esd
def get_Weights(self, model=None, layer=None, params=DEFAULT_PARAMS):
"""Get the Weights for the layer, specified by id or name)"""
model = self.model or model
details = self.describe(model=model)
layer_ids = details['layer_id'].to_numpy()
layer_names = details['name'].to_numpy()
if type(layer) is int and layer not in layer_ids:
logger.error("Can not find layer id {} in valid layer_ids {}".format(layer, layer_ids))
return []
elif type(layer) is str and layer not in layer_names:
logger.error("Can not find layer name {} in valid layer_names {}".format(layer, layer_names))
return []
logger.info("Getting Weights for layer {} ".format(layer))
layer_iter = WWLayerIterator(model=model, filters=[layer], params=params)
details = pd.DataFrame(columns=['layer_id', 'name'])
ww_layer = next(layer_iter)
assert(not ww_layer.skipped)
assert(ww_layer.has_weights)
return ww_layer.Wmats
def apply_norm_metrics(self, ww_layer, params=DEFAULT_PARAMS):
"""Compute the norm metrics, as they depend on the eigenvalues"""
layer_id = ww_layer.layer_id
name = ww_layer.name or ""
evals = ww_layer.evals
# TODO: check normalization on all
norm = np.sum(evals)
log_norm = np.log10(norm)
spectral_norm = np.max(evals)
log_spectral_norm = np.log10(spectral_norm)
# TODO: check formula
alpha = ww_layer.alpha
alpha_weighted = alpha*log_spectral_norm
log_alpha_norm = np.log10(np.sum( [ ev**alpha for ev in evals]))
stable_rank = norm / spectral_norm
N = ww_layer.N
hard_rank = matrix_rank(np.sqrt(evals), N)
entropy = matrix_entropy(np.sqrt(evals), N)
ww_layer.add_column(METRICS.NORM, norm)
ww_layer.add_column(METRICS.LOG_NORM, log_norm)
ww_layer.add_column(METRICS.SPECTRAL_NORM, spectral_norm)
ww_layer.add_column(METRICS.LOG_SPECTRAL_NORM, log_spectral_norm)
ww_layer.add_column(METRICS.ALPHA, alpha)
ww_layer.add_column(METRICS.ALPHA_WEIGHTED, alpha_weighted)
ww_layer.add_column(METRICS.LOG_ALPHA_NORM, log_alpha_norm)
ww_layer.add_column(METRICS.STABLE_RANK, stable_rank)
ww_layer.add_column(METRICS.MATRIX_RANK, hard_rank)
ww_layer.add_column(METRICS.MATRIX_ENTROPY, entropy)
return ww_layer
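# Worked sketch (assumption, not library code) of the metrics computed above,
# on a toy eigenvalue array and a hypothetical alpha:
#
#   import numpy as np
#   evals = np.array([0.1, 0.5, 1.0, 4.0])
#   norm = np.sum(evals)                                # 5.6
#   spectral_norm = np.max(evals)                       # 4.0
#   stable_rank = norm / spectral_norm                  # 1.4
#   alpha = 3.0                                         # hypothetical PL exponent
#   alpha_weighted = alpha * np.log10(spectral_norm)    # ~1.81
#   log_alpha_norm = np.log10(np.sum(evals ** alpha))   # log10(65.126) ~ 1.81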
# TODO: add x bulk max yellow line for bulk edge for random
def apply_plot_deltaEs(self, ww_layer, random=False, params=DEFAULT_PARAMS):
"""Plot the deltas of the layer ESD, both in a sequence as a histogram (level statisitcs)"""
layer_id = ww_layer.layer_id
name = ww_layer.name or ""
layer_name = "{} {}".format(layer_id, name)
savefig = params['savefig']
if random:
layer_name = "{} Randomized".format(layer_name)
title = "Layer {} W".format(layer_name)
evals = ww_layer.rand_evals
color='mediumorchid'
bulk_max = ww_layer.rand_bulk_max
else:
title = "Layer {} W".format(layer_name)
evals = ww_layer.evals
color='blue'
# sequence of deltas
deltaEs = np.diff(evals)
logDeltaEs = np.log10(deltaEs)
x = np.arange(len(deltaEs))
eqn = r"$\log_{10}\Delta(\lambda)$"
plt.scatter(x,logDeltaEs, color=color)
if not random:
idx = np.searchsorted(evals, ww_layer.xmin, side="left")
plt.axvline(x=idx, color='red', label=r'$\lambda_{xmin}$')
else:
idx = np.searchsorted(evals, bulk_max, side="left")
plt.axvline(x=idx, color='red', label=r'$\lambda_{+}$')
plt.title("Log Delta Es for Layer {}".format(layer_name))
plt.ylabel("Log Delta Es: "+eqn)
plt.legend()
if savefig:
plt.savefig("ww.layer{}.deltaEs.png".formt(layer_id))
plt.show()
# level statistics (not mean adjusted because plotting log)
plt.hist(logDeltaEs, bins=100, color=color, density=True)
plt.title("Log Level Statisitcs for Layer {}".format(layer_name))
plt.ylabel("density")
plt.xlabel(eqn)
plt.legend()
if savefig:
plt.savefig("ww.layer{}.level-stats.png".formt(layer_id))
plt.show()
def apply_mp_fit(self, ww_layer, random=True, params=DEFAULT_PARAMS):
"""Perform MP fit on random or actual random eigenvalues
N/A yet"""
layer_id = ww_layer.layer_id
name = ww_layer.name or ""
layer_name = "{} {}".format(layer_id, name)
if random:
layer_name = "{} Randomized".format(layer_name)
title = "Layer {} W".format(layer_name)
evals = ww_layer.rand_evals
color='mediumorchid'
else:
title = "Layer {} W".format(layer_name)
evals = ww_layer.evals
color='blue'
N, M = ww_layer.N, ww_layer.M
rf = ww_layer.rf
num_spikes, sigma_mp, mp_softrank, bulk_min, bulk_max, Wscale = self.mp_fit(evals, N, M, rf, layer_name, layer_id,
params['plot'], params['savefig'], color, params['rescale'])
if random:
ww_layer.add_column('rand_num_spikes', num_spikes)
ww_layer.add_column('rand_sigma_mp', sigma_mp)
ww_layer.add_column('rand_mp_softrank', mp_softrank)
ww_layer.add_column('rand_W_scale', Wscale)
ww_layer.add_column('rand_bulk_max', bulk_max)
ww_layer.add_column('rand_bulk_min', bulk_min)
else:
ww_layer.add_column('num_spikes', num_spikes)
ww_layer.add_column('sigma_mp', sigma_mp)
ww_layer.add_column(METRICS.MP_SOFTRANK, mp_softrank)
ww_layer.add_column('W_scale', Wscale)
ww_layer.add_column('bulk_max', bulk_max)
ww_layer.add_column('bulk_min', bulk_min)
return
def mp_fit(self, evals, N, M, rf, layer_name, layer_id, plot, savefig, color, rescale):
"""Automatic MP fit to evals; compute the number of spikes and mp_softrank"""
Q = N/M
to_plot = evals.copy()
Wscale=1.0
if rescale:
Wnorm = np.sqrt(np.sum(evals))
Wscale = np.sqrt(N*rf)/Wnorm
logger.info("rescaling {} ESD of W by {:0.2f}".format(layer_id, Wscale))
to_plot = (Wscale*Wscale)*to_plot
lambda_max = np.max(to_plot)
bw = 0.1
s1, f1 = fit_density_with_range(to_plot, Q, bw = bw)
sigma_mp = s1
bulk_max = (s1 * (1 + 1/np.sqrt(Q)))**2
bulk_min = (s1 * (1 - 1/np.sqrt(Q)))**2
#TODO: add Tracy Widom (TW) range
num_spikes = len(to_plot[to_plot > bulk_max])
ratio_numofSpikes = num_spikes / (M - 1)
mp_softrank = bulk_max / lambda_max
if Q == 1.0:
fit_law = 'QC SSD'
#Even if the quarter circle applies, still plot the MP_fit
if plot:
plot_density(to_plot, Q=Q, sigma=s1, method="MP", color=color)#, scale=Wscale)
plt.legend([r'$\rho_{emp}(\lambda)$', 'MP fit'])
plt.title("MP ESD, sigma auto-fit for {}".format(layer_name))
if savefig:
plt.savefig("ww.layer{}.mpfit1.png".formt(layer_id))
plt.show()
else:
fit_law = 'MP ESD'
#
logger.info("MP fit min_esd={:0.2f}, max_esd={:0.2f}, Q={}, s1={:0.2f} Wsc ale={:0.2f}".format(np.min(to_plot), np.max(to_plot), Q, s1, Wscale))
plot_density_and_fit(model=None, eigenvalues=to_plot, layer_name=layer_name, layer_id=0,
Q=Q, num_spikes=0, sigma=s1, verbose = False, plot=plot, color=color)#, scale=Wscale)
if plot:
title = fit_law+" for layer "+layer_name+"\n Q={:0.3} ".format(Q)
title = title + r"$\sigma_{mp}=$"+"{:0.3} ".format(sigma_mp)
title = title + r"$\mathcal{R}_{mp}=$"+"{:0.3} ".format(mp_softrank)
title = title + r"$\#$ spikes={}".format(num_spikes)
plt.title(title)
if savefig:
plt.savefig("ww.layer{}.mpfit2.png".format(layer_id))
plt.show()
bulk_max = bulk_max/(Wscale*Wscale)
bulk_min = bulk_min/(Wscale*Wscale)
return num_spikes, sigma_mp, mp_softrank, bulk_min, bulk_max, Wscale
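# Worked sketch (assumption, not library code) of the Marchenko-Pastur bulk
# edges used above: for aspect ratio Q = N/M and fitted sigma, the random bulk
# lies in [(sigma*(1 - 1/sqrt(Q)))**2, (sigma*(1 + 1/sqrt(Q)))**2].
#
#   import numpy as np
#   Q, sigma = 4.0, 1.0
#   bulk_min = (sigma * (1 - 1 / np.sqrt(Q))) ** 2   # 0.25
#   bulk_max = (sigma * (1 + 1 / np.sqrt(Q))) ** 2   # 2.25
#   # eigenvalues above bulk_max count as spikes; mp_softrank = bulk_max / lambda_max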
def smooth_W_alt(self, W, n_comp):
"""Apply the SVD Smoothing Transform to W"
if n_comp < 0, then chomp off the top n_comp eiganvalues
"""
N, M = np.max(W.shape), np.min(W.shape)
# TODO: replace this with truncated SVD
# can't we just apply the svd transform...test
# keep this old method for historical comparison
u, s, vh = np.linalg.svd(W, compute_uv=True)
if n_comp > 0:
s[n_comp:]=0
else:
s[:n_comp]=0
s = list(s)
s.extend([0]*(N-M))
s = np.array(s)
s = np.diag(s)
if u.shape[0] > vh.shape[0]:
smoothed_W = np.dot(np.dot(u,s)[:N,:M],vh)
else:
smoothed_W = np.dot(u, np.dot(s,vh)[:M,:N])
return smoothed_W
def smooth_W(self, W, n_comp):
"""Apply the sklearn TruncatedSVD method to each W, return smoothed W
"""
svd = TruncatedSVD(n_components=n_comp, n_iter=7, random_state=42)
if W.shape[0]<W.shape[1]:
X = svd.fit_transform(W.T)
VT = svd.components_
smoothed_W = np.dot(X,VT).T
else:
X = svd.fit_transform(W)
VT = svd.components_
smoothed_W = np.dot(X,VT)
logger.debug("smoothed W {} -> {} n_comp={}".format(W.shape, smoothed_W.shape, n_comp))
return smoothed_W
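# Minimal sketch (assumption, not library code) of the rank-k reconstruction
# that smooth_W() performs, on a random matrix:
#
#   import numpy as np
#   from sklearn.decomposition import TruncatedSVD
#   W = np.random.normal(size=(200, 100))
#   svd = TruncatedSVD(n_components=10, n_iter=7, random_state=42)
#   X = svd.fit_transform(W)                    # (200, 10) scores
#   smoothed_W = np.dot(X, svd.components_)     # (200, 100) rank-10 reconstruction
#   print(np.linalg.matrix_rank(smoothed_W))    # 10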
def SVDSmoothing(self, model=None, percent=0.2, ww2x=False, layers=[]):
"""Apply the SVD Smoothing Transform to model, keeping (percent)% of the eigenvalues
layers:
List of layer ids. If empty, analyze all layers (default)
If layer ids < 0, then skip the layers specified
All layer ids must be > 0 or < 0
ww2x:
Use weightwatcher version 0.2x style iterator, which slices up Conv2D layers in N=rf matrices
"""
model = model or self.model
params=DEFAULT_PARAMS
params['ww2x'] = ww2x
params['layers'] = layers
params['percent'] = percent
# check framework, return error if framework not supported
# need to access static method on Model class
logger.info("params {}".format(params))
if not self.valid_params(params):
msg = "Error, params not valid: \n {}".format(params)
logger.error(msg)
raise Exception(msg)
if ww2x:
logger.info("Using weightwatcher 0.2x style layer and slice iterator")
layer_iterator = WW2xSliceIterator(model, filters=layers, params=params)
else:
layer_iterator = WWLayerIterator(model, filters=layers, params=params)
# iterate over layers
# naive implementation uses just percent, not the actual tail
# we eventually want to compute the eigenvectors and analyze them
# here we do SVD
for ww_layer in layer_iterator:
if not ww_layer.skipped and ww_layer.has_weights:
logger.info("LAYER: {} {} : {}".format(ww_layer.layer_id, ww_layer.the_type, type(ww_layer.layer)))
self.apply_svdsmoothing(ww_layer, params)
logger.info("Returning smoothed model")
return model
def apply_svdsmoothing(self, ww_layer, params=DEFAULT_PARAMS):
"""run truncated SVD on layer weight matrices and reconstruct the weight matrices
keep the top (percent * num_components) singular components
if percent < 0, chomp off the top |percent * num_components| components instead"""
percent = params['percent']
layer = ww_layer.layer
layer_id = ww_layer.layer_id
layer_name = ww_layer.name
layer_type = ww_layer.the_type
framework = ww_layer.framework
channels = ww_layer.channels
if framework not in [FRAMEWORK.KERAS, FRAMEWORK.PYTORCH, FRAMEWORK.ONNX]:
logger.error("Sorry, SVDSmoothing does not support this model framework ")
return
if channels == CHANNELS.UNKNOWN:
log.error("Sorry, SVDSmoothing does not understand the channels for this layer, stopping ")
return
M = ww_layer.M
N = ww_layer.N
rf = ww_layer.rf
n_comp = int(ww_layer.num_components*percent)
logger.info("apply truncated SVD on Layer {} {}, keeping {:0.2f}% percent , or ncomp={} out of {}. of the singular vectors".format(layer_id, layer_name, percent, n_comp, ww_layer.num_components))
# get the model weights and biases directly, converted to numpy arrays
has_W, old_W, has_B, old_B = ww_layer.get_weights_and_biases()
if layer_type in [LAYER_TYPE.DENSE, LAYER_TYPE.CONV1D, LAYER_TYPE.EMBEDDING]:
if n_comp > 0:
new_W = self.smooth_W(old_W, n_comp)
elif n_comp < 0:
logger.debug("Chomping off top {} singular values".format(-n_comp))
new_W = self.smooth_W_alt(old_W, n_comp)
else:
logger.warning("Not smoothing {} {}, ncomp=0".format(layer_id, layer_name))
new_W = old_W
new_B = old_B
# did we flip W when analyzing ?
if new_W.shape != old_W.shape:
new_W=new_W.T
self.replace_layer_weights(framework, layer_id, layer, new_W, new_B)
elif layer_type == LAYER_TYPE.CONV2D:
new_W = np.zeros_like(old_W)
new_B = old_B
if new_B is not None:
logger.warn("Something went wrong, Biases found for Conv2D layer, layer {} {} ".format(layer_id, layer_name))
#[k,k,M,N]
if channels == CHANNELS.FIRST:
i_max, j_max, _, _ = new_W.shape
if rf != i_max*j_max:
logger.warn("Channels FIRST not processed correctly W_slice.shape {}, rf={} ?".format(new_W.shape, rf))
for i in range(i_max):
for j in range(j_max):
new_W[i,j,:,:] = self.smooth_W(old_W[i,j,:,:], n_comp)
#[N,M,k,k]
elif channels == CHANNELS.LAST:
_, _, i_max, j_max = new_W.shape
if rf != i_max*j_max:
logger.warn("Channels LAST not processed correctly W_slice.shape {}, rf={} ?".format(new_W.shape, rf))
for i in range(i_max):
for j in range(j_max):
new_W[:,:,i,j] = self.smooth_W(old_W[:,:,i,j], n_comp)
else:
logger.warn("Something went wrong, Channels not defined or detected for Conv2D layer, layer {} {} skipped ".format(layer_id, layer_name))
self.replace_layer_weights(framework, layer_id, layer, new_W)
else:
logger.warn("Something went wrong,UNKNOWN layer {} {} skipped , type={}".format(layer_id, layer_name, layer_type))
return ww_layer
def replace_layer_weights(self, framework, idx, layer, W, B=None):
"""Replace the old layer weights with the new weights
framework: FRAMEWORK.KERAS | FRAMEWORK.PYTORCH
layer: the framework layer, not an instance of WWLayer
new_W: numpy array
new_B: numpy vector (array)
"""
if framework==FRAMEWORK.KERAS:
# (I think) this works for Dense and Conv2D, not sure about other layers
if B is not None:
W = [W, B]
layer.set_weights(W)
elif framework==FRAMEWORK.PYTORCH:
# see: https://discuss.pytorch.org/t/fix-bias-and-weights-of-a-layer/75120/4
# this may be deprecated
layer.weight.data = torch.from_numpy(W)
if B is not None:
layer.bias.data = torch.from_numpy(B)
# See; https://github.com/onnx/onnx/issues/2978
elif framework==FRAMEWORK.ONNX:
#if B is not None:
# W = [W, B]
#else:
# W = [W]
layer.set_weights(idx, W)
else:
logger.debug("Layer {} skipped, Layer Type {} not supported".format(layer_id, the_type))
return
|
#!/usr/bin/env python3
from concurrent.futures import ThreadPoolExecutor as PoolExecutor
import re
import mwbot
def convert_redirect(r):
before = r['revisions'][0]['*']
redir = mwbot.parse_redirect(before)
if redir and redir[0].startswith('http'):
text = '{{#exturl:%s}}' % redir[0].strip()
if redir[1] and redir[1] != r['title'].split('/')[-1].replace('_', ' '):
text += '\n[[Hat Linkbeschreibung::%s]]' % redir[1]
mwbot.save(site, r['title'], before, text, 'verwende #exturl Parserfunktion')
def convert_redirects(threads):
results = list(site.results(generator='querypage', gqppage='BrokenRedirects', gqplimit='max', prop='revisions', rvprop='content'))
with PoolExecutor(max_workers=threads) as executor:
for _ in executor.map(convert_redirect, results):
pass
def convert_file(r):
text = r['revisions'][0]['*']
parts = text.split("<!-- Don't edit below this line! -->")
after = parts[0]
if len(parts) == 2:
for line in parts[1].splitlines():
match = re.match(r'\[\[([^]]+)\]\]', line)
if match:
title = match.group(1).strip().replace('_', ' ')
if not title.startswith('Kategorie'):
after += '{{#attach:%s}}\n' % title
else:
print('cannot handle', r['title'])
mwbot.save(site, r['title'], text, after, 'verwende #attach Parserfunktion')
def convert_files(threads):
results = list(site.results(generator='categorymembers', gcmtitle='Category:Materialien', gcmtype='file', prop='revisions', rvprop='content', gcmlimit='max'))
with PoolExecutor(max_workers=threads) as executor:
for _ in executor.map(convert_file, results):
pass
if __name__ == '__main__':
parser = mwbot.get_argparser()
parser.add_argument('--threads', default=1, type=int)
args = parser.parse_args()
site = mwbot.getsite('materialien2attachments.py', args)
convert_redirects(args.threads)
convert_files(args.threads)
|
"""
A server that responds with two pages, one showing the most recent
100 tweets for a given user and the other showing the people that follow
that given user (sorted by the number of followers those users have).
For authentication purposes, the server takes a commandline argument
that indicates the file containing Twitter data in a CSV file format:
consumer_key, consumer_secret, access_token, access_token_secret
For example, I pass in my secrets via file name:
/Users/parrt/Dropbox/licenses/twitter.csv
Please keep in mind the limits imposed by the twitter API:
https://dev.twitter.com/rest/public/rate-limits
For example, you can only do 15 follower list fetches per
15 minute window, but you can do 900 user timeline fetches.
"""
import sys
from flask import Flask, render_template
from tweetie import *
from colour import Color
from numpy import median
app = Flask(__name__)
def add_color(tweets):
"""
Given a list of tweets, one dictionary per tweet, add
a "color" key to each tweets dictionary with a value
containing a color graded from red to green. Pure red
would be for -1.0 sentiment score and pure green would be for
sentiment score 1.0.
Use colour.Color to get 100 color values in the range
from red to green. Then convert the sentiment score from -1..1
to an index from 0..100. That index gives you the color increment
from the 100 gradients.
This function modifies the dictionary of each tweet. It lives in
the server script because it has to do with display not collecting
tweets.
"""
colors = list(Color("red").range_to(Color("green"), 100))
for t in tweets:
score = t['score']
...
@app.route("/favicon.ico")
def favicon():
"""
Open and return a 16x16 or 32x32 .png or other image file in binary mode.
This is the icon shown in the browser tab next to the title.
"""
@app.route("/<name>")
def tweets(name):
"Display the tweets for a screen name color-coded by sentiment score"
@app.route("/following/<name>")
def following(name):
"""
Display the list of users followed by a screen name, sorted in
reverse order by the number of followers of those users.
"""
i = sys.argv.index('server:app')
twitter_auth_filename = sys.argv[i+1] # e.g., "/Users/parrt/Dropbox/licenses/twitter.csv"
api = authenticate(twitter_auth_filename)
#app.run(host='0.0.0.0', port=80)
|
import os
import pathlib
from typing import Optional, Tuple
import requests
import tensorflow as tf
from chitra.constants import IMAGENET_LABEL_URL
from chitra.logging import logger
IMAGENET_LABELS: Optional[Tuple[str]] = None
def remove_dsstore(path) -> None:
"""Deletes .DS_Store files from path and sub-folders of path."""
path = pathlib.Path(path)
for e in path.glob("*.DS_Store"):
os.remove(e)
for e in path.glob("*/*.DS_Store"):
os.remove(e)
def get_basename(path: tf.string) -> tf.string:
if not isinstance(path, tf.Tensor):
raise AssertionError
return tf.strings.split(path, os.path.sep)[-1]
def load_imagenet_labels() -> Tuple[str]:
global IMAGENET_LABELS
if IMAGENET_LABELS is None:
logger.debug("Downloading imagenet labels...")
IMAGENET_LABELS = (
requests.get(IMAGENET_LABEL_URL).content.decode("UTF-8").split("\n")[1:]
)
return IMAGENET_LABELS
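# Illustrative usage (assumption, not part of the original module):
#
#   labels = load_imagenet_labels()   # downloaded once, then cached in IMAGENET_LABELS
#   print(len(labels), labels[0])     # ImageNet class names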
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
API handler for osvmexpire
"""
import pkgutil
import six
from oslo_policy import policy
from oslo_serialization import jsonutils as json
import pecan
from os_vm_expire.common import config
from os_vm_expire.common import utils
from os_vm_expire import i18n as u
LOG = utils.getLogger(__name__)
CONF = config.CONF
class ApiResource(object):
"""Base class for API resources."""
pass
def load_body(req, resp=None, validator=None):
"""Helper function for loading an HTTP request body from JSON.
This body is placed into a Python dictionary.
:param req: The HTTP request instance to load the body from.
:param resp: The HTTP response instance.
:param validator: The JSON validator to enforce.
:return: A dict of values from the JSON request.
"""
try:
# body = req.body_file.read(CONF.max_allowed_request_size_in_bytes)
body = req.body_file.read()
req.body_file.seek(0)
except IOError:
LOG.exception("Problem reading request JSON stream.")
pecan.abort(500, u._('Read Error'))
try:
parsed_body = json.loads(body)
strip_whitespace(parsed_body)
except ValueError:
LOG.exception("Problem loading request JSON.")
pecan.abort(400, u._('Malformed JSON'))
if validator:
try:
parsed_body = validator.validate(parsed_body)
except Exception as e:
LOG.exception(six.text_type(e))
pecan.abort(e.status_code, e.client_message)
return parsed_body
def generate_safe_exception_message(operation_name, excep):
"""Generates an exception message that is 'safe' for clients to consume.
A 'safe' message is one that doesn't contain sensitive information.
:param operation_name: Name of attempted operation, with a 'Verb noun'
format (e.g. 'Create Secret').
:param excep: The Exception instance that halted the operation.
:return: (status, message) where 'status' is one of the webob.exc.HTTP_xxx
codes, and 'message' is the sanitized message
associated with the error.
"""
message = None
reason = None
status = 500
try:
raise excep
except policy.PolicyNotAuthorized:
message = u._(
'{operation} attempt not allowed - '
'please review your '
'user/project privileges').format(operation=operation_name)
status = 403
except Exception as http_exception:
# only exceptions that carry HTTP details expose these attributes;
# anything else falls back to the generic message below
if hasattr(http_exception, 'client_message') and hasattr(http_exception, 'status_code'):
reason = http_exception.client_message
status = http_exception.status_code
else:
message = u._('{operation} failure seen - please contact site '
'administrator.').format(operation=operation_name)
if reason:
message = u._('{operation} issue seen - {reason}.').format(
operation=operation_name, reason=reason)
return status, message
@pkgutil.simplegeneric
def get_items(obj):
"""This is used to get items from either a list or a dictionary.
The while-False generator is needed so that scalar objects yield no items.
"""
while False:
yield None
@get_items.register(dict)
def _json_object(obj):
return obj.items()
@get_items.register(list)
def _json_array(obj):
return enumerate(obj)
def strip_whitespace(json_data):
"""Recursively trim values from the object passed in using get_items()."""
for key, value in get_items(json_data):
if hasattr(value, 'strip'):
json_data[key] = value.strip()
else:
strip_whitespace(value)
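# Illustrative usage of get_items()/strip_whitespace(), not part of the
# original module: whitespace is trimmed recursively through nested dicts
# and lists, while non-string scalars are left untouched.
if __name__ == '__main__':
sample = {'name': '  my-vm  ', 'tags': ['  a ', ' b'], 'ttl': 3600}
strip_whitespace(sample)
print(sample)  # {'name': 'my-vm', 'tags': ['a', 'b'], 'ttl': 3600}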
|
#!/usr/bin/env python
#
# Volconv - geometry-aware DICOM-to-NIfTI converter
# Raw writer (primarily for NIfTI data, hence using NIfTI type system)
#
# Copyright 2006-2016 Mark J White <mark@celos.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See COPYING.txt and NOTICE.txt in the distribution for details.
#
import struct
import sys
import numpy
import gzip
class NiftiType:
# complete set of allowed Nifti-1.1 types
Bool = 1
UInt8 = 2
Int8 = 256
UInt16 = 512
Int16 = 4
UInt32 = 768
Int32 = 8
UInt64 = 1280
Int64 = 1024
Float32 = 16
Float64 = 64
Float128 = 1536
Complex64 = 32
Complex128 = 1792
Complex256 = 2048
RGB24 = 128
Map = {
# Type: ( NArray , NArrayShort, Pack , Bitpix )
# Type name = NumPy string = Nifti #define
Bool: ( "Bool", "", "c", 1 ),
Int8: ( "Int8", "", "b", 8 ),
UInt8: ( "UInt8", "", "B", 8 ),
Int16: ( "Int16", "i2", "h", 16 ),
UInt16: ( "UInt16", "u2", "H", 16 ),
Int32: ( "Int32", "i4", "i", 32 ),
UInt32: ( "UInt32", "u4", "I", 32 ),
Float32: ( "Float32", "f4", "f", 32 ),
Float64: ( "Float64", "f8", "d", 64 ),
Complex64: ( "Complex64", "", "ff", 64 ),
Complex128: ( "Complex128", "", "dd", 128 ),
}
class RawWriter:
def __init__(self, filename, use_gzip=False):
self.filename = filename
# use_gzip does not shadow the gzip module imported above
if use_gzip:
self.fh = gzip.GzipFile(filename, 'wb')
else:
self.fh = open(filename, 'wb')
# currently expect user to set:
self.type = NiftiType.Int16
self.data = None
self.offset = 0
self.order = "<"
def write(self):
self.fh.close()
nastr = self.order + NiftiType.Map[self.type][1]
dim = self.data.shape
flat = numpy.reshape(self.data, dim[0]*dim[1]*dim[2], order="F")
mm = numpy.memmap(self.filename, dtype=nastr, mode="r+",
offset=self.offset, shape=(dim[0]*dim[1]*dim[2],))
mm[:] = flat[:]
del mm
self.fh = open(self.filename, 'rb+')
# self.fh.seek(self.offset)
# packstr = self.order + NiftiType.Map[self.type][2]
# for e in flat:
# bytes = struct.pack(packstr, int(e)) # numpy-related fix
# self.fh.write(bytes)
|
def factorial(num) :
if num <= 1 :
print('returning 1')
return 1
print("calling %d * %d!" % (num, num-1))
retVal = factorial(num-1)
print("returning %d * %d! (=%d)" % (num, num-1, retVal))
return num * retVal
print('\n5! = ', factorial(5))
|
import keras
from keras.datasets import fashion_mnist
import numpy as np
from PIL import Image, ImageOps
import os
import random
def convert(size, box):
dw = 1./size[0]
dh = 1./size[1]
x = (box[0] + box[1])/2.0
y = (box[2] + box[3])/2.0
w = box[1] - box[0]
h = box[3] - box[2]
x = x*dw
w = w*dw
y = y*dh
h = h*dh
return (x,y,w,h)
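# Worked example (comment only, assumed values): convert() maps a pixel box
# (xmin, xmax, ymin, ymax) on a 500x375 canvas to normalized YOLO format
# (x_center, y_center, width, height):
#
#   convert((500, 375), (110.0, 390.0, 47.5, 327.5))
#   # x = ((110 + 390) / 2) / 500 = 0.5
#   # y = ((47.5 + 327.5) / 2) / 375 = 0.5
#   # w = (390 - 110) / 500 = 0.56
#   # h = (327.5 - 47.5) / 375 ~= 0.7467
#   # -> (0.5, 0.5, 0.56, 0.7467)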
def save_image(filename, data_array):
#bgcolor = (0xff, 0xff, 0xff)
bgcolor = (0x00, 0x00, 0xff)
screen = (500, 375)
img = Image.new('RGB', screen, bgcolor)
mnist_img = Image.fromarray(data_array.astype('uint8'))
mnist_img_invert = ImageOps.invert(mnist_img)
#w = int(round(mnist_img.width * random.uniform(8.0, 10.0)))
w = int(mnist_img.width*10)
mnist_img_invert = mnist_img_invert.resize((w,w))
#x = random.randint(0, img.width-w)
#y = random.randint(0, img.height-w)
x = int((img.width-w)/2)
y = int((img.height-w)/2)
img.paste(mnist_img_invert, (x, y))
img.save(filename)
return convert((img.width,img.height), (float(x), float(x+w), float(y), float(y+w)))
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
DIR_NAME = "JPEGImages"
if os.path.exists(DIR_NAME) == False:
os.mkdir(DIR_NAME)
LABEL_DIR_NAME = "labels"
if os.path.exists(LABEL_DIR_NAME) == False:
os.mkdir(LABEL_DIR_NAME)
j = 0
no = 0
for li in [x_train, x_test]:
j += 1
i = 0
print("[---------------------------------------------------------------]")
for x in li:
# Write Image file
filename = "{0}/{1:05d}.jpg".format(DIR_NAME,no)
print(filename)
ret = save_image(filename, x)
print(ret)
# Write label file
label_filename = "{0}/{1:05d}.txt".format(LABEL_DIR_NAME,no)
print(label_filename)
f = open(label_filename, 'w')
y = 0
if j == 1:
y = y_train[i]
else:
y = y_test[i]
str = "{0:d} {1:f} {2:f} {3:f} {4:f}".format(y, ret[0], ret[1], ret[2], ret[3])
f.write(str)
f.close()
i += 1
no += 1
|
import asyncio
from typing import List, Optional, Union
import aioredis
from ..utils.scripts import generate_key
USER_KEY = "user_urls"
class RedisDB:
def __init__(self, host: str, port: int, password: str, db: int):
self._host = host
self._port = port
self._password = password
self._db = db
self._redis: Optional[aioredis.Redis] = None
self._connection_lock = asyncio.Lock()
async def redis(self) -> aioredis.Redis:
async with self._connection_lock:
if self._redis is None or self._redis.closed:
self._redis = await aioredis.create_redis_pool(address=(self._host, self._port),
password=self._password,
db=self._db,
encoding="utf-8")
return self._redis
async def close(self):
async with self._connection_lock:
if self._redis and not self._redis.closed:
self._redis.close()
async def wait_closed(self):
async with self._connection_lock:
if self._redis:
await self._redis.wait_closed()
async def get_received_urls(self, user_id: Union[str, int]) -> List[str]:
redis = await self.redis()
received_urls = await redis.smembers(generate_key(USER_KEY, user_id))
return received_urls or list()
async def add_received_url(self, user_id: Union[str, int], url: str):
redis = await self.redis()
await redis.sadd(generate_key(USER_KEY, user_id), url)
async def clear_received_urls(self, user_id: Union[str, int]):
redis = await self.redis()
await redis.unlink(generate_key(USER_KEY, user_id))
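# Illustrative usage sketch (assumption, not part of the original module);
# it needs a reachable Redis instance and an event loop:
#
#   import asyncio
#
#   async def demo():
#       db = RedisDB(host="localhost", port=6379, password=None, db=0)
#       await db.add_received_url(42, "https://example.com/a")
#       print(await db.get_received_urls(42))
#       await db.clear_received_urls(42)
#       await db.close()
#       await db.wait_closed()
#
#   asyncio.run(demo())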
|
from django.forms import widgets
class Autocomplete(widgets.Select):
template_name = "forms/autocomplete_field.html"
|
from setuptools import setup, find_packages
__version__ = "1.0.2"
with open("readme.md", "r") as f:
readme = f.read()
requirements = ["requests-HTML>=0.10.0", "MangaDex.py>=2.0.6", "pillow>=8.0.1", "imgurpython>=1.1.7"]
setup(
name = "mangas-dl",
version = __version__,
author = "Boubou0909",
author_email = "balthazar0909@gmail.com",
description = "Mangas' scans downloader app",
long_description = readme,
long_description_content_type = "text/markdown",
url = "https://github.com/Boubou0909/Mangas-dl",
packages = find_packages(),
data_files = [("mangas_dl", ["mangas_dl/HELP.md", "mangas_dl/conf.ini", "mangas_dl/language_codes.json", "mangas_dl/settings.json", "mangas_dl/websites_used.json"])],
install_requires = requirements,
entry_points = '''
[console_scripts]
mangas-dl=mangas_dl.__main__:main
''',
classifiers =
[
"Programming Language :: Python :: 3.9"
]
)
|
# -*- coding: utf-8 -*-
#
# FoundationDB documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sphinx_bootstrap_theme
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('extensions'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'brokenrole',
'relativelink',
'rubydomain',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'FoundationDB'
copyright = u'2013-2021 Apple, Inc and the FoundationDB project authors'
# Load the version information from 'versions.target'
import xml.etree.ElementTree as ET
version_path = os.path.join(os.path.dirname(sys.executable), '..', '..', '..', 'versions.target')
tree = ET.parse(version_path)
root = tree.getroot()
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = root.find(".//{http://schemas.microsoft.com/developer/msbuild/2003}PackageName").text
# The full version, including alpha/beta/rc tags.
# FoundationDB special note: also see guide-common.rst.inc and update the link to the EC2 template
release = root.find(".//{http://schemas.microsoft.com/developer/msbuild/2003}Version").text
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'globaltoc_depth': 2,
'globaltoc_includehidden': "true",
'navbar_links': [
("Site Map", "contents"),
],
'source_link_position': "footer",
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'FoundationDB ' + version
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html'],
'contents': [],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FoundationDB'
# Disable permalinks
html_add_permalinks = ""
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'FoundationDB.tex', u'FoundationDB Documentation',
u'FoundationDB', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'foundationdb', u'FoundationDB Documentation',
[u'FoundationDB'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'FoundationDB', u'FoundationDB Documentation',
u'FoundationDB', 'FoundationDB', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
########### Check for inappropriate use of the default role ##########
default_role = "broken"
|
import argparse
import logging
import os
import time
import numpy as np
import pybullet as p
import igibson
from igibson.external.pybullet_tools.utils import (
get_max_limits,
get_min_limits,
get_sample_fn,
joints_from_names,
set_joint_positions,
)
from igibson.objects.visual_marker import VisualMarker
from igibson.robots.fetch import Fetch
from igibson.scenes.empty_scene import EmptyScene
from igibson.simulator import Simulator
from igibson.utils.utils import l2_distance, parse_config, restoreState
def main(selection="user", headless=False, short_exec=False):
"""
Example usage of the inverse kinematics solver.
This is PyBullet functionality, but we keep an example here because it can be useful and iGibson does not provide
a direct API for it.
"""
print("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
# Assuming that if selection!="user", headless=True, short_exec=True, we are calling it from tests and we
# do not want to parse args (it would fail because the calling function is pytest "testfile.py")
if not (selection != "user" and headless and short_exec):
parser = argparse.ArgumentParser()
parser.add_argument(
"--programmatic",
"-p",
dest="programmatic_pos",
action="store_true",
help="if the IK solvers should be used with the GUI or programmatically",
)
args = parser.parse_args()
programmatic_pos = args.programmatic_pos
else:
programmatic_pos = True
# Create simulator, scene and robot (Fetch)
config = parse_config(os.path.join(igibson.configs_path, "fetch_reaching.yaml"))
s = Simulator(mode="headless", use_pb_gui=True if not headless else False)
scene = EmptyScene()
s.import_scene(scene)
robot_config = config["robot"]
robot_config.pop("name")
fetch = Fetch(**robot_config)
s.import_object(fetch)
body_ids = fetch.get_body_ids()
assert len(body_ids) == 1, "Fetch robot is expected to be single-body."
robot_id = body_ids[0]
arm_default_joint_positions = (
0.10322468280792236,
-1.414019864768982,
1.5178184935241699,
0.8189625336474915,
2.200358942909668,
2.9631312579803466,
-1.2862852996643066,
0.0008453550418615341,
)
robot_default_joint_positions = (
[0.0, 0.0]
+ [arm_default_joint_positions[0]]
+ [0.0, 0.0]
+ list(arm_default_joint_positions[1:])
+ [0.01, 0.01]
)
robot_joint_names = [
"r_wheel_joint",
"l_wheel_joint",
"torso_lift_joint",
"head_pan_joint",
"head_tilt_joint",
"shoulder_pan_joint",
"shoulder_lift_joint",
"upperarm_roll_joint",
"elbow_flex_joint",
"forearm_roll_joint",
"wrist_flex_joint",
"wrist_roll_joint",
"r_gripper_finger_joint",
"l_gripper_finger_joint",
]
arm_joints_names = [
"torso_lift_joint",
"shoulder_pan_joint",
"shoulder_lift_joint",
"upperarm_roll_joint",
"elbow_flex_joint",
"forearm_roll_joint",
"wrist_flex_joint",
"wrist_roll_joint",
]
# Indices of the joints of the arm in the vectors returned by IK and motion planning (excluding wheels, head, fingers)
robot_arm_indices = [robot_joint_names.index(arm_joint_name) for arm_joint_name in arm_joints_names]
# PyBullet ids of the joints corresponding to the joints of the arm
arm_joint_ids = joints_from_names(robot_id, arm_joints_names)
all_joint_ids = joints_from_names(robot_id, robot_joint_names)
set_joint_positions(robot_id, arm_joint_ids, arm_default_joint_positions)
# Set robot base
fetch.set_position_orientation([0, 0, 0], [0, 0, 0, 1])
fetch.keep_still()
# Get initial EE position
x, y, z = fetch.get_eef_position()
# Define the limits (max and min, range), and resting position for the joints, including the two joints of the
# wheels of the base (indices 0 and 1), the two joints for the head (indices 3 and 4) and the two joints of the
# fingers (indices 12 and 13)
max_limits = get_max_limits(robot_id, all_joint_ids)
min_limits = get_min_limits(robot_id, all_joint_ids)
rest_position = robot_default_joint_positions
joint_range = list(np.array(max_limits) - np.array(min_limits))
joint_range = [item + 1 for item in joint_range]
joint_damping = [0.1 for _ in joint_range]
def accurate_calculate_inverse_kinematics(robot_id, eef_link_id, target_pos, threshold, max_iter):
print("IK solution to end effector position {}".format(target_pos))
# Save initial robot pose
state_id = p.saveState()
max_attempts = 5
solution_found = False
joint_poses = None
for attempt in range(1, max_attempts + 1):
print("Attempt {} of {}".format(attempt, max_attempts))
# Get a random robot pose to start the IK solver iterative process
# We attempt from max_attempt different initial random poses
sample_fn = get_sample_fn(robot_id, arm_joint_ids)
sample = np.array(sample_fn())
# Set the pose of the robot there
set_joint_positions(robot_id, arm_joint_ids, sample)
it = 0
# Query IK, set the pose to the solution, check if it is good enough repeat if not
while it < max_iter:
joint_poses = p.calculateInverseKinematics(
robot_id,
eef_link_id,
target_pos,
lowerLimits=min_limits,
upperLimits=max_limits,
jointRanges=joint_range,
restPoses=rest_position,
jointDamping=joint_damping,
)
joint_poses = np.array(joint_poses)[robot_arm_indices]
set_joint_positions(robot_id, arm_joint_ids, joint_poses)
dist = l2_distance(fetch.get_eef_position(), target_pos)
if dist < threshold:
solution_found = True
break
logging.debug("Dist: " + str(dist))
it += 1
if solution_found:
print("Solution found at iter: " + str(it) + ", residual: " + str(dist))
break
else:
print("Attempt failed. Retry")
joint_poses = None
restoreState(state_id)
p.removeState(state_id)
return joint_poses
threshold = 0.03
max_iter = 100
if programmatic_pos or headless:
query_positions = [[1, 0, 0.8], [1, 1, 1], [0.5, 0.5, 0], [0.5, 0.5, 0.5]]
for pos in query_positions:
print("Querying joint configuration to current marker position")
joint_pos = accurate_calculate_inverse_kinematics(
robot_id, fetch.eef_links[fetch.default_arm].link_id, pos, threshold, max_iter
)
if joint_pos is not None and len(joint_pos) > 0:
print("Solution found. Setting new arm configuration.")
set_joint_positions(robot_id, arm_joint_ids, joint_pos)
else:
print("EE position not reachable.")
fetch.set_position_orientation([0, 0, 0], [0, 0, 0, 1])
fetch.keep_still()
time.sleep(10)
else:
marker = VisualMarker(visual_shape=p.GEOM_SPHERE, radius=0.06)
s.import_object(marker)
marker.set_position([x, y, z])
print_message()
quit_now = False
while True:
keys = p.getKeyboardEvents()
for k, v in keys.items():
if k == p.B3G_RIGHT_ARROW and (v & p.KEY_IS_DOWN):
y -= 0.01
if k == p.B3G_LEFT_ARROW and (v & p.KEY_IS_DOWN):
y += 0.01
if k == p.B3G_UP_ARROW and (v & p.KEY_IS_DOWN):
x += 0.01
if k == p.B3G_DOWN_ARROW and (v & p.KEY_IS_DOWN):
x -= 0.01
if k == ord("z") and (v & p.KEY_IS_DOWN):
z += 0.01
if k == ord("x") and (v & p.KEY_IS_DOWN):
z -= 0.01
if k == ord(" "):
print("Querying joint configuration to current marker position")
joint_pos = accurate_calculate_inverse_kinematics(
robot_id, fetch.eef_links[fetch.default_arm].link_id, [x, y, z], threshold, max_iter
)
if joint_pos is not None and len(joint_pos) > 0:
print("Solution found. Setting new arm configuration.")
set_joint_positions(robot_id, arm_joint_ids, joint_pos)
print_message()
else:
print(
"No configuration to reach that point. Move the marker to a different configuration and try again."
)
if k == ord("q"):
print("Quit.")
quit_now = True
break
if quit_now:
break
marker.set_position([x, y, z])
fetch.set_position_orientation([0, 0, 0], [0, 0, 0, 1])
fetch.keep_still()
s.step()
s.disconnect()
def print_message():
print("*" * 80)
print("Move the marker to a desired position to query IK and press SPACE")
print("Up/Down arrows: move marker further away or closer to the robot")
print("Left/Right arrows: move marker to the left or the right of the robot")
print("z/x: move marker up and down")
print("q: quit")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import boto3
import datetime
s3 = boto3.resource('s3')
sm = boto3.client('sagemaker')
time_created = datetime.datetime.now()
def lambda_handler(event, context):
print(f'Time Lambda created: {time_created}')
#Check version of Boto3 - It must be at least 1.16.55
print(f"The version of Boto3 is {boto3.__version__}")
#Get location for where the new data (csv) file was uploaded
data_bucket = event['Records'][0]['s3']['bucket']['name']
data_key = event['Records'][0]['s3']['object']['key']
print(f"A new file named {data_key} was just uploaded to Amazon S3 in {data_bucket}")
#Update values for where Data Wrangler .flow is saved
flow_bucket = 'sagemaker-us-east-1-572539092864'
flow_key = 'sagemaker-feature-store/fscw/data_wrangler_flows/DWF-Orders.flow'
pipeline_name = 'featurestore-ingest-pipeline-12-14-08-07'
execution_display = f"{data_key.split('/')[-1].replace('_','').replace('.csv','')}"
#Get .flow file from Amazon S3
get_object = s3.Object(flow_bucket,flow_key)
get_flow = get_object.get()
#Read, update and save the .flow file
flow_content = json.loads(get_flow['Body'].read())
flow_content['nodes'][0]['parameters']['dataset_definition']['name'] = data_key.split('/')[-1]
flow_content['nodes'][0]['parameters']['dataset_definition']['s3ExecutionContext']['s3Uri'] = f"s3://{data_bucket}/{data_key}"
new_flow_key = flow_key.replace('.flow', '-' + data_key.split('/')[-1].replace('.csv','') + '.flow')
new_flow_uri = f"s3://{flow_bucket}/{new_flow_key}"
put_object = s3.Object(flow_bucket,new_flow_key)
put_flow = put_object.put(Body=json.dumps(flow_content))
#Start the pipeline execution
start_pipeline = sm.start_pipeline_execution(
PipelineName=pipeline_name,
PipelineExecutionDisplayName=f"{data_key.split('/')[-1].replace('_','').replace('.csv','')}",
PipelineParameters=[
{
'Name': 'InputFlow',
'Value': new_flow_uri
},
],
PipelineExecutionDescription=data_key
)
print(start_pipeline)
return('SageMaker Pipeline has been successfully started...')
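# --- Illustrative sketch (added; not part of the original handler) ---
# The handler expects a standard S3 ObjectCreated event. A minimal local
# invocation could look like the snippet below; the bucket and key are
# placeholders, and actually calling the handler requires AWS credentials
# plus the .flow file and pipeline referenced above.
if __name__ == '__main__':
    sample_event = {
        'Records': [{
            's3': {
                'bucket': {'name': 'example-data-bucket'},      # placeholder bucket
                'object': {'key': 'landing/orders_2021.csv'},   # placeholder key
            }
        }]
    }
    # lambda_handler(sample_event, None)  # uncomment to exercise end to end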
|
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''RSA key generation code.
Create new keys with the newkeys() function. It will give you a PublicKey and a
PrivateKey object.
Loading and saving keys requires the pyasn1 module. This module is imported as
late as possible, such that other functionality will remain working in absence
of pyasn1.
'''
import logging
import rsa.prime
import rsa.pem
import rsa.common
log = logging.getLogger(__name__)
class AbstractKey(object):
'''Abstract superclass for private and public keys.'''
@classmethod
def load_pkcs1(cls, keyfile, format='PEM'):
r'''Loads a key in PKCS#1 DER or PEM format.
:param keyfile: contents of a DER- or PEM-encoded file that contains
the public key.
:param format: the format of the file to load; 'PEM' or 'DER'
:return: a PublicKey object
'''
methods = {
'PEM': cls._load_pkcs1_pem,
'DER': cls._load_pkcs1_der,
}
if format not in methods:
formats = ', '.join(sorted(methods.keys()))
raise ValueError('Unsupported format: %r, try one of %s' % (format,
formats))
method = methods[format]
return method(keyfile)
def save_pkcs1(self, format='PEM'):
'''Saves the public key in PKCS#1 DER or PEM format.
:param format: the format to save; 'PEM' or 'DER'
:returns: the DER- or PEM-encoded public key.
'''
methods = {
'PEM': self._save_pkcs1_pem,
'DER': self._save_pkcs1_der,
}
if format not in methods:
formats = ', '.join(sorted(methods.keys()))
raise ValueError('Unsupported format: %r, try one of %s' % (format,
formats))
method = methods[format]
return method()
class PublicKey(AbstractKey):
'''Represents a public RSA key.
This key is also known as the 'encryption key'. It contains the 'n' and 'e'
values.
Supports attributes as well as dictionary-like access. Attribute access is
faster, though.
>>> PublicKey(5, 3)
PublicKey(5, 3)
>>> key = PublicKey(5, 3)
>>> key.n
5
>>> key['n']
5
>>> key.e
3
>>> key['e']
3
'''
__slots__ = ('n', 'e')
def __init__(self, n, e):
self.n = n
self.e = e
def __getitem__(self, key):
return getattr(self, key)
def __repr__(self):
return u'PublicKey(%i, %i)' % (self.n, self.e)
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, PublicKey):
return False
return self.n == other.n and self.e == other.e
def __ne__(self, other):
return not (self == other)
@classmethod
def _load_pkcs1_der(cls, keyfile):
r'''Loads a key in PKCS#1 DER format.
@param keyfile: contents of a DER-encoded file that contains the public
key.
@return: a PublicKey object
First let's construct a DER encoded key:
>>> import base64
>>> b64der = 'MAwCBQCNGmYtAgMBAAE='
>>> der = base64.decodestring(b64der)
This loads the file:
>>> PublicKey._load_pkcs1_der(der)
PublicKey(2367317549, 65537)
'''
from pyasn1.codec.der import decoder
(priv, _) = decoder.decode(keyfile)
# ASN.1 contents of DER encoded public key:
#
# RSAPublicKey ::= SEQUENCE {
# modulus INTEGER, -- n
# publicExponent INTEGER, -- e
as_ints = tuple(int(x) for x in priv)
return cls(*as_ints)
def _save_pkcs1_der(self):
'''Saves the public key in PKCS#1 DER format.
@returns: the DER-encoded public key.
'''
from pyasn1.type import univ, namedtype
from pyasn1.codec.der import encoder
class AsnPubKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('modulus', univ.Integer()),
namedtype.NamedType('publicExponent', univ.Integer()),
)
# Create the ASN object
asn_key = AsnPubKey()
asn_key.setComponentByName('modulus', self.n)
asn_key.setComponentByName('publicExponent', self.e)
return encoder.encode(asn_key)
@classmethod
def _load_pkcs1_pem(cls, keyfile):
'''Loads a PKCS#1 PEM-encoded public key file.
The contents of the file before the "-----BEGIN RSA PUBLIC KEY-----" and
after the "-----END RSA PUBLIC KEY-----" lines is ignored.
@param keyfile: contents of a PEM-encoded file that contains the public
key.
@return: a PublicKey object
'''
der = rsa.pem.load_pem(keyfile, 'RSA PUBLIC KEY')
return cls._load_pkcs1_der(der)
def _save_pkcs1_pem(self):
'''Saves a PKCS#1 PEM-encoded public key file.
@return: contents of a PEM-encoded file that contains the public key.
'''
der = self._save_pkcs1_der()
return rsa.pem.save_pem(der, 'RSA PUBLIC KEY')
class PrivateKey(AbstractKey):
'''Represents a private RSA key.
This key is also known as the 'decryption key'. It contains the 'n', 'e',
'd', 'p', 'q' and other values.
Supports attributes as well as dictionary-like access. Attribute access is
faster, though.
>>> PrivateKey(3247, 65537, 833, 191, 17)
PrivateKey(3247, 65537, 833, 191, 17)
exp1, exp2 and coef don't have to be given, they will be calculated:
>>> pk = PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
>>> pk.exp1
55063
>>> pk.exp2
10095
>>> pk.coef
50797
If you give exp1, exp2 or coef, they will be used as-is:
>>> pk = PrivateKey(1, 2, 3, 4, 5, 6, 7, 8)
>>> pk.exp1
6
>>> pk.exp2
7
>>> pk.coef
8
'''
__slots__ = ('n', 'e', 'd', 'p', 'q', 'exp1', 'exp2', 'coef')
def __init__(self, n, e, d, p, q, exp1=None, exp2=None, coef=None):
self.n = n
self.e = e
self.d = d
self.p = p
self.q = q
# Calculate the other values if they aren't supplied
if exp1 is None:
self.exp1 = int(d % (p - 1))
else:
self.exp1 = exp1
if exp2 is None:
self.exp2 = int(d % (q - 1))
else:
self.exp2 = exp2
if coef is None:
(_, self.coef, _) = extended_gcd(q, p)
else:
self.coef = coef
def __getitem__(self, key):
return getattr(self, key)
def __repr__(self):
return u'PrivateKey(%(n)i, %(e)i, %(d)i, %(p)i, %(q)i)' % self
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, PrivateKey):
return False
return (self.n == other.n and
self.e == other.e and
self.d == other.d and
self.p == other.p and
self.q == other.q and
self.exp1 == other.exp1 and
self.exp2 == other.exp2 and
self.coef == other.coef)
def __ne__(self, other):
return not (self == other)
@classmethod
def _load_pkcs1_der(cls, keyfile):
r'''Loads a key in PKCS#1 DER format.
@param keyfile: contents of a DER-encoded file that contains the private
key.
@return: a PrivateKey object
First let's construct a DER encoded key:
>>> import base64
>>> b64der = 'MC4CAQACBQDeKYlRAgMBAAECBQDHn4npAgMA/icCAwDfxwIDANcXAgInbwIDAMZt'
>>> der = base64.decodestring(b64der)
This loads the file:
>>> PrivateKey._load_pkcs1_der(der)
PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
'''
from pyasn1.codec.der import decoder
(priv, _) = decoder.decode(keyfile)
# ASN.1 contents of DER encoded private key:
#
# RSAPrivateKey ::= SEQUENCE {
# version Version,
# modulus INTEGER, -- n
# publicExponent INTEGER, -- e
# privateExponent INTEGER, -- d
# prime1 INTEGER, -- p
# prime2 INTEGER, -- q
# exponent1 INTEGER, -- d mod (p-1)
# exponent2 INTEGER, -- d mod (q-1)
# coefficient INTEGER, -- (inverse of q) mod p
# otherPrimeInfos OtherPrimeInfos OPTIONAL
# }
if priv[0] != 0:
raise ValueError('Unable to read this file, version %s != 0' % priv[0])
as_ints = tuple(int(x) for x in priv[1:9])
return cls(*as_ints)
def _save_pkcs1_der(self):
'''Saves the private key in PKCS#1 DER format.
@returns: the DER-encoded private key.
'''
from pyasn1.type import univ, namedtype
from pyasn1.codec.der import encoder
class AsnPrivKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', univ.Integer()),
namedtype.NamedType('modulus', univ.Integer()),
namedtype.NamedType('publicExponent', univ.Integer()),
namedtype.NamedType('privateExponent', univ.Integer()),
namedtype.NamedType('prime1', univ.Integer()),
namedtype.NamedType('prime2', univ.Integer()),
namedtype.NamedType('exponent1', univ.Integer()),
namedtype.NamedType('exponent2', univ.Integer()),
namedtype.NamedType('coefficient', univ.Integer()),
)
# Create the ASN object
asn_key = AsnPrivKey()
asn_key.setComponentByName('version', 0)
asn_key.setComponentByName('modulus', self.n)
asn_key.setComponentByName('publicExponent', self.e)
asn_key.setComponentByName('privateExponent', self.d)
asn_key.setComponentByName('prime1', self.p)
asn_key.setComponentByName('prime2', self.q)
asn_key.setComponentByName('exponent1', self.exp1)
asn_key.setComponentByName('exponent2', self.exp2)
asn_key.setComponentByName('coefficient', self.coef)
return encoder.encode(asn_key)
@classmethod
def _load_pkcs1_pem(cls, keyfile):
'''Loads a PKCS#1 PEM-encoded private key file.
The contents of the file before the "-----BEGIN RSA PRIVATE KEY-----" and
after the "-----END RSA PRIVATE KEY-----" lines is ignored.
@param keyfile: contents of a PEM-encoded file that contains the private
key.
@return: a PrivateKey object
'''
der = rsa.pem.load_pem(keyfile, 'RSA PRIVATE KEY')
return cls._load_pkcs1_der(der)
def _save_pkcs1_pem(self):
'''Saves a PKCS#1 PEM-encoded private key file.
@return: contents of a PEM-encoded file that contains the private key.
'''
der = self._save_pkcs1_der()
return rsa.pem.save_pem(der, 'RSA PRIVATE KEY')
def extended_gcd(a, b):
"""Returns a tuple (r, i, j) such that r = gcd(a, b) = ia + jb
"""
# r = gcd(a,b) i = multiplicative inverse of a mod b
# or j = multiplicative inverse of b mod a
# Neg return values for i or j are made positive mod b or a respectively
# Iterative version is faster and uses much less stack space
x = 0
y = 1
lx = 1
ly = 0
oa = a #Remember original a/b to remove
ob = b #negative values from return results
while b != 0:
q = a // b
(a, b) = (b, a % b)
(x, lx) = ((lx - (q * x)),x)
(y, ly) = ((ly - (q * y)),y)
if (lx < 0): lx += ob #If neg wrap modulo original b
if (ly < 0): ly += oa #If neg wrap modulo original a
return (a, lx, ly) #Return only positive values
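# Worked example (added for illustration; not part of the original module):
# extended_gcd(3, 26) returns (1, 9, 2) after wrapping the negative
# coefficient, and 9 is the multiplicative inverse of 3 modulo 26 because
# 3 * 9 == 27 == 1 (mod 26).
def _extended_gcd_demo():
    (div, i, j) = extended_gcd(3, 26)
    assert (div, i, j) == (1, 9, 2)
    assert (3 * i) % 26 == 1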
def find_p_q(nbits, accurate=True):
'''Returns a tuple of two different primes of nbits bits each.
The resulting p * q has exactly 2 * nbits bits, and the returned p and q
will not be equal.
@param nbits: the number of bits in each of p and q.
@param accurate: whether to enable accurate mode or not.
@returns (p, q), where p > q
>>> (p, q) = find_p_q(128)
>>> from rsa import common
>>> common.bit_size(p * q)
256
When not in accurate mode, the number of bits can be slightly less
>>> (p, q) = find_p_q(128, accurate=False)
>>> from rsa import common
>>> common.bit_size(p * q) <= 256
True
>>> common.bit_size(p * q) > 240
True
'''
total_bits = nbits * 2
# Make sure that p and q aren't too close or the factoring programs can
# factor n.
shift = nbits // 16
pbits = nbits + shift
qbits = nbits - shift
# Choose the two initial primes
log.debug('find_p_q(%i): Finding p', nbits)
p = rsa.prime.getprime(pbits)
log.debug('find_p_q(%i): Finding q', nbits)
q = rsa.prime.getprime(qbits)
def is_acceptable(p, q):
'''Returns True iff p and q are acceptable:
- p and q differ
- (p * q) has the right nr of bits (when accurate=True)
'''
if p == q:
return False
if not accurate:
return True
# Make sure we have just the right amount of bits
found_size = rsa.common.bit_size(p * q)
return total_bits == found_size
# Keep choosing other primes until they match our requirements.
change_p = False
tries = 0
while not is_acceptable(p, q):
tries += 1
# Change p on one iteration and q on the other
if change_p:
log.debug(' find another p')
p = rsa.prime.getprime(pbits)
else:
log.debug(' find another q')
q = rsa.prime.getprime(qbits)
change_p = not change_p
# We want p > q as described on
# http://www.di-mgt.com.au/rsa_alg.html#crt
return (max(p, q), min(p, q))
def calculate_keys(p, q, nbits):
"""Calculates an encryption and a decryption key given p and q, and
returns them as a tuple (e, d)
"""
phi_n = (p - 1) * (q - 1)
# A very common choice for e is 65537
e = 65537
(divider, d, _) = extended_gcd(e, phi_n)
if divider != 1:
raise ValueError("e (%d) and phi_n (%d) are not relatively prime" %
(e, phi_n))
if (d < 0):
raise ValueError("extended_gcd shouldn't return negative values, "
"please file a bug")
if (e * d) % phi_n != 1:
raise ValueError("e (%d) and d (%d) are not mult. inv. modulo "
"phi_n (%d)" % (e, d, phi_n))
return (e, d)
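# Worked example (added for illustration; not part of the original module):
# with the textbook primes p=61 and q=53, phi_n = 60 * 52 = 3120 and, since
# 65537 == 17 (mod 3120), d is the inverse of 17 modulo 3120, which is 2753.
def _calculate_keys_demo():
    (e, d) = calculate_keys(61, 53, 6)
    assert (e, d) == (65537, 2753)
    assert (e * d) % ((61 - 1) * (53 - 1)) == 1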
def gen_keys(nbits, accurate=True):
"""Generate RSA keys of nbits bits. Returns (p, q, e, d).
Note: this can take a long time, depending on the key size.
@param nbits: the total number of bits in ``p`` and ``q``. Both ``p`` and
``q`` will use ``nbits/2`` bits.
"""
(p, q) = find_p_q(nbits // 2, accurate)
(e, d) = calculate_keys(p, q, nbits // 2)
return (p, q, e, d)
def newkeys(nbits, accurate=True):
"""Generates public and private keys, and returns them as (pub, priv).
The public key is also known as the 'encryption key', and is a
:py:class:`PublicKey` object. The private key is also known as the
'decryption key' and is a :py:class:`PrivateKey` object.
:param nbits: the number of bits required to store ``n = p*q``.
:param accurate: when True, ``n`` will have exactly the number of bits you
asked for. However, this makes key generation much slower. When False,
``n`` may have slightly fewer bits.
:returns: a tuple (:py:class:`rsa.PublicKey`, :py:class:`rsa.PrivateKey`)
"""
if nbits < 16:
raise ValueError('Key too small')
(p, q, e, d) = gen_keys(nbits)
n = p * q
return (
PublicKey(n, e),
PrivateKey(n, e, d, p, q)
)
__all__ = ['PublicKey', 'PrivateKey', 'newkeys']
if __name__ == '__main__':
import doctest
try:
for count in range(100):
(failures, tests) = doctest.testmod()
if failures:
break
if (count and count % 10 == 0) or count == 1:
print '%i times' % count
except KeyboardInterrupt:
print 'Aborted'
else:
print 'Doctests done'
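# Minimal usage sketch (added for illustration; not part of the original module).
# The key size is kept tiny purely to keep the example fast; real keys should be
# 2048 bits or more.
def _newkeys_demo():
    (pub, priv) = newkeys(128)
    phi_n = (priv.p - 1) * (priv.q - 1)
    # e and d must be multiplicative inverses modulo phi_n for RSA to work.
    assert (pub.e * priv.d) % phi_n == 1
    assert pub.n == priv.p * priv.q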
|
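# --- Preamble added for self-containment (not part of the original tutorial cell) ---
# The cell below assumes numpy, matplotlib, a small `gaussian` container with
# `mean` and `cov` fields, and a plotting helper `paintMyFilter` are defined in
# earlier notebook cells. The stand-ins here are consistent with how those names
# are used below (an assumption about the tutorial's helpers, not a copy); the
# plotting helper itself is left undefined, so the final visualization call still
# needs the original notebook.
import numpy as np
import matplotlib.pyplot as plt
from collections import namedtuple

gaussian = namedtuple('Gaussian', ['mean', 'cov'])  # container for mean and variance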
# Set random seed
np.random.seed(0)
# Set parameters
T = 50 # Time duration
tau = 25 # dynamics time constant
process_noise = 2 # process noise in Astrocat's propulsion unit (standard deviation)
measurement_noise = 9 # measurement noise in Astrocat's collar (standard deviation)
# Auxiliary variables
process_noise_cov = process_noise**2 # process noise in Astrocat's propulsion unit (variance)
measurement_noise_cov = measurement_noise**2 # measurement noise in Astrocat's collar (variance)
# Initialize arrays
t = np.arange(0, T, 1) # timeline
s = np.zeros(T) # states
D = np.exp(-1/tau) # dynamics multiplier (matrix if s is vector)
m = np.zeros(T) # measurement
s_ = np.zeros(T) # estimate (posterior mean)
cov_ = np.zeros(T) # uncertainty (posterior covariance)
# Initial guess of the posterior at time 0
initial_guess = gaussian(0, process_noise_cov/(1-D**2)) # In this case, the initial guess (posterior distribution
# at time 0) is the equilibrium distribution, but feel free to
# experiment with other gaussians
posterior = initial_guess
# Sample initial conditions
s[0] = posterior.mean + np.sqrt(posterior.cov) * np.random.randn() # Sample initial condition from posterior distribution at time 0
s_[0] = posterior.mean
cov_[0] = posterior.cov
# Loop over steps
for i in range(1, T):
# Sample true states and corresponding measurements
s[i] = D * s[i-1] + np.random.normal(0, process_noise) # variable `s` records the true position of Astrocat
m[i] = s[i] + np.random.normal(0, measurement_noise) # variable `m` records the measurements of Astrocat's collar
# Step 1. Shift yesterday's posterior to match the deterministic change of the system's dynamics,
# and broaden it to account for the random change (i.e., add the mean and variance of the process noise).
todays_prior = gaussian(D * posterior.mean, D**2 * posterior.cov + process_noise_cov)
# Step 2. Now that yesterday's posterior has become today's prior, integrate new evidence
# (i.e., multiply gaussians from today's prior and likelihood)
likelihood = gaussian(m[i], measurement_noise_cov)
# Step 2a: To find the posterior variance, add the information (inverse variances) of the prior and likelihood
info_prior = 1/todays_prior.cov
info_likelihood = 1/likelihood.cov
info_posterior = info_prior + info_likelihood
# Step 2b: To find the posterior mean, calculate a weighted average of means from prior and likelihood;
# the weights are just the fraction of information that each gaussian provides!
prior_weight = info_prior / info_posterior
likelihood_weight = info_likelihood / info_posterior
posterior_mean = prior_weight * todays_prior.mean + likelihood_weight * likelihood.mean
# Don't forget to convert back posterior information to posterior variance!
posterior_cov = 1/info_posterior
posterior = gaussian(posterior_mean, posterior_cov)
s_[i] = posterior.mean
cov_[i] = posterior.cov
# Visualize
with plt.xkcd():
paintMyFilter(D, initial_guess, process_noise_cov, measurement_noise_cov, s, m, s_, cov_)
|
import winrm
import base64
import subprocess # noqa: B404
def fix_run_ps(self, script): # Fixes string bug in python 3 for NTLM connection
encoded_ps = base64.b64encode(script.encode("utf_16_le")).decode("ascii")
rs = self.run_cmd("powershell -encodedcommand {0}".format(encoded_ps))
if len(rs.std_err):
rs.std_err = self._clean_error_msg(rs.std_err.decode("utf-8"))
return rs
winrm.Session.run_ps = fix_run_ps
def local(action, powershell_script):
action.logger.info("Running on local VM")
action.logger.debug("PowerShell script: " + powershell_script)
process = subprocess.Popen(
powershell_script,
shell="true", # noqa: B602
executable="pwsh",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
output, error = process.communicate()
try:
output = output.decode("utf-8")
except AttributeError:
pass
try:
error = error.decode("utf-8")
except AttributeError:
pass
return {"output": output, "stderr": error}
def ntlm(action, host_ip, powershell_script, username, password, port):
# Adds needed https and port number to host IP
action.logger.info("Running a NTLM connection")
host_connection = "https://{host_ip}:{port}/wsman".format(host_ip=host_ip, port=port)
action.logger.debug("Host Connection: " + host_connection)
action.logger.debug("PowerShell script: " + powershell_script)
powershell_session = winrm.Session(host_connection, auth=(username, password), transport="ntlm")
# Forces the Protocol to not fail with self signed certs
p = winrm.Protocol(
endpoint=host_connection,
transport="ntlm",
username=username,
password=password,
server_cert_validation="ignore",
message_encryption="auto",
)
powershell_session.protocol = p
run_script = powershell_session.run_ps(powershell_script)
exit_code = run_script.status_code
error_value = run_script.std_err
output = run_script.std_out
try:
error_value = error_value.decode("utf-8")
except AttributeError:
pass
output = output.decode("utf-8")
if exit_code != 0:
action.logger.error(error_value)
raise Exception("An error occurred in the PowerShell script, see logging for more info")
return {"output": output, "stderr": error_value}
def kerberos(action, host_ip, kdc, domain, host_name, powershell_script, password, username, port):
action.logger.info("Running Kerberos connection")
# Adds needed https and port number to host IP
host_connection = "https://{host_ip}:{port}/wsman".format(host_ip=host_ip, port=port)
action.logger.debug("PowerShell script: " + powershell_script)
udomain = domain.upper()
# Config for krb5 file to the domain
krb_config = """[libdefaults]
default_realm = {udomain}
forwardable = true
proxiable = true
[realms]
{udomain} = {{
kdc = {kdc}
admin_server = {kdc}
default_domain = {udomain}
}}
[domain_realm]
.{domain} = {udomain}
{domain} = {udomain}""".format(
udomain=udomain, domain=domain, kdc=kdc
)
action.logger.debug(krb_config)
# Config for DNS
dns = "search %s\r\nnameserver %s" % (domain, kdc)
action.logger.debug(dns)
# Sends stdout output from shell commands to logging, preventing errors
subprocess.call("mkdir -p /var/lib/samba/private", shell="true") # noqa: B607,B602
subprocess.call("systemctl enable sssd", shell="true") # noqa: B607,B602
# Setup realm to join the domain
with open("/etc/krb5.conf", "w") as f:
f.write(krb_config)
# Creates a Kerberos ticket
kinit = """echo '%s' | kinit %s@%s""" % (password, username, domain.upper())
response = subprocess.Popen(kinit, shell="true", stdout=subprocess.PIPE, stderr=subprocess.PIPE) # noqa: B602
(stdout, stderr) = response.communicate()
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
action.logger.info("Attempt to make Kerberos ticket stdout: " + stdout)
action.logger.info("Attempt to make Kerberos ticket stderr: " + stderr)
# DNS info so the plugin knows where to find the domain
with open("/etc/resolv.conf", "w") as f:
f.write(dns)
# Joins Komand to domain
realm = """echo '%s' | realm --install=/ join --user=%s %s""" % (password, username, domain)
response = subprocess.Popen(realm, shell="true", stdout=subprocess.PIPE, stderr=subprocess.PIPE) # noqa: B602
(stdout, stderr) = response.communicate()
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
action.logger.info("Attempt to join domain stdout: " + stdout)
action.logger.info("Attempt to join domain stderr: " + stderr)
# Allows resolution if A name not set for host
with open("/etc/hosts", "a") as f:
f.write("\r\n" + host_ip + " " + host_name)
# Runs the script on the host
powershell_session = winrm.Session(host_connection, auth=(username, password), transport="kerberos")
# Forces the protocol to not fail with self signed certs
p = winrm.Protocol(
endpoint=host_connection,
transport="kerberos",
username=username,
password=password,
server_cert_validation="ignore",
)
powershell_session.protocol = p
run_script = powershell_session.run_ps(powershell_script)
exit_code = run_script.status_code
error_value = run_script.std_err
output = run_script.std_out
try:
error_value = error_value.decode("utf-8")
except AttributeError:
pass
output = output.decode("utf-8")
if exit_code != 0:
action.logger.error(error_value)
raise Exception("An error occurred in the PowerShell script, see logging for more info")
return {"output": output, "stderr": error_value}
def credssp(action, host_ip, powershell_script, username, password, port):
# Adds needed https and port number to host IP
action.logger.info("Running a CredSSP connection")
host_connection = "https://{host_ip}:{port}/wsman".format(host_ip=host_ip, port=port)
action.logger.debug("Host Connection: " + host_connection)
action.logger.debug("PowerShell script: " + powershell_script)
powershell_session = winrm.Session(host_connection, auth=(username, password), transport="credssp")
# Forces the Protocol to not fail with self signed certs
p = winrm.Protocol(
endpoint=host_connection,
transport="credssp",
username=username,
password=password,
server_cert_validation="ignore",
message_encryption="auto",
)
powershell_session.protocol = p
run_script = powershell_session.run_ps(powershell_script)
exit_code = run_script.status_code
error_value = run_script.std_err
output = run_script.std_out
try:
error_value = error_value.decode("utf-8")
except AttributeError:
pass
output = output.decode("utf-8")
if exit_code != 0:
action.logger.error(error_value)
raise Exception("An error occurred in the PowerShell script, see logging for more info")
return {"output": output, "stderr": error_value}
|
#!/usr/bin/env python3
# Copyright Verizon.
# Licensed under the terms of the Apache 2.0 license. See LICENSE file in project root for terms.
import argparse
import csv
import fileinput
import getpass
import os
import re
import shlex
import shutil
import sys
import textwrap
from datetime import datetime, timezone
import pexpect
from cmd2 import Cmd, Settable, with_argparser
from rich import box
from rich.console import Console
from rich.prompt import Prompt
from rich.table import Table
def xstr(s):
"""return empty string if input is none/false"""
return '' if not s else s
class Logger():
logs = []
logfile_csv = None
csv_writer = None
def __init__(self, command, ip=None, dns_name=None, netbios_name=None, user_name=None, pid=None, ttps=None):
self.timestamp = datetime.now(timezone.utc)
self.command = command
self.ip = ip
self.dns_name = dns_name
self.netbios_name = netbios_name
self.user_name = user_name
self.pid = pid
self.ttps = ttps
self.write_log_entry()
Logger.logs.append(self)
@classmethod
def open_logfile(cls, basefilename):
logfile_csv = f"{basefilename}.csv"
print("Logging to: ", logfile_csv)
cls.logfile_csv = open(logfile_csv, 'w', newline='')
fieldnames = ['Datetime', 'IP Address', 'DNS Name', 'NetBIOS Name', 'User', 'PID', 'Activity', 'TTPs']
cls.csv_writer = csv.DictWriter(cls.logfile_csv, fieldnames=fieldnames)
cls.csv_writer.writeheader()
cls.logfile_csv.flush()
def asdict(self):
return {'Datetime': self.timestamp.strftime("%Y/%m/%d %H:%M:%S %z"),
'IP Address': xstr(self.ip),
'DNS Name': xstr(self.dns_name),
'NetBIOS Name': xstr(self.netbios_name),
'User': xstr(self.user_name),
'PID': xstr(self.pid),
'Activity': self.command,
'TTPs': self.ttps}
def write_log_entry(self):
Logger.csv_writer.writerow(self.asdict())
Logger.logfile_csv.flush()
@classmethod
def close_logfile(cls):
cls.logfile_csv.close()
class CSProxyPivots():
instances = {}
count = 0
def __init__(self, bid, pid, port, user, computer, ip, alive, last):
self.bid = bid
self.pid = pid
self.port = port
self.user = user
self.computer = computer
self.ip = ip
self.alive = alive
self.last = last
CSProxyPivots.count += 1
self.id = CSProxyPivots.count
CSProxyPivots.instances[CSProxyPivots.count] = self
@classmethod
def get_pivots(cls):
data = []
data.append(['ID', 'Alive', 'Socks Port', 'PID', 'User', 'Computer', 'Last'])
for pivot in CSProxyPivots.instances.values():
data.append([str(pivot.id), str(pivot.alive), pivot.port, pivot.pid, pivot.user, pivot.computer, pivot.last])
return data
@classmethod
def reset(cls):
CSProxyPivots.instances.clear()
CSProxyPivots.count = 0
class RedShell(Cmd):
intro = """
____ _______ __ ____
/ __ \___ ____/ / ___// /_ ___ / / /
/ /_/ / _ \/ __ /\__ \/ __ \/ _ \/ / /
/ _, _/ __/ /_/ /___/ / / / / __/ / /
/_/ |_|\___/\__,_//____/_/ /_/\___/_/_/
"""
prompt = 'RedShell> '
def __init__(self):
super().__init__()
# remove built-in commands
try:
del Cmd.do_alias
del Cmd.do_edit
del Cmd.do_macro
del Cmd.do_run_pyscript
del Cmd.do_run_script
del Cmd.do_shortcuts
del Cmd.do_py
except AttributeError:
pass
# remove built-in settings
for key in ['allow_style', 'always_show_hint', 'editor', 'echo', 'feedback_to_output', 'quiet', 'timing', 'max_completion_items']:
try:
self.remove_settable(key)
except:
pass
# check/create redshell user dir
home_dir = os.path.expanduser("~")
self.redshell_user_directory = f"{home_dir}/.redshell/"
if not os.path.exists(self.redshell_user_directory):
os.makedirs(self.redshell_user_directory)
# set cobalt strike directory, if exists
if os.path.exists('/opt/cobaltstrike'):
self.cs_directory = '/opt/cobaltstrike'
else:
self.cs_directory = ''
# set config variables
self.redshell_directory = os.getcwd()
self.proxychains_config = f"{self.redshell_directory}/proxychains_redshell.conf"
self.cs_host = ''
self.cs_port = ''
self.cs_user = ''
self.cs_pass = ''
self.cs_process = None
self.cs_beacon_pid = ''
self.cs_beacon_id = ''
self.cs_beacon_user = ''
self.cs_beacon_computer = ''
self.cs_beacon_ip = ''
self.context_ip = ''
self.context_dns_name = ''
self.context_netbios_name = ''
self.context_user_name = ''
self.context_pid = ''
self.socks_host = ''
self.socks_port = ''
self.socks_port_connected = False
self.password = ''
# initialize user-settable options
self.add_settable(Settable('redshell_directory', str, 'redshell install directory', self, completer=Cmd.path_complete, onchange_cb=self._onchange_redshell_directory))
self.add_settable(Settable('proxychains_config', str, 'proxychains config file', self, completer=Cmd.path_complete))
self.add_settable(Settable('cs_directory', str, 'Cobalt Strike install directory', self, completer=Cmd.path_complete))
self.add_settable(Settable('cs_host', str, 'Cobalt Strike team server host', self))
self.add_settable(Settable('cs_port', str, 'Cobalt Strike team server port', self))
self.add_settable(Settable('cs_user', str, 'Cobalt Strike user', self, onchange_cb=self._onchange_cs_user))
self.add_settable(Settable('password', str, 'Password for beacon_exec commands. Invoke with $password.', self))
# start logger
now = datetime.now()
timestamp = now.strftime("%Y_%m_%d_%H_%M_%S")
basefilename = f"{self.redshell_user_directory}redshell_{timestamp}"
Logger.open_logfile(basefilename)
def _onchange_redshell_directory(self, param_name, old, new):
self.proxychains_config = f"{self.redshell_directory}/proxychains_redshell.conf"
# append '_redshell' to CS username
def _onchange_cs_user(self, param_name, old, new):
self.cs_user += '_redshell'
def print_table(self, data, header=False):
"""print all tables in console output"""
if header:
table = Table(show_lines=True, show_header=header)
else:
table = Table(show_lines=True, show_header=header, box=box.SQUARE)
column_count = range(0, len(data[0]))
for i in column_count:
if header:
table.add_column(data[0][i])
else:
table.add_column()
for row in data:
if header and data.index(row) == 0:
continue
table.add_row(*row)
console = Console()
console.print(table)
def update_proxychains_conf(self, socks_type, ip, socks_port):
for line in fileinput.input(self.proxychains_config, inplace=True):
if line.startswith('socks'):
print(f"{socks_type} {ip} {socks_port}", end="\n")
else:
print(line, end = '')
def clear_context(self, clear_socks=False, clear_cs=False):
# clear existing connection
self.cs_beacon_id = ''
self.cs_beacon_pid = ''
self.cs_beacon_user = ''
self.cs_beacon_computer = ''
self.cs_beacon_ip = ''
self.context_ip = ''
self.context_dns_name = ''
self.context_netbios_name = ''
self.context_user_name = ''
self.context_pid = ''
# clear socks port if user is applying a new one
if clear_socks:
self.socks_port = ''
self.socks_port_connected = False
# if connected to cs team server, kill connection
if clear_cs:
# close the agscript process
if self.cs_process:
self.cs_process.close()
self.cs_process = None
argparser = argparse.ArgumentParser()
argparser.add_argument('-d', '--dnsname', type=str, help="DNS Name")
argparser.add_argument('-n', '--netbiosname', type=str, help="NetBIOS Name")
argparser.add_argument('-u', '--username', type=str, help="User Name")
argparser.add_argument('-p', '--pid', type=str, help="Process ID")
argparser.add_argument(type=str, dest="ip_address", help="Source IP Address")
@with_argparser(argparser)
def do_context(self, args):
"""Set a custom context (Source IP/DNS/NetBIOS/User/PID) for logging"""
if self.context_ip:
self.poutput("Context changed!")
self.pwarning("WARNING: If moving to a new socks port, be sure to update your socks connection accordingly.")
else:
self.poutput("New context applied!")
# if connected to cs team server, kill connection and socks. else clear context values only
if self.cs_process:
self.clear_context(clear_socks=True, clear_cs=True)
else:
self.clear_context()
self.context_ip = args.ip_address
if args.dnsname:
self.context_dns_name = args.dnsname
if args.netbiosname:
self.context_netbios_name = args.netbiosname
if args.username:
self.context_user_name = args.username
if args.pid:
self.context_pid = args.pid
argparser = argparse.ArgumentParser()
argparser.add_argument(type=str, dest="socks_type", choices=['socks4', 'socks5'])
argparser.add_argument(type=str, dest="ip_address")
argparser.add_argument(type=str, dest="socks_port")
@with_argparser(argparser)
def do_socks(self, args):
"""Use a custom socks4/5 port"""
# clear any existing context, socks port, and cobalt strike connections
self.clear_context(clear_socks=True, clear_cs=True)
self.socks_host = args.ip_address
self.socks_port = args.socks_port
self.socks_port_connected = True
self.update_proxychains_conf(args.socks_type, args.ip_address, args.socks_port)
self.poutput("Socks port updated.")
self.pwarning("WARNING: Be sure to update your context accordingly with the 'context' command.")
def do_cs_connect(self, args):
"""Connect to Cobalt Strike team server"""
self.clear_context(clear_socks=True)
# check config directories before attempting connection
if not os.path.exists(f"{self.redshell_directory}/agscript.sh"):
self.perror("Error: redshell install directory not found! Set the directory with this command: 'set redshell_directory'")
return
if not os.path.exists(f"{self.cs_directory}/agscript"):
self.perror("Error: Cobalt Strike install directory not found! Set the directory with this command: 'set cs_directory'")
return
# check permissions on agscript.sh
if not shutil.which(f"{self.redshell_directory}/agscript.sh"):
self.perror("Error: agscript.sh does not appear to be executable! Fix it with this command: 'chmod +x agscript.sh'")
return
# prompt user for team server password
self.cs_pass = getpass.getpass("Enter Cobalt Strike password: ")
# spawn agscript process
self.cs_process = pexpect.spawn(f"{self.redshell_directory}/agscript.sh {self.cs_directory} {self.cs_host} {self.cs_port} {self.cs_user} {self.cs_pass}")
# check if process is alive
if not self.cs_process.isalive():
self.perror("Error connecting to CS team server! Check config and try again.")
return
# look for the aggressor prompt
try:
self.cs_process.expect('.*aggressor.*> ')
except:
self.perror("Error connecting to CS team server! Check config and try again.")
return
self.poutput("Connecting...")
# upon successful connection, display status
self.do_cs_status('')
def do_cs_disconnect(self, args):
"""Disconnect from CS team server"""
self.clear_context(clear_socks=True, clear_cs=True)
def do_cs_status(self, args):
"""Display CS team server and beacon socks port connection status"""
if self.cs_process and self.cs_process.isalive():
cs_server_status = f"[green]Connected via {self.cs_user}@{self.cs_host}:{self.cs_port}[/]"
else:
cs_server_status = "[red]Disconnected[/]"
if self.cs_process and self.cs_process.isalive() and self.socks_port_connected:
socks_port_status = f"[green]Connected via socks port {self.socks_port} @ beacon PID {self.cs_beacon_pid}[/]"
else:
socks_port_status = "[red]Disconnected[/]"
data = [
["[i]CS Team Server Status[/]", cs_server_status],
["[i]Socks Port Status[/]", socks_port_status]
]
self.print_table(data)
def do_config(self, args):
"""Display current config"""
data = [
["[i]Redshell Install Directory[/]", self.redshell_directory],
["[i]Proxychains Config[/]", self.proxychains_config],
["[i]Log File[/]", Logger.logfile_csv.name],
["[i]CS Install Directory[/]", self.cs_directory],
]
if self.cs_host:
data.append(["[i]CS Team Server[/]", self.cs_host])
data.append(["[i]CS Team Server Port[/]", self.cs_port])
data.append(["[i]CS User[/]", self.cs_user])
if self.socks_port:
data.append(["[i]Socks Host/Port", f"{self.socks_host}:{self.socks_port}"])
else:
data.append(["[i]Socks Host/Port", ''])
context = ''
if self.context_ip:
context += f"[i]IP:[/] {self.context_ip}"
if self.context_dns_name:
context += f" [i]DNS:[/] {self.context_dns_name}"
if self.context_netbios_name:
context += f" [i]NetBIOS:[/] {self.context_netbios_name}"
if self.context_user_name:
context += f" [i]User:[/] {self.context_user_name}"
if self.context_pid:
context += f" [i]PID:[/] {self.context_pid}"
data.append(["[i]Context[/]", context])
if self.password:
data.append(["[i]Password[/]", self.password])
self.print_table(data)
argparser = argparse.ArgumentParser()
argparser.add_argument(type=str, dest="file_name", completer=Cmd.path_complete)
@with_argparser(argparser)
def do_cs_load_config(self, args):
"""Load Cobalt Strike team server config (host, port, and user) from file"""
self.clear_context(clear_socks=True)
try:
with open(args.file_name, 'r') as cf:
for line in cf.readlines():
cs_host = re.search('cs_host=(.*)', line)
if cs_host:
self.cs_host = cs_host.group(1)
cs_port = re.search('cs_port=(.*)', line)
if cs_port:
self.cs_port = cs_port.group(1)
cs_directory = re.search('cs_directory=(.*)', line)
if cs_directory:
self.cs_directory = cs_directory.group(1).strip(' ')
cs_user = re.search('cs_user=(.*)', line)
if cs_user:
self.cs_user = cs_user.group(1)
self.cs_user += '_redshell'
self.poutput("Config applied:")
self.do_config('')
self.do_cs_connect('')
except FileNotFoundError:
self.perror("Error: config file not found!")
def do_cs_pivots(self, args):
"""Show Cobalt Strike proxy pivots available on the team server"""
# check for active connection to the team server
if not self.cs_process or not self.cs_process.isalive():
self.perror("Error: not connected to CS team server. Connect first and then select a pivot.")
self.socks_port = ''
self.cs_beacon_pid = ''
self.socks_port_connected = False
return
else:
# clear known pivots each time we run this method
CSProxyPivots.reset()
# ask agscript for pivots
self.cs_process.sendline('x pivots()')
self.cs_process.expect('.*aggressor.*> ')
if self.cs_process.after:
# parse through results, only looking for socks proxy pivots
for result in re.findall(r'%\(.*?SOCKS4a Proxy.*?\)', self.cs_process.after.decode()):
pivot_port = None
pivot_bid = None
pivot_pid = None
pivot_user = ''
pivot_computer = None
pivot_alive = None
pivot_last = None
# get socks port
result_port = re.search("port => '([0-9]+)'", result)
if result_port:
pivot_port = result_port.group(1)
# get beacon ID
result_bid = re.search("bid => '([0-9]+)'", result)
if result_bid:
pivot_bid = result_bid.group(1)
if pivot_bid:
# get full beacon info for beacon ID
self.cs_process.sendline(f"x beacon_info({pivot_bid})")
self.cs_process.expect('.*aggressor.*> ')
if self.cs_process.after:
beacon_info = self.cs_process.after.decode()
# check if beacon is alive or dead
result_alive = re.search("alive => 'true'", beacon_info)
if result_alive:
pivot_alive = True
# get beacon user
result_user = re.search("user => '(.*?)'", beacon_info)
if result_user:
pivot_user = result_user.group(1)
# get beacon computer
result_computer = re.search("computer => '(.*?)'", beacon_info)
if result_computer:
pivot_computer = result_computer.group(1)
# get beacon ip
result_ip = re.search("internal => '(.*?)'", beacon_info)
if result_ip:
pivot_ip = result_ip.group(1)
# get beacon pid
result_pid = re.search("pid => '([0-9]+)'", beacon_info)
if result_pid:
pivot_pid = result_pid.group(1)
result_last = re.search("lastf => '(.*?)'", beacon_info)
if result_last:
pivot_last = result_last.group(1)
# initialize a CSProxyPivots instance if we have all the necessary details
if pivot_bid and pivot_port and pivot_pid and pivot_computer:
CSProxyPivots(bid=pivot_bid, port=pivot_port, pid=pivot_pid, user=pivot_user, computer=pivot_computer, ip=pivot_ip, alive=pivot_alive, last=pivot_last)
# display ProxyPivot table
if CSProxyPivots.instances.items():
self.print_table(CSProxyPivots.get_pivots(), header=True)
else:
self.pwarning("No proxy pivots found!")
def do_cs_use_pivot(self, arg_pivot_id):
"""Set RedShell to use Cobalt Strike pivot ID"""
self.clear_context(clear_socks=True)
# convert arg to int
try:
pivot_id = int(arg_pivot_id)
except ValueError:
self.perror('Invalid pivot ID!')
return
# get pivot instance by specified ID
proxy_pivot = CSProxyPivots.instances.get(pivot_id)
if proxy_pivot:
if proxy_pivot.alive:
# set config vars from the selected CSProxyPivots instance
self.cs_beacon_id = proxy_pivot.bid
self.cs_beacon_pid = proxy_pivot.pid
self.cs_beacon_user = proxy_pivot.user
self.cs_beacon_computer = proxy_pivot.computer
self.cs_beacon_ip = proxy_pivot.ip
self.context_ip = self.cs_beacon_ip
self.context_netbios_name = self.cs_beacon_computer
self.context_user_name = self.cs_beacon_user
self.context_pid = self.cs_beacon_pid
self.socks_host = self.cs_host
self.socks_port = proxy_pivot.port
self.socks_port_connected = True
self.update_proxychains_conf('socks4', self.cs_host, self.socks_port)
self.do_cs_status('')
return
else:
self.pwarning('Specified pivot ID is not alive!')
return
else:
self.perror('Invalid pivot ID!')
return
def do_cd(self, args):
"""Change directory"""
os.chdir(args)
# configure auto complete on the cd command
complete_cd = Cmd.path_complete
def do_pwd(self, args):
"""Print working directory"""
self.poutput(os.getcwd())
def do_exit(self, args):
"""Exit RedShell"""
Logger.close_logfile()
return True
def validate_ttps(self, ttps):
ttps_valid = []
ttps_check = ttps.split(',')
for ttp in ttps_check:
if re.match(r'^(T[0-9]{4})(\.[0-9]{3})?$', ttp):
ttps_valid.append(ttp)
else:
self.pwarning(f"Invalid TTP specified: {ttp}. Not including in log.")
validated_ttps = ', '.join(ttps_valid)
return validated_ttps
argparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
argparser.description = "Execute a command through beacon socks proxy and simultaneously log it to the teamserver."
argparser.epilog = textwrap.dedent('''
example:
beacon_exec -t T1550.002,T1003.002 cme smb 192.168.1.1 --local-auth -u Administrator -H C713B1D611657D0687A568122193F230 --sam
''')
argparser.add_argument('-t', '--ttp', type=str, help="MITRE ATT&CK Tactic IDs. Comma delimited to specify multiple.")
argparser.add_argument('command', nargs=argparse.REMAINDER, help="Command to execute through the beacon proxy and log.", completer=Cmd.shell_cmd_complete)
@with_argparser(argparser)
def do_beacon_exec(self, args):
# check if agscript process is alive
if not self.cs_process or not self.cs_process.isalive():
self.perror("Error: not connected to CS team server. Connect first and then select a pivot.")
return
# check if socks port is connected
elif not self.socks_port_connected:
self.perror("Error: socks port not connected!")
return
else:
# make a copy of the user-specified command
command_list = args.command
# add proxychains to the command if user didn't include it
if 'proxychains' not in command_list:
id = 0
if 'sudo' in command_list:
id = 1
for item in ['proxychains', '-f', self.proxychains_config]:
command_list.insert(id, item)
id += 1
# convert command list into a string
command = shlex.join(command_list)
if '$password' in command and not self.password:
self.perror("Error: $password invoked, but password is not set. Add it with command: set password <password>")
return
command = re.sub("\$password", self.password, command)
# only log the command (minus sudo and proxychains)
cs_log_command = re.sub("proxychains.*?conf |sudo ", '', command)
cs_log_command = re.sub("\\\\", "\\\\\\\\", cs_log_command)
cs_log_command = re.sub("\$", "\$", cs_log_command) # escape $ char
cs_log_command = cs_log_command.replace('"', '\\"') # escape " char
cs_log_command = f"[PROXY] {cs_log_command}" # append [PROXY] to logged command
log_command = re.sub("proxychains.*?conf |sudo ", '', command)
log_command = f"[PROXY] {log_command}" # append [PROXY] to logged command
ttps = ''
if args.ttp:
ttps = self.validate_ttps(args.ttp)
if ttps:
# log command with TTPs to team server
self.cs_process.sendline(f'x btask({self.cs_beacon_id}, "{cs_log_command}", "{ttps}")')
self.cs_process.expect('.*aggressor.*> ')
else:
# log command without TTPs to team server
self.cs_process.sendline(f'x btask({self.cs_beacon_id}, "{cs_log_command}")')
self.cs_process.expect('.*aggressor.*> ')
Logger(log_command, ip=self.cs_beacon_ip, netbios_name=self.cs_beacon_computer, user_name=self.cs_beacon_user, pid=self.cs_beacon_pid, ttps=ttps.replace(' ', ''))
# run the command
self.do_shell(command)
argparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
argparser.description = "Execute a command through custom socks proxy and simultaneously log it to the local file."
argparser.epilog = textwrap.dedent('''
example:
proxy_exec -t T1550.002,T1003.002 cme smb 192.168.1.1 --local-auth -u Administrator -H C713B1D611657D0687A568122193F230 --sam
''')
argparser.add_argument('-t', '--ttp', type=str, help="MITRE ATT&CK Tactic IDs. Comma delimited to specify multiple.")
argparser.add_argument('command', nargs=argparse.REMAINDER, help="Command to execute through the proxy and log.", completer=Cmd.shell_cmd_complete)
@with_argparser(argparser)
def do_proxy_exec(self, args):
# check if socks port is connected
if not self.socks_port_connected:
self.perror("Error: socks port not connected!")
return
# make a copy of the user-specified command
command_list = args.command
# add proxychains to the command if user didn't include it
if 'proxychains' not in command_list:
id = 0
if 'sudo' in command_list:
id = 1
for item in ['proxychains', '-f', self.proxychains_config]:
command_list.insert(id, item)
id += 1
# convert command list into a string
command = shlex.join(command_list)
if '$password' in command and not self.password:
self.perror("Error: $password invoked, but password is not set. Add it with command: set password <password>")
return
command = re.sub("\$password", self.password, command)
# only log the command (minus sudo and proxychains)
log_command = re.sub("proxychains.*?conf |sudo ", '', command)
# append [PROXY] to logged command
log_command = f"[PROXY] {log_command}"
ttps = ''
if args.ttp:
ttps = self.validate_ttps(args.ttp)
Logger(log_command, ip=self.context_ip, dns_name=self.context_dns_name, netbios_name=self.context_netbios_name, user_name=self.context_user_name, pid=self.context_pid, ttps=ttps.replace(' ', ''))
# run the command
self.do_shell(command)
argparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
argparser.description = "Execute a command and log it to the local file."
argparser.epilog = textwrap.dedent('''
example:
exec -t T1550.002,T1003.002 cme smb 192.168.1.1 --local-auth -u Administrator -H C713B1D611657D0687A568122193F230 --sam
''')
argparser.add_argument('-t', '--ttp', type=str, help="MITRE ATT&CK Tactic IDs. Comma delimited to specify multiple.")
argparser.add_argument('command', nargs=argparse.REMAINDER, help="Command to execute and log.", completer=Cmd.shell_cmd_complete)
@with_argparser(argparser)
def do_exec(self, args):
# make a copy of the user-specified command
command_list = args.command
# convert command list into a string
command = shlex.join(command_list)
if '$password' in command and not self.password:
self.perror("Error: $password invoked, but password is not set. Add it with command: set password <password>")
return
command = re.sub("\$password", self.password, command)
# only log the command (minus sudo)
log_command = re.sub("sudo ", '', command)
ttps = ''
if args.ttp:
ttps = self.validate_ttps(args.ttp)
Logger(log_command, ip=self.context_ip, dns_name=self.context_dns_name, netbios_name=self.context_netbios_name, user_name=self.context_user_name, pid=self.context_pid, ttps=ttps.replace(' ', ''))
# run the command
self.do_shell(command)
argparser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
argparser.description = "Add a manual log entry to the local file."
argparser.epilog = textwrap.dedent('''
example:
log -t T1608.001 Uploaded malware to LOTS site
''')
argparser.add_argument('-t', '--ttp', type=str, help="MITRE ATT&CK Tactic IDs. Comma delimited to specify multiple.")
argparser.add_argument('log_entry', nargs=argparse.REMAINDER, help="Entry to log.")
@with_argparser(argparser)
def do_log(self, args):
# make a copy of the user-specified log entry
log_list = args.log_entry
# convert command list into a string
log_entry = ' '.join(log_list)
ttps = ''
if args.ttp:
ttps = self.validate_ttps(args.ttp)
Logger(log_entry, ip=self.context_ip, dns_name=self.context_dns_name, netbios_name=self.context_netbios_name, user_name=self.context_user_name, pid=self.context_pid, ttps=ttps.replace(' ', ''))
if __name__ == '__main__':
app = RedShell()
sys.exit(app.cmdloop())
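# --- Example config file (added for illustration; all values are placeholders) ---
# cs_load_config reads simple key=value lines via the regexes above, so a team
# server config file could look like:
#
#   cs_host=10.0.0.5
#   cs_port=50050
#   cs_user=operator
#   cs_directory=/opt/cobaltstrike
#
# Note that '_redshell' is appended to cs_user automatically after loading.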
|
'''Test idlelib.help_about.
Coverage:
'''
from idlelib import help_about
from idlelib import textview
from idlelib.idle_test.mock_idle import Func
from idlelib.idle_test.mock_tk import Mbox_func
from test.support import requires, findfile
requires('gui')
from tkinter import Tk
import unittest
About = help_about.AboutDialog
class Dummy_about_dialog():
# Dummy class for testing file display functions.
idle_credits = About.ShowIDLECredits
idle_readme = About.ShowIDLEAbout
idle_news = About.ShowIDLENEWS
# Called by the above
display_file_text = About.display_file_text
_utest = True
class AboutDialogTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.root = Tk()
cls.root.withdraw()
cls.dialog = About(cls.root, 'About IDLE', _utest=True)
@classmethod
def tearDownClass(cls):
del cls.dialog
cls.root.update_idletasks()
cls.root.destroy()
del cls.root
def tearDown(self):
if self.dialog._current_textview:
self.dialog._current_textview.destroy()
def test_dialog_title(self):
"""This will test about dialog title"""
self.assertEqual(self.dialog.title(), 'About IDLE')
def test_printer_dialog(self):
"""This will test dialog which using printer"""
buttons = [(license, self.dialog.buttonLicense),
(copyright, self.dialog.buttonCopyright),
(credits, self.dialog.buttonCredits)]
for printer, button in buttons:
dialog = self.dialog
printer._Printer__setup()
button.invoke()
self.assertEqual(printer._Printer__lines[0],
dialog._current_textview.textView.get('1.0', '1.end'))
self.assertEqual(printer._Printer__lines[1],
dialog._current_textview.textView.get('2.0', '2.end'))
dialog._current_textview.destroy()
def test_file_dialog(self):
"""This will test dialog which using file"""
buttons = [('README.txt', self.dialog.idle_about_b),
('NEWS.txt', self.dialog.idle_news_b),
('CREDITS.txt', self.dialog.idle_credits_b)]
for filename, button in buttons:
dialog = self.dialog
button.invoke()
fn = findfile(filename, subdir='idlelib')
with open(fn) as f:
self.assertEqual(f.readline().strip(),
dialog._current_textview.textView.get('1.0', '1.end'))
f.readline()
self.assertEqual(f.readline().strip(),
dialog._current_textview.textView.get('3.0', '3.end'))
dialog._current_textview.destroy()
class DisplayFileTest(unittest.TestCase):
dialog = Dummy_about_dialog()
@classmethod
def setUpClass(cls):
cls.orig_error = textview.showerror
cls.orig_view = textview.view_text
cls.error = Mbox_func()
cls.view = Func()
textview.showerror = cls.error
textview.view_text = cls.view
cls.About = Dummy_about_dialog()
@classmethod
def tearDownClass(cls):
textview.showerror = cls.orig_error
textview.view_text = cls.orig_view
def test_file_display(self):
for handler in (self.dialog.idle_credits,
self.dialog.idle_readme,
self.dialog.idle_news):
self.error.message = ''
self.view.called = False
with self.subTest(handler=handler):
handler()
self.assertEqual(self.error.message, '')
self.assertEqual(self.view.called, True)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
from datetime import date
from django.views.generic import ListView
from django.views.generic import TemplateView
from django.views.generic import DetailView
from django.http import HttpResponseRedirect
from django.utils.encoding import iri_to_uri
from django.core.urlresolvers import reverse
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Q
from bibliomap import models
from bibliomap import suma_dias
class Inicio(TemplateView):
template_name = 'inicio.html'
def get_context_data(self, **kwargs):
context = super(Inicio, self).get_context_data(**kwargs)
try:
context['libro'] = models.Libro.objects.all().filter(estado=True)
except Exception:
pass
try:
context['categoria'] = models.Categoria.objects.all().filter(estado=True)
except Exception:
pass
try:
context['seccion'] = models.Seccion.objects.all().filter(estado=True)
except Exception:
pass
if self.request.user.is_authenticated():
try:
context['reservacion'] = models.Reservacion.objects.all().filter(usuario=self.request.user).exclude(estado=False)
except Exception:
pass
try:
prestamo = models.Prestamo.objects.all().filter(usuario=self.request.user)
context['devolucion_pendiente'] = prestamo.filter(estado=True)
context['prestamo'] = prestamo.filter(estado=False)
except Exception:
pass
return context
class Libro(ListView):
model = models.Libro
template_name = 'libro.html'
queryset = model.objects.all().filter(estado=True)
class LibroDetalle(DetailView):
model = models.Libro
template_name = 'libro_detalle.html'
pk_url_kwarg = 'id'
class Categoria(ListView):
model = models.Categoria
template_name = 'categoria.html'
queryset = model.objects.all().filter(estado=True)
class CategoriaDetalle(DetailView):
model = models.Categoria
template_name = 'categoria_detalle.html'
pk_url_kwarg = 'id'
def get_context_data(self, **kwargs):
context = super(CategoriaDetalle, self).get_context_data(**kwargs)
categoria = context['object']
context['object_list'] = categoria.libro_set.all().filter(estado=True)
return context
class Seccion(ListView):
model = models.Seccion
template_name = 'seccion.html'
queryset = model.objects.all().filter(estado=True)
class SeccionDetalle(DetailView):
model = models.Seccion
template_name = 'seccion_detalle.html'
pk_url_kwarg = 'id'
def get_context_data(self, **kwargs):
context = super(SeccionDetalle, self).get_context_data(**kwargs)
seccion = context['object']
context['object_list'] = seccion.libro_set.all().filter(estado=True)
return context
class Buscar(ListView):
model = models.Libro
template_name = 'buscar.html'
queryset = None
def get_queryset(self):
parametro = self.kwargs['buscar']
if parametro:
qset = (
Q(titulo__icontains=parametro) |
Q(autores__nombre__icontains=parametro) |
Q(autores__apellido__icontains=parametro)
)
queryset = self.model.objects.filter(qset).distinct().exclude(estado=False)
return queryset
else:
return HttpResponseRedirect('/')
class Prestamo(LoginRequiredMixin, DetailView):
model = models.Libro
template_name = 'prestamo.html'
pk_url_kwarg = 'id'
def get_context_data(self, **kwargs):
context = super(Prestamo, self).get_context_data(**kwargs)
context['parametro'] = models.Parametro.objects.all()[0]
return context
def post(self, request, *args, **kwargs):
try:
libro = self.model.objects.get(id=self.request.POST['libro'])
except Exception:
libro = None
if libro and self.request.user.is_authenticated():
user = self.request.user
dias_prestamo = int(self.request.POST['prestamo'])
fecha_salida = date.today()
fecha_maxima = suma_dias.addworkdays(fecha_salida,dias_prestamo)
prestamo = models.Prestamo(usuario=user,
libro=libro,
fecha_salida=fecha_salida,
fecha_maxima=fecha_maxima,
estado=True)
prestamo.save()
return HttpResponseRedirect(iri_to_uri(reverse('completado')))
else:
return HttpResponseRedirect(iri_to_uri(reverse('inicio')))
class Prestamos(LoginRequiredMixin, ListView):
model = models.Prestamo
template_name = 'prestamos.html'
queryset = None
def get_queryset(self):
queryset = self.model.objects.filter(usuario=self.request.user).exclude(estado=True)
return queryset
class PrestamoDetalle(LoginRequiredMixin, DetailView):
model = models.Prestamo
template_name = 'prestamo_detalle.html'
pk_url_kwarg = 'id'
class Reservacion(LoginRequiredMixin, DetailView):
model = models.Libro
template_name = 'reservacion.html'
pk_url_kwarg = 'id'
def post(self, request, *args, **kwargs):
try:
libro = self.model.objects.get(id=self.request.POST['libro'])
except Exception:
libro = None
if libro and self.request.user.is_authenticated():
user = self.request.user
reservacion = models.Reservacion(usuario=user,
libro=libro,
estado=True)
reservacion.save()
return HttpResponseRedirect(iri_to_uri(reverse('completado')))
else:
return HttpResponseRedirect('')
class Reservaciones(LoginRequiredMixin, ListView):
model = models.Reservacion
template_name = 'reservaciones.html'
queryset = None
def get_queryset(self):
queryset = self.model.objects.filter(usuario=self.request.user).exclude(estado=False)
return queryset
class ReservacionDetalle(LoginRequiredMixin, DetailView):
model = models.Reservacion
template_name = 'reservacion_detalle.html'
pk_url_kwarg = 'id'
def post(self, request, *args, **kwargs):
try:
            # Ensure `reservacion` is always bound: compare as strings (URL kwargs
            # and POST data are both text) and fall back to None on any mismatch.
            if str(self.kwargs['id']) == str(self.request.POST['reservacion']):
                reservacion = self.model.objects.get(id=self.kwargs['id'])
            else:
                reservacion = None
        except Exception:
            reservacion = None
if reservacion and reservacion.estado and self.request.user.is_authenticated():
reservacion.estado = False
reservacion.save()
return HttpResponseRedirect(iri_to_uri(reverse('completado')))
else:
return HttpResponseRedirect(iri_to_uri(reverse('inicio')))
class Devoluciones(LoginRequiredMixin, ListView):
model = models.Prestamo
template_name = 'devoluciones.html'
queryset = None
def get_queryset(self):
queryset = self.model.objects.filter(usuario=self.request.user).exclude(estado=False)
return queryset
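# Illustrative sketch only: `suma_dias.addworkdays`, used in Prestamo.post above,
# is an external helper that is not defined in this file. Assuming it simply
# skips weekends, a minimal version might look like the following (hypothetical;
# shown here only to clarify how `fecha_maxima` is derived from `fecha_salida`):
def _addworkdays_sketch(start_date, workdays):
    from datetime import timedelta
    current = start_date
    while workdays > 0:
        current += timedelta(days=1)
        if current.weekday() < 5:  # Monday (0) through Friday (4)
            workdays -= 1
    return current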
|
from django.conf.urls import patterns, url, include
ajax_urls = [
url(r'^get-kml/$', 'tethys_gizmos.views.gizmo_showcase.get_kml', name='get_kml'),
url(r'^swap-kml/$', 'tethys_gizmos.views.gizmo_showcase.swap_kml', name='swap_kml'),
url(r'^swap-overlays/$', 'tethys_gizmos.views.gizmo_showcase.swap_overlays', name='swap_overlays'),
url(r'^fetchclimate/single-request/$', 'tethys_gizmos.views.gizmos.fetchclimate.data_request_single', name='single_request'),
]
urlpatterns = patterns('',
url(r'^$', 'tethys_gizmos.views.gizmo_showcase.index', name='showcase'),
url(r'^editable-map/$', 'tethys_gizmos.views.gizmo_showcase.editable_map', name='editable_map'),
url(r'^google-map/$', 'tethys_gizmos.views.gizmo_showcase.google_map', name='google_map'),
    url(r'^map-view/$', 'tethys_gizmos.views.gizmo_showcase.map_view', name='map_view'),
url(r'^fetch-climate-map/$', 'tethys_gizmos.views.gizmo_showcase.fetchclimate_map', name='fetchclimate_map'),
url(r'^ajax/', include(ajax_urls)),
)
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Illustrates how to perform asynchronous requests using the MutateJobService.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: MutateJobService.mutate
Tags: MutateJobService.get
Tags: MutateJobService.getResult
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import random
import re
import time
from googleads import adwords
from googleads import errors
RETRY_INTERVAL = 10
RETRIES_COUNT = 30
PLACEMENT_NUMBER = 100
INDEX_REGEX = r'operations\[(\d+)\].operand'
AD_GROUP_ID = 'INSERT_AD_GROUP_ID_HERE'
def main(client, ad_group_id):
# Initialize appropriate service.
mutate_job_service = client.GetService('MutateJobService', version='v201506')
# Create list of all operations for the job.
operations = []
# Create AdGroupCriterionOperations to add placements.
for i in range(PLACEMENT_NUMBER):
url = 'www.example.com/mars%d' % i
if random.randint(1, 10) == 1:
url = 'NOT_@_URL'
operations.append({
'xsi_type': 'AdGroupCriterionOperation',
'operator': 'ADD',
'operand': {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': ad_group_id,
'criterion': {
'xsi_type': 'Placement',
'url': url
}
}
})
# You can specify up to 3 job IDs that must successfully complete before
# this job can be processed.
policy = {
'prerequisiteJobIds': []
}
# Call mutate to create a new job.
response = mutate_job_service.mutate(operations, policy)
if not response:
raise errors.GoogleAdsError('Failed to submit a job; aborting.')
job_id = response['id']
print 'Job with ID %s was successfully created.' % job_id
# Create selector to retrieve job status and wait for it to complete.
selector = {
'xsi_type': 'BulkMutateJobSelector',
'jobIds': [job_id]
}
time.sleep(RETRY_INTERVAL)
# Poll for job status until it's finished.
print 'Retrieving job status...'
for i in range(RETRIES_COUNT):
job_status_response = mutate_job_service.get(selector)
status = job_status_response[0]['status']
if status in ('COMPLETED', 'FAILED'):
break
print ('[%d] Current status is \'%s\', waiting %d seconds to retry...' %
(i, status, RETRY_INTERVAL))
time.sleep(RETRY_INTERVAL)
if status == 'FAILED':
raise errors.GoogleAdsError('Job failed with reason: \'%s\'' %
job_status_response[0]['failure_reason'])
if status in ('PROCESSING', 'PENDING'):
raise errors.GoogleAdsError('Job did not complete within %d seconds' %
(RETRY_INTERVAL * (RETRIES_COUNT - 1)))
# Status must be COMPLETED.
# Get the job result. Here we re-use the same selector.
result_response = mutate_job_service.getResult(selector)
# Output results.
index = 0
for result in result_response['SimpleMutateResult']['results']:
if 'PlaceHolder' in result:
print 'Operation [%d] - FAILED' % index
else:
print 'Operation [%d] - SUCCEEDED' % index
index += 1
# Output errors
for error in result_response['SimpleMutateResult']['errors']:
index = int(re.search(INDEX_REGEX, error['fieldPath']).group(1))
reason = error['reason']
url = operations[index]['operand']['criterion']['url']
print ('ERROR - placement \'%s\' failed due to \'%s\'' %
(url, reason))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, AD_GROUP_ID)
|
from django.db import models
from django.urls import reverse
# Create your models here.
class Product(models.Model):
title = models.CharField(max_length=120)
description = models.TextField(null=True)
price = models.DecimalField(decimal_places=2, max_digits=100)
summary = models.TextField(blank=True)
featured = models.BooleanField(default=False)
def get_absolute_url(self):
return reverse("products:product_detail", kwargs={"id_lookup": self.id})
|
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as AddressProvider
class Provider(AddressProvider):
building_number_formats = ('###', '##', '#',)
city_formats = ('{{city_name}}',)
postcode_formats = ('#####',)
street_name_formats = (
'{{street_prefix_short}} {{street}}',
'{{street_prefix_long}} {{street}}',
)
street_address_formats = (
'{{street_name}} No. {{building_number}}',
)
address_formats = (
'{{street_address}}\n{{city}}, {{state}} {{postcode}}',
'{{street_address}}\n{{city}}, {{state_abbr}} {{postcode}}',
)
# From
# http://elibrary.dephub.go.id/elibrary/media/catalog/0010-021500000000135/swf/618/Lampiran%20E%20Data%20Bandung.pdf
streets = (
'Abdul Muis ', 'Antapani Lama', 'Asia Afrika', 'Astana Anyar', 'BKR',
'Cihampelas', 'Cikapayang', 'Cikutra Barat', 'Cikutra Timur',
'Ciumbuleuit', 'Ciwastra', 'Dipatiukur', 'Dipenogoro', 'Dr. Djunjunan',
'Gardujati', 'Gedebage Selatan', 'Gegerkalong Hilir',
'HOS. Cokroaminoto', 'Ir. H. Djuanda', 'Jakarta ', 'Jamika',
'Jend. A. Yani', 'Jend. Sudirman', 'K.H. Wahid Hasyim', 'Kebonjati',
'Kiaracondong', 'Laswi', 'Lembong', 'Merdeka', 'Moch. Ramdan',
'Moch. Toha', 'Pacuan Kuda', 'Pasir Koja', 'Pasirkoja', 'Pasteur',
'Pelajar Pejuang', 'Peta', 'PHH. Mustofa ', 'Rajawali Barat',
'Rajawali Timur', 'Raya Setiabudhi', 'Raya Ujungberung', 'Rumah Sakit',
'Sadang Serang', 'Sentot Alibasa', 'Setiabudhi', 'Siliwangi',
'Soekarno Hatta', 'Sukabumi', 'Sukajadi', 'Suniaraja', 'Surapati',
'Tubagus Ismail', 'Veteran', 'W.R. Supratman',
)
# Currently this is my own work
street_prefixes_long = (
'Jalan', 'Lorong', 'Gang',
)
# Currently this is my own work
street_prefixes_short = (
'Jl.', 'Lr.', 'Gg.',
)
# From
# https://id.wikipedia.org/wiki/Daftar_kabupaten_dan_kota_di_Indonesia#Daftar_kota
cities = (
'Ambon', 'Balikpapan', 'Banda Aceh', 'Bandar Lampung', 'Bandung',
'Banjar', 'Banjarbaru', 'Banjarmasin', 'Batam', 'Batu', 'Bau-Bau',
'Bekasi', 'Bengkulu', 'Bima', 'Binjai', 'Bitung', 'Blitar', 'Bogor',
'Bontang', 'Bukittinggi', 'Cilegon', 'Cimahi', 'Cirebon', 'Denpasar',
'Depok', 'Dumai', 'Gorontalo', 'Jambi', 'Jayapura', 'Kediri', 'Kendari',
'Kota Administrasi Jakarta Barat', 'Kota Administrasi Jakarta Pusat',
'Kota Administrasi Jakarta Selatan', 'Kota Administrasi Jakarta Timur',
'Kota Administrasi Jakarta Utara', 'Kotamobagu', 'Kupang', 'Langsa',
'Lhokseumawe', 'Lubuklinggau', 'Madiun', 'Magelang', 'Makassar',
'Malang', 'Manado', 'Mataram', 'Medan', 'Metro', 'Meulaboh',
'Mojokerto', 'Padang', 'Padang Sidempuan', 'Padangpanjang', 'Pagaralam',
'Palangkaraya', 'Palembang', 'Palopo', 'Palu', 'Pangkalpinang',
'Parepare', 'Pariaman', 'Pasuruan', 'Payakumbuh', 'Pekalongan',
'Pekanbaru', 'Pematangsiantar', 'Pontianak', 'Prabumulih',
'Probolinggo', 'Purwokerto', 'Sabang', 'Salatiga', 'Samarinda',
'Sawahlunto', 'Semarang', 'Serang', 'Sibolga', 'Singkawang', 'Solok',
'Sorong', 'Subulussalam', 'Sukabumi', 'Sungai Penuh', 'Surabaya',
'Surakarta', 'Tangerang', 'Tangerang Selatan', 'Tanjungbalai',
'Tanjungpinang', 'Tarakan', 'Tasikmalaya', 'Tebingtinggi', 'Tegal',
'Ternate', 'Tidore Kepulauan', 'Tomohon', 'Tual', 'Yogyakarta',
)
# From https://id.wikipedia.org/wiki/Daftar_provinsi_di_Indonesia
states = (
'Aceh', 'Bali', 'Banten', 'Bengkulu', 'DI Yogyakarta', 'DKI Jakarta',
'Gorontalo', 'Jambi', 'Jawa Barat', 'Jawa Tengah', 'Jawa Timur',
'Kalimantan Barat', 'Kalimantan Selatan', 'Kalimantan Tengah',
'Kalimantan Timur', 'Kalimantan Utara', 'Kepulauan Bangka Belitung',
'Kepulauan Riau', 'Lampung', 'Maluku', 'Maluku Utara',
'Nusa Tenggara Barat', 'Nusa Tenggara Timur', 'Papua', 'Papua Barat',
'Riau', 'Sulawesi Barat', 'Sulawesi Selatan', 'Sulawesi Tengah',
'Sulawesi Tenggara', 'Sulawesi Utara', 'Sumatera Barat',
'Sumatera Selatan', 'Sumatera Utara',
)
# Currently this is my own work
states_abbr = (
'Aceh', 'Bali', 'Banten', 'Bengkulu', 'DIY', 'DKI', 'Gorontalo',
'Jambi', 'Jabar', 'Jateng', 'Jatim', 'Kalbar', 'Kalsel', 'Kalteng',
'Kaltim', 'Kalut', 'Babel', 'Kepri', 'Lampung', 'Maluku', 'Malut',
'NTB', 'NTT', 'Papua', 'Papbar', 'Riau', 'Sulbar', 'Sulsel', 'Sulteng',
'Sultra', 'Sulut', 'Sumbar', 'Sumsel', 'Sumut',
)
# From https://id.wikipedia.org/wiki/Daftar_negara-negara_di_dunia
countries = (
'Afganistan', 'Afrika Selatan', 'Afrika Tengah', 'Albania', 'Aljazair',
'Amerika Serikat', 'Andorra', 'Angola', 'Antigua dan Barbuda',
'Arab Saudi', 'Argentina', 'Armenia', 'Australia', 'Austria',
'Azerbaijan', 'Bahama', 'Bahrain', 'Bangladesh', 'Barbados', 'Belanda',
'Belarus', 'Belgia', 'Belize', 'Benin', 'Bhutan', 'Bolivia',
'Bosnia dan Herzegovina', 'Botswana', 'Brasil', 'Britania Raya',
'Brunei', 'Bulgaria', 'Burkina Faso', 'Burundi', 'Ceko', 'Chad',
'Chili', 'Denmark', 'Djibouti', 'Dominika', 'Ekuador', 'El Salvador',
'Eritrea', 'Estonia', 'Ethiopia', 'Federasi Mikronesia', 'Fiji',
'Filipina', 'Finlandia', 'Gabon', 'Gambia', 'Georgia', 'Ghana',
'Grenada', 'Guatemala', 'Guinea', 'Guinea Khatulistiwa',
'Guinea-Bissau', 'Guyana', 'Haiti', 'Honduras', 'Hongaria', 'India',
'Indonesia', 'Irak', 'Iran', 'Islandia', 'Israel', 'Italia', 'Jamaika',
'Jepang', 'Jerman', 'Kamboja', 'Kamerun', 'Kanada', 'Kazakhstan',
'Kenya', 'Kepulauan Marshall', 'Kepulauan Solomon', 'Kirgizstan',
'Kiribati', 'Kolombia', 'Komoro', 'Korea Selatan', 'Korea Utara',
'Kosta Rika', 'Kroasia', 'Kuba', 'Kuwait', 'Laos', 'Latvia', 'Lebanon',
'Lesotho', 'Liberia', 'Libya', 'Liechtenstein', 'Lituania',
'Luksemburg', 'Madagaskar', 'Maladewa', 'Malawi', 'Malaysia', 'Mali',
'Malta', 'Maroko', 'Mauritania', 'Mauritius', 'Meksiko', 'Mesir',
'Moldova', 'Monako', 'Mongolia', 'Montenegro', 'Mozambik', 'Myanmar',
'Namibia', 'Nauru', 'Nepal', 'Niger', 'Nigeria', 'Nikaragua',
'Norwegia', 'Oman', 'Pakistan', 'Palau', 'Panama', 'Pantai Gading',
'Papua Nugini', 'Paraguay', 'Perancis', 'Peru', 'Polandia', 'Portugal',
'Qatar', 'Republik Demokratik Kongo', 'Republik Dominika',
'Republik Irlandia', 'Republik Kongo', 'Republik Makedonia',
'Republik Rakyat Tiongkok', 'Rumania', 'Rusia', 'Rwanda',
'Saint Kitts dan Nevis', 'Saint Lucia', 'Saint Vincent dan Grenadine',
'Samoa', 'San Marino', 'São Tomé dan Príncipe', 'Selandia Baru',
'Senegal', 'Serbia', 'Seychelles', 'Sierra Leone', 'Singapura',
'Siprus', 'Slovenia', 'Slowakia', 'Somalia', 'Spanyol', 'Sri Lanka',
'Sudan', 'Sudan Selatan', 'Suriah', 'Suriname', 'Swaziland', 'Swedia',
'Swiss', 'Tajikistan', 'Tanjung Verde', 'Tanzania', 'Thailand',
'Timor Leste', 'Togo', 'Tonga', 'Trinidad dan Tobago', 'Tunisia',
'Turki', 'Turkmenistan', 'Tuvalu', 'Uganda', 'Ukraina',
'Uni Emirat Arab', 'Uruguay', 'Uzbekistan', 'Vanuatu', 'Vatikan',
'Venezuela', 'Vietnam', 'Yaman', 'Yordania', 'Yunani', 'Zambia',
'Zimbabwe',
)
def street(self):
return self.random_element(self.streets)
def street_prefix_short(self):
return self.random_element(self.street_prefixes_short)
def street_prefix_long(self):
return self.random_element(self.street_prefixes_long)
def city_name(self):
return self.random_element(self.cities)
def state(self):
return self.random_element(self.states)
def state_abbr(self):
return self.random_element(self.states_abbr)
def country(self):
return self.random_element(self.countries)
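# Usage sketch (illustrative; assumes this module is registered as Faker's id_ID
# address provider, which is how locale providers are normally wired up):
#
#     from faker import Faker
#     fake = Faker('id_ID')
#     fake.street_address()   # e.g. 'Jl. Cihampelas No. 45'
#     fake.address()          # street address, then city, state and postcode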
|
from datetime import date
import json
import petl as etl
import fhir_petl.fhir as fhir
def test_to_ratio():
numerator_tuple = (100, '<', 'mL', 'http://unitsofmeasure.org', 'mL')
denominator_tuple = (2, '>', 'h', 'http://unitsofmeasure.org', 'h')
ratio_tuple = (numerator_tuple, denominator_tuple)
ratio_dict = fhir.to_ratio(ratio_tuple)
assert ratio_dict['numerator'] == {'value': 100, 'comparator': '<', 'unit': 'mL', 'system': 'http://unitsofmeasure.org', 'code': 'mL'}
assert ratio_dict['denominator'] == {'value': 2, 'comparator': '>', 'unit': 'h', 'system': 'http://unitsofmeasure.org', 'code': 'h'}
def test_to_quantity():
quantity_tuple = (100, '<', 'mL', 'http://unitsofmeasure.org', 'mL')
quantity_dict = fhir.to_quantity(quantity_tuple)
assert quantity_dict == {'value': 100, 'comparator': '<', 'unit': 'mL', 'system': 'http://unitsofmeasure.org', 'code': 'mL'}
def test_to_simple_quantity():
simple_quantity_tuple = (2, 'h', 'http://unitsofmeasure.org', 'h')
simple_quantity_dict = fhir.to_simple_quantity(simple_quantity_tuple)
assert simple_quantity_dict == {'value': 2, 'unit': 'h', 'system': 'http://unitsofmeasure.org', 'code': 'h'}
def test_to_range():
low_tuple = (2, 'h', 'http://unitsofmeasure.org', 'h')
high_tuple = (4, 'h', 'http://unitsofmeasure.org', 'h')
range_tuple = (low_tuple, high_tuple)
range_dict = fhir.to_range(range_tuple)
assert range_dict['low'] == {'value': 2, 'unit': 'h', 'system': 'http://unitsofmeasure.org', 'code': 'h'}
assert range_dict['high'] == {'value': 4, 'unit': 'h', 'system': 'http://unitsofmeasure.org', 'code': 'h'}
def test_to_dosage():
header = ['sequence', 'dosage_text', 'additionalInstruction', 'patientInstruction', 'timing',
'asNeededBoolean', 'route', 'method', 'type', 'doseQuantity', 'rateQuantity', 'maxDosePerLifetime']
data = [1, 'i po qd cf', ('http://snomed.info/sct', '311504000', 'With or after food'), 'Once per day with food', ('18:00:00', None),
'false', ('http://snomed.info/sct', '26643006', 'Oral Route (qualifier value)'), ('http://snomed.info/sct', '421521009', 'Swallow'),
('http://terminology.hl7.org/CodeSystem/dose-rate-type', 'ordered', 'Ordered'), (100, 'mg', 'http://unitsofmeasure.org', 'mg'),
(24, 'h', 'http://unitsofmeasure.org', 'h'), (5, 'g', 'http://unitsofmeasure.org', 'g')]
dosage_table = etl.util.base.Record(data, header)
dosage = fhir.to_dosage(dosage_table)
assert dosage['sequence'] == 1
assert dosage['text'] == 'i po qd cf'
assert dosage['additionalInstruction'] == {'coding': [{'system': 'http://snomed.info/sct', 'code': '311504000', 'display': 'With or after food'}]}
assert dosage['timing'] == {'event': '18:00:00'}
assert dosage['asNeededBoolean'] == 'false'
assert dosage['route'] == {'coding': [{'system': 'http://snomed.info/sct', 'code': '26643006', 'display': 'Oral Route (qualifier value)'}]}
assert dosage['method'] == {'coding': [{'system': 'http://snomed.info/sct', 'code': '421521009', 'display': 'Swallow'}]}
assert dosage['doseAndRate'][0]['type'] == {'coding': [{'system': 'http://terminology.hl7.org/CodeSystem/dose-rate-type',
'code': 'ordered', 'display': 'Ordered'}]}
assert dosage['doseAndRate'][0]['doseQuantity'] == {'value': 100, 'unit': 'mg', 'system': 'http://unitsofmeasure.org', 'code': 'mg'}
assert dosage['doseAndRate'][0]['rateQuantity'] == {'value': 24, 'unit': 'h', 'system': 'http://unitsofmeasure.org', 'code': 'h'}
assert dosage['maxDosePerLifetime'] == {'value': 5, 'unit': 'g', 'system': 'http://unitsofmeasure.org', 'code': 'g'}
def test_to_med_administration():
header = ['id', 'status', 'subject', 'medication', 'start_date', 'end_date', 'note', 'dosage_text', 'route', 'rateRatio']
data = ['e1aa3a08-5c36-49cf-96e4-dcca7c1b7a50', 'completed', '071f8ae4-52fd-4f2d-8090-60d1ef3a4452',
('http://hl7.org/fhir/sid/ndc', '49884046905', 'Ibuprofen Tab 800 MG'), date(2020, 5, 5), date(2020, 5, 7),
'Test note', 'Test dosage text', ('http://snomed.info/sct', '26643006', 'Oral Route (qualifier value)'),
((100, '<', 'mg', 'http://unitsofmeasure.org', 'mg'), (24, '>', 'h', 'http://unitsofmeasure.org', 'h'))]
administration_table = etl.util.base.Record(data, header)
admin = json.loads(fhir.to_med_administration(administration_table))
assert admin['id'] == 'e1aa3a08-5c36-49cf-96e4-dcca7c1b7a50'
assert admin['status'] == 'completed'
assert admin['subject'] == {'reference' : 'Patient/071f8ae4-52fd-4f2d-8090-60d1ef3a4452'}
assert admin['medicationCodeableConcept'] == {'coding': [{'system': 'http://hl7.org/fhir/sid/ndc', 'code': '49884046905',
'display': 'Ibuprofen Tab 800 MG'}]}
assert admin['effectivePeriod'] == {'start': '2020-05-05', 'end': '2020-05-07'}
assert admin['note'] == [{'text': 'Test note'}]
assert admin['dosage']['text'] == 'Test dosage text'
assert admin['dosage']['route'] == {'coding': [{'system': 'http://snomed.info/sct',
'code': '26643006', 'display': 'Oral Route (qualifier value)'}]}
assert admin['dosage']['rateRatio'] == {'numerator': {'value': 100, 'comparator': '<', 'unit': 'mg',
'system': 'http://unitsofmeasure.org', 'code': 'mg'},
'denominator': {'value': 24, 'comparator': '>', 'unit': 'h',
'system': 'http://unitsofmeasure.org', 'code': 'h'}}
|
"""Script defined to test the Customer class."""
import httpretty
from paystackapi.transaction import Transaction
from paystackapi.tests.base_test_case import BaseTestCase
class TestTransaction(BaseTestCase):
"""Method defined to test transaction initialize."""
@httpretty.activate
def test_initialize(self):
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/transaction/initialize"),
content_type='text/json',
body='{"status": true, "contributors": true}',
status=201,
)
response = Transaction.initialize(
reference='getupall', amount=12000,
email='samuel.james@andela.com')
self.assertTrue(response['status'])
@httpretty.activate
def test_charge(self):
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/transaction/charge_authorization"),
content_type='text/json',
body='{"status": true, "contributors": true}',
status=201,
)
response = Transaction.charge(
reference='getupall', authorization_code='authorization_code',
email='email', amount='amount')
self.assertTrue(response['status'])
@httpretty.activate
def test_charge_token(self):
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/transaction/charge_token"),
content_type='text/json',
body='{"status": true, "contributors": true}',
status=201,
)
response = Transaction.charge_token(
reference='getupall', token='token',
email='email', amount=100000)
self.assertTrue(response['status'])
@httpretty.activate
def test_get(self):
httpretty.register_uri(
httpretty.GET,
self.endpoint_url("/transaction/4013"),
content_type='text/json',
body='{"status": true, "contributors": true}',
status=201,
)
response = Transaction.get(transaction_id=4013)
self.assertTrue(response['status'])
@httpretty.activate
def test_list(self):
httpretty.register_uri(
httpretty.GET,
self.endpoint_url("/transaction"),
content_type='text/json',
body='{"status": true, "contributors": true}',
status=201,
)
response = Transaction.list()
self.assertTrue(response['status'])
@httpretty.activate
def test_totals(self):
httpretty.register_uri(
httpretty.GET,
self.endpoint_url("/transaction/totals"),
content_type='text/json',
body='{"status": true, "contributors": true}',
status=201,
)
response = Transaction.totals()
self.assertTrue(response['status'])
@httpretty.activate
def test_verify(self):
httpretty.register_uri(
httpretty.GET,
self.endpoint_url("/transaction/verify/reference"),
content_type='text/json',
body='{"status": true, "contributors": true}',
status=201,
)
response = Transaction.verify('reference')
self.assertTrue(response['status'])
|
# Copyright (C) 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A command line tool for diffing files.
This demonstrates the simplest possible way to turn a module into a command line
interface with Python Fire. It exemplifies the power and shortcomings of relying
on Python Fire's simplicity.
See //strictfire/examples/diff/diff.py for another way of turning
difflib into a CLI that requires more code, but gives the developer more control
over the CLI's API.
Use the help flag to see usage for all the things this CLI can do. For example:
difffull -- -h
difffull HtmlDiff -- -h # Help for the HtmlDiff class
difffull HtmlDiff - -- -h # Help for an HtmlDiff object, not the HtmlDiff class
Here are some of the diff commands available:
difffull ndiff A B [LINEJUNK] [CHARJUNK]
difffull context-diff A B [FROMFILE] [TOFILE] [FROMFILEDATE] [TOFILEDATE] [N]
difffull unified-diff A B [FROMFILE] [TOFILE] [FROMFILEDATE] [TOFILEDATE] [N]
difffull HtmlDiff - make-file FROMLINES TOLINES [FROMDESC] [TODESC] [CONTEXT]
For more useful versions of those last four commands using Python Fire, see
//strictfire/examples/diff:diff.par
"""
import difflib
import strictfire
def main():
strictfire.StrictFire(difflib, name='difffull')
if __name__ == '__main__':
main()
|
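# Summary of the script below: given five per-digit sums read from stdin
# (least-significant digit first), it searches for three 5-digit primes whose
# digits, position by position, add up to those sums. For the sample input
# "19 12 15 11 14" the primes 10007, 42643 and 99989 work:
# 7+3+9=19, 0+4+8=12, 0+6+9=15, 0+2+9=11 and 1+4+9=14.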
import time
def get_primes_till(limit):
l = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53]
for i in range(54,limit+1):
prime = True
sq_root = int(i**0.5)
for j in l:
if(j>sq_root):
break
if(i%j==0):
prime = False
break
if prime:
l.append(i)
return l
def get_primes_from_list(start, end, primes_list):
l = []
for i in range(start, end+1):
prime = True
sq_root = int(i**0.5)
for j in primes_list:
if(j>sq_root):
break
if(i%j==0):
prime = False
break
if prime :
l.append(i)
return l
def get_third(first, second, sums_list, digits):
s = 0
c_multiplier = 10
p_multiplier = 1
for i in range(digits) :
digit = sums_list[i]-(int((first%c_multiplier)/p_multiplier)+int((second%c_multiplier)/p_multiplier))
if digit<0 or digit>9:
return -1
s += digit*p_multiplier
p_multiplier, c_multiplier = c_multiplier, c_multiplier*10
return s
def generate_triplets(start, end, primes_list, sums_list):
l = get_primes_from_list(start, end, primes_list)
le = len(l)
def check_presence(n) :
low , high = 0, le-1
while low<=high :
mid = int((low+high)/2)
if l[mid]==n:
return True
elif l[mid]>n:
high = mid-1
else:
low = mid+1
return False
# test code
# if(20533 in l and 44927 in l and 87179 in l):
# print("Prime list check")
# if(check_presence(20533)):
# print("Binary search check 1")
# if(check_presence(44927)):
# print("Binary search check 2")
# if(check_presence(87179)):
# print("Binary search check 3")
# if(get_third(20533,44927,sums_list,5)==87179):
# print("Generation correct ",get_third(20533,44927,sums_list,5))
# else :
# print("Generation incorrect ",get_third(20533,44927,sums_list,5))
for i in range(le):
for j in range(i+1,le):
third = get_third(l[i], l[j], sums_list,5)
if(third==-1):
continue
if check_presence(third):
return [l[i], l[j], third]
return []
def get3(a):
primes_list = get_primes_till(1000)
return generate_triplets(10000,99999,primes_list,a)
def main():
a = list(map(int, input().split()))
start = time.time()
for i in get3(a):
print(i,end="\t")
print()
end = time.time()
print("Time : ",(end-start))
if __name__=='__main__':
main()
# TEST CASES
#-------------------------------
# Case 1 :
# 19 12 15 11 14
# 10007 42643 99989
# Time : 0.06131744384765625
#-------------------------------
# Case 2 : (worst case)
# 22 19 3 8 23
# Time : 22.881287574768066
#-------------------------------
|
from sqlalchemy import Table, MetaData, create_engine, Integer, \
Column, String, Date, UniqueConstraint
class DataAccessLayer:
connection = None
engine = None
conn_string = None
metadata = MetaData()
def db_init(self, conn_string):
self.engine = create_engine(conn_string or self.conn_string)
self.metadata.create_all(self.engine)
self.connection = self.engine.connect()
stg_truck_production = Table('stg_truck_production',
metadata,
Column('truck_production_id', Integer(), primary_key=True),
Column('date', Date()),
Column('year', Integer()),
Column('month', Integer()),
Column('units', Integer()),
Column('operation', String(20), default='production'),
UniqueConstraint('date', 'units')
)
stg_truck_sales = Table('stg_truck_sales',
metadata,
Column('truck_sales_id', Integer(), primary_key=True),
Column('date', Date()),
Column('year', Integer()),
Column('month', Integer()),
Column('units', Integer()),
Column('operation', String(20), default='sales'),
UniqueConstraint('date', 'units')
)
stg_export = Table('stg_export',
metadata,
Column('export_id', Integer(), primary_key=True),
Column('direction', String(6), default='export'),
Column('co_ano', Integer(), index=True),
Column('co_mes', Integer()),
Column('co_ncm', String(8)),
Column('co_unid', Integer()),
Column('co_pais', String(3)),
Column('sg_uf_ncm', String(2)),
Column('co_via', String(2)),
Column('co_urf', String(7)),
Column('qt_estat', Integer()),
Column('kg_liquido', Integer()),
Column('vl_fob', Integer())
)
stg_import = Table('stg_import',
metadata,
Column('import_id', Integer(), primary_key=True),
Column('direction', String(6), default='import'),
Column('co_ano', Integer(), index=True),
Column('co_mes', Integer()),
Column('co_ncm', String(8)),
Column('co_unid', Integer()),
Column('co_pais', String(3)),
Column('sg_uf_ncm', String(2), index=True),
Column('co_via', String(2)),
Column('co_urf', String(7)),
Column('qt_estat', Integer()),
Column('kg_liquido', Integer()),
Column('vl_fob', Integer())
)
stg_ncm = Table('stg_ncm',
metadata,
Column('ncm_id', Integer(), primary_key=True),
Column('co_ncm', String(8)),
Column('co_unid', Integer()),
Column('co_sh6', String(6), index=True),
Column('co_ppe', Integer()),
Column('co_ppi', Integer()),
Column('co_fat_agreg', String(2)),
Column('co_cuci_item', Integer()),
Column('co_cgce_n3', Integer()),
Column('co_siit', Integer()),
Column('co_isic4', Integer()),
Column('co_exp_subset', Integer()),
Column('no_ncm_por', String(255)),
Column('no_ncm_esp', String(255)),
Column('no_ncm_ing', String(255))
)
stg_ncm_sh = Table('stg_ncm_sh',
metadata,
Column('ncm_sh_id', Integer(), primary_key=True),
Column('co_sh6', String(6), unique=True),
Column('no_sh6_por', String(300)),
Column('no_sh6_esp', String(300)),
Column('no_sh6_ing', String(300)),
Column('co_sh4', String(4)),
Column('no_sh4_por', String(400)),
Column('no_sh4_esp', String(300)),
Column('no_sh4_ing', String(300)),
Column('co_sh2', String(15)),
Column('no_sh2_por', String(255)),
Column('no_sh2_esp', String(255)),
Column('no_sh2_ing', String(255)),
Column('co_ncm_secrom', String(5)),
Column('no_sec_por', String(255)),
Column('no_sec_esp', String(255)),
Column('no_sec_ing', String(255))
)
stg_pais = Table('stg_pais',
metadata,
Column('pais_id', Integer(), primary_key=True),
Column('co_pais', String(3), unique=True),
Column('co_pais_ison3', String(3)),
Column('co_pais_isoa3', String(3)),
Column('no_pais', String(150)),
Column('no_pais_ing', String(150)),
Column('no_pais_esp', String(150))
)
stg_pais_bloco = Table('stg_pais_bloco',
metadata,
Column('pais_bloco_id', Integer(), primary_key=True),
Column('co_pais', String(3)),
Column('co_bloco', Integer()),
Column('no_bloco', String(100)),
Column('no_bloco_ing', String(100)),
Column('no_bloco_esp', String(100))
)
stg_urf = Table('stg_urf',
metadata,
Column('urf_id', Integer, primary_key=True),
Column('co_urf', String(7), unique=True),
Column('no_urf', String(100))
)
stg_via = Table('stg_via',
metadata,
Column('via_id', Integer(), primary_key=True),
Column('co_via', String(2), unique=True),
Column('no_via', String(32))
)
dim_date = Table('dim_date',
metadata,
Column('date_sk', Integer(), primary_key=True),
Column('year', Integer()),
Column('month', Integer()),
UniqueConstraint('year', 'month')
)
dim_mode = Table('dim_mode',
metadata,
Column('mode_sk', Integer(), primary_key=True),
Column('mode_nk', String(2), unique=True),
Column('mode_name', String(255)),
UniqueConstraint('mode_name')
)
dim_product = Table('dim_product',
metadata,
Column('product_sk', Integer(), primary_key=True),
Column('product_nk', Integer(), unique=True),
Column('ncm_name', String(400)),
Column('sh6_name', String(400)),
Column('sh4_name', String(400)),
Column('sh2_name', String(400)),
Column('section', String(400))
)
dim_country = Table('dim_country',
metadata,
Column('country_sk', Integer(), primary_key=True),
Column('country_nk', String(3), unique=True),
Column('iso3', String(3)),
Column('country_name', String(255))
)
dim_region = Table('dim_region',
metadata,
Column('region_sk', Integer(), primary_key=True),
Column('region_nk', String(2), unique=True),
Column('region_name', String(100))
)
dim_port = Table('dim_port',
metadata,
Column('port_sk', Integer(), primary_key=True),
Column('port_nk', String(7), unique=True),
Column('port_name', String(255))
)
dim_operation = Table('dim_operation',
metadata,
Column('operation_sk', Integer(), primary_key=True),
Column('operation_type', String(50), unique=True)
)
fact_truck = Table('fact_truck',
metadata,
Column('fact_truck_sk', Integer(), primary_key=True),
Column('date_sk', Integer()),
Column('operation', String(20)),
Column('truck_units', Integer())
)
fact_trading = Table('fact_trading',
metadata,
Column('fact_trading_sk', Integer(), primary_key=True),
Column('date_sk', Integer()),
Column('product_sk', Integer()),
Column('country_sk', Integer()),
Column('region_sk', Integer()),
Column('port_sk', Integer()),
Column('mode_sk', Integer()),
Column('direction', String(6)),
Column('net_kilogram', Integer()),
Column('fob_value_usd', Integer())
)
dal = DataAccessLayer()
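# Usage sketch (illustrative only; the in-memory SQLite URL and sample row are
# assumptions, not part of the original project, and the bare execute() calls
# assume SQLAlchemy 1.x autocommit semantics):
if __name__ == '__main__':
    dal.db_init('sqlite:///:memory:')
    dal.connection.execute(
        dal.stg_via.insert().values(co_via='01', no_via='MARITIMA'))
    print(list(dal.connection.execute(dal.stg_via.select())))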
|
class DeploymentError(Exception):
pass
|
#!/usr/bin/env python
"""The EE Python library."""
__version__ = '0.1.135'
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
# pylint: disable=g-bad-import-order
import collections
import datetime
import inspect
import numbers
import os
import six
from . import batch
from . import data
from . import deserializer
from . import ee_types as types
from ._helpers import _GetPersistentCredentials
# Public re-exports.
from ._helpers import ServiceAccountCredentials
from ._helpers import apply # pylint: disable=redefined-builtin
from ._helpers import call
from ._helpers import profilePrinting
from .apifunction import ApiFunction
from .collection import Collection
from .computedobject import ComputedObject
from .customfunction import CustomFunction
from .dictionary import Dictionary
from .ee_date import Date
from .ee_exception import EEException
from .ee_list import List
from .ee_number import Number
from .ee_string import String
from .element import Element
from .encodable import Encodable
from .feature import Feature
from .featurecollection import FeatureCollection
from .filter import Filter
from .function import Function
from .geometry import Geometry
from .image import Image
from .imagecollection import ImageCollection
from .serializer import Serializer
from .terrain import Terrain
# A list of autogenerated class names added by _InitializeGenerateClasses.
_generatedClasses = []
class _AlgorithmsContainer(dict):
"""A lightweight class that is used as a dictionary with dot notation.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
del self[name]
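# Illustrative example of the dot-notation behaviour above: after
#   c = _AlgorithmsContainer(); c.foo = 1
# both c.foo and c['foo'] refer to the same entry, and accessing a missing
# name raises AttributeError rather than KeyError.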
# A dictionary of algorithms that are not bound to a specific class.
Algorithms = _AlgorithmsContainer()
def Initialize(credentials='persistent', opt_url=None):
"""Initialize the EE library.
If this hasn't been called by the time any object constructor is used,
it will be called then. If this is called a second time with a different
  URL, this does not un-initialize the previously loaded Algorithms; it
  overwrites them and points them at the alternate servers.
Args:
credentials: OAuth2 credentials. 'persistent' (default) means use
credentials already stored in the filesystem, or raise an explanatory
exception guiding the user to create those credentials.
opt_url: The base url for the EarthEngine REST API to connect to.
"""
if credentials == 'persistent':
credentials = _GetPersistentCredentials()
data.initialize(credentials, (opt_url + '/api' if opt_url else None), opt_url)
# Initialize the dynamically loaded functions on the objects that want them.
ApiFunction.initialize()
Element.initialize()
Image.initialize()
Feature.initialize()
Collection.initialize()
ImageCollection.initialize()
FeatureCollection.initialize()
Filter.initialize()
Geometry.initialize()
List.initialize()
Number.initialize()
String.initialize()
Date.initialize()
Dictionary.initialize()
Terrain.initialize()
_InitializeGeneratedClasses()
_InitializeUnboundMethods()
def Reset():
"""Reset the library. Useful for re-initializing to a different server."""
data.reset()
ApiFunction.reset()
Element.reset()
Image.reset()
Feature.reset()
Collection.reset()
ImageCollection.reset()
FeatureCollection.reset()
Filter.reset()
Geometry.reset()
List.reset()
Number.reset()
String.reset()
Date.reset()
Dictionary.reset()
Terrain.reset()
_ResetGeneratedClasses()
global Algorithms
Algorithms = _AlgorithmsContainer()
def _ResetGeneratedClasses():
"""Remove the dynamic classes."""
global _generatedClasses
for name in _generatedClasses:
ApiFunction.clearApi(globals()[name])
del globals()[name]
_generatedClasses = []
# Warning: we're passing all of globals() into registerClasses.
# This is a) pass by reference, and b) a lot more stuff.
types._registerClasses(globals()) # pylint: disable=protected-access
def _Promote(arg, klass):
"""Wrap an argument in an object of the specified class.
This is used to e.g.: promote numbers or strings to Images and arrays
to Collections.
Args:
arg: The object to promote.
klass: The expected type.
Returns:
The argument promoted if the class is recognized, otherwise the
original argument.
"""
if arg is None:
return arg
if klass == 'Image':
return Image(arg)
elif klass == 'Feature':
if isinstance(arg, Collection):
# TODO(user): Decide whether we want to leave this in. It can be
# quite dangerous on large collections.
return ApiFunction.call_(
'Feature', ApiFunction.call_('Collection.geometry', arg))
else:
return Feature(arg)
elif klass == 'Element':
if isinstance(arg, Element):
# Already an Element.
return arg
elif isinstance(arg, Geometry):
# Geometries get promoted to Features.
return Feature(arg)
elif isinstance(arg, ComputedObject):
# Try a cast.
return Element(arg.func, arg.args, arg.varName)
else:
# No way to convert.
raise EEException('Cannot convert %s to Element.' % arg)
elif klass == 'Geometry':
if isinstance(arg, Collection):
return ApiFunction.call_('Collection.geometry', arg)
else:
return Geometry(arg)
elif klass in ('FeatureCollection', 'Collection'):
# For now Collection is synonymous with FeatureCollection.
if isinstance(arg, Collection):
return arg
else:
return FeatureCollection(arg)
elif klass == 'ImageCollection':
return ImageCollection(arg)
elif klass == 'Filter':
return Filter(arg)
elif klass == 'Algorithm':
if isinstance(arg, six.string_types):
# An API function name.
return ApiFunction.lookup(arg)
elif callable(arg):
# A native function that needs to be wrapped.
args_count = len(inspect.getargspec(arg).args)
return CustomFunction.create(arg, 'Object', ['Object'] * args_count)
elif isinstance(arg, Encodable):
# An ee.Function or a computed function like the return value of
# Image.parseExpression().
return arg
else:
raise EEException('Argument is not a function: %s' % arg)
elif klass == 'Dictionary':
if isinstance(arg, dict):
return arg
else:
return Dictionary(arg)
elif klass == 'String':
if (types.isString(arg) or
isinstance(arg, ComputedObject) or
isinstance(arg, String)):
return String(arg)
else:
return arg
elif klass == 'List':
return List(arg)
elif klass in ('Number', 'Float', 'Long', 'Integer', 'Short', 'Byte'):
return Number(arg)
elif klass in globals():
cls = globals()[klass]
ctor = ApiFunction.lookupInternal(klass)
# Handle dynamically created classes.
if isinstance(arg, cls):
# Return unchanged.
return arg
elif ctor:
# The client-side constructor will call the server-side constructor.
return cls(arg)
elif isinstance(arg, six.string_types):
if hasattr(cls, arg):
# arg is the name of a method in klass.
return getattr(cls, arg)()
else:
raise EEException('Unknown algorithm: %s.%s' % (klass, arg))
else:
# Client-side cast.
return cls(arg)
else:
return arg
def _InitializeUnboundMethods():
# Sort the items by length, so parents get created before children.
items = sorted(
ApiFunction.unboundFunctions().items(), key=lambda x: len(x[0]))
for name, func in items:
signature = func.getSignature()
if signature.get('hidden', False):
continue
# Create nested objects as needed.
name_parts = name.split('.')
target = Algorithms
while len(name_parts) > 1:
first = name_parts[0]
# Set the attribute if it doesn't already exist. The try/except block
# works in both Python 2 & 3.
try:
getattr(target, first)
except AttributeError:
setattr(target, first, _AlgorithmsContainer())
target = getattr(target, first)
name_parts = name_parts[1:]
# Attach the function.
# We need a copy of the function to attach properties.
def GenerateFunction(f):
return lambda *args, **kwargs: f.call(*args, **kwargs) # pylint: disable=unnecessary-lambda
bound = GenerateFunction(func)
bound.signature = signature
bound.__doc__ = str(func)
setattr(target, name_parts[0], bound)
def _InitializeGeneratedClasses():
"""Generate classes for extra types that appear in the web API."""
signatures = ApiFunction.allSignatures()
# Collect the first part of all function names.
names = set([name.split('.')[0] for name in signatures])
# Collect the return types of all functions.
returns = set([signatures[sig]['returns'] for sig in signatures])
want = [name for name in names.intersection(returns) if name not in globals()]
for name in want:
globals()[name] = _MakeClass(name)
_generatedClasses.append(name)
ApiFunction._bound_signatures.add(name) # pylint: disable=protected-access
# Warning: we're passing all of globals() into registerClasses.
# This is a) pass by reference, and b) a lot more stuff.
types._registerClasses(globals()) # pylint: disable=protected-access
def _MakeClass(name):
"""Generates a dynamic API class for a given name."""
def init(self, *args):
"""Initializer for dynamically created classes.
Args:
self: The instance of this class. Listed to make the linter hush.
*args: Either a ComputedObject to be promoted to this type, or
arguments to an algorithm with the same name as this class.
Returns:
The new class.
"""
klass = globals()[name]
onlyOneArg = (len(args) == 1)
# Are we trying to cast something that's already of the right class?
if onlyOneArg and isinstance(args[0], klass):
result = args[0]
else:
# Decide whether to call a server-side constructor or just do a
# client-side cast.
ctor = ApiFunction.lookupInternal(name)
firstArgIsPrimitive = not isinstance(args[0], ComputedObject)
shouldUseConstructor = False
if ctor:
if not onlyOneArg:
# Can't client-cast multiple arguments.
shouldUseConstructor = True
elif firstArgIsPrimitive:
# Can't cast a primitive.
shouldUseConstructor = True
elif args[0].func != ctor:
# We haven't already called the constructor on this object.
shouldUseConstructor = True
# Apply our decision.
if shouldUseConstructor:
# Call ctor manually to avoid having promote() called on the output.
ComputedObject.__init__(
self, ctor, ctor.promoteArgs(ctor.nameArgs(args)))
else:
# Just cast and hope for the best.
if not onlyOneArg:
# We don't know what to do with multiple args.
raise EEException(
'Too many arguments for ee.%s(): %s' % (name, args))
elif firstArgIsPrimitive:
# Can't cast a primitive.
raise EEException(
'Invalid argument for ee.%s(): %s. Must be a ComputedObject.' %
(name, args))
else:
result = args[0]
ComputedObject.__init__(self, result.func, result.args, result.varName)
properties = {'__init__': init, 'name': lambda self: name}
new_class = type(str(name), (ComputedObject,), properties)
ApiFunction.importApi(new_class, name, name)
return new_class
# Set up type promotion rules as soon the package is loaded.
Function._registerPromoter(_Promote) # pylint: disable=protected-access
|
import copy
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from argparse import ArgumentParser
def generate_random_state(dim):
return [np.random.randint(2, size = dim) for _ in range(dim)]
def generate_initial_state_from_file(file):
    grid = []
    # Use a context manager so the file handle is closed after reading.
    with open(file, "r") as f:
        for row in f.readlines():
            grid.append([int(col) for col in row if col.isnumeric()])
    return grid
def get_glider():
return generate_initial_state_from_file("figures/glider.txt")
def get_spaceships():
return generate_initial_state_from_file("figures/spaceships.txt")
def get_wave():
return generate_initial_state_from_file("figures/wave.txt")
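# Expected figure-file format, inferred from generate_initial_state_from_file
# above: one grid row per line, written with the digits '0' (dead) and
# '1' (alive). For example, a glider could be stored as:
#   010
#   001
#   111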
def update_state(grid):
tmp_grid = copy.deepcopy(grid)
num_rows = len(grid)
num_cols = len(grid[0])
for row in range(0, num_rows):
for col in range(0, num_cols):
# moore neighborhood
living_nbrs = grid[(row - 1) % num_rows][(col - 1) % num_cols] \
+ grid[(row - 1) % num_rows][col % num_cols] \
+ grid[(row - 1) % num_rows][(col + 1) % num_cols] \
+ grid[row % num_rows][(col - 1) % num_cols] \
+ grid[row % num_rows][(col + 1) % num_cols] \
+ grid[(row + 1) % num_rows][(col - 1) % num_cols] \
+ grid[(row + 1) % num_rows][col % num_cols] \
+ grid[(row + 1) % num_rows][(col + 1) % num_cols]
# dead
if grid[row][col] == 0:
# resurrection
if living_nbrs == 3:
tmp_grid[row][col] = 1
# alive
else:
if living_nbrs < 2:
# solitude
tmp_grid[row][col] = 0
elif living_nbrs > 3:
# overpopulation
tmp_grid[row][col] = 0
return tmp_grid
def visualize_grid(grid):
cmap = mpl.colors.ListedColormap(['#28bd5a', "k"])
bounds = [0., 0.5, 1.]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
ax = plt.subplots()[1]
img = ax.imshow(grid, interpolation = 'none', cmap = cmap, norm = norm)
for _ in range(100):
grid = update_state(grid)
img.set_data(grid)
plt.pause(0.05)
if __name__ == '__main__':
parser = ArgumentParser(description = "Visualization for Conway's Game of Life")
parser.add_argument("-r", "--random", type = int, required = False, metavar = "", help = "using random initial state of specified size")
parser.add_argument("-i", "--input", type = str, required = False, metavar = "", help = "reading initial state from specified file")
parser.add_argument("-o", "--object", type = str, required = False, metavar = "", help = "using predefined object(s)")
args = parser.parse_args()
if args.random:
print("generating random grid with size", args.random)
grid = generate_random_state(args.random)
visualize_grid(grid)
elif args.input:
print("generating grid from file", args.input)
grid = generate_initial_state_from_file(args.input)
visualize_grid(grid)
elif args.object:
print("generating", args.object)
if args.object == "glider":
grid = get_glider()
elif args.object == "spaceships":
grid = get_spaceships()
elif args.object == "wave":
grid = get_wave()
else:
print("unknown object: using random state")
grid = generate_random_state(64)
visualize_grid(grid)
else:
print("since no initial state was specified, a random grid of size 64x64 is generated")
grid = generate_random_state(64)
visualize_grid(grid)
|
# Copyright (C) 2021-2022, Mindee.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
import math
import random
from typing import Any, Callable, Dict, List, Tuple
import numpy as np
from doctr.utils.repr import NestedObject
from .. import functional as F
__all__ = ['SampleCompose', 'ImageTransform', 'ColorInversion', 'OneOf', 'RandomApply', 'RandomRotate', 'RandomCrop']
class SampleCompose(NestedObject):
"""Implements a wrapper that will apply transformations sequentially on both image and target
.. tabs::
.. tab:: TensorFlow
.. code:: python
>>> import numpy as np
>>> import tensorflow as tf
>>> from doctr.transforms import SampleCompose, ImageTransform, ColorInversion, RandomRotate
>>> transfo = SampleCompose([ImageTransform(ColorInversion((32, 32))), RandomRotate(30)])
>>> out, out_boxes = transfo(tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1), np.zeros((2, 4)))
.. tab:: PyTorch
.. code:: python
>>> import numpy as np
>>> import torch
>>> from doctr.transforms import SampleCompose, ImageTransform, ColorInversion, RandomRotate
>>> transfos = SampleCompose([ImageTransform(ColorInversion((32, 32))), RandomRotate(30)])
>>> out, out_boxes = transfos(torch.rand(8, 64, 64, 3), np.zeros((2, 4)))
Args:
transforms: list of transformation modules
"""
_children_names: List[str] = ['sample_transforms']
def __init__(self, transforms: List[Callable[[Any, Any], Tuple[Any, Any]]]) -> None:
self.sample_transforms = transforms
def __call__(self, x: Any, target: Any) -> Tuple[Any, Any]:
for t in self.sample_transforms:
x, target = t(x, target)
return x, target
class ImageTransform(NestedObject):
"""Implements a transform wrapper to turn an image-only transformation into an image+target transform
.. tabs::
.. tab:: TensorFlow
.. code:: python
>>> import tensorflow as tf
>>> from doctr.transforms import ImageTransform, ColorInversion
>>> transfo = ImageTransform(ColorInversion((32, 32)))
>>> out, _ = transfo(tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1), None)
.. tab:: PyTorch
.. code:: python
>>> import torch
>>> from doctr.transforms import ImageTransform, ColorInversion
>>> transfo = ImageTransform(ColorInversion((32, 32)))
>>> out, _ = transfo(torch.rand(8, 64, 64, 3), None)
Args:
transform: the image transformation module to wrap
"""
_children_names: List[str] = ['img_transform']
def __init__(self, transform: Callable[[Any], Any]) -> None:
self.img_transform = transform
def __call__(self, img: Any, target: Any) -> Tuple[Any, Any]:
img = self.img_transform(img)
return img, target
class ColorInversion(NestedObject):
"""Applies the following tranformation to a tensor (image or batch of images):
convert to grayscale, colorize (shift 0-values randomly), and then invert colors
.. tabs::
.. tab:: TensorFlow
.. code:: python
>>> import tensorflow as tf
>>> from doctr.transforms import ColorInversion
>>> transfo = ColorInversion(min_val=0.6)
>>> out = transfo(tf.random.uniform(shape=[8, 64, 64, 3], minval=0, maxval=1))
.. tab:: PyTorch
.. code:: python
>>> import torch
>>> from doctr.transforms import ColorInversion
>>> transfo = ColorInversion(min_val=0.6)
>>> out = transfo(torch.rand(8, 64, 64, 3))
Args:
min_val: range [min_val, 1] to colorize RGB pixels
"""
def __init__(self, min_val: float = 0.5) -> None:
self.min_val = min_val
def extra_repr(self) -> str:
return f"min_val={self.min_val}"
def __call__(self, img: Any) -> Any:
return F.invert_colors(img, self.min_val)
class OneOf(NestedObject):
"""Randomly apply one of the input transformations
.. tabs::
.. tab:: TensorFlow
.. code:: python
>>> import tensorflow as tf
>>> from doctr.transforms import OneOf
>>> transfo = OneOf([JpegQuality(), Gamma()])
>>> out = transfo(tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1))
.. tab:: PyTorch
.. code:: python
>>> import torch
>>> from doctr.transforms import OneOf
>>> transfo = OneOf([JpegQuality(), Gamma()])
>>> out = transfo(torch.rand(1, 64, 64, 3))
Args:
transforms: list of transformations, one only will be picked
"""
_children_names: List[str] = ['transforms']
def __init__(self, transforms: List[Callable[[Any], Any]]) -> None:
self.transforms = transforms
def __call__(self, img: Any) -> Any:
# Pick transformation
transfo = self.transforms[int(random.random() * len(self.transforms))]
# Apply
return transfo(img)
class RandomApply(NestedObject):
"""Apply with a probability p the input transformation
.. tabs::
.. tab:: TensorFlow
.. code:: python
>>> import tensorflow as tf
>>> from doctr.transforms import RandomApply
>>> transfo = RandomApply(Gamma(), p=.5)
>>> out = transfo(tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1))
.. tab:: PyTorch
.. code:: python
>>> import torch
>>> from doctr.transforms import RandomApply
>>> transfo = RandomApply(Gamma(), p=.5)
>>> out = transfo(torch.rand(1, 64, 64, 3))
Args:
transform: transformation to apply
p: probability to apply
"""
def __init__(self, transform: Callable[[Any], Any], p: float = .5) -> None:
self.transform = transform
self.p = p
def extra_repr(self) -> str:
return f"transform={self.transform}, p={self.p}"
def __call__(self, img: Any) -> Any:
if random.random() < self.p:
return self.transform(img)
return img
class RandomRotate(NestedObject):
"""Randomly rotate a tensor image and its boxes
.. image:: https://github.com/mindee/doctr/releases/download/v0.4.0/rotation_illustration.png
:align: center
Args:
max_angle: maximum angle for rotation, in degrees. Angles will be uniformly picked in
[-max_angle, max_angle]
expand: whether the image should be padded before the rotation
"""
def __init__(self, max_angle: float = 5., expand: bool = False) -> None:
self.max_angle = max_angle
self.expand = expand
def extra_repr(self) -> str:
return f"max_angle={self.max_angle}, expand={self.expand}"
def __call__(self, img: Any, target: np.ndarray) -> Tuple[Any, np.ndarray]:
angle = random.uniform(-self.max_angle, self.max_angle)
r_img, r_polys = F.rotate_sample(img, target, angle, self.expand)
# Removes deleted boxes
is_kept = (r_polys.max(1) > r_polys.min(1)).sum(1) == 2
return r_img, r_polys[is_kept]
class RandomCrop(NestedObject):
"""Randomly crop a tensor image and its boxes
Args:
scale: tuple of floats, relative (min_area, max_area) of the crop
ratio: tuple of float, relative (min_ratio, max_ratio) where ratio = h/w
"""
def __init__(self, scale: Tuple[float, float] = (0.08, 1.), ratio: Tuple[float, float] = (0.75, 1.33)) -> None:
self.scale = scale
self.ratio = ratio
def extra_repr(self) -> str:
return f"scale={self.scale}, ratio={self.ratio}"
def __call__(self, img: Any, target: Dict[str, np.ndarray]) -> Tuple[Any, Dict[str, np.ndarray]]:
scale = random.uniform(self.scale[0], self.scale[1])
ratio = random.uniform(self.ratio[0], self.ratio[1])
# Those might overflow
crop_h = math.sqrt(scale * ratio)
crop_w = math.sqrt(scale / ratio)
xmin, ymin = random.uniform(0, 1 - crop_w), random.uniform(0, 1 - crop_h)
xmax, ymax = xmin + crop_w, ymin + crop_h
# Clip them
xmin, ymin = max(xmin, 0), max(ymin, 0)
xmax, ymax = min(xmax, 1), min(ymax, 1)
        cropped_img, crop_boxes = F.crop_detection(img, target["boxes"], (xmin, ymin, xmax, ymax))
        return cropped_img, dict(boxes=crop_boxes)
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SoftmaxCrossEntropyWithLogits op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class XentTest(test.TestCase):
def _npXent(self, features, labels, dim=-1):
if dim == -1:
dim = len(features.shape) - 1
one_only_on_dim = list(features.shape)
one_only_on_dim[dim] = 1
e = np.exp(
features - np.reshape(np.amax(features, axis=dim), one_only_on_dim))
probs = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
bp = (probs - labels)
l = -np.sum(labels * np.log(probs + 1.0e-20), axis=dim)
return l, bp
# TODO(b/123860949): The values are constant folded for XLA, so placeholders
# are needed.
def _testXent(self,
np_features,
np_labels,
use_gpu=False,
with_placeholders=False):
np_loss, np_backprop = self._npXent(np_features, np_labels)
with self.cached_session(use_gpu=use_gpu) as sess:
if with_placeholders:
features_placeholder = array_ops.placeholder(np_features.dtype)
labels_placeholder = array_ops.placeholder(np_labels.dtype)
loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
labels=labels_placeholder, features=features_placeholder)
tf_loss, tf_backprop = sess.run([loss, backprop],
feed_dict={
labels_placeholder: np_labels,
features_placeholder: np_features
})
else:
loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
np_features, np_labels)
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
def _testXentWrapper(self, np_features, np_labels, dim=-1, use_gpu=False):
np_loss, _ = self._npXent(np_features, np_labels, dim=dim)
with self.cached_session(use_gpu=use_gpu) as sess:
loss = nn_ops.softmax_cross_entropy_with_logits(
labels=np_labels, logits=np_features, dim=dim)
tf_loss = self.evaluate(loss)
print("np_loss:", np_loss)
print("tf_loss:", tf_loss)
self.assertAllCloseAccordingToType(np_loss, tf_loss)
# TODO(b/123860949): The values are constant folded for XLA, so placeholders
# are needed.
def _testAll(self, features, labels, with_placeholders=False):
self._testXent(
features, labels, use_gpu=False, with_placeholders=with_placeholders)
self._testXent(
features, labels, use_gpu=True, with_placeholders=with_placeholders)
def _testSingleClass(self, use_gpu=False):
for dtype in np.float16, np.float32:
with self.cached_session(use_gpu=use_gpu) as sess:
loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
np.array([[1.], [-1.], [0.]]).astype(dtype),
np.array([[-1.], [0.], [1.]]).astype(dtype))
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllClose([0.0, 0.0, 0.0], tf_loss)
self.assertAllClose([[2.0], [1.0], [0.0]], tf_backprop)
def testSingleClass(self):
self._testSingleClass(True)
self._testSingleClass(False)
@test_util.run_deprecated_v1
def testRankTooLarge(self):
for dtype in np.float16, np.float32:
np_features = np.array([[[1., 1., 1., 1.]], [[1., 2., 3.,
4.]]]).astype(dtype)
np_labels = np.array([[[0., 0., 0., 1.]], [[0., .5, .5,
0.]]]).astype(dtype)
self.assertRaisesRegexp(ValueError, "rank 2, but is rank 3",
gen_nn_ops.softmax_cross_entropy_with_logits,
np_features, np_labels)
def testNpXent(self):
# We create 2 batches of logits for testing.
# batch 0 is the boring uniform distribution: 1, 1, 1, 1, with target 3.
# batch 1 has a bit of difference: 1, 2, 3, 4, with soft targets (1, 2).
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
labels = [[0., 0., 0., 1.], [0., .5, .5, 0.]]
# For batch 0, we expect the uniform distribution: 0.25, 0.25, 0.25, 0.25
# With a hard target 3, the backprop is [0.25, 0.25, 0.25, -0.75]
# The loss for this batch is -log(0.25) = 1.386
#
# For batch 1, we have:
# exp(0) = 1
# exp(1) = 2.718
# exp(2) = 7.389
# exp(3) = 20.085
# SUM = 31.192
# So we have as probabilities:
# exp(0) / SUM = 0.032
# exp(1) / SUM = 0.087
# exp(2) / SUM = 0.237
# exp(3) / SUM = 0.644
# With a soft target (1, 2), the backprop is
# [0.032, 0.087 - 0.5 = -0.413, 0.237 - 0.5 = -0.263, 0.644]
# The loss for this batch is 0.5 * -log(0.087) + 0.5 * -log(0.237) = 1.9401,
# so the per-batch losses are [1.3862, 1.9401].
# (A standalone NumPy sketch of the batch-1 arithmetic follows this test.)
np_loss, np_backprop = self._npXent(np.array(features), np.array(labels))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, -0.75], [0.0321, -0.4129, -0.2632,
0.6439]]),
np_backprop,
rtol=1.e-3,
atol=1.e-3)
self.assertAllClose(
np.array([1.3862, 1.9401]), np_loss, rtol=1.e-3, atol=1.e-3)
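# A minimal standalone sketch (illustrative, not part of the original test):
# the batch-1 numbers above can be reproduced directly with NumPy.
#
#   import numpy as np
#   logits = np.array([1., 2., 3., 4.])
#   probs = np.exp(logits - logits.max())
#   probs /= probs.sum()                              # ~[0.032, 0.087, 0.237, 0.644]
#   loss = -(0.5 * np.log(probs[1]) + 0.5 * np.log(probs[2]))   # ~1.9401
#   backprop = probs - np.array([0., .5, .5, 0.])     # ~[0.032, -0.413, -0.263, 0.644]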
def testShapeBroadcast(self):
np_f = np.array([[1., 2., 3., 4.],
[1., 2., 3., 4.]]).astype(np.float32)
np_l = np.array([[0., 0., 0., 1.],
[0., .5, .5, 0.]]).astype(np.float32)
np_loss, np_backprop = self._npXent(np_f, np_l)
tf_f = constant_op.constant(
np.array([[1., 2., 3., 4.]]).astype(np.float32))
tf_l = constant_op.constant(
np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float32))
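# The features are shape [1, 4] and the labels are shape [2, 4]; the op
# broadcasts the single feature row across the batch, so the expected values
# come from the explicitly tiled np_f/np_l above.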
for use_gpu in [False, True]:
with self.cached_session(use_gpu=use_gpu) as sess:
loss, backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
tf_f, tf_l)
tf_loss, tf_backprop = self.evaluate([loss, backprop])
self.assertAllCloseAccordingToType(np_loss, tf_loss)
self.assertAllCloseAccordingToType(np_backprop, tf_backprop)
# TODO(b/123860949): The values are constant folded for XLA, so placeholders
# are needed.
@test_util.run_deprecated_v1
def testFeatureBroadcast(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([[0., 0., 0., 1.]]).astype(np.float16),
with_placeholders=True)
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([[0.], [2.]]).astype(np.float16),
with_placeholders=True)
@test_util.run_deprecated_v1
def testShapeMismatch(self):
with self.cached_session():
with self.assertRaises(ValueError):
gen_nn_ops.softmax_cross_entropy_with_logits(
[[0., 1.], [2., 3.]], [[0., 1., 0.], [1., 0., 0.]])
@test_util.run_deprecated_v1
def testNotMatrix(self):
with self.cached_session():
with self.assertRaises(ValueError):
gen_nn_ops.softmax_cross_entropy_with_logits([0., 1., 2., 3.],
[0., 1., 0., 1.])
def testHalf(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16),
np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float16))
def testFloat(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32),
np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float32))
def testDouble(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64),
np.array([[0., 0., 0., 1.], [0., .5, .5, 0.]]).astype(np.float64))
@test_util.run_deprecated_v1
def testGradient(self):
with self.cached_session() as sess:
l = constant_op.constant(
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.5],
shape=[3, 4],
dtype=dtypes.float64,
name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
x = nn_ops.softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent")
err = gradient_checker.compute_gradient_error(f, [3, 4], x, [3])
# Check that no extra computation is performed. When only the first
# derivative is requested, the second derivative must not be computed,
# so the graph should contain no `BatchMatMul` op.
op_names = [
op.op_def.name for op in sess.graph.get_operations() if op.op_def
]
self.assertNotIn("BatchMatMul", op_names)
print("cross entropy gradient err = ", err)
self.assertLess(err, 5e-8)
@test_util.run_deprecated_v1
def testGradientLabelWithV2(self):
with self.cached_session():
l = constant_op.constant(
[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.5],
shape=[3, 4],
dtype=dtypes.float64,
name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[3, 4],
dtype=dtypes.float64,
name="f")
x = nn_ops.softmax_cross_entropy_with_logits_v2(
labels=l, logits=f, name="xent")
err = gradient_checker.compute_gradient_error(l, [3, 4], x, [3])
self.assertLess(err, 5e-8)
@test_util.run_deprecated_v1
def testSecondGradient(self):
with self.cached_session() as sess:
l = constant_op.constant(
[
0.0, 0.0, 1.0 / 3, 0.0, 1.0 / 3, 0.0, 0.0, 0.0, 0.0, 0.5 / 3, 0.0,
0.5 / 3
],
shape=[12],
dtype=dtypes.float64,
name="l")
f = constant_op.constant(
[0.1, 0.2, 0.3, 0.4, 0.1, 0.4, 0.9, 1.6, 0.1, 0.8, 2.7, 6.4],
shape=[12],
dtype=dtypes.float64,
name="f")
x = nn_ops.softmax_cross_entropy_with_logits(
labels=l, logits=f, name="xent")
loss = math_ops.reduce_sum(x)
gradients = gradients_impl.gradients(loss, [f])[0]
err = gradient_checker.compute_gradient_error(f, [12], gradients, [12])
# Check that the second derivative is calculated. Because of how the
# xentropy gradient is implemented, computing it is equivalent to a
# `BatchMatMul` op appearing in the graph.
op_names = [
op.op_def.name for op in sess.graph.get_operations() if op.op_def
]
if compat.forward_compatible(2019, 4, 25):
self.assertIn("BatchMatMulV2", op_names)
else:
self.assertIn("BatchMatMul", op_names)
print("cross entropy hessian err = ", err)
self.assertLess(err, 5e-8)
def testWrapper(self):
features = np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(
np.float32)
labels = np.array([[[0., 0., 0., 1.], [0., 1., 0., 0.]],
[[0., 0.5, 0.5, 0.], [0.5, 0.5, 0., 0.]],
[[0., 1., 0., 0.], [0., 0., 1., 0.]]]).astype(
np.float32)
self._testXentWrapper(features, labels, dim=0, use_gpu=False)
self._testXentWrapper(features, labels, dim=0, use_gpu=True)
self._testXentWrapper(features, labels, dim=1, use_gpu=False)
self._testXentWrapper(features, labels, dim=1, use_gpu=True)
self._testXentWrapper(features, labels, dim=-1, use_gpu=False)
self._testXentWrapper(features, labels, dim=-1, use_gpu=True)
def testZeroDimension(self):
features = np.zeros([0, 2, 4]).astype(np.float32)
labels = np.zeros([0, 2, 4]).astype(np.float32)
np_loss, _ = self._npXent(features, labels)
with self.session(use_gpu=True) as sess:
loss = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=features)
tf_loss = self.evaluate(loss)
self.assertAllEqual(np_loss, tf_loss)
class XentBenchmark(test.Benchmark):
def benchmarkZeroDimension(self):
for (m, n, p, use_gpu) in itertools.product(
[128],
[10, 100, 1000, 10000, 100000],
[0.001, 0.01, 0.5, 0.99, 1.0],
[False]):
k = int(p * n)
if k == 0:
continue
name = "zero_dimension_m_%d_n_%d_k_%g_use_gpu_%s" % (m, n, k, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
labels = array_ops.zeros([0, 2, 4], dtype=dtypes.float32)
logits = array_ops.zeros([0, 2, 4], dtype=dtypes.float32)
op = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
with session.Session() as sess:
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
gb_processed_input = m * n / 1.0e9
throughput = gb_processed_input / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
def benchmarkSingleClass(self):
for (m, n, p, use_gpu) in itertools.product(
[128],
[10, 100, 1000, 10000, 100000],
[0.001, 0.01, 0.5, 0.99, 1.0],
[False]):
k = int(p * n)
if k == 0:
continue
name = "single_class_m_%d_n_%d_k_%g_use_gpu_%s" % (m, n, k, use_gpu)
device = "/%s:0" % ("gpu" if use_gpu else "cpu")
with ops.Graph().as_default():
with ops.device(device):
labels = constant_op.constant([[1.], [-1.], [0.]],
dtype=dtypes.float32)
logits = constant_op.constant([[-1.], [0.], [1.]],
dtype=dtypes.float32)
op = nn_ops.softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
with session.Session() as sess:
r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
gb_processed_input = m * n / 1.0e9
throughput = gb_processed_input / r["wall_time"]
print("Benchmark: %s \t wall_time: %0.03g s \t "
"Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
sys.stdout.flush()
if __name__ == "__main__":
test.main()
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
from oslo_config import cfg
from oslo_config import types
from kolla.version import version_info as version
BASE_OS_DISTRO = ['centos', 'rhel', 'ubuntu', 'oraclelinux', 'debian']
BASE_ARCH = ['x86_64', 'ppc64le', 'aarch64']
DEFAULT_BASE_TAGS = {
'centos': '7',
'rhel': '7',
'oraclelinux': '7-slim',
'debian': 'stretch-backports',
'ubuntu': '18.04',
}
DISTRO_RELEASE = {
'centos': '7',
'rhel': '7',
'oraclelinux': '7',
'debian': 'stretch-backports',
'ubuntu': '18.04',
}
# This is a noarch repository, so we will use it on all architectures
DELOREAN = \
"https://trunk.rdoproject.org/centos7/current-passed-ci/delorean.repo"
DELOREAN_DEPS = "https://trunk.rdoproject.org/centos7/delorean-deps.repo"
INSTALL_TYPE_CHOICES = ['binary', 'source', 'rdo', 'rhos']
# TODO(mandre) check for file integrity instead of downloading from an HTTPS
# source
TARBALLS_BASE = "https://tarballs.openstack.org"
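# Pre-defined image sets selectable with --profile. Entries are matched
# against image names, so a trailing dash (e.g. 'nova-') is meant to select
# every image with that prefix (assumption based on how the regex filter
# treats these values).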
_PROFILE_OPTS = [
cfg.ListOpt('infra',
default=[
'ceph',
'certmonger',
'cron',
'elasticsearch',
'etcd',
'fluentd',
'haproxy',
'keepalived',
'kibana',
'kolla-toolbox',
'logstash',
'mariadb',
'memcached',
'mongodb',
'opendaylight',
'openvswitch',
'ptp',
'qdrouterd',
'rabbitmq',
'redis',
'rsyslog',
'skydive',
'storm',
'tgtd',
],
help='Infra images'),
cfg.ListOpt('main',
default=[
'ceilometer',
'cinder',
'glance',
'heat',
'horizon',
'iscsi',
'keystone',
'neutron',
'nova-',
'swift',
],
help='Main images'),
cfg.ListOpt('aux',
default=[
'almanach',
'aodh',
'blazar',
'cloudkitty',
'congress',
'designate',
'dragonflow',
'ec2-api',
'freezer',
'gnocchi',
'influxdb',
'ironic',
'kafka',
'karbor',
'kuryr',
'magnum',
'manila',
'mistral',
'monasca',
'murano',
'novajoin',
'octavia',
'panko',
'rally',
'redis',
'sahara',
'searchlight',
'senlin',
'solum',
'tacker',
'telegraf',
'trove',
'vitrage',
'zaqar',
'zookeeper',
'zun',
],
help='Aux Images'),
cfg.ListOpt('default',
default=[
'chrony',
'cron',
'kolla-toolbox',
'fluentd',
'glance',
'haproxy',
'heat',
'horizon',
'keepalived',
'keystone',
'mariadb',
'memcached',
'neutron',
'nova-',
'openvswitch',
'rabbitmq',
],
help='Default images'),
cfg.ListOpt('gate',
default=[
'chrony',
'cron',
'fluentd',
'glance',
'haproxy',
'horizon',
'keepalived',
'keystone',
'kolla-toolbox',
'mariadb',
'memcached',
'neutron',
'nova-',
'openvswitch',
'rabbitmq',
],
help='Gate images')
]
hostarch = os.uname()[4]
_CLI_OPTS = [
cfg.StrOpt('base', short='b', default='centos',
choices=BASE_OS_DISTRO,
help='The distro type of the base image.'),
cfg.StrOpt('base-tag', default='latest',
help='The base distro image tag'),
cfg.StrOpt('base-image',
help='The base image name. Default is the same as base.'),
cfg.StrOpt('base-arch', default=hostarch,
choices=BASE_ARCH,
help='The base architecture. Default is the same as the host.'),
cfg.BoolOpt('debug', short='d', default=False,
help='Turn on debugging log level'),
cfg.BoolOpt('skip-parents', default=False,
help='Do not rebuild parents of matched images'),
cfg.BoolOpt('skip-existing', default=False,
help='Do not rebuild images present in the docker cache'),
cfg.DictOpt('build-args',
help='Set docker build time variables'),
cfg.BoolOpt('keep', default=False,
help='Keep failed intermediate containers'),
cfg.BoolOpt('list-dependencies', short='l',
help='Show image dependencies (filtering supported)'),
cfg.BoolOpt('list-images',
help='Show all available images (filtering supported)'),
cfg.StrOpt('namespace', short='n', default='kolla',
help='The Docker namespace name'),
cfg.StrOpt('network_mode', default=None,
help='The network mode for Docker build. Example: host'),
cfg.BoolOpt('cache', default=True,
help='Use the Docker cache when building'),
cfg.MultiOpt('profile', types.String(), short='p',
help=('Build a pre-defined set of images, see [profiles]'
' section in config. The default profiles are:'
' {}'.format(', '.join(
[opt.name for opt in _PROFILE_OPTS])
))),
cfg.BoolOpt('push', default=False,
help='Push images after building'),
cfg.IntOpt('push-threads', default=1, min=1,
help=('The number of threads to use while pushing'
' images. Note: Docker can not handle threaded'
' pushes properly')),
cfg.IntOpt('retries', short='r', default=3, min=0,
help='The number of times to retry while building'),
cfg.MultiOpt('regex', types.String(), positional=True,
help=('Build only images matching regex and its'
' dependencies')),
cfg.StrOpt('registry',
help=('The docker registry host. The default registry host'
' is Docker Hub')),
cfg.StrOpt('save-dependency',
help=('Path to the file to store the docker image'
' dependency in Graphviz dot format')),
cfg.StrOpt('format', short='f', default='json',
choices=['json', 'none'],
help='Format to write the final results in'),
cfg.StrOpt('tarballs-base', default=TARBALLS_BASE,
help='Base url to OpenStack tarballs'),
cfg.StrOpt('type', short='t', default='binary',
choices=INSTALL_TYPE_CHOICES,
dest='install_type',
help=('The method of the OpenStack install.')),
cfg.IntOpt('threads', short='T', default=8, min=1,
help=('The number of threads to use while building.'
' (Note: setting to one will allow real time'
' logging)')),
cfg.StrOpt('tag', default=version.cached_version_string(),
help='The Docker tag'),
cfg.BoolOpt('template-only', default=False,
help="Don't build images. Generate Dockerfile only"),
cfg.IntOpt('timeout', default=120,
help='Time in seconds after which any operation times out'),
cfg.MultiOpt('template-override', types.String(),
help='Path to template override file'),
cfg.MultiOpt('docker-dir', types.String(),
help='Path to additional docker file template directory',
short='D', default=[]),
cfg.StrOpt('logs-dir', help='Path to logs directory'),
cfg.BoolOpt('pull', default=True,
help='Attempt to pull a newer version of the base image'),
cfg.StrOpt('work-dir', help=('Path to be used as working directory.'
' By default, a temporary dir is created')),
cfg.BoolOpt('squash', default=False,
help=('Squash the image layers. WARNING: it will consume lots'
' of disk IO. The "docker-squash" tool is required; install'
' it with "pip install docker-squash"')),
]
_BASE_OPTS = [
cfg.StrOpt('maintainer',
default='Kolla Project (https://launchpad.net/kolla)',
help='Content of the maintainer label'),
cfg.ListOpt('rpm_setup_config', default=[DELOREAN, DELOREAN_DEPS],
help=('Comma separated list of .rpm or .repo file(s) '
'or URL(s) to install before building containers')),
cfg.StrOpt('apt_sources_list', help=('Path to custom sources.list')),
cfg.StrOpt('apt_preferences', help=('Path to custom apt/preferences')),
cfg.BoolOpt('squash-cleanup', default=True,
help='Remove source image from Docker after squashing'),
cfg.StrOpt('squash-tmp-dir',
help='Temporary directory to be used during squashing')
]
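# Per-image source definitions. The $tarballs_base placeholder is substituted
# with the value of the 'tarballs-base' option at build time.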
SOURCES = {
'openstack-base': {
'type': 'url',
'location': ('$tarballs_base/requirements/'
'requirements-master.tar.gz')},
'almanach-base': {
'type': 'url',
'location': ('$tarballs_base/almanach/'
'almanach-master.tar.gz')},
'aodh-base': {
'type': 'url',
'location': ('$tarballs_base/aodh/'
'aodh-master.tar.gz')},
'barbican-base': {
'type': 'url',
'location': ('$tarballs_base/barbican/'
'barbican-master.tar.gz')},
'bifrost-base': {
'type': 'url',
'location': ('$tarballs_base/bifrost/'
'bifrost-master.tar.gz')},
'blazar-base': {
'type': 'url',
'location': ('$tarballs_base/blazar/'
'blazar-master.tar.gz')},
'ceilometer-base': {
'type': 'url',
'location': ('$tarballs_base/ceilometer/'
'ceilometer-master.tar.gz')},
'ceilometer-base-plugin-panko': {
'type': 'url',
'location': ('$tarballs_base/panko/'
'panko-master.tar.gz')},
'cinder-base': {
'type': 'url',
'location': ('$tarballs_base/cinder/'
'cinder-master.tar.gz')},
'congress-base': {
'type': 'url',
'location': ('$tarballs_base/congress/'
'congress-master.tar.gz')},
'cloudkitty-base': {
'type': 'url',
'location': ('$tarballs_base/cloudkitty/'
'cloudkitty-master.tar.gz')},
'crane': {
'type': 'git',
'reference': 'master',
'location': ('https://github.com/pulp/crane.git')},
'designate-base': {
'type': 'url',
'location': ('$tarballs_base/designate/'
'designate-master.tar.gz')},
'dragonflow-base': {
'type': 'url',
'location': ('$tarballs_base/dragonflow/'
'dragonflow-master.tar.gz')},
'ec2-api': {
'type': 'url',
'location': ('$tarballs_base/ec2-api/'
'ec2-api-master.tar.gz')},
'freezer-api': {
'type': 'url',
'location': ('$tarballs_base/freezer-api/'
'freezer-api-master.tar.gz')},
'freezer-base': {
'type': 'url',
'location': ('$tarballs_base/freezer/'
'freezer-master.tar.gz')},
'glance-base': {
'type': 'url',
'location': ('$tarballs_base/glance/'
'glance-master.tar.gz')},
'gnocchi-base': {
'type': 'git',
'reference': 'master',
'location': ('https://github.com/gnocchixyz/'
'gnocchi.git')},
'heat-base': {
'type': 'url',
'location': ('$tarballs_base/heat/'
'heat-master.tar.gz')},
'horizon': {
'type': 'url',
'location': ('$tarballs_base/horizon/'
'horizon-master.tar.gz')},
'horizon-plugin-blazar-dashboard': {
'type': 'url',
'location': ('$tarballs_base/blazar-dashboard/'
'blazar-dashboard-master.tar.gz')},
'horizon-plugin-congress-dashboard': {
'type': 'url',
'location': ('$tarballs_base/congress-dashboard/'
'congress-dashboard-master.tar.gz')},
'horizon-plugin-cloudkitty-dashboard': {
'type': 'url',
'location': ('$tarballs_base/cloudkitty-dashboard/'
'cloudkitty-dashboard-master.tar.gz')},
'horizon-plugin-designate-dashboard': {
'type': 'url',
'location': ('$tarballs_base/designate-dashboard/'
'designate-dashboard-master.tar.gz')},
'horizon-plugin-fwaas-dashboard': {
'type': 'url',
'location': ('$tarballs_base/neutron-fwaas-dashboard/'
'neutron-fwaas-dashboard-master.tar.gz')},
'horizon-plugin-freezer-web-ui': {
'type': 'url',
'location': ('$tarballs_base/freezer-web-ui/'
'freezer-web-ui-master.tar.gz')},
'horizon-plugin-heat-dashboard': {
'type': 'url',
'location': ('$tarballs_base/heat-dashboard/'
'heat-dashboard-master.tar.gz')},
'horizon-plugin-ironic-ui': {
'type': 'url',
'location': ('$tarballs_base/ironic-ui/'
'ironic-ui-master.tar.gz')},
'horizon-plugin-karbor-dashboard': {
'type': 'url',
'location': ('$tarballs_base/karbor-dashboard/'
'karbor-dashboard-master.tar.gz')},
'horizon-plugin-magnum-ui': {
'type': 'url',
'location': ('$tarballs_base/magnum-ui/'
'magnum-ui-master.tar.gz')},
'horizon-plugin-manila-ui': {
'type': 'url',
'location': ('$tarballs_base/manila-ui/'
'manila-ui-master.tar.gz')},
'horizon-plugin-mistral-dashboard': {
'type': 'url',
'location': ('$tarballs_base/mistral-dashboard/'
'mistral-dashboard-master.tar.gz')},
'horizon-plugin-monasca-ui': {
'type': 'url',
'location': ('$tarballs_base/monasca-ui/'
'monasca-ui-master.tar.gz')},
'horizon-plugin-murano-dashboard': {
'type': 'url',
'location': ('$tarballs_base/murano-dashboard/'
'murano-dashboard-master.tar.gz')},
'horizon-plugin-neutron-lbaas-dashboard': {
'type': 'url',
'location': ('$tarballs_base/neutron-lbaas-dashboard/'
'neutron-lbaas-dashboard-master.tar.gz')},
'horizon-plugin-neutron-vpnaas-dashboard': {
'type': 'url',
'location': ('$tarballs_base/neutron-vpnaas-dashboard/'
'neutron-vpnaas-dashboard-master.tar.gz')},
'horizon-plugin-octavia-dashboard': {
'type': 'url',
'location': ('$tarballs_base/octavia-dashboard/'
'octavia-dashboard-master.tar.gz')},
'horizon-plugin-sahara-dashboard': {
'type': 'url',
'location': ('$tarballs_base/sahara-dashboard/'
'sahara-dashboard-master.tar.gz')},
'horizon-plugin-searchlight-ui': {
'type': 'url',
'location': ('$tarballs_base/searchlight-ui/'
'searchlight-ui-master.tar.gz')},
'horizon-plugin-senlin-dashboard': {
'type': 'url',
'location': ('$tarballs_base/senlin-dashboard/'
'senlin-dashboard-master.tar.gz')},
'horizon-plugin-solum-dashboard': {
'type': 'url',
'location': ('$tarballs_base/solum-dashboard/'
'solum-dashboard-master.tar.gz')},
'horizon-plugin-tacker-dashboard': {
'type': 'url',
'location': ('$tarballs_base/tacker-horizon/'
'tacker-horizon-master.tar.gz')},
'horizon-plugin-trove-dashboard': {
'type': 'url',
'location': ('$tarballs_base/trove-dashboard/'
'trove-dashboard-master.tar.gz')},
'horizon-plugin-vitrage-dashboard': {
'type': 'url',
'location': ('$tarballs_base/vitrage-dashboard/'
'vitrage-dashboard-master.tar.gz')},
'horizon-plugin-watcher-dashboard': {
'type': 'url',
'location': ('$tarballs_base/watcher-dashboard/'
'watcher-dashboard-master.tar.gz')},
'horizon-plugin-zaqar-ui': {
'type': 'url',
'location': ('$tarballs_base/zaqar-ui/'
'zaqar-ui-master.tar.gz')},
'horizon-plugin-zun-ui': {
'type': 'url',
'location': ('$tarballs_base/zun-ui/'
'zun-ui-master.tar.gz')},
'ironic-base': {
'type': 'url',
'location': ('$tarballs_base/ironic/'
'ironic-master.tar.gz')},
'ironic-inspector': {
'type': 'url',
'location': ('$tarballs_base/ironic-inspector/'
'ironic-inspector-master.tar.gz')},
'karbor-base': {
'type': 'url',
'location': ('$tarballs_base/karbor/'
'karbor-master.tar.gz')},
'keystone-base': {
'type': 'url',
'location': ('$tarballs_base/keystone/'
'keystone-master.tar.gz')},
'kuryr-base': {
'type': 'url',
'location': ('$tarballs_base/kuryr/'
'kuryr-master.tar.gz')},
'kuryr-libnetwork': {
'type': 'url',
'location': ('$tarballs_base/kuryr-libnetwork/'
'kuryr-libnetwork-master.tar.gz')},
'magnum-base': {
'type': 'url',
'location': ('$tarballs_base/magnum/'
'magnum-master.tar.gz')},
'manila-base': {
'type': 'url',
'location': ('$tarballs_base/manila/'
'manila-master.tar.gz')},
'mistral-base': {
'type': 'url',
'location': ('$tarballs_base/mistral/'
'mistral-master.tar.gz')},
'mistral-base-plugin-tacker': {
'type': 'url',
'location': ('$tarballs_base/tacker/'
'tacker-master.tar.gz')},
'monasca-agent': {
'type': 'url',
'location': ('$tarballs_base/monasca-agent/'
'monasca-agent-master.tar.gz')},
'monasca-api': {
'type': 'url',
'location': ('$tarballs_base/monasca-api/'
'monasca-api-master.tar.gz')},
'monasca-log-api': {
'type': 'url',
'location': ('$tarballs_base/monasca-log-api/'
'monasca-log-api-master.tar.gz')},
'monasca-notification': {
'type': 'url',
'location': ('$tarballs_base/monasca-notification/'
'monasca-notification-master.tar.gz')},
'monasca-persister': {
'type': 'url',
'location': ('$tarballs_base/monasca-persister/'
'monasca-persister-master.tar.gz')},
'monasca-statsd': {
'type': 'url',
'location': ('$tarballs_base/monasca-statsd/'
'monasca-statsd-master.tar.gz')},
# FIXME(dszumski): Use openstack tar when infra is fixed
'monasca-thresh': {
'type': 'url',
'location': ('https://github.com/openstack/monasca-thresh/archive/'
'master.tar.gz')},
'monasca-thresh-additions-monasca-common': {
'type': 'url',
'location': ('$tarballs_base/monasca-common/'
'monasca-common-master.tar.gz')},
'murano-base': {
'type': 'url',
'location': ('$tarballs_base/murano/'
'murano-master.tar.gz')},
'neutron-base': {
'type': 'url',
'location': ('$tarballs_base/neutron/'
'neutron-master.tar.gz')},
'neutron-base-plugin-neutron-fwaas': {
'type': 'url',
'location': ('$tarballs_base/neutron-fwaas/'
'neutron-fwaas-master.tar.gz')},
'neutron-base-plugin-networking-ansible': {
'type': 'url',
'location': ('$tarballs_base/networking-ansible/'
'networking-ansible-master.tar.gz')},
'neutron-base-plugin-networking-baremetal': {
'type': 'url',
'location': ('$tarballs_base/networking-baremetal/'
'networking-baremetal-master.tar.gz')},
'neutron-base-plugin-networking-generic-switch': {
'type': 'url',
'location': ('$tarballs_base/networking-generic-switch/'
'networking-generic-switch-master.tar.gz')},
'neutron-base-plugin-networking-sfc': {
'type': 'url',
'location': ('$tarballs_base/networking-sfc/'
'networking-sfc-master.tar.gz')},
'neutron-base-plugin-vmware-nsx': {
'type': 'url',
'location': ('$tarballs_base/vmware-nsx/'
'vmware-nsx-master.tar.gz')},
'neutron-base-plugin-vpnaas-agent': {
'type': 'url',
'location': ('$tarballs_base/neutron-vpnaas/'
'neutron-vpnaas-master.tar.gz')},
'neutron-bgp-dragent': {
'type': 'url',
'location': ('$tarballs_base/neutron-dynamic-routing/'
'neutron-dynamic-routing-master.tar.gz')},
'neutron-lbaas-agent': {
'type': 'url',
'location': ('$tarballs_base/neutron-lbaas/'
'neutron-lbaas-master.tar.gz')},
'neutron-server-opendaylight-plugin-networking-odl': {
'type': 'url',
'location': ('$tarballs_base/networking-odl/'
'networking-odl-master.tar.gz')},
'neutron-server-opendaylight-plugin-networking-bgpvpn': {
'type': 'url',
'location': ('$tarballs_base/networking-bgpvpn/'
'networking-bgpvpn-master.tar.gz')},
'neutron-server-opendaylight-plugin-networking-l2gw': {
'type': 'url',
'location': ('$tarballs_base/networking-l2gw/'
'networking-l2gw-master.tar.gz')},
'neutron-server-opendaylight-plugin-networking-sfc': {
'type': 'url',
'location': ('$tarballs_base/networking-sfc/'
'networking-sfc-master.tar.gz')},
'neutron-server-plugin-networking-infoblox': {
'type': 'url',
'location': ('$tarballs_base/networking-infoblox/'
'networking-infoblox-master.tar.gz')},
'neutron-server-plugin-neutron-dynamic-routing': {
'type': 'url',
'location': ('$tarballs_base/neutron-dynamic-routing/'
'neutron-dynamic-routing-master.tar.gz')},
'neutron-server-plugin-neutron-lbaas': {
'type': 'url',
'location': ('$tarballs_base/neutron-lbaas/'
'neutron-lbaas-master.tar.gz')},
'neutron-server-plugin-vmware-nsxlib': {
'type': 'url',
'location': ('$tarballs_base/vmware-nsxlib/'
'vmware-nsxlib-master.tar.gz')},
'neutron-vpnaas-agent': {
'type': 'url',
'location': ('$tarballs_base/neutron-vpnaas/'
'neutron-vpnaas-master.tar.gz')},
'neutron-server-ovn-plugin-networking-ovn': {
'type': 'url',
'location': ('$tarballs_base/networking-ovn/'
'networking-ovn-master.tar.gz')},
'neutron-metadata-agent-ovn-plugin-networking-ovn': {
'type': 'url',
'location': ('$tarballs_base/networking-ovn/'
'networking-ovn-master.tar.gz')},
'nova-base': {
'type': 'url',
'location': ('$tarballs_base/nova/'
'nova-master.tar.gz')},
'nova-base-plugin-blazar': {
'type': 'url',
'location': ('$tarballs_base/blazar-nova/'
'blazar-nova-master.tar.gz')},
'nova-base-plugin-mksproxy': {
'type': 'url',
'location': ('$tarballs_base/nova-mksproxy/'
'nova-mksproxy-master.tar.gz')},
'novajoin-base': {
'type': 'url',
'location': ('$tarballs_base/novajoin/'
'novajoin-master.tar.gz')},
'octavia-base': {
'type': 'url',
'location': ('$tarballs_base/octavia/'
'octavia-master.tar.gz')},
'panko-base': {
'type': 'url',
'location': ('$tarballs_base/panko/'
'panko-master.tar.gz')},
'tempest-plugin-tempest-conf': {
'type': 'url',
'location': ('$tarballs_base/python-tempestconf/'
'python-tempestconf-master.tar.gz')},
'tempest-plugin-barbican': {
'type': 'url',
'location': ('$tarballs_base/barbican-tempest-plugin/'
'barbican-tempest-plugin-master.tar.gz')},
'tempest-plugin-blazar': {
'type': 'url',
'location': ('$tarballs_base/blazar-tempest-plugin/'
'blazar-tempest-plugin-master.tar.gz')},
'tempest-plugin-cinder': {
'type': 'url',
'location': ('$tarballs_base/cinder-tempest-plugin/'
'cinder-tempest-plugin-master.tar.gz')},
'tempest-plugin-congress': {
'type': 'url',
'location': ('$tarballs_base/congress-tempest-plugin/'
'congress-tempest-plugin-master.tar.gz')},
'tempest-plugin-ec2api': {
'type': 'url',
'location': ('$tarballs_base/ec2api-tempest-plugin/'
'ec2api-tempest-plugin-master.tar.gz')},
'tempest-plugin-heat': {
'type': 'url',
'location': ('$tarballs_base/heat-tempest-plugin/'
'heat-tempest-plugin-master.tar.gz')},
'tempest-plugin-ironic': {
'type': 'url',
'location': ('$tarballs_base/ironic-tempest-plugin/'
'ironic-tempest-plugin-master.tar.gz')},
'tempest-plugin-keystone': {
'type': 'url',
'location': ('$tarballs_base/keystone-tempest-plugin/'
'keystone-tempest-plugin-master.tar.gz')},
'tempest-plugin-magnum': {
'type': 'url',
'location': ('$tarballs_base/magnum-tempest-plugin/'
'magnum-tempest-plugin-master.tar.gz')},
'tempest-plugin-manila': {
'type': 'url',
'location': ('$tarballs_base/manila-tempest-plugin/'
'manila-tempest-plugin-master.tar.gz')},
'tempest-plugin-mistral': {
'type': 'url',
'location': ('$tarballs_base/mistral-tempest-plugin/'
'mistral-tempest-plugin-master.tar.gz')},
'tempest-plugin-monasca': {
'type': 'url',
'location': ('$tarballs_base/monasca-tempest-plugin/'
'monasca-tempest-plugin-master.tar.gz')},
'tempest-plugin-murano': {
'type': 'url',
'location': ('$tarballs_base/murano-tempest-plugin/'
'murano-tempest-plugin-master.tar.gz')},
'tempest-plugin-neutron': {
'type': 'url',
'location': ('$tarballs_base/neutron-tempest-plugin/'
'neutron-tempest-plugin-master.tar.gz')},
'tempest-plugin-patrole': {
'type': 'url',
'location': ('$tarballs_base/patrole/'
'patrole-master.tar.gz')},
'tempest-plugin-telemetry': {
'type': 'url',
'location': ('$tarballs_base/telemetry-tempest-plugin/'
'telemetry-tempest-plugin-master.tar.gz')},
'tempest-plugin-tripleo-common': {
'type': 'url',
'location': ('$tarballs_base/tripleo-common-tempest-plugin/'
'tripleo-common-tempest-plugin-master.tar.gz')},
'tempest-plugin-trove': {
'type': 'url',
'location': ('$tarballs_base/trove-tempest-plugin/'
'trove-tempest-plugin-master.tar.gz')},
'tempest-plugin-vitrage': {
'type': 'url',
'location': ('$tarballs_base/vitrage-tempest-plugin/'
'vitrage-tempest-plugin-master.tar.gz')},
'tempest-plugin-watcher': {
'type': 'url',
'location': ('$tarballs_base/watcher-tempest-plugin/'
'watcher-tempest-plugin-master.tar.gz')},
'tempest-plugin-zaqar': {
'type': 'url',
'location': ('$tarballs_base/zaqar-tempest-plugin/'
'zaqar-tempest-plugin-master.tar.gz')},
'rally': {
'type': 'url',
'location': ('$tarballs_base/rally/'
'rally-master.tar.gz')},
'sahara-base': {
'type': 'url',
'location': ('$tarballs_base/sahara/'
'sahara-master.tar.gz')},
'searchlight-base': {
'type': 'url',
'location': ('$tarballs_base/searchlight/'
'searchlight-master.tar.gz')},
'senlin-base': {
'type': 'url',
'location': ('$tarballs_base/senlin/'
'senlin-master.tar.gz')},
'solum-base': {
'type': 'url',
'location': ('$tarballs_base/solum/'
'solum-master.tar.gz')},
'swift-base': {
'type': 'url',
'location': ('$tarballs_base/swift/'
'swift-master.tar.gz')},
'tacker-base': {
'type': 'url',
'location': ('$tarballs_base/tacker/'
'tacker-master.tar.gz')},
'tacker-base-plugin-networking-sfc': {
'type': 'url',
'location': ('$tarballs_base/networking-sfc/'
'networking-sfc-master.tar.gz')},
'tempest': {
'type': 'url',
'location': ('$tarballs_base/tempest/'
'tempest-master.tar.gz')},
'tripleoclient': {
'type': 'url',
'location': ('$tarballs_base/python-tripleoclient/'
'tripleoclient-master.tar.gz')},
'tripleo-ui': {
'type': 'url',
'location': ('$tarballs_base/tripleo-ui/'
'tripleo-ui-latest.tar.gz')},
'trove-base': {
'type': 'url',
'location': ('$tarballs_base/trove/'
'trove-master.tar.gz')},
'vitrage-base': {
'type': 'url',
'location': ('$tarballs_base/vitrage/'
'vitrage-master.tar.gz')},
'vmtp': {
'type': 'url',
'location': ('$tarballs_base/vmtp/'
'vmtp-master.tar.gz')},
'watcher-base': {
'type': 'url',
'location': ('$tarballs_base/watcher/'
'watcher-master.tar.gz')},
'zaqar': {
'type': 'url',
'location': ('$tarballs_base/zaqar/'
'zaqar-master.tar.gz')},
'zun-base': {
'type': 'url',
'location': ('$tarballs_base/zun/'
'zun-master.tar.gz')}
}
# NOTE(SamYaple): Only increment the UID. Never reuse old or removed UIDs.
# Starting point 42400+ was chosen arbitrarily to ensure no conflicts
USERS = {
'kolla-user': {
'uid': 42400,
'gid': 42400,
},
'ansible-user': {
'uid': 42401,
'gid': 42401,
},
'aodh-user': {
'uid': 42402,
'gid': 42402,
},
'barbican-user': {
'uid': 42403,
'gid': 42403,
},
'bifrost-user': {
'uid': 42404,
'gid': 42404,
},
'ceilometer-user': {
'uid': 42405,
'gid': 42405,
},
'chrony-user': {
'uid': 42406,
'gid': 42406,
},
'cinder-user': {
'uid': 42407,
'gid': 42407,
},
'cloudkitty-user': {
'uid': 42408,
'gid': 42408,
},
'collectd-user': {
'uid': 42409,
'gid': 42409,
},
'congress-user': {
'uid': 42410,
'gid': 42410,
},
'designate-user': {
'uid': 42411,
'gid': 42411,
},
'elasticsearch-user': {
'uid': 42412,
'gid': 42412,
},
'etcd-user': {
'uid': 42413,
'gid': 42413,
},
'freezer-user': {
'uid': 42414,
'gid': 42414,
},
'glance-user': {
'uid': 42415,
'gid': 42415,
},
'gnocchi-user': {
'uid': 42416,
'gid': 42416,
},
'grafana-user': {
'uid': 42417,
'gid': 42417,
},
'heat-user': {
'uid': 42418,
'gid': 42418,
},
'horizon-user': {
'uid': 42420,
'gid': 42420,
},
'influxdb-user': {
'uid': 42421,
'gid': 42421,
},
'ironic-user': {
'uid': 42422,
'gid': 42422,
},
'kafka-user': {
'uid': 42423,
'gid': 42423,
},
'keystone-user': {
'uid': 42425,
'gid': 42425,
},
'kibana-user': {
'uid': 42426,
'gid': 42426,
},
'qemu-user': {
'uid': 42427,
'gid': 42427,
},
'magnum-user': {
'uid': 42428,
'gid': 42428,
},
'manila-user': {
'uid': 42429,
'gid': 42429,
},
'mistral-user': {
'uid': 42430,
'gid': 42430,
},
'monasca-user': {
'uid': 42431,
'gid': 42431,
},
'mongodb-user': {
'uid': 42432,
'gid': 65534,
},
'murano-user': {
'uid': 42433,
'gid': 42433,
},
'mysql-user': {
'uid': 42434,
'gid': 42434,
},
'neutron-user': {
'uid': 42435,
'gid': 42435,
},
'nova-user': {
'uid': 42436,
'gid': 42436,
},
'octavia-user': {
'uid': 42437,
'gid': 42437,
},
'panko-user': {
'uid': 42438,
'gid': 42438,
},
'rabbitmq-user': {
'uid': 42439,
'gid': 42439,
},
'rally-user': {
'uid': 42440,
'gid': 42440,
},
'sahara-user': {
'uid': 42441,
'gid': 42441,
},
'searchlight-user': {
'uid': 42442,
'gid': 42442,
},
'senlin-user': {
'uid': 42443,
'gid': 42443,
},
'solum-user': {
'uid': 42444,
'gid': 42444,
},
'swift-user': {
'uid': 42445,
'gid': 42445,
},
'tacker-user': {
'uid': 42446,
'gid': 42446,
},
'td-agent-user': {
'uid': 42447,
'gid': 42447,
},
'telegraf-user': {
'uid': 42448,
'gid': 42448,
},
'trove-user': {
'uid': 42449,
'gid': 42449,
},
'vmtp-user': {
'uid': 42450,
'gid': 42450,
},
'watcher-user': {
'uid': 42451,
'gid': 42451,
},
'zaqar-user': {
'uid': 42452,
'gid': 42452,
},
'zookeeper-user': {
'uid': 42453,
'gid': 42453,
},
'haproxy-user': {
'uid': 42454,
'gid': 42454,
},
'ceph-user': {
'uid': 64045,
'gid': 64045,
},
'memcached-user': {
'uid': 42457,
'gid': 42457,
},
'karbor-user': {
'uid': 42458,
'gid': 42458,
},
'vitrage-user': {
'uid': 42459,
'gid': 42459,
},
'redis-user': {
'uid': 42460,
'gid': 42460,
},
'ironic-inspector-user': {
'uid': 42461,
'gid': 42461,
},
'odl-user': {
'uid': 42462,
'gid': 42462,
},
'zun-user': {
'uid': 42463,
'gid': 42463,
},
'dragonflow-user': {
'uid': 42464,
'gid': 42464,
},
'qdrouterd-user': {
'uid': 42465,
'gid': 42465,
},
'ec2api-user': {
'uid': 42466,
'gid': 42466,
},
'sensu-user': {
'uid': 42467,
'gid': 42467,
},
'skydive-user': {
'uid': 42468,
'gid': 42468,
},
'kuryr-user': {
'uid': 42469,
'gid': 42469,
},
'novajoin-user': {
'uid': 42470,
'gid': 42470,
},
'blazar-user': {
'uid': 42471,
'gid': 42471,
},
'prometheus-user': {
'uid': 42472,
'gid': 42472,
},
'libvirt-user': {
'uid': 42473, # unused user, but we need the group for socket access
'gid': 42473,
},
'fluentd-user': {
'uid': 42474,
'gid': 42474,
},
'almanach-user': {
'uid': 42475,
'gid': 42475,
},
'openvswitch-user': {
'uid': 42476, # unused user
'gid': 42476,
},
'hugetlbfs-user': {
'uid': 42477, # unused user, but we need the group for vhost socket
'gid': 42477,
},
'logstash-user': {
'uid': 42478,
'gid': 42478,
},
'storm-user': {
'uid': 42479,
'gid': 42479,
},
'tempest-user': {
'uid': 42480,
'gid': 42480,
}
}
def get_source_opts(type_=None, location=None, reference=None):
return [cfg.StrOpt('type', choices=['local', 'git', 'url'],
default=type_,
help='Source location type'),
cfg.StrOpt('location', default=location,
help='The location for source install'),
cfg.StrOpt('reference', default=reference,
help=('Git reference to pull, commit sha, tag '
'or branch name'))]
def get_user_opts(uid, gid):
return [
cfg.IntOpt('uid', default=uid, help='The user id'),
cfg.IntOpt('gid', default=gid, help='The group id'),
]
def gen_all_user_opts():
for name, params in USERS.items():
uid = params['uid']
gid = params['gid']
yield name, get_user_opts(uid, gid)
def gen_all_source_opts():
for name, params in SOURCES.items():
type_ = params['type']
location = params['location']
reference = params.get('reference')
yield name, get_source_opts(type_, location, reference)
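# Standard oslo.config list_opts hook: exposes every option group above so
# tools such as oslo-config-generator can build a sample configuration
# (assumption based on the usual oslo.config convention).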
def list_opts():
return itertools.chain([(None, _CLI_OPTS),
(None, _BASE_OPTS),
('profiles', _PROFILE_OPTS)],
gen_all_source_opts(),
gen_all_user_opts(),
)
def parse(conf, args, usage=None, prog=None,
default_config_files=None):
conf.register_cli_opts(_CLI_OPTS)
conf.register_opts(_BASE_OPTS)
conf.register_opts(_PROFILE_OPTS, group='profiles')
for name, opts in gen_all_source_opts():
conf.register_opts(opts, name)
for name, opts in gen_all_user_opts():
conf.register_opts(opts, name)
conf(args=args,
project='kolla',
usage=usage,
prog=prog,
version=version.cached_version_string(),
default_config_files=default_config_files)
# NOTE(jeffrey4l): set the default base tag based on the
# base option
conf.set_default('base_tag', DEFAULT_BASE_TAGS.get(conf.base))
if not conf.base_image:
conf.base_image = conf.base
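# A minimal usage sketch (illustrative, not part of the original module): how a
# command-line entry point might call parse() to populate its options. The
# program name and argument handling below are assumptions.
if __name__ == '__main__':  # example only
    import sys
    example_conf = cfg.ConfigOpts()
    parse(example_conf, sys.argv[1:], prog='kolla-build')
    print(example_conf.base, example_conf.base_tag, example_conf.install_type)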
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import unittest, time
from rapidsms.backends.backend import Backend
from rapidsms.message import Message
from harness import MockRouter
class TestBackend(unittest.TestCase):
def setUp (self):
self.router = MockRouter()
self.backend = Backend(self.router)
self.router.add_backend(self.backend)
def test__properties (self):
self.assertEquals(self.backend.title, "backend")
self.assertEquals(self.backend.router, self.router)
self.assertFalse(self.backend.running)
def test_start_stop (self):
self.router.start()
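# the sleeps below give the backend worker thread time to change state
# before we assert on it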
time.sleep(0.5)
self.assertTrue(self.backend.running, "backend starts when router starts")
self.router.stop()
time.sleep(2.5)
self.assertFalse(self.backend.running, "backend stops when router stops")
def test_message (self):
msg = self.backend.message("0000", "Good morning!")
self.assertEquals(type(msg), Message, "message() returns a message")
def test_route (self):
msg = self.backend.message("0000", "Good morning!")
self.backend.route(msg)
self.assertTrue(self.router.message_waiting, "backend sends to router")
if __name__ == "__main__":
unittest.main()
|