max_stars_repo_path (string, length 4–245) | max_stars_repo_name (string, length 7–115) | max_stars_count (int64, 101–368k) | id (string, length 2–8) | content (string, length 6–1.03M)
---|---|---|---|---
fortnitepy/ext/commands/bot.py | gfdb/fortnitepy | 127 | 5159 |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import logging
import inspect
import asyncio
import types
import sys
import importlib
import collections
import traceback
from typing import Any, List, Optional, Mapping, Set
from fortnitepy.client import Client
from fortnitepy.auth import Auth
from fortnitepy.typedefs import MaybeCoro, ListOrTuple
from ._types import _BaseCommand
from .errors import (ExtensionFailed, ExtensionMissingEntryPoint,
ExtensionNotLoaded, ExtensionAlreadyLoaded,
ExtensionNotFound, CheckFailure, CommandError,
CommandNotFound)
from .core import GroupMixin
from .cog import Cog
from .view import StringView
from .context import Context
from .help import HelpCommand, FortniteHelpCommand
from .typedefs import Message
log = logging.getLogger(__name__)
def _is_submodule(parent: str, child: str) -> bool:
return parent == child or child.startswith(parent + ".")
class _DefaultRepr:
def __repr__(self) -> str:
return '<default-help-command>'
_default = _DefaultRepr()
class Bot(GroupMixin, Client):
"""Represents a fortnite bot.
This class is a subclass of :class:`fortnitepy.Client` and as a result
anything that you can do with a :class:`fortnitepy.Client` you can do with
this bot.
This class also subclasses :class:`.GroupMixin` to provide the
functionality to manage commands.
Attributes
-----------
command_prefix
The command prefix is what the message content must contain initially
to have a command invoked. This prefix could either be a string to
indicate what the prefix should be, or a callable that takes in the bot
as its first parameter and :class:`fortnitepy.FriendMessage` or
:class:`fortnitepy.PartyMessage` as its second parameter and returns
the prefix. This is to facilitate "dynamic" command prefixes. This
callable can be either a regular function or a coroutine.
An empty string as the prefix always matches, enabling prefix-less
command invocation.
The command prefix could also be an iterable of strings indicating that
multiple checks for the prefix should be used and the first one to
match will be the invocation prefix. You can get this prefix via
:attr:`.Context.prefix`. To avoid confusion empty iterables are not
allowed.
.. note::
When passing multiple prefixes be careful to not pass a prefix
that matches a longer prefix occurring later in the sequence. For
example, if the command prefix is ``('!', '!?')`` the ``'!?'``
prefix will never be matched to any message as the previous one
matches messages starting with ``!?``. This is especially important
when passing an empty string, it should always be last as no prefix
after it will be matched.
case_insensitive: :class:`bool`
Whether the commands should be case insensitive. Defaults to ``False``.
This attribute does not carry over to groups. You must set it to every
group if you require group commands to be case insensitive as well.
description: :class:`str`
The content prefixed into the default help message.
help_command: Optional[:class:`.HelpCommand`]
The help command implementation to use. This can be dynamically
set at runtime. To remove the help command pass ``None``. For more
information on implementing a help command, see
:ref:`ext_commands_help_command`.
owner_id: Optional[:class:`str`]
The user ID that owns the bot. This is used by :meth:`.is_owner()`
and checks that call this method.
owner_ids: Optional[Collection[:class:`str`]]
The user IDs that own the bot. This is similar to `owner_id`.
For performance reasons it is recommended to use a :class:`set`
for the collection. You cannot set both `owner_id` and `owner_ids`.
This is used by :meth:`.is_owner()` and checks that call this method.
"""
def __init__(self, command_prefix: Any, auth: Auth, *,
help_command: Optional[HelpCommand] = _default,
description: Optional[str] = None,
**kwargs: Any) -> None:
kwargs['case_insensitive'] = kwargs.get('case_insensitive', False)
super().__init__(auth, **kwargs)
self.command_prefix = command_prefix
self.description = inspect.cleandoc(description) if description else ''
self.owner_id = kwargs.get('owner_id')
self.owner_ids = kwargs.get('owner_ids', set())
if self.owner_id and self.owner_ids:
raise TypeError('Both owner_id and owner_ids are set.')
if (self.owner_ids and not isinstance(self.owner_ids,
collections.abc.Collection)):
raise TypeError(
'owner_ids must be a collection not '
'{0.__class__!r}'.format(self.owner_ids)
)
self.__cogs = {}
self.__extensions = {}
self._checks = []
self._check_once = []
self._help_command = None
self._before_invoke = None
self._after_invoke = None
if help_command is _default:
self.help_command = FortniteHelpCommand()
else:
self.help_command = help_command
self.add_event_handler('friend_message', self.process_commands)
self.add_event_handler('party_message', self.process_commands)
def register_methods(self) -> None:
for _, obj in inspect.getmembers(self):
if isinstance(obj, _BaseCommand):
obj.instance = self
if obj.parent is None:
try:
self.add_command(obj)
except CommandError:
traceback.print_exc()
continue
super().register_methods()
async def close(self, *,
close_http: bool = True,
dispatch_close: bool = True) -> None:
if dispatch_close:
await asyncio.gather(
self.dispatch_and_wait_event('before_close'),
self.dispatch_and_wait_event('close'),
)
for extension in tuple(self.__extensions):
try:
self.unload_extension(extension)
except Exception:
pass
for cog in tuple(self.__cogs):
try:
self.remove_cog(cog)
except Exception:
pass
await self._close(
close_http=close_http,
dispatch_close=dispatch_close
)
def check(self, func: MaybeCoro) -> MaybeCoro:
r"""A decorator that adds a check globally to every command.
.. note::
This function can either be a regular function or a coroutine.
This function takes a single parameter, :class:`.Context`, and can
only raise exceptions inherited from :exc:`.CommandError`.
Example
-------
.. code-block:: python3
@bot.check
def global_check(ctx):
# Allows only party commands.
return ctx.party is not None
"""
self.add_check(func)
return func
def add_check(self, func: MaybeCoro, *,
call_once: bool = False) -> None:
"""Adds a global check to the bot.
This is the non-decorator interface to :meth:`.check`
and :meth:`.check_once`.
Parameters
----------
func
The function that was used as a global check.
call_once: :class:`bool`
If the function should only be called once per
:meth:`Command.invoke` call.
"""
if call_once:
self._check_once.append(func)
else:
self._checks.append(func)
def remove_check(self, func: MaybeCoro, *,
call_once: bool = False) -> None:
"""Removes a global check from the bot.
Parameters
----------
func
The function to remove from the global checks.
call_once: :class:`bool`
If the function was added with ``call_once=True`` in
the :meth:`.Bot.add_check` call or using :meth:`.check_once`.
"""
list_ = self._check_once if call_once else self._checks
try:
list_.remove(func)
except ValueError:
pass
def check_once(self, func: MaybeCoro) -> MaybeCoro:
r"""A decorator that adds a "call once" global check to the bot.
Unlike regular global checks, this one is called only once
per :meth:`Command.invoke` call.
Regular global checks are called whenever a command is called
or :meth:`.Command.can_run` is called. This type of check
bypasses that and ensures that it's called only once, even inside
the default help command.
.. note::
This function can either be a regular function or a coroutine.
This function takes a single parameter, :class:`.Context`, and can
only raise exceptions inherited from :exc:`.CommandError`.
Example
-------
.. code-block:: python3
@bot.check_once
def whitelist(ctx):
return ctx.message.author.id in my_whitelist
"""
self.add_check(func, call_once=True)
return func
async def can_run(self, ctx: Context, *,
call_once: bool = False) -> bool:
data = self._check_once if call_once else self._checks
if len(data) == 0:
return True
for func in data:
if asyncio.iscoroutinefunction(func):
res = await func(ctx)
else:
res = func(ctx)
if not res:
return False
return True
async def is_owner(self, user_id: str) -> bool:
"""|coro|
Checks if a user id is the owner of the bot.
Parameters
----------
user_id: :class:`str`
The user id to check for.
Returns
-------
:class:`bool`
Whether the user is the owner.
"""
if self.owner_id:
return user_id == self.owner_id
else:
return user_id in self.owner_ids
def before_invoke(self, coro: MaybeCoro) -> MaybeCoro:
"""A decorator that registers a coroutine as a pre-invoke hook.
A pre-invoke hook is called directly before the command is
called. This makes it a useful function to set up database
connections or any type of set up required.
This pre-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
The :meth:`~.Bot.before_invoke` and :meth:`~.Bot.after_invoke`
hooks are only called if all checks and argument parsing
procedures pass without error. If any check or argument parsing
procedures fail then the hooks are not called.
Parameters
----------
coro
The coroutine to register as the pre-invoke hook.
Raises
------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('The pre-invoke hook must be a coroutine.')
self._before_invoke = coro
return coro
def after_invoke(self, coro: MaybeCoro) -> MaybeCoro:
r"""A decorator that registers a coroutine as a post-invoke hook.
A post-invoke hook is called directly after the command is
called. This makes it a useful function to clean-up database
connections or any type of clean up required.
This post-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
Similar to :meth:`~.Bot.before_invoke`\, this is not called unless
checks and argument parsing procedures succeed. This hook is,
however, **always** called regardless of the internal command
callback raising an error (i.e. :exc:`.CommandInvokeError`\).
This makes it ideal for clean-up scenarios.
Parameters
----------
coro:
The coroutine to register as the post-invoke hook.
Raises
------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('The post-invoke hook must be a coroutine.')
self._after_invoke = coro
return coro
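# Usage sketch for the invoke hooks (the ``db`` helper is hypothetical; only
# the decorator usage itself comes from this class):
#
#   @bot.before_invoke
#   async def open_connection(ctx):
#       ctx.db = await db.acquire()
#
#   @bot.after_invoke
#   async def close_connection(ctx):
#       await ctx.db.release()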
def add_cog(self, cog: Cog) -> None:
"""Adds a "cog" to the bot.
A cog is a class that has its own event listeners and commands.
Parameters
----------
cog: :class:`.Cog`
The cog to register to the bot.
Raises
------
TypeError
The cog does not inherit from :class:`.Cog`.
CommandError
An error happened during loading.
"""
if not isinstance(cog, Cog):
raise TypeError('Cogs must derive from Cog.')
cog = cog._inject(self)
self.__cogs[cog.__cog_name__] = cog
def remove_cog(self, name: str) -> None:
"""Removes a cog from the bot.
All registered commands and event listeners that the
cog has registered will be removed as well.
If no cog is found then this method has no effect.
Parameters
----------
name: :class:`str`
The name of the cog to remove.
"""
cog = self.__cogs.pop(name, None)
if cog is None:
return
help_command = self.help_command
if help_command and help_command.cog is cog:
help_command.cog = None
cog._eject(self)
def get_cog(self, name: str) -> Optional[Cog]:
"""Gets the cog instance requested.
If the cog is not found, ``None`` is returned instead.
Parameters
-----------
name: :class:`str`
The name of the cog you are requesting.
This is equivalent to the name passed via keyword
argument in class creation or the class name if unspecified.
"""
return self.__cogs.get(name)
@property
def cogs(self) -> Mapping[str, Cog]:
"""Mapping[:class:`str`, :class:`Cog`]: A read-only mapping of cog
name to cog.
"""
return types.MappingProxyType(self.__cogs)
def _remove_module_references(self, name: str) -> None:
# find all references to the module
# remove the cogs registered from the module
for cogname, cog in self.__cogs.copy().items():
if _is_submodule(name, cog.__module__):
self.remove_cog(cogname)
# remove all the commands from the module
for cmd in self.all_commands.copy().values():
if cmd.module is not None and _is_submodule(name, cmd.module):
if isinstance(cmd, GroupMixin):
cmd.recursively_remove_all_commands()
self.remove_command(cmd.name)
# remove all the listeners from the module
for event_list in self._events.copy().values():
remove = []
for index, event in enumerate(event_list):
if (event.__module__ is not None
and _is_submodule(name, event.__module__)):
remove.append(index)
for index in reversed(remove):
del event_list[index]
def _call_module_finalizers(self, lib: object, key: str) -> None:
try:
func = getattr(lib, 'cog_teardown')
except AttributeError:
pass
else:
try:
func(self)
except Exception:
pass
finally:
self.__extensions.pop(key, None)
sys.modules.pop(key, None)
name = lib.__name__
for module in list(sys.modules.keys()):
if _is_submodule(name, module):
del sys.modules[module]
def _load_from_module_spec(self, spec: types.ModuleType,
key: str) -> None:
# precondition: key not in self.__extensions
lib = importlib.util.module_from_spec(spec)
sys.modules[key] = lib
try:
spec.loader.exec_module(lib)
except Exception as e:
del sys.modules[key]
raise ExtensionFailed(key, e) from e
try:
setup = getattr(lib, 'extension_setup')
except AttributeError:
del sys.modules[key]
raise ExtensionMissingEntryPoint(key)
try:
setup(self)
except Exception as e:
del sys.modules[key]
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, key)
raise ExtensionFailed(key, e) from e
else:
self.__extensions[key] = lib
def load_extension(self, name: str) -> None:
"""Loads an extension.
An extension is a python module that contains commands, cogs, or
listeners.
An extension must have a global function, ``extension_setup`` defined
as the entry point on what to do when the extension is loaded. This
entry point must have a single argument, the ``bot``.
Parameters
----------
name: :class:`str`
The extension name to load. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
Raises
------
ExtensionNotFound
The extension could not be imported.
ExtensionAlreadyLoaded
The extension is already loaded.
ExtensionMissingEntryPoint
The extension does not have an ``extension_setup`` function.
ExtensionFailed
The extension or its setup function had an execution error.
"""
if name in self.__extensions:
raise ExtensionAlreadyLoaded(name)
spec = importlib.util.find_spec(name)
if spec is None:
raise ExtensionNotFound(name)
self._load_from_module_spec(spec, name)
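# Sketch of a loadable extension module, e.g. a hypothetical ``foo/test.py``
# loaded with ``bot.load_extension('foo.test')``. Only the
# ``extension_setup``/``cog_teardown`` entry points are required by this
# class; the command body is illustrative.
#
#   # foo/test.py
#   def extension_setup(bot):
#       @bot.command()
#       async def ping(ctx):
#           await ctx.send('pong')
#
#   def cog_teardown(bot):
#       pass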
def unload_extension(self, name: str) -> None:
"""Unloads an extension.
When the extension is unloaded, all commands, listeners, and cogs are
removed from the bot and the module is un-imported.
The extension can provide an optional global function,
``cog_teardown``, to do miscellaneous clean-up if necessary. This
function takes a single parameter, the ``bot``, similar to
``extension_setup`` from :meth:`~.Bot.load_extension`.
Parameters
------------
name: :class:`str`
The extension name to unload. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
Raises
-------
ExtensionNotLoaded
The extension was not loaded.
"""
lib = self.__extensions.get(name)
if lib is None:
raise ExtensionNotLoaded(name)
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, name)
def reload_extension(self, name: str) -> None:
"""Atomically reloads an extension.
This replaces the extension with the same extension, only refreshed.
This is equivalent to a :meth:`unload_extension` followed by
a :meth:`load_extension` except done in an atomic way. That is, if an
operation fails mid-reload then the bot will roll-back to the prior
working state.
Parameters
------------
name: :class:`str`
The extension name to reload. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
Raises
-------
ExtensionNotLoaded
The extension was not loaded.
ExtensionNotFound
The extension could not be imported.
ExtensionMissingEntryPoint
The extension does not have an ``extension_setup`` function.
ExtensionFailed
The extension setup function had an execution error.
"""
lib = self.__extensions.get(name)
if lib is None:
raise ExtensionNotLoaded(name)
# get the previous module states from sys modules
modules = {
name: module
for name, module in sys.modules.items()
if _is_submodule(lib.__name__, name)
}
try:
# Unload and then load the module...
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, name)
self.load_extension(name)
except Exception:
# if the load failed, the remnants should have been
# cleaned from the load_extension function call
# so let's load it from our old compiled library.
lib.extension_setup(self)
self.__extensions[name] = lib
# revert sys.modules back to normal and raise back to caller
sys.modules.update(modules)
raise
@property
def extensions(self) -> Mapping[str, types.ModuleType]:
"""Mapping[:class:`str`, :class:`py:types.ModuleType`]: A read-only
mapping of extension name to extension.
"""
return types.MappingProxyType(self.__extensions)
@property
def help_command(self) -> Optional[HelpCommand]:
return self._help_command
@help_command.setter
def help_command(self, value: Optional[HelpCommand]) -> None:
if value is not None:
if not isinstance(value, HelpCommand):
raise TypeError('help_command must be a subclass '
'of HelpCommand')
if self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = value
value._add_to_bot(self)
elif self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = None
else:
self._help_command = None
async def get_prefix(self, message: Message) -> Any:
"""|coro|
Retrieves the prefix the bot is listening to with the message as
a context.
Parameters
----------
message: Union[:class:`fortnitepy.FriendMessage`, :class:`fortnitepy.PartyMessage`]
The message context to get the prefix of.
Returns
--------
Union[List[:class:`str`], :class:`str`]
A list of prefixes or a single prefix that the bot is
listening for.
""" # noqa
prefix = ret = self.command_prefix
if callable(prefix):
if asyncio.iscoroutinefunction(prefix):
ret = await prefix(self, message)
else:
ret = prefix(self, message)
if not isinstance(ret, str):
try:
ret = list(ret)
except TypeError:
# It's possible that a generator raised this exception. Don't
# replace it with our own error if that's the case.
if isinstance(ret, collections.abc.Iterable):
raise
raise TypeError('command_prefix must be plain string, '
'iterable of strings, or callable '
'returning either of these, not '
'{}'.format(ret.__class__.__name__))
if not ret:
raise ValueError('Iterable command_prefix must contain at '
'least one prefix')
return ret
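# Sketch of a dynamic prefix callable as accepted by ``command_prefix``
# (the friend-message check is illustrative, not taken from this file):
#
#   def dynamic_prefix(bot, message):
#       # Shorter prefix for private friend messages, '!' elsewhere.
#       if isinstance(message, fortnitepy.FriendMessage):
#           return ''
#       return '!'
#
#   bot = commands.Bot(command_prefix=dynamic_prefix, auth=...)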
async def get_context(self, message: Message, *,
cls: Context = Context) -> Context:
r"""|coro|
Returns the invocation context from the message.
This is a more low-level counter-part for :meth:`.process_commands`
to allow users more fine grained control over the processing.
The returned context is not guaranteed to be a valid invocation
context, :attr:`.Context.valid` must be checked to make sure it is.
If the context is not valid then it is not a valid candidate to be
invoked under :meth:`~.Bot.invoke`.
Parameters
----------
message: Union[:class:`fortnitepy.FriendMessage`, :class:`fortnitepy.PartyMessage`]
The message to get the invocation context from.
cls
The factory class that will be used to create the context.
By default, this is :class:`.Context`. Should a custom
class be provided, it must be similar enough to :class:`.Context`\'s
interface.
Returns
-------
:class:`.Context`
The invocation context. The type of this can change via the
``cls`` parameter.
""" # noqa
view = StringView(message.content)
ctx = cls(prefix=None, view=view, bot=self, message=message)
prefix = await self.get_prefix(message)
invoked_prefix = prefix
if isinstance(prefix, str):
if not view.skip_string(prefix):
return ctx
else:
try:
if message.content.startswith(tuple(prefix)):
for element in prefix:
if view.skip_string(element):
invoked_prefix = element
break
else:
invoked_prefix = None
else:
return ctx
except TypeError:
if not isinstance(prefix, list):
raise TypeError('get_prefix must return either a string '
'or a list of strings, not '
'{}'.format(prefix.__class__.__name__))
for value in prefix:
if not isinstance(value, str):
raise TypeError('Iterable command_prefix or list '
'returned from get_prefix must '
'contain only strings, not '
'{}'.format(value.__class__.__name__))
raise
invoker = view.get_word()
ctx.invoked_with = invoker
ctx.prefix = invoked_prefix
ctx.command = self.all_commands.get(invoker)
return ctx
def _print_error(self, ctx: Context, error: Exception) -> None:
print(
'Ignoring exception in command {}:'.format(ctx.command),
file=sys.stderr
)
traceback.print_exception(
type(error),
error,
error.__traceback__,
file=sys.stderr
)
async def wait_for_futures(self, futures: ListOrTuple, *,
check: Optional[callable] = None,
timeout: Optional[int] = None,
cancel: bool = False) -> Optional[asyncio.Future]:
def _cancel_futs(pending_futures: Set[asyncio.Future]) -> None:
for p in pending_futures:
if not p.cancelled():
p.cancel()
pending = futures
while pending:
done, pending = await asyncio.wait(
pending,
return_when=asyncio.FIRST_COMPLETED,
timeout=timeout
)
# Set should only contain one value
for future in done:
if check is None or check(future):
if cancel:
_cancel_futs(pending)
return future
async def _wait_for_error_return(self, futures: List[asyncio.Future],
ctx: Context,
error: Exception) -> None:
def check(future):
return future.result() is False
ret = await self.wait_for_futures(futures, check=check)
if isinstance(ret, asyncio.Future):
self._print_error(ctx, error)
def dispatch_error(self, ctx: Context, error: Exception) -> None:
if self._event_has_handler('command_error'):
futures = self.dispatch_event('command_error', ctx, error)
asyncio.ensure_future(self._wait_for_error_return(
futures,
ctx,
error
))
else:
self._print_error(ctx, error)
async def invoke(self, ctx: Context) -> None:
"""|coro|
Invokes the command given under the invocation context and
handles all the internal event dispatch mechanisms.
Parameters
-----------
ctx: :class:`.Context`
The invocation context to invoke.
"""
if ctx.command is not None:
self.dispatch_event('command', ctx)
try:
if await self.can_run(ctx, call_once=True):
await ctx.command.invoke(ctx)
else:
raise CheckFailure('The global check once functions '
'failed.')
except CommandError as exc:
await ctx.command.dispatch_error(ctx, exc)
else:
self.dispatch_event('command_completion', ctx)
elif ctx.invoked_with:
exc = CommandNotFound('Command "{}" is not found'
''.format(ctx.invoked_with))
self.dispatch_error(ctx, exc)
async def process_commands(self, message: Message) -> None:
"""|coro|
This function processes the commands that have been registered
to the bot and other groups. Without this coroutine, none of the
commands will be triggered.
By default, this coroutine is called automatically when a new
message is received.
This is built using other low level tools, and is equivalent to a
call to :meth:`~.Bot.get_context` followed by a call to
:meth:`~.Bot.invoke`.
Parameters
-----------
message: Union[:class:`fortnitepy.FriendMessage`, :class:`fortnitepy.PartyMessage`]
The message to process commands for.
""" # noqa
if message.author.id == self.user.id:
return
ctx = await self.get_context(message)
await self.invoke(ctx)
|
src/data_loading.py | katerakelly/pytorch-maml | 565 | 5185 |
import numpy as np
import random
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import Sampler
import torchvision.transforms as transforms
from dataset import Omniglot, MNIST
'''
Helpers for loading class-balanced few-shot tasks
from datasets
'''
class ClassBalancedSampler(Sampler):
'''
Samples class-balanced batches from 'num_cl' pools each
of size 'num_inst'
If 'batch_cutoff' is None, indices for iterating over batches
of the entire dataset will be returned
Otherwise, indices for the number of batches up to the batch_cutoff
will be returned
(This is to allow sampling with replacement across training iterations)
'''
def __init__(self, num_cl, num_inst, batch_cutoff=None):
self.num_cl = num_cl
self.num_inst = num_inst
self.batch_cutoff = batch_cutoff
def __iter__(self):
'''return a single list of indices, assuming that items will be grouped by class '''
# First construct batches of 1 instance per class
batches = [[i+j*self.num_inst for i in torch.randperm(self.num_inst)] for j in range(self.num_cl)]
batches = [[batches[j][i] for j in range(self.num_cl)] for i in range(self.num_inst)]
# Shuffle within each batch so that classes don't always appear in same order
for sublist in batches:
random.shuffle(sublist)
if self.batch_cutoff is not None:
random.shuffle(batches)
batches = batches[:self.batch_cutoff]
batches = [item for sublist in batches for item in sublist]
return iter(batches)
def __len__(self):
return 1
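# Worked example of the index construction above (illustrative numbers, not
# from the original code): with num_cl=2 and num_inst=3, items are assumed to
# be stored class-by-class as indices [0, 1, 2, 3, 4, 5]. The first
# comprehension builds per-class permutations such as [[2, 0, 1], [4, 5, 3]];
# the second interleaves them into groups of one instance per class,
# e.g. [[2, 4], [0, 5], [1, 3]], which are then shuffled and flattened.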
def get_data_loader(task, batch_size=1, split='train'):
# NOTE: batch size here is # instances PER CLASS
if task.dataset == 'mnist':
normalize = transforms.Normalize(mean=[0.13066, 0.13066, 0.13066], std=[0.30131, 0.30131, 0.30131])
dset = MNIST(task, transform=transforms.Compose([transforms.ToTensor(), normalize]), split=split)
else:
normalize = transforms.Normalize(mean=[0.92206, 0.92206, 0.92206], std=[0.08426, 0.08426, 0.08426])
dset = Omniglot(task, transform=transforms.Compose([transforms.ToTensor(), normalize]), split=split)
sampler = ClassBalancedSampler(task.num_cl, task.num_inst, batch_cutoff = (None if split != 'train' else batch_size))
loader = DataLoader(dset, batch_size=batch_size*task.num_cl, sampler=sampler, num_workers=1, pin_memory=True)
return loader
|
observations/r/bomsoi.py | hajime9652/observations | 199 | 5203 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def bomsoi(path):
"""Southern Oscillation Index Data
The Southern Oscillation Index (SOI) is the difference in barometric
pressure at sea level between Tahiti and Darwin. Annual SOI and
Australian rainfall data, for the years 1900-2001, are given.
Australia's annual mean rainfall is an area-weighted average of the
total annual precipitation at approximately 370 rainfall stations around
the country.
This data frame contains the following columns:
Year
a numeric vector
Jan
average January SOI values for each year
Feb
average February SOI values for each year
Mar
average March SOI values for each year
Apr
average April SOI values for each year
May
average May SOI values for each year
Jun
average June SOI values for each year
Jul
average July SOI values for each year
Aug
average August SOI values for each year
Sep
average September SOI values for each year
Oct
average October SOI values for each year
Nov
average November SOI values for each year
Dec
average December SOI values for each year
SOI
a numeric vector consisting of average annual SOI values
avrain
a numeric vector consisting of a weighted average annual rainfall at
a large number of Australian sites
NTrain
Northern Territory rain
northRain
north rain
seRain
southeast rain
eastRain
east rain
southRain
south rain
swRain
southwest rain
Australian Bureau of Meteorology web pages:
http://www.bom.gov.au/climate/change/rain02.txt and
http://www.bom.gov.au/climate/current/soihtm1.shtml
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `bomsoi.csv`.
Returns:
Tuple of np.ndarray `x_train` with 106 rows and 21 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'bomsoi.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/DAAG/bomsoi.csv'
maybe_download_and_extract(path, url,
save_file_name='bomsoi.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
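# Usage sketch (the path is a placeholder; the CSV is downloaded on first
# call as described in the docstring):
#
#   x_train, metadata = bomsoi('~/data')
#   x_train.shape          # expected: (106, 21)
#   metadata['columns']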
|
mol_dqn/experimental/multi_obj.py | deepneuralmachine/google-research | 23,901 | 5208 |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Generates molecules that satisfy two targets.
Target1: SAS
Target2: QED
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
from absl import app
from absl import flags
from rdkit import Chem
from rdkit.Chem import QED
from rdkit.Contrib import SA_Score
from tensorflow.compat.v1 import gfile
from mol_dqn.chemgraph.mcts import deep_q_networks
from mol_dqn.chemgraph.mcts import molecules as molecules_mdp
from mol_dqn.chemgraph.mcts import run_dqn
from mol_dqn.chemgraph.tensorflow import core
flags.DEFINE_float('target_sas', 1, 'The target SAS of the molecule.')
flags.DEFINE_float('target_qed', 0.5, 'The target QED of the molecule.')
flags.DEFINE_float('gamma', 0.999, 'discount')
FLAGS = flags.FLAGS
class MultiObjectiveRewardMolecule(molecules_mdp.Molecule):
"""Defines the subclass of generating a molecule with a specific reward.
The reward is defined as a 1-D vector with 2 entries, one per objective:
reward = (-|SAS - target_sas|, -|QED - target_qed|)
"""
def _reward(self):
"""Calculates the reward of the current state.
The reward penalizes the distance of the molecule's SAS and QED values
from their respective targets.
Returns:
A tuple of the negative absolute differences from the SAS and QED targets.
"""
# If the state is empty or does not parse into a valid molecule,
# the reward is zero for both objectives.
if self._state is None:
return 0.0, 0.0
mol = Chem.MolFromSmiles(self._state)
if mol is None:
return 0.0, 0.0
qed_value = QED.qed(mol)
sas = SA_Score.sascorer.calculateScore(mol)
return -abs(sas - FLAGS.target_sas), -abs(qed_value - FLAGS.target_qed)
def soft_cst(v, l, r):
if l <= v <= r:
return 1
return -min(abs(l - v), abs(r - v))
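# Illustrative values for soft_cst above (not from the original code): the
# function returns 1 inside [l, r] and decays linearly with the distance to
# the nearest bound outside it, e.g.
#   soft_cst(2.0, 1.8, 2.2) ->  1
#   soft_cst(2.5, 1.8, 2.2) -> -0.3  (up to float rounding)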
class Molecule(molecules_mdp.Molecule):
"""SAS and QED reward molecule."""
def _reward(self):
"""Calculates the reward of the current state.
The reward combines soft range constraints on the SAS and QED values,
discounted by the number of remaining steps in the episode.
Returns:
A float: (soft_cst(SAS) + soft_cst(QED)) * gamma ** (steps remaining).
"""
# If the state is empty or does not parse into a valid molecule,
# the reward is zero.
if self._state is None:
return 0.0, 0.0
mol = Chem.MolFromSmiles(self._state)
if mol is None:
return 0.0, 0.0
qed_value = QED.qed(mol)
sas = SA_Score.sascorer.calculateScore(mol)
# c1 = soft_cst(sas, FLAGS.target_sas - 0.2, FLAGS.target_sas + 0.2)
# c2 = soft_cst(qed_value, FLAGS.target_qed - 0.1, FLAGS.target_qed + 0.1)
# # if c1 < 0 and c2 < 0:
# # return - c1 * c2
# # else:
# # return c1 * c2
return (soft_cst(sas, FLAGS.target_sas - 0.2, FLAGS.target_sas + 0.2) +
soft_cst(qed_value, FLAGS.target_qed - 0.1,
FLAGS.target_qed + 0.1)) * FLAGS.gamma**(
self.max_steps - self._counter)
def main(argv):
del argv
if FLAGS.hparams is not None:
with gfile.Open(FLAGS.hparams, 'r') as f:
hparams = deep_q_networks.get_hparams(**json.load(f))
else:
hparams = deep_q_networks.get_hparams()
hparams.add_hparam('target_qed', FLAGS.target_qed)
hparams.add_hparam('target_sas', FLAGS.target_sas)
environment = Molecule(
atom_types=set(hparams.atom_types),
init_mol='CCc1c(C)[nH]c2CCC(CN3CCOCC3)C(=O)c12',
allow_removal=hparams.allow_removal,
allow_no_modification=hparams.allow_no_modification,
allow_bonds_between_rings=False,
allowed_ring_sizes={3, 4, 5, 6},
max_steps=hparams.max_steps_per_episode)
dqn = deep_q_networks.DeepQNetwork(
input_shape=(hparams.batch_size, hparams.fingerprint_length + 1),
q_fn=functools.partial(
deep_q_networks.multi_layer_model, hparams=hparams),
optimizer=hparams.optimizer,
grad_clipping=hparams.grad_clipping,
num_bootstrap_heads=hparams.num_bootstrap_heads,
gamma=hparams.gamma,
epsilon=1.0)
run_dqn.run_training(
hparams=hparams,
environment=environment,
dqn=dqn,
)
core.write_hparams(hparams, os.path.join(FLAGS.model_dir, 'config.json'))
if __name__ == '__main__':
app.run(main)
|
src/generate_class_specific_samples.py | HesterLim/pytorch-cnn-visualizations | 6,725 | 5212 | """
Created on Thu Oct 26 14:19:44 2017
@author: <NAME> - github.com/utkuozbulak
"""
import os
import numpy as np
import torch
from torch.optim import SGD
from torchvision import models
from misc_functions import preprocess_image, recreate_image, save_image
class ClassSpecificImageGeneration():
"""
Produces an image that maximizes a certain class with gradient ascent
"""
def __init__(self, model, target_class):
self.mean = [-0.485, -0.456, -0.406]
self.std = [1/0.229, 1/0.224, 1/0.225]
self.model = model
self.model.eval()
self.target_class = target_class
# Generate a random image
self.created_image = np.uint8(np.random.uniform(0, 255, (224, 224, 3)))
# Create the folder to export images if not exists
if not os.path.exists('../generated/class_'+str(self.target_class)):
os.makedirs('../generated/class_'+str(self.target_class))
def generate(self, iterations=150):
"""Generates class specific image
Keyword Arguments:
iterations {int} -- Total iterations for gradient ascent (default: {150})
Returns:
np.ndarray -- Final maximally activated class image
"""
initial_learning_rate = 6
for i in range(1, iterations):
# Process image and return variable
self.processed_image = preprocess_image(self.created_image, False)
# Define optimizer for the image
optimizer = SGD([self.processed_image], lr=initial_learning_rate)
# Forward
output = self.model(self.processed_image)
# Target specific class
class_loss = -output[0, self.target_class]
if i % 10 == 0 or i == iterations-1:
print('Iteration:', str(i), 'Loss',
"{0:.2f}".format(class_loss.data.numpy()))
# Zero grads
self.model.zero_grad()
# Backward
class_loss.backward()
# Update image
optimizer.step()
# Recreate image
self.created_image = recreate_image(self.processed_image)
if i % 10 == 0 or i == iterations-1:
# Save image
im_path = '../generated/class_'+str(self.target_class)+'/c_'+str(self.target_class)+'_'+'iter_'+str(i)+'.png'
save_image(self.created_image, im_path)
return self.processed_image
if __name__ == '__main__':
target_class = 130 # Flamingo
pretrained_model = models.alexnet(pretrained=True)
csig = ClassSpecificImageGeneration(pretrained_model, target_class)
csig.generate()
|
features/steps/section.py | revvsales/python-docx-1 | 3,031 | 5217 |
# encoding: utf-8
"""
Step implementations for section-related features
"""
from __future__ import absolute_import, print_function, unicode_literals
from behave import given, then, when
from docx import Document
from docx.enum.section import WD_ORIENT, WD_SECTION
from docx.section import Section
from docx.shared import Inches
from helpers import test_docx
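# These step implementations match Gherkin lines such as the following
# (illustrative examples assembled from the step phrases defined below, not
# copied from a feature file):
#   Given a section having known page dimension
#   When I set the section page width to 8.5 inches
#   Then the reported page width is 8.5 inches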
# given ====================================================
@given("a Section object as section")
def given_a_Section_object_as_section(context):
context.section = Document(test_docx("sct-section-props")).sections[-1]
@given("a Section object {with_or_without} a distinct first-page header as section")
def given_a_Section_object_with_or_without_first_page_header(context, with_or_without):
section_idx = {"with": 1, "without": 0}[with_or_without]
context.section = Document(test_docx("sct-first-page-hdrftr")).sections[section_idx]
@given('a section collection containing 3 sections')
def given_a_section_collection_containing_3_sections(context):
document = Document(test_docx('doc-access-sections'))
context.sections = document.sections
@given('a section having known page dimension')
def given_a_section_having_known_page_dimension(context):
document = Document(test_docx('sct-section-props'))
context.section = document.sections[-1]
@given('a section having known page margins')
def given_a_section_having_known_page_margins(context):
document = Document(test_docx('sct-section-props'))
context.section = document.sections[0]
@given('a section having start type {start_type}')
def given_a_section_having_start_type(context, start_type):
section_idx = {
'CONTINUOUS': 0,
'NEW_PAGE': 1,
'ODD_PAGE': 2,
'EVEN_PAGE': 3,
'NEW_COLUMN': 4,
}[start_type]
document = Document(test_docx('sct-section-props'))
context.section = document.sections[section_idx]
@given('a section known to have {orientation} orientation')
def given_a_section_having_known_orientation(context, orientation):
section_idx = {
'landscape': 0,
'portrait': 1
}[orientation]
document = Document(test_docx('sct-section-props'))
context.section = document.sections[section_idx]
# when =====================================================
@when("I assign {bool_val} to section.different_first_page_header_footer")
def when_I_assign_value_to_section_different_first_page_hdrftr(context, bool_val):
context.section.different_first_page_header_footer = eval(bool_val)
@when('I set the {margin_side} margin to {inches} inches')
def when_I_set_the_margin_side_length(context, margin_side, inches):
prop_name = {
'left': 'left_margin',
'right': 'right_margin',
'top': 'top_margin',
'bottom': 'bottom_margin',
'gutter': 'gutter',
'header': 'header_distance',
'footer': 'footer_distance',
}[margin_side]
new_value = Inches(float(inches))
setattr(context.section, prop_name, new_value)
@when('I set the section orientation to {orientation}')
def when_I_set_the_section_orientation(context, orientation):
new_orientation = {
'WD_ORIENT.PORTRAIT': WD_ORIENT.PORTRAIT,
'WD_ORIENT.LANDSCAPE': WD_ORIENT.LANDSCAPE,
'None': None,
}[orientation]
context.section.orientation = new_orientation
@when('I set the section page height to {y} inches')
def when_I_set_the_section_page_height_to_y_inches(context, y):
context.section.page_height = Inches(float(y))
@when('I set the section page width to {x} inches')
def when_I_set_the_section_page_width_to_x_inches(context, x):
context.section.page_width = Inches(float(x))
@when('I set the section start type to {start_type}')
def when_I_set_the_section_start_type_to_start_type(context, start_type):
new_start_type = {
'None': None,
'CONTINUOUS': WD_SECTION.CONTINUOUS,
'EVEN_PAGE': WD_SECTION.EVEN_PAGE,
'NEW_COLUMN': WD_SECTION.NEW_COLUMN,
'NEW_PAGE': WD_SECTION.NEW_PAGE,
'ODD_PAGE': WD_SECTION.ODD_PAGE,
}[start_type]
context.section.start_type = new_start_type
# then =====================================================
@then('I can access a section by index')
def then_I_can_access_a_section_by_index(context):
sections = context.sections
for idx in range(3):
section = sections[idx]
assert isinstance(section, Section)
@then('I can iterate over the sections')
def then_I_can_iterate_over_the_sections(context):
sections = context.sections
actual_count = 0
for section in sections:
actual_count += 1
assert isinstance(section, Section)
assert actual_count == 3
@then('len(sections) is 3')
def then_len_sections_is_3(context):
sections = context.sections
assert len(sections) == 3, (
'expected len(sections) of 3, got %s' % len(sections)
)
@then("section.different_first_page_header_footer is {bool_val}")
def then_section_different_first_page_header_footer_is(context, bool_val):
actual = context.section.different_first_page_header_footer
expected = eval(bool_val)
assert actual == expected, (
"section.different_first_page_header_footer is %s" % actual
)
@then("section.even_page_footer is a _Footer object")
def then_section_even_page_footer_is_a_Footer_object(context):
actual = type(context.section.even_page_footer).__name__
expected = "_Footer"
assert actual == expected, "section.even_page_footer is a %s object" % actual
@then("section.even_page_header is a _Header object")
def then_section_even_page_header_is_a_Header_object(context):
actual = type(context.section.even_page_header).__name__
expected = "_Header"
assert actual == expected, "section.even_page_header is a %s object" % actual
@then("section.first_page_footer is a _Footer object")
def then_section_first_page_footer_is_a_Footer_object(context):
actual = type(context.section.first_page_footer).__name__
expected = "_Footer"
assert actual == expected, "section.first_page_footer is a %s object" % actual
@then("section.first_page_header is a _Header object")
def then_section_first_page_header_is_a_Header_object(context):
actual = type(context.section.first_page_header).__name__
expected = "_Header"
assert actual == expected, "section.first_page_header is a %s object" % actual
@then("section.footer is a _Footer object")
def then_section_footer_is_a_Footer_object(context):
actual = type(context.section.footer).__name__
expected = "_Footer"
assert actual == expected, "section.footer is a %s object" % actual
@then("section.header is a _Header object")
def then_section_header_is_a_Header_object(context):
actual = type(context.section.header).__name__
expected = "_Header"
assert actual == expected, "section.header is a %s object" % actual
@then("section.{propname}.is_linked_to_previous is True")
def then_section_hdrftr_prop_is_linked_to_previous_is_True(context, propname):
actual = getattr(context.section, propname).is_linked_to_previous
expected = True
assert actual == expected, (
"section.%s.is_linked_to_previous is %s" % (propname, actual)
)
@then('the reported {margin_side} margin is {inches} inches')
def then_the_reported_margin_is_inches(context, margin_side, inches):
prop_name = {
'left': 'left_margin',
'right': 'right_margin',
'top': 'top_margin',
'bottom': 'bottom_margin',
'gutter': 'gutter',
'header': 'header_distance',
'footer': 'footer_distance',
}[margin_side]
expected_value = Inches(float(inches))
actual_value = getattr(context.section, prop_name)
assert actual_value == expected_value
@then('the reported page orientation is {orientation}')
def then_the_reported_page_orientation_is_orientation(context, orientation):
expected_value = {
'WD_ORIENT.LANDSCAPE': WD_ORIENT.LANDSCAPE,
'WD_ORIENT.PORTRAIT': WD_ORIENT.PORTRAIT,
}[orientation]
assert context.section.orientation == expected_value
@then('the reported page width is {x} inches')
def then_the_reported_page_width_is_width(context, x):
assert context.section.page_width == Inches(float(x))
@then('the reported page height is {y} inches')
def then_the_reported_page_height_is_11_inches(context, y):
assert context.section.page_height == Inches(float(y))
@then('the reported section start type is {start_type}')
def then_the_reported_section_start_type_is_type(context, start_type):
expected_start_type = {
'CONTINUOUS': WD_SECTION.CONTINUOUS,
'EVEN_PAGE': WD_SECTION.EVEN_PAGE,
'NEW_COLUMN': WD_SECTION.NEW_COLUMN,
'NEW_PAGE': WD_SECTION.NEW_PAGE,
'ODD_PAGE': WD_SECTION.ODD_PAGE,
}[start_type]
assert context.section.start_type == expected_start_type
|
coltran/run.py | DionysisChristopoulos/google-research | 23,901 | 5220 |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ColTran: Training and Continuous Evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
import time
from absl import app
from absl import flags
from absl import logging
from ml_collections import config_flags
import tensorflow as tf
import tensorflow_datasets as tfds
from coltran import datasets
from coltran.models import colorizer
from coltran.models import upsampler
from coltran.utils import train_utils
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=missing-docstring
# pylint: disable=not-callable
# pylint: disable=g-long-lambda
flags.DEFINE_enum('mode', 'train', [
'train', 'eval_train', 'eval_valid', 'eval_test'], 'Operation mode.')
flags.DEFINE_string('logdir', '/tmp/svt', 'Main directory for logs.')
flags.DEFINE_string('master', 'local',
'BNS name of the TensorFlow master to use.')
flags.DEFINE_enum('accelerator_type', 'GPU', ['CPU', 'GPU', 'TPU'],
'Hardware type.')
flags.DEFINE_enum('dataset', 'imagenet', ['imagenet', 'custom'], 'Dataset')
flags.DEFINE_string('data_dir', None, 'Data directory for custom images.')
flags.DEFINE_string('tpu_worker_name', 'tpu_worker', 'Name of the TPU worker.')
flags.DEFINE_string(
'pretrain_dir', None, 'Finetune from a pretrained checkpoint.')
flags.DEFINE_string('summaries_log_dir', 'summaries', 'Summaries parent.')
flags.DEFINE_integer('steps_per_summaries', 100, 'Steps per summaries.')
flags.DEFINE_integer('devices_per_worker', 1, 'Number of devices per worker.')
flags.DEFINE_integer('num_workers', 1, 'Number workers.')
config_flags.DEFINE_config_file(
'config',
default='test_configs/colorizer.py',
help_string='Training configuration file.')
FLAGS = flags.FLAGS
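# Example invocation (a sketch only; the config path is a placeholder and the
# module invocation style is an assumption, not taken from this file):
#
#   python -m coltran.run --config=path/to/colorizer_config.py \
#       --mode=train --logdir=/tmp/coltran --dataset=imagenet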
def restore_checkpoint(model, ema, strategy, latest_ckpt=None, optimizer=None):
if optimizer is None:
ckpt_func = functools.partial(
train_utils.create_checkpoint, models=model, ema=ema)
else:
ckpt_func = functools.partial(
train_utils.create_checkpoint, models=model, ema=ema,
optimizer=optimizer)
checkpoint = train_utils.with_strategy(ckpt_func, strategy)
if latest_ckpt:
logging.info('Restoring from pretrained directory: %s', latest_ckpt)
train_utils.with_strategy(lambda: checkpoint.restore(latest_ckpt), strategy)
return checkpoint
def is_tpu():
return FLAGS.accelerator_type == 'TPU'
def loss_on_batch(inputs, model, config, training=False):
"""Loss on a batch of inputs."""
logits, aux_output = model.get_logits(
inputs_dict=inputs, train_config=config, training=training)
loss, aux_loss_dict = model.loss(
targets=inputs, logits=logits, train_config=config, training=training,
aux_output=aux_output)
loss_factor = config.get('loss_factor', 1.0)
loss_dict = collections.OrderedDict()
loss_dict['loss'] = loss
total_loss = loss_factor * loss
for aux_key, aux_loss in aux_loss_dict.items():
aux_loss_factor = config.get(f'{aux_key}_loss_factor', 1.0)
loss_dict[aux_key] = aux_loss
total_loss += aux_loss_factor * aux_loss
loss_dict['total_loss'] = total_loss
extra_info = collections.OrderedDict([
('scalar', loss_dict),
])
return total_loss, extra_info
def train_step(config,
model,
optimizer,
metrics,
ema=None,
strategy=None):
"""Training StepFn."""
def step_fn(inputs):
"""Per-Replica StepFn."""
with tf.GradientTape() as tape:
loss, extra = loss_on_batch(inputs, model, config, training=True)
scaled_loss = loss
if strategy:
scaled_loss /= float(strategy.num_replicas_in_sync)
grads = tape.gradient(scaled_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
for metric_key, metric in metrics.items():
metric.update_state(extra['scalar'][metric_key])
if ema is not None:
ema.apply(model.trainable_variables)
return loss
return train_utils.step_with_strategy(step_fn, strategy)
def build(config, batch_size, is_train=False):
optimizer = train_utils.build_optimizer(config)
ema_vars = []
downsample = config.get('downsample', False)
downsample_res = config.get('downsample_res', 64)
h, w = config.resolution
if config.model.name == 'coltran_core':
if downsample:
h, w = downsample_res, downsample_res
zero = tf.zeros((batch_size, h, w, 3), dtype=tf.int32)
model = colorizer.ColTranCore(config.model)
model(zero, training=is_train)
c = 1 if is_train else 3
if config.model.name == 'color_upsampler':
if downsample:
h, w = downsample_res, downsample_res
zero_slice = tf.zeros((batch_size, h, w, c), dtype=tf.int32)
zero = tf.zeros((batch_size, h, w, 3), dtype=tf.int32)
model = upsampler.ColorUpsampler(config.model)
model(zero, inputs_slice=zero_slice, training=is_train)
elif config.model.name == 'spatial_upsampler':
zero_slice = tf.zeros((batch_size, h, w, c), dtype=tf.int32)
zero = tf.zeros((batch_size, h, w, 3), dtype=tf.int32)
model = upsampler.SpatialUpsampler(config.model)
model(zero, inputs_slice=zero_slice, training=is_train)
ema_vars = model.trainable_variables
ema = train_utils.build_ema(config, ema_vars)
return model, optimizer, ema
###############################################################################
## Train.
###############################################################################
def train(logdir):
config = FLAGS.config
steps_per_write = FLAGS.steps_per_summaries
train_utils.write_config(config, logdir)
strategy, batch_size = train_utils.setup_strategy(
config, FLAGS.master,
FLAGS.devices_per_worker, FLAGS.mode, FLAGS.accelerator_type)
def input_fn(input_context=None):
read_config = None
if input_context is not None:
read_config = tfds.ReadConfig(input_context=input_context)
dataset = datasets.get_dataset(
name=FLAGS.dataset,
config=config,
batch_size=config.batch_size,
subset='train',
read_config=read_config,
data_dir=FLAGS.data_dir)
return dataset
# DATASET CREATION.
logging.info('Building dataset.')
train_dataset = train_utils.dataset_with_strategy(input_fn, strategy)
data_iterator = iter(train_dataset)
# MODEL BUILDING
logging.info('Building model.')
model, optimizer, ema = train_utils.with_strategy(
lambda: build(config, batch_size, True), strategy)
model.summary(120, print_fn=logging.info)
# METRIC CREATION.
metrics = {}
metric_keys = ['loss', 'total_loss']
metric_keys += model.metric_keys
for metric_key in metric_keys:
func = functools.partial(tf.keras.metrics.Mean, metric_key)
curr_metric = train_utils.with_strategy(func, strategy)
metrics[metric_key] = curr_metric
# CHECKPOINTING LOGIC.
if FLAGS.pretrain_dir is not None:
pretrain_ckpt = tf.train.latest_checkpoint(FLAGS.pretrain_dir)
assert pretrain_ckpt
# Load the entire model without the optimizer from the checkpoints.
restore_checkpoint(model, ema, strategy, pretrain_ckpt, optimizer=None)
# New tf.train.Checkpoint instance with a reset optimizer.
checkpoint = restore_checkpoint(
model, ema, strategy, latest_ckpt=None, optimizer=optimizer)
else:
latest_ckpt = tf.train.latest_checkpoint(logdir)
checkpoint = restore_checkpoint(
model, ema, strategy, latest_ckpt, optimizer=optimizer)
checkpoint = tf.train.CheckpointManager(
checkpoint, directory=logdir, checkpoint_name='model', max_to_keep=10)
if optimizer.iterations.numpy() == 0:
checkpoint_name = checkpoint.save()
logging.info('Saved checkpoint to %s', checkpoint_name)
train_summary_dir = os.path.join(logdir, 'train_summaries')
writer = tf.summary.create_file_writer(train_summary_dir)
start_time = time.time()
logging.info('Start Training.')
# This hack of wrapping up multiple train steps with a tf.function call
# speeds up training significantly.
# See: https://www.tensorflow.org/guide/tpu#improving_performance_by_multiple_steps_within_tffunction # pylint: disable=line-too-long
@tf.function
def train_multiple_steps(iterator, steps_per_epoch):
train_step_f = train_step(config, model, optimizer, metrics, ema,
strategy)
for _ in range(steps_per_epoch):
train_step_f(iterator)
while optimizer.iterations.numpy() < config.get('max_train_steps', 1000000):
num_train_steps = optimizer.iterations
for metric_key in metric_keys:
metrics[metric_key].reset_states()
start_run = time.time()
train_multiple_steps(data_iterator, tf.convert_to_tensor(steps_per_write))
steps_per_sec = steps_per_write / (time.time() - start_run)
with writer.as_default():
for metric_key, metric in metrics.items():
metric_np = metric.result().numpy()
tf.summary.scalar(metric_key, metric_np, step=num_train_steps)
if metric_key == 'total_loss':
logging.info('Loss: %.3f bits/dim, Speed: %.3f steps/second',
metric_np, steps_per_sec)
if time.time() - start_time > config.save_checkpoint_secs:
checkpoint_name = checkpoint.save()
logging.info('Saved checkpoint to %s', checkpoint_name)
start_time = time.time()
###############################################################################
## Evaluating.
###############################################################################
def evaluate(logdir, subset):
"""Executes the evaluation loop."""
config = FLAGS.config
strategy, batch_size = train_utils.setup_strategy(
config, FLAGS.master,
FLAGS.devices_per_worker, FLAGS.mode, FLAGS.accelerator_type)
def input_fn(_=None):
return datasets.get_dataset(
name=config.dataset,
config=config,
batch_size=config.eval_batch_size,
subset=subset)
model, optimizer, ema = train_utils.with_strategy(
lambda: build(config, batch_size, False), strategy)
metric_keys = ['loss', 'total_loss']
# metric_keys += model.metric_keys
metrics = {}
for metric_key in metric_keys:
func = functools.partial(tf.keras.metrics.Mean, metric_key)
curr_metric = train_utils.with_strategy(func, strategy)
metrics[metric_key] = curr_metric
checkpoints = train_utils.with_strategy(
lambda: train_utils.create_checkpoint(model, optimizer, ema),
strategy)
dataset = train_utils.dataset_with_strategy(input_fn, strategy)
def step_fn(batch):
_, extra = loss_on_batch(batch, model, config, training=False)
for metric_key in metric_keys:
curr_metric = metrics[metric_key]
curr_scalar = extra['scalar'][metric_key]
curr_metric.update_state(curr_scalar)
num_examples = config.eval_num_examples
eval_step = train_utils.step_with_strategy(step_fn, strategy)
ckpt_path = None
wait_max = config.get(
'eval_checkpoint_wait_secs', config.save_checkpoint_secs * 100)
is_ema = True if ema else False
eval_summary_dir = os.path.join(
logdir, 'eval_{}_summaries_pyk_{}'.format(subset, is_ema))
writer = tf.summary.create_file_writer(eval_summary_dir)
while True:
ckpt_path = train_utils.wait_for_checkpoint(logdir, ckpt_path, wait_max)
logging.info(ckpt_path)
if ckpt_path is None:
logging.info('Timed out waiting for checkpoint.')
break
train_utils.with_strategy(
lambda: train_utils.restore(model, checkpoints, logdir, ema),
strategy)
data_iterator = iter(dataset)
num_steps = num_examples // batch_size
for metric_key, metric in metrics.items():
metric.reset_states()
logging.info('Starting evaluation.')
done = False
for i in range(0, num_steps, FLAGS.steps_per_summaries):
start_run = time.time()
for k in range(min(num_steps - i, FLAGS.steps_per_summaries)):
try:
if k % 10 == 0:
logging.info('Step: %d', (i + k + 1))
eval_step(data_iterator)
except (StopIteration, tf.errors.OutOfRangeError):
done = True
break
if done:
break
bits_per_dim = metrics['loss'].result()
logging.info('Bits/Dim: %.3f, Speed: %.3f seconds/step, Step: %d/%d',
bits_per_dim,
(time.time() - start_run) / FLAGS.steps_per_summaries,
i + k + 1, num_steps)
# logging.info('Final Bits/Dim: %.3f', bits_per_dim)
with writer.as_default():
for metric_key, metric in metrics.items():
curr_scalar = metric.result().numpy()
tf.summary.scalar(metric_key, curr_scalar, step=optimizer.iterations)
def main(_):
logging.info('Logging to %s.', FLAGS.logdir)
if FLAGS.mode == 'train':
logging.info('[main] I am the trainer.')
try:
train(FLAGS.logdir)
# During TPU preemption, the coordinator hangs with the error below.
# The exception forces the coordinator to fail, and it will be restarted.
except (tf.errors.UnavailableError, tf.errors.CancelledError):
os._exit(os.EX_TEMPFAIL) # pylint: disable=protected-access
elif FLAGS.mode.startswith('train'):
logging.info('[main] I am the trainer.')
train(os.path.join(FLAGS.logdir, FLAGS.mode))
elif FLAGS.mode == 'eval_train':
logging.info('[main] I am the training set evaluator.')
evaluate(FLAGS.logdir, subset='train')
elif FLAGS.mode == 'eval_valid':
logging.info('[main] I am the validation set evaluator.')
evaluate(FLAGS.logdir, subset='valid')
elif FLAGS.mode == 'eval_test':
logging.info('[main] I am the test set evaluator.')
evaluate(FLAGS.logdir, subset='test')
else:
raise ValueError(
'Unknown mode {}. '
'Must be one of [train, eval_train, eval_valid, eval_test]'.format(
FLAGS.mode))
if __name__ == '__main__':
app.run(main)
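# Example invocation (a hedged sketch: the script name and config path are
# placeholders, but --config, --logdir and --mode are the flags consumed above):
#
#   python train.py --config=configs/my_config.py --logdir=/tmp/run0 --mode=train
#   python train.py --config=configs/my_config.py --logdir=/tmp/run0 --mode=eval_valid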
|
pyctcdecode/__init__.py | kensho-technologies/pyctcdecode | 203 | 5237 | # Copyright 2021-present Kensho Technologies, LLC.
from .alphabet import Alphabet # noqa
from .decoder import BeamSearchDecoderCTC, build_ctcdecoder # noqa
from .language_model import LanguageModel # noqa
__package_name__ = "pyctcdecode"
__version__ = "0.3.0"
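# Minimal usage sketch (hedged): `labels` and `logits` are caller-supplied
# placeholders; build_ctcdecoder is the factory exported above.
#
#   from pyctcdecode import build_ctcdecoder
#   decoder = build_ctcdecoder(labels)   # labels: list of vocabulary characters
#   text = decoder.decode(logits)        # logits: (time, vocab) array of log-probabilities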
|
platypush/backend/joystick/linux/__init__.py | BlackLight/platypush | 228 | 5239 | import array
import struct
import time
from fcntl import ioctl
from typing import IO
from platypush.backend import Backend
from platypush.message.event.joystick import JoystickConnectedEvent, JoystickDisconnectedEvent, \
JoystickButtonPressedEvent, JoystickButtonReleasedEvent, JoystickAxisEvent
class JoystickLinuxBackend(Backend):
"""
This backend intercepts events from joystick devices through the native Linux API implementation.
It is loosely based on https://gist.github.com/rdb/8864666, which itself uses the
`Linux kernel joystick API <https://www.kernel.org/doc/Documentation/input/joystick-api.txt>`_ to interact with
the devices.
Triggers:
* :class:`platypush.message.event.joystick.JoystickConnectedEvent` when the joystick is connected.
* :class:`platypush.message.event.joystick.JoystickDisconnectedEvent` when the joystick is disconnected.
* :class:`platypush.message.event.joystick.JoystickButtonPressedEvent` when a joystick button is pressed.
* :class:`platypush.message.event.joystick.JoystickButtonReleasedEvent` when a joystick button is released.
* :class:`platypush.message.event.joystick.JoystickAxisEvent` when an axis value of the joystick changes.
"""
# These constants were borrowed from linux/input.h
axis_names = {
0x00: 'x',
0x01: 'y',
0x02: 'z',
0x03: 'rx',
0x04: 'ry',
0x05: 'rz',
0x06: 'throttle',
0x07: 'rudder',
0x08: 'wheel',
0x09: 'gas',
0x0a: 'brake',
0x10: 'hat0x',
0x11: 'hat0y',
0x12: 'hat1x',
0x13: 'hat1y',
0x14: 'hat2x',
0x15: 'hat2y',
0x16: 'hat3x',
0x17: 'hat3y',
0x18: 'pressure',
0x19: 'distance',
0x1a: 'tilt_x',
0x1b: 'tilt_y',
0x1c: 'tool_width',
0x20: 'volume',
0x28: 'misc',
}
button_names = {
0x120: 'trigger',
0x121: 'thumb',
0x122: 'thumb2',
0x123: 'top',
0x124: 'top2',
0x125: 'pinkie',
0x126: 'base',
0x127: 'base2',
0x128: 'base3',
0x129: 'base4',
0x12a: 'base5',
0x12b: 'base6',
0x12f: 'dead',
0x130: 'a',
0x131: 'b',
0x132: 'c',
0x133: 'x',
0x134: 'y',
0x135: 'z',
0x136: 'tl',
0x137: 'tr',
0x138: 'tl2',
0x139: 'tr2',
0x13a: 'select',
0x13b: 'start',
0x13c: 'mode',
0x13d: 'thumbl',
0x13e: 'thumbr',
0x220: 'dpad_up',
0x221: 'dpad_down',
0x222: 'dpad_left',
0x223: 'dpad_right',
# XBox 360 controller uses these codes.
0x2c0: 'dpad_left',
0x2c1: 'dpad_right',
0x2c2: 'dpad_up',
0x2c3: 'dpad_down',
}
def __init__(self, device: str = '/dev/input/js0', *args, **kwargs):
"""
:param device: Joystick device to monitor (default: ``/dev/input/js0``).
"""
super().__init__(*args, **kwargs)
self.device = device
self._axis_states = {}
self._button_states = {}
self._axis_map = []
self._button_map = []
def _init_joystick(self, dev: IO):
# Get the device name.
buf = array.array('B', [0] * 64)
ioctl(dev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = buf.tobytes().rstrip(b'\x00').decode('utf-8')
# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(dev, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(dev, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(dev, 0x80406a32, buf) # JSIOCGAXMAP
for axis in buf[:num_axes]:
axis_name = self.axis_names.get(axis, 'unknown(0x%02x)' % axis)
self._axis_map.append(axis_name)
self._axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(dev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:num_buttons]:
btn_name = self.button_names.get(btn, 'unknown(0x%03x)' % btn)
self._button_map.append(btn_name)
self._button_states[btn_name] = 0
self.bus.post(JoystickConnectedEvent(device=self.device, name=js_name, axes=self._axis_map,
buttons=self._button_map))
def run(self):
super().run()
self.logger.info(f'Opening {self.device}...')
while not self.should_stop():
# Open the joystick device.
try:
jsdev = open(self.device, 'rb')
self._init_joystick(jsdev)
except Exception as e:
self.logger.debug(f'Joystick device on {self.device} not available: {e}')
time.sleep(5)
continue
# Joystick event loop
while not self.should_stop():
try:
evbuf = jsdev.read(8)
if evbuf:
_, value, evt_type, number = struct.unpack('IhBB', evbuf)
if evt_type & 0x80: # Initial state notification
continue
if evt_type & 0x01:
button = self._button_map[number]
if button:
self._button_states[button] = value
evt_class = JoystickButtonPressedEvent if value else JoystickButtonReleasedEvent
# noinspection PyTypeChecker
self.bus.post(evt_class(device=self.device, button=button))
if evt_type & 0x02:
axis = self._axis_map[number]
if axis:
fvalue = value / 32767.0
self._axis_states[axis] = fvalue
# noinspection PyTypeChecker
self.bus.post(JoystickAxisEvent(device=self.device, axis=axis, value=fvalue))
except OSError as e:
self.logger.warning(f'Connection to {self.device} lost: {e}')
self.bus.post(JoystickDisconnectedEvent(device=self.device))
break
|
src/modules/sensors/vehicle_magnetometer/mag_compensation/python/mag_compensation.py | SaxionMechatronics/Firmware | 4,224 | 5240 | <reponame>SaxionMechatronics/Firmware
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File: mag_compensation.py
Author: <NAME>
Email: <EMAIL>
Github: https://github.com/baumanta
Description:
Computes linear coefficients for mag compensation from thrust and current
Usage:
python mag_compensation.py /path/to/log/logfile.ulg current --instance 1
Remark:
If your logfile does not contain some of the topics, e.g. battery_status/current_a,
you will have to comment out the corresponding parts in the script.
"""
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from pyulog import ULog
from pyulog.px4 import PX4ULog
from pylab import *
import numpy as np
import textwrap as tw
import argparse
#arguments
parser = argparse.ArgumentParser(description='Calculate compensation parameters from ulog')
parser.add_argument('logfile', type=str, nargs='?', default=[],
help='full path to ulog file')
parser.add_argument('type', type=str, nargs='?', choices=['current', 'thrust'], default=[],
help='Power signal used for compensation, supported is "current" or "thrust".')
parser.add_argument('--instance', type=int, nargs='?', default=0,
help='instance of the current or thrust signal to use (0 or 1)')
args = parser.parse_args()
log_name = args.logfile
comp_type = args.type
comp_instance = args.instance
#Load the log data (parsed with pyulog)
log = ULog(log_name)
pxlog = PX4ULog(log)
def get_data(topic_name, variable_name, index):
try:
dataset = log.get_dataset(topic_name, index)
return dataset.data[variable_name]
except:
return []
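# Note: despite the function name below, ULog timestamps are in microseconds,
# which is why the conversion multiplies by 1e-6 to obtain seconds.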
def ms2s_list(time_ms_list):
if len(time_ms_list) > 0:
return 1e-6 * time_ms_list
else:
return time_ms_list
# Select msgs and copy into arrays
armed = get_data('vehicle_status', 'arming_state', 0)
t_armed = ms2s_list(get_data('vehicle_status', 'timestamp', 0))
if comp_type == "thrust":
power = get_data('vehicle_rates_setpoint', 'thrust_body[2]', comp_instance)
power_t = ms2s_list(get_data('vehicle_rates_setpoint', 'timestamp', comp_instance))
comp_type_param = 1
factor = 1
unit = "[G]"
elif comp_type == "current":
power = get_data('battery_status', 'current_a', comp_instance)
power = np.true_divide(power, 1000) #kA
power_t = ms2s_list(get_data('battery_status', 'timestamp', comp_instance))
comp_type_param = 2 + comp_instance
factor = -1
unit = "[G/kA]"
else:
print("unknown compensation type {}. Supported is either 'thrust' or 'current'.".format(comp_type))
sys.exit(1)
if len(power) == 0:
print("could not retrieve power signal from log, zero data points")
sys.exit(1)
mag0X_body = get_data('sensor_mag', 'x', 0)
mag0Y_body = get_data('sensor_mag', 'y', 0)
mag0Z_body = get_data('sensor_mag', 'z', 0)
t_mag0 = ms2s_list(get_data('sensor_mag', 'timestamp', 0))
mag0_ID = get_data('sensor_mag', 'device_id', 0)
mag1X_body = get_data('sensor_mag', 'x', 1)
mag1Y_body = get_data('sensor_mag', 'y', 1)
mag1Z_body = get_data('sensor_mag', 'z', 1)
t_mag1 = ms2s_list(get_data('sensor_mag', 'timestamp', 1))
mag1_ID = get_data('sensor_mag', 'device_id', 1)
mag2X_body = get_data('sensor_mag', 'x', 2)
mag2Y_body = get_data('sensor_mag', 'y', 2)
mag2Z_body = get_data('sensor_mag', 'z', 2)
t_mag2 = ms2s_list(get_data('sensor_mag', 'timestamp', 2))
mag2_ID = get_data('sensor_mag', 'device_id', 2)
mag3X_body = get_data('sensor_mag', 'x', 3)
mag3Y_body = get_data('sensor_mag', 'y', 3)
mag3Z_body = get_data('sensor_mag', 'z', 3)
t_mag3 = ms2s_list(get_data('sensor_mag', 'timestamp', 3))
mag3_ID = get_data('sensor_mag', 'device_id', 3)
magX_body = []
magY_body = []
magZ_body = []
mag_id = []
t_mag = []
if len(mag0X_body) > 0:
magX_body.append(mag0X_body)
magY_body.append(mag0Y_body)
magZ_body.append(mag0Z_body)
t_mag.append(t_mag0)
mag_id.append(mag0_ID[0])
if len(mag1X_body) > 0:
magX_body.append(mag1X_body)
magY_body.append(mag1Y_body)
magZ_body.append(mag1Z_body)
t_mag.append(t_mag1)
mag_id.append(mag1_ID[0])
if len(mag2X_body) > 0:
magX_body.append(mag2X_body)
magY_body.append(mag2Y_body)
magZ_body.append(mag2Z_body)
t_mag.append(t_mag2)
mag_id.append(mag2_ID[0])
if len(mag3X_body) > 0:
magX_body.append(mag3X_body)
magY_body.append(mag3Y_body)
magZ_body.append(mag3Z_body)
t_mag.append(t_mag3)
mag_id.append(mag3_ID[0])
n_mag = len(magX_body)
#log index does not necessarily match mag calibration instance number
calibration_instance = []
instance_found = False
for idx in range(n_mag):
instance_found = False
for j in range(4):
if mag_id[idx] == log.initial_parameters["CAL_MAG{}_ID".format(j)]:
calibration_instance.append(j)
instance_found = True
if not instance_found:
print('Mag {} calibration instance not found, run compass calibration first.'.format(mag_id[idx]))
#get first arming sequence from data
start_time = 0
stop_time = 0
for i in range(len(armed)-1):
if armed[i] == 1 and armed[i+1] == 2:
start_time = t_armed[i+1]
if armed[i] == 2 and armed[i+1] == 1:
stop_time = t_armed[i+1]
break
#cut unarmed sequences from mag data
index_start = 0
index_stop = 0
for idx in range(n_mag):
for i in range(len(t_mag[idx])):
if t_mag[idx][i] > start_time:
index_start = i
break
for i in range(len(t_mag[idx])):
if t_mag[idx][i] > stop_time:
index_stop = i -1
break
t_mag[idx] = t_mag[idx][index_start:index_stop]
magX_body[idx] = magX_body[idx][index_start:index_stop]
magY_body[idx] = magY_body[idx][index_start:index_stop]
magZ_body[idx] = magZ_body[idx][index_start:index_stop]
#resample data
power_resampled = []
for idx in range(n_mag):
power_resampled.append(interp(t_mag[idx], power_t, power))
#fit linear to get coefficients
px = []
py = []
pz = []
for idx in range(n_mag):
px_temp, res_x, _, _, _ = polyfit(power_resampled[idx], magX_body[idx], 1,full = True)
py_temp, res_y, _, _, _ = polyfit(power_resampled[idx], magY_body[idx], 1,full = True)
pz_temp, res_z, _, _, _ = polyfit(power_resampled[idx], magZ_body[idx], 1, full = True)
px.append(px_temp)
py.append(py_temp)
pz.append(pz_temp)
#print to console
for idx in range(n_mag):
print('Mag{} device ID {} (calibration instance {})'.format(idx, mag_id[idx], calibration_instance[idx]))
print('\033[91m \n{}-based compensation: \033[0m'.format(comp_type))
print('\nparam set CAL_MAG_COMP_TYP {}'.format(comp_type_param))
for idx in range(n_mag):
print('\nparam set CAL_MAG{}_XCOMP {:.3f}'.format(calibration_instance[idx], factor * px[idx][0]))
print('param set CAL_MAG{}_YCOMP {:.3f}'.format(calibration_instance[idx], factor * py[idx][0]))
print('param set CAL_MAG{}_ZCOMP {:.3f}'.format(calibration_instance[idx], factor * pz[idx][0]))
#plot data
for idx in range(n_mag):
fig = plt.figure(num=None, figsize=(25, 14), dpi=80, facecolor='w', edgecolor='k')
fig.suptitle('Compensation Parameter Fit \n{} \nmag {} ID: {} (calibration instance {})'.format(log_name, idx, mag_id[idx], calibration_instance[idx]), fontsize=14, fontweight='bold')
plt.subplot(1,3,1)
plt.plot(power_resampled[idx], magX_body[idx], 'yo', power_resampled[idx], px[idx][0]*power_resampled[idx]+px[idx][1], '--k')
plt.xlabel('current [kA]')
plt.ylabel('mag X [G]')
plt.subplot(1,3,2)
plt.plot(power_resampled[idx], magY_body[idx], 'yo', power_resampled[idx], py[idx][0]*power_resampled[idx]+py[idx][1], '--k')
plt.xlabel('current [kA]')
plt.ylabel('mag Y [G]')
plt.subplot(1,3,3)
plt.plot(power_resampled[idx], magZ_body[idx], 'yo', power_resampled[idx], pz[idx][0]*power_resampled[idx]+pz[idx][1], '--k')
plt.xlabel('current [kA]')
plt.ylabel('mag Z [G]')
# display results
plt.figtext(0.24, 0.03, 'CAL_MAG{}_XCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * px[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
plt.figtext(0.51, 0.03, 'CAL_MAG{}_YCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * py[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
plt.figtext(0.79, 0.03, 'CAL_MAG{}_ZCOMP: {:.3f} {}'.format(calibration_instance[idx],factor * pz[idx][0],unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
#compensation comparison plots
for idx in range(n_mag):
fig = plt.figure(num=None, figsize=(25, 14), dpi=80, facecolor='w', edgecolor='k')
fig.suptitle('Original Data vs. Compensation \n{}\nmag {} ID: {} (calibration instance {})'.format(log_name, idx, mag_id[idx], calibration_instance[idx]), fontsize=14, fontweight='bold')
plt.subplot(3,1,1)
original_x, = plt.plot(t_mag[idx], magX_body[idx], label='original')
power_x, = plt.plot(t_mag[idx],magX_body[idx] - px[idx][0] * power_resampled[idx], label='compensated')
plt.legend(handles=[original_x, power_x])
plt.xlabel('Time [s]')
plt.ylabel('Mag X corrected[G]')
plt.subplot(3,1,2)
original_y, = plt.plot(t_mag[idx], magY_body[idx], label='original')
power_y, = plt.plot(t_mag[idx],magY_body[idx] - py[idx][0] * power_resampled[idx], label='compensated')
plt.legend(handles=[original_y, power_y])
plt.xlabel('Time [s]')
plt.ylabel('Mag Y corrected[G]')
plt.subplot(3,1,3)
original_z, = plt.plot(t_mag[idx], magZ_body[idx], label='original')
power_z, = plt.plot(t_mag[idx],magZ_body[idx] - pz[idx][0] * power_resampled[idx], label='compensated')
plt.legend(handles=[original_z, power_z])
plt.xlabel('Time [s]')
plt.ylabel('Mag Z corrected[G]')
plt.show()
|
examples/qmmm/02-mcscf.py | QuESt-Calculator/pyscf | 501 | 5246 | #!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
'''
A simple example to run MCSCF with background charges.
'''
import numpy
from pyscf import gto, scf, mcscf, qmmm
mol = gto.M(atom='''
C 1.1879 -0.3829 0.0000
C 0.0000 0.5526 0.0000
O -1.1867 -0.2472 0.0000
H -1.9237 0.3850 0.0000
H 2.0985 0.2306 0.0000
H 1.1184 -1.0093 0.8869
H 1.1184 -1.0093 -0.8869
H -0.0227 1.1812 0.8852
H -0.0227 1.1812 -0.8852
''',
basis='3-21g',
verbose=4)
numpy.random.seed(1)
coords = numpy.random.random((5,3)) * 10
charges = (numpy.arange(5) + 1.) * -.1
#
# There are two ways to add background charges to the MCSCF method.
# The recommended one is to initialize them in the SCF calculation. The MCSCF
# calculation then takes the information from the SCF object.
#
mf = qmmm.mm_charge(scf.RHF(mol), coords, charges).run()
mc = mcscf.CASSCF(mf, 6, 6)
mc.run()
mc = mcscf.CASCI(mf, 6, 6)
mc.run()
#
# The other method is to patch the MCSCF object with the background charges.
# Note: it updates the underlying SCF object in place.
#
mo_init = mf.mo_coeff
mf = scf.RHF(mol)
mc = mcscf.CASSCF(mf, 6, 6)
mc = qmmm.mm_charge(mc, coords, charges)
mc.run(mo_init)
mf = scf.RHF(mol)
mc = mcscf.CASCI(mf, 6, 6)
mc = qmmm.mm_charge(mc, coords, charges)
mc.run(mo_init)
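#
# The converged energy can be inspected afterwards (hedged sketch: `e_tot` is
# the standard pyscf attribute set on CASCI/CASSCF objects after run()).
#
print('CASCI energy with MM charges: %.12g' % mc.e_tot)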
|
ens/exceptions.py | pjryan93/web3.py | 326 | 5258 | import idna
class AddressMismatch(ValueError):
'''
In order to set up reverse resolution correctly, the ENS name should first
point to the address. This exception is raised if the name does
not currently point to the address.
'''
pass
class InvalidName(idna.IDNAError):
'''
This exception is raised if the provided name does not meet
the syntax standards specified in `EIP 137 name syntax
<https://github.com/ethereum/EIPs/blob/master/EIPS/eip-137.md#name-syntax>`_.
For example: names may not start with a dot, or include a space.
'''
pass
class UnauthorizedError(Exception):
'''
Raised if the sending account is not the owner of the name
you are trying to modify. Make sure to set ``from`` in the
``transact`` keyword argument to the owner of the name.
'''
pass
class UnownedName(Exception):
'''
Raised if you are trying to modify a name that no one owns.
If working on a subdomain, make sure the subdomain gets created
first with :meth:`~ens.main.ENS.setup_address`.
'''
pass
class BidTooLow(ValueError):
'''
Raised if you bid less than the minimum amount
'''
pass
class InvalidBidHash(ValueError):
'''
Raised if you supply incorrect data to generate the bid hash.
'''
pass
class InvalidLabel(ValueError):
'''
Raised if you supply an invalid label
'''
pass
class OversizeTransaction(ValueError):
'''
Raised if a transaction you are trying to create would cost so
much gas that it could not fit in a block.
For example: when you try to start too many auctions at once.
'''
pass
class UnderfundedBid(ValueError):
'''
Raised if you send less wei with your bid than you declared
as your intent to bid.
'''
pass
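# Typical handling sketch (hedged): `ns` stands for an ens.main.ENS instance as
# referenced in the docstrings above; the surrounding calls are illustrative only.
#
#   try:
#       ns.setup_address('example.eth', account)
#   except UnauthorizedError:
#       ...  # the sending account does not own 'example.eth'
#   except UnownedName:
#       ...  # register the (sub)domain before pointing it at an address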
|
src/backend/schemas/vps.py | ddddhm1/LuWu | 658 | 5261 | from typing import List
from typing import Optional
from typing import Union
from models.vps import VpsStatus
from schemas.base import APIModel
from schemas.base import BasePagination
from schemas.base import BaseSchema
from schemas.base import BaseSuccessfulResponseModel
class VpsSshKeySchema(APIModel):
name: str
public_key: str = None
private_key: str = None
isp_id: int
ssh_key_id: Optional[str]
date_created: Optional[str]
fingerprint: Optional[str]
class VpsSpecPlanSchema(APIModel):
name: str
plan_code: Union[str, int]
region_codes: List = None
bandwidth: float
ram: int
vcpu: int
disk: int
price_monthly: Union[float, int, str] = None
price_hourly: Union[float, int, str] = None
price_yearly: Union[float, int, str] = None
class VpsSpecRegionSchema(APIModel):
name: str
region_code: Union[str, int]
features: List[str] = None
plan_codes: List[Union[str, int]] = []
class VpsSpecOsSchema(APIModel):
name: str
os_code: Union[str, int]
region_codes: List[Union[str, int]] = []
plan_codes: List[Union[str, int]] = []
class VpsSpecSchema(APIModel):
region: List[VpsSpecRegionSchema] = []
plan: List[VpsSpecPlanSchema] = []
os: List[VpsSpecOsSchema] = []
class VpsSpecResponse(BaseSuccessfulResponseModel):
result: VpsSpecSchema
class VpsCreateSchema(APIModel):
hostname: str
isp_id: int
region_code: str
os_code: str
plan_code: str
ssh_keys: List[str] = []
status: int = VpsStatus.init
remark: str = None
class VpsItemSchema(BaseSchema):
isp_id: int
ip: Union[int, str, None]
server_id: Optional[str]
hostname: str
os: Optional[str]
plan: Optional[str]
region: Optional[str]
status: int
status_name: str
status_msg: Optional[str]
isp_provider_name: str
class VpsItemResponse(BaseSuccessfulResponseModel):
result: VpsItemSchema
class VpsPaginationSchema(BasePagination):
items: Optional[List[VpsItemSchema]]
class VpsPaginationResponse(BaseSuccessfulResponseModel):
result: VpsPaginationSchema
class VpsSshKeyResponseSchema(BaseSuccessfulResponseModel):
result: List[VpsSshKeySchema]
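# Construction sketch (hedged): the field values below are invented and only
# illustrate the shape of VpsCreateSchema as declared above.
#
#   payload = VpsCreateSchema(
#       hostname="redirector-01",
#       isp_id=1,
#       region_code="ewr",
#       os_code="ubuntu_20_04",
#       plan_code="vc2-1c-1gb",
#   )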
|
editortools/player.py | bennettdc/MCEdit-Unified | 237 | 5281 | """Copyright (c) 2010-2012 <NAME>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."""
#-# Modified by D.C.-G. for translation purposes
from OpenGL import GL
import numpy
import os
from albow import TableView, TableColumn, Label, Button, Column, CheckBox, AttrRef, Row, ask, alert, input_text_buttons, TabPanel
from albow.table_view import TableRowView
from albow.translate import _
from config import config
from editortools.editortool import EditorTool
from editortools.tooloptions import ToolOptions
from glbackground import Panel
from glutils import DisplayList
from mceutils import loadPNGTexture, alertException, drawTerrainCuttingWire, drawCube
from operation import Operation
import pymclevel
from pymclevel.box import BoundingBox, FloatBox
from pymclevel import nbt
import logging
from player_cache import PlayerCache, ThreadRS
from nbtexplorer import loadFile, saveFile, NBTExplorerToolPanel
import pygame
log = logging.getLogger(__name__)
class PlayerRemoveOperation(Operation):
undoTag = None
def __init__(self, tool, player="Player (Single Player)"):
super(PlayerRemoveOperation, self).__init__(tool.editor, tool.editor.level)
self.tool = tool
self.player = player
self.level = self.tool.editor.level
self.canUndo = False
self.playercache = PlayerCache()
def perform(self, recordUndo=True):
if self.level.saving:
alert(_("Cannot perform action while saving is taking place"))
return
if self.player == "Player (Single Player)":
answer = ask(_("Are you sure you want to delete the default player?"), ["Yes", "Cancel"])
if answer == "Cancel":
return
self.player = "Player"
if recordUndo:
self.undoTag = self.level.getPlayerTag(self.player)
self.level.players.remove(self.player)
if self.tool.panel:
if self.player != "Player":
#self.tool.panel.players.remove(player_cache.getPlayerNameFromUUID(self.player))
#self.tool.panel.players.remove(self.playercache.getPlayerInfo(self.player)[0])
str()
else:
self.tool.panel.players.remove("Player (Single Player)")
while self.tool.panel.table.index >= len(self.tool.panel.players):
self.tool.panel.table.index -= 1
#if len(self.tool.panel.players) == 0:
# self.tool.hidePanel()
# self.tool.showPanel()
self.tool.hidePanel()
self.tool.showPanel()
self.tool.markerList.invalidate()
self.tool.movingPlayer = None
pos = self.tool.revPlayerPos[self.editor.level.dimNo][self.player]
del self.tool.playerPos[self.editor.level.dimNo][pos]
if self.player != "Player":
del self.tool.playerTexture[self.player]
else:
del self.level.root_tag["Data"]["Player"]
del self.tool.revPlayerPos[self.editor.level.dimNo][self.player]
self.canUndo = True
def undo(self):
if not (self.undoTag is None):
if self.player != "Player":
self.level.playerTagCache[self.level.getPlayerPath(self.player)] = self.undoTag
else:
self.level.root_tag["Data"]["Player"] = self.undoTag
self.level.players.append(self.player)
if self.tool.panel:
#if self.player != "Player":
# self.tool.panel.players.append(self.playercache.getPlayerInfo(self.player)[0])
#else:
# self.tool.panel.players.append("Player (Single Player)")
if "[No players]" in self.tool.panel.players:
self.tool.panel.players.remove("[No players]")
self.tool.hidePanel()
self.tool.showPanel()
self.tool.markerList.invalidate()
def redo(self):
self.perform()
class PlayerAddOperation(Operation):
playerTag = None
def __init__(self, tool):
super(PlayerAddOperation, self).__init__(tool.editor, tool.editor.level)
self.tool = tool
self.level = self.tool.editor.level
self.canUndo = False
self.playercache = PlayerCache()
def perform(self, recordUndo=True):
initial = ""
allowed_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"
while True:
self.player = input_text_buttons("Enter a Player Name: ", 160, initial=initial, allowed_chars=allowed_chars)
if self.player is None:
return
elif len(self.player) > 16:
alert("Name too long. Maximum name length is 16.")
initial = self.player
elif len(self.player) < 1:
alert("Name too short. Minimum name length is 1.")
initial = self.player
else:
break
# print 1
data = self.playercache.getPlayerInfo(self.player)
if "<Unknown UUID>" not in data and "Server not ready" not in data:
self.uuid = data[0]
self.player = data[1]
else:
action = ask("Could not get {}'s UUID. Please make sure that you are connected to the internet and that the player \"{}\" exists.".format(self.player, self.player), ["Enter UUID manually", "Cancel"])
if action != "Enter UUID manually":
return
self.uuid = input_text_buttons("Enter a Player UUID: ", 160)
if not self.uuid:
return
# print 2
self.player = self.playercache.getPlayerInfo(self.uuid)
if self.player == self.uuid.replace("-", ""):
if ask("UUID was not found. Continue anyways?") == "Cancel":
return
# print "PlayerAddOperation.perform::self.uuid", self.uuid
if self.uuid in self.level.players:
alert("Player already exists in this World.")
return
self.playerTag = self.newPlayer()
#if self.tool.panel:
# self.tool.panel.players.append(self.player)
if self.level.oldPlayerFolderFormat:
self.level.playerTagCache[self.level.getPlayerPath(self.player)] = self.playerTag
self.level.players.append(self.player)
#if self.tool.panel:
#self.tool.panel.player_UUID[self.player] = self.player
else:
self.level.playerTagCache[self.level.getPlayerPath(self.uuid)] = self.playerTag
self.level.players.append(self.uuid)
if self.tool.panel:
self.tool.panel.player_UUID["UUID"].append(self.uuid)
self.tool.panel.player_UUID["Name"].append(self.player)
self.tool.playerPos[self.editor.level.dimNo][(0,0,0)] = self.uuid
self.tool.revPlayerPos[self.editor.level.dimNo][self.uuid] = (0,0,0)
# print 3
r = self.playercache.getPlayerSkin(self.uuid, force_download=False)
if not isinstance(r, (str, unicode)):
# print 'r 1', r
r = r.join()
# print 'r 2', r
self.tool.playerTexture[self.uuid] = loadPNGTexture(r)
self.tool.markerList.invalidate()
self.tool.recordMove = False
self.tool.movingPlayer = self.uuid
if self.tool.panel:
self.tool.hidePanel()
self.tool.showPanel()
self.canUndo = True
self.playerTag.save(self.level.getPlayerPath(self.uuid))
self.tool.nonSavedPlayers.append(self.level.getPlayerPath(self.uuid))
self.tool.inOtherDimension[self.editor.level.dimNo].append(self.uuid)
def newPlayer(self):
playerTag = nbt.TAG_Compound()
playerTag['Air'] = nbt.TAG_Short(300)
playerTag['AttackTime'] = nbt.TAG_Short(0)
playerTag['DeathTime'] = nbt.TAG_Short(0)
playerTag['Fire'] = nbt.TAG_Short(-20)
playerTag['Health'] = nbt.TAG_Short(20)
playerTag['HurtTime'] = nbt.TAG_Short(0)
playerTag['Score'] = nbt.TAG_Int(0)
playerTag['FallDistance'] = nbt.TAG_Float(0)
playerTag['OnGround'] = nbt.TAG_Byte(0)
playerTag['Dimension'] = nbt.TAG_Int(self.editor.level.dimNo)
playerTag["Inventory"] = nbt.TAG_List()
playerTag['Motion'] = nbt.TAG_List([nbt.TAG_Double(0) for i in xrange(3)])
spawn = self.level.playerSpawnPosition()
spawnX = spawn[0]
spawnZ = spawn[2]
blocks = [self.level.blockAt(spawnX, i, spawnZ) for i in xrange(self.level.Height)]
i = self.level.Height
done = False
for index, b in enumerate(reversed(blocks)):
if b != 0 and not done:
i = index
done = True
spawnY = self.level.Height - i
playerTag['Pos'] = nbt.TAG_List([nbt.TAG_Double([spawnX, spawnY, spawnZ][i]) for i in xrange(3)])
playerTag['Rotation'] = nbt.TAG_List([nbt.TAG_Float(0), nbt.TAG_Float(0)])
return playerTag
def undo(self):
self.level.players.remove(self.uuid)
self.tool.movingPlayer = None
if self.tool.panel:
#self.tool.panel.players.remove(self.player)
self.tool.panel.player_UUID["UUID"].remove(self.uuid)
self.tool.panel.player_UUID["Name"].remove(self.player)
self.tool.hidePanel()
self.tool.showPanel()
if self.tool.movingPlayer is None:
del self.tool.playerPos[self.tool.revPlayerPos[self.uuid]]
else:
del self.tool.playerPos[(0,0,0)]
del self.tool.revPlayerPos[self.uuid]
del self.tool.playerTexture[self.uuid]
os.remove(self.level.getPlayerPath(self.uuid))
if self.level.getPlayerPath(self.uuid) in self.tool.nonSavedPlayers:
self.tool.nonSavedPlayers.remove(self.level.getPlayerPath(self.uuid))
self.tool.markerList.invalidate()
def redo(self):
if not (self.playerTag is None):
self.level.playerTagCache[self.level.getPlayerPath(self.uuid)] = self.playerTag
self.level.players.append(self.uuid)
if self.tool.panel:
#self.tool.panel.players.append(self.uuid)
#self.tool.panel.player_UUID[self.player] = self.uuid
self.tool.panel.player_UUID["UUID"].append(self.uuid)
self.tool.panel.player_UUID["Name"].append(self.player)
# print 4
r = self.playercache.getPlayerSkin(self.uuid)
if isinstance(r, (str, unicode)):
r = r.join()
self.tool.playerTexture[self.uuid] = loadPNGTexture(r)
self.tool.playerPos[(0,0,0)] = self.uuid
self.tool.revPlayerPos[self.uuid] = (0,0,0)
self.playerTag.save(self.level.getPlayerPath(self.uuid))
self.tool.nonSavedPlayers.append(self.level.getPlayerPath(self.uuid))
self.tool.markerList.invalidate()
class PlayerMoveOperation(Operation):
undoPos = None
redoPos = None
def __init__(self, tool, pos, player="Player", yp=(None, None)):
super(PlayerMoveOperation, self).__init__(tool.editor, tool.editor.level)
self.tool = tool
self.canUndo = False
self.pos = pos
self.player = player
self.yp = yp
def perform(self, recordUndo=True):
if self.level.saving:
alert(_("Cannot perform action while saving is taking place"))
return
try:
level = self.tool.editor.level
try:
self.undoPos = level.getPlayerPosition(self.player)
self.undoDim = level.getPlayerDimension(self.player)
self.undoYP = level.getPlayerOrientation(self.player)
except Exception as e:
log.info(_("Couldn't get player position! ({0!r})").format(e))
yaw, pitch = self.yp
if yaw is not None and pitch is not None:
level.setPlayerOrientation((yaw, pitch), self.player)
level.setPlayerPosition(self.pos, self.player)
level.setPlayerDimension(level.dimNo, self.player)
self.tool.playerPos[tuple(self.pos)] = self.player
self.tool.revPlayerPos[self.player] = self.pos
self.tool.markerList.invalidate()
self.canUndo = True
except pymclevel.PlayerNotFound as e:
print "Player move failed: ", e
def undo(self):
if not (self.undoPos is None):
level = self.tool.editor.level
try:
self.redoPos = level.getPlayerPosition(self.player)
self.redoDim = level.getPlayerDimension(self.player)
self.redoYP = level.getPlayerOrientation(self.player)
except Exception as e:
log.info(_("Couldn't get player position! ({0!r})").format(e))
level.setPlayerPosition(self.undoPos, self.player)
level.setPlayerDimension(self.undoDim, self.player)
level.setPlayerOrientation(self.undoYP, self.player)
self.tool.markerList.invalidate()
def redo(self):
if not (self.redoPos is None):
level = self.tool.editor.level
try:
self.undoPos = level.getPlayerPosition(self.player)
self.undoDim = level.getPlayerDimension(self.player)
self.undoYP = level.getPlayerOrientation(self.player)
except Exception as e:
log.info(_("Couldn't get player position! ({0!r})").format(e))
level.setPlayerPosition(self.redoPos, self.player)
level.setPlayerDimension(self.redoDim, self.player)
level.setPlayerOrientation(self.redoYP, self.player)
self.tool.markerList.invalidate()
@staticmethod
def bufferSize():
return 20
class SpawnPositionInvalid(Exception):
pass
def okayAt63(level, pos):
"""blocks 63 or 64 must be occupied"""
# return level.blockAt(pos[0], 63, pos[2]) != 0 or level.blockAt(pos[0], 64, pos[2]) != 0
return True
def okayAboveSpawn(level, pos):
"""3 blocks above spawn must be open"""
return not any([level.blockAt(pos[0], pos[1] + i, pos[2]) for i in xrange(1, 4)])
def positionValid(level, pos):
try:
return okayAt63(level, pos) and okayAboveSpawn(level, pos)
except EnvironmentError:
return False
class PlayerSpawnMoveOperation(Operation):
undoPos = None
redoPos = None
def __init__(self, tool, pos):
super(PlayerSpawnMoveOperation, self).__init__(tool.editor, tool.editor.level)
self.tool, self.pos = tool, pos
self.canUndo = False
def perform(self, recordUndo=True):
if self.level.saving:
alert(_("Cannot perform action while saving is taking place"))
return
level = self.tool.editor.level
'''
if isinstance(level, pymclevel.MCInfdevOldLevel):
if not positionValid(level, self.pos):
if config.spawn.spawnProtection.get():
raise SpawnPositionInvalid(
"You cannot have two air blocks at Y=63 and Y=64 in your spawn point's column. Additionally, you cannot have a solid block in the three blocks above your spawn point. It's weird, I know.")
'''
self.undoPos = level.playerSpawnPosition()
level.setPlayerSpawnPosition(self.pos)
self.tool.markerList.invalidate()
self.canUndo = True
def undo(self):
if self.undoPos is not None:
level = self.tool.editor.level
self.redoPos = level.playerSpawnPosition()
level.setPlayerSpawnPosition(self.undoPos)
self.tool.markerList.invalidate()
def redo(self):
if self.redoPos is not None:
level = self.tool.editor.level
self.undoPos = level.playerSpawnPosition()
level.setPlayerSpawnPosition(self.redoPos)
self.tool.markerList.invalidate()
class PlayerPositionPanel(Panel):
def __init__(self, tool):
Panel.__init__(self, name='Panel.PlayerPositionPanel')
self.tool = tool
self.player_UUID = {"UUID": [], "Name": []}
self.level = tool.editor.level
self.playercache = PlayerCache()
# Add this instance to the PlayerCache 'targets'. PlayerCache-generated processes will call
# this instance's 'update_player' method when they have finished their execution.
self.playercache.add_target(self.update_player)
if hasattr(self.level, 'players'):
players = self.level.players or ["[No players]"]
if not self.level.oldPlayerFolderFormat:
for player in players:
if player != "Player" and player != "[No players]":
if len(player) > 4 and player[4] == "-":
os.rename(os.path.join(self.level.worldFolder.getFolderPath("playerdata"), player+".dat"), os.path.join(self.level.worldFolder.getFolderPath("playerdata"), player.replace("-", "", 1)+".dat"))
player = player.replace("-", "", 1)
# print 5
data = self.playercache.getPlayerInfo(player, use_old_data=True)
#self.player_UUID[data[0]] = data[1]
self.player_UUID["UUID"].append(data[0])
self.player_UUID["Name"].append(data[1])
#self.player_UUID[player] = data
if "Player" in players:
#self.player_UUID["Player (Single Player)"] = "Player"
self.player_UUID["UUID"].append("Player")
self.player_UUID["Name"].append("Player (Single Player)")
if "[No players]" not in players:
self.player_names = sorted(self.player_UUID.values(), key=lambda x: False if x == "Player (Single Player)" else x)
else:
self.player_UUID["UUID"].append("[No players]")
self.player_UUID["Name"].append("[No players]")
else:
players = ["Player (Single Player)"]
self.players = players
if 'Player' in self.player_UUID['UUID'] and 'Player (Single Player)' in self.player_UUID['Name']:
self.player_UUID['UUID'].insert(0, self.player_UUID['UUID'].pop(self.player_UUID['UUID'].index('Player')))
self.player_UUID['Name'].insert(0, self.player_UUID['Name'].pop(self.player_UUID['Name'].index('Player (Single Player)')))
self.pages = TabPanel()
tab_height = self.pages.tab_height
max_height = tab_height + self.tool.editor.mainViewport.height - self.tool.editor.toolbar.height - self.tool.editor.subwidgets[0].height - self.pages.margin * 2
#-# Uncomment the following line to have a maximum height for this panel.
# max_height = min(max_height, 500)
self.editNBTDataButton = Button("Edit NBT", action=self.editNBTData, tooltipText="Open the NBT Explorer to edit player's attributes and inventory")
addButton = Button("Add", action=self.tool.addPlayer)
removeButton = Button("Remove", action=self.tool.removePlayer)
gotoButton = Button("Goto", action=self.tool.gotoPlayer)
gotoCameraButton = Button("Goto View", action=self.tool.gotoPlayerCamera)
moveButton = Button("Move", action=self.tool.movePlayer)
moveToCameraButton = Button("Align to Camera", action=self.tool.movePlayerToCamera)
reloadSkin = Button("Reload Skins", action=self.tool.reloadSkins, tooltipText="This pulls skins from the online server, so this may take a while")
btns = [self.editNBTDataButton]
if not isinstance(self.level, pymclevel.leveldbpocket.PocketLeveldbWorld):
btns.extend([addButton, removeButton])
btns.extend([gotoButton, gotoCameraButton, moveButton, moveToCameraButton, reloadSkin])
btns = Column(btns, margin=0, spacing=2)
h = max_height - btns.height - self.pages.margin * 2 - 2 - self.font.get_linesize() * 2
col = Label('')
def close():
self.pages.show_page(col)
self.nbttree = NBTExplorerToolPanel(self.tool.editor, nbtObject={}, height=max_height, \
close_text="Go Back", no_header=True, close_action=close,
load_text=None)
self.nbttree.shrink_wrap()
self.nbtpage = Column([self.nbttree])
self.nbtpage.shrink_wrap()
self.pages.add_page("NBT Data", self.nbtpage)
self.pages.set_rect(map(lambda x:x+self.margin, self.nbttree._rect))
tableview = TableView(nrows=(h - (self.font.get_linesize() * 2.5)) / self.font.get_linesize(),
header_height=self.font.get_linesize(),
columns=[TableColumn("Player Name(s):", (self.nbttree.width - (self.margin * 3)) / 3),
TableColumn("Player UUID(s):", (self.nbttree.width - (self.margin * 3)))],
)
tableview.index = 0
tableview.num_rows = lambda: len(self.player_UUID["UUID"])
tableview.row_data = lambda i: (self.player_UUID["Name"][i],self.player_UUID["UUID"][i])
tableview.row_is_selected = lambda x: x == tableview.index
tableview.zebra_color = (0, 0, 0, 48)
def selectTableRow(i, evt):
tableview.index = i
tableview.click_row = selectTableRow
def mouse_down(e):
if e.button == 1 and e.num_clicks > 1:
self.editNBTData()
TableRowView.mouse_down(tableview.rows, e)
tableview.rows.mouse_down = mouse_down
tableview.rows.tooltipText = "Double-click or use the button below to edit the NBT Data."
self.table = tableview
col.set_parent(None)
self.col = col = Column([tableview, btns], spacing=2)
self.pages.add_page("Players", col, 0)
self.pages.shrink_wrap()
self.pages.show_page(col)
self.add(self.pages)
self.shrink_wrap()
self.max_height = max_height
def editNBTData(self):
player = self.selectedPlayer
if player == 'Player (Single Player)':
alert("Not yet implemented.\nUse the NBT Explorer to edit this player.")
elif player == '[No players]':
return
else:
player = self.level.getPlayerTag(self.selectedPlayer)
if player is not None:
self.pages.remove_page(self.nbtpage)
def close():
self.pages.show_page(self.col)
self.nbttree = NBTExplorerToolPanel(self.tool.editor, nbtObject=player, fileName=None,
savePolicy=-1, dataKeyName=None,
height=self.max_height, no_header=True, close_text="Go Back",
close_action=close, load_text=None,
copy_data=False)
self.nbtpage = Column([self.nbttree,])
self.nbtpage.shrink_wrap()
self.pages.add_page("NBT Data", self.nbtpage)
self.pages.show_page(self.nbtpage)
else:
alert(_("Unable to load player %s" % self.selectedPlayer()))
@property
def selectedPlayer(self):
if not self.level.oldPlayerFolderFormat:
player = self.players[self.table.index]
if player != "Player (Single Player)" and player != "[No players]" and player != "~local_player":
return self.player_UUID["UUID"][self.table.index]
else:
return player
else:
return self.players[self.table.index]
def key_down(self, evt):
self.dispatch_key('key_down', evt)
def dispatch_key(self, name, evt):
if not hasattr(evt, 'key'):
return
if name == "key_down":
keyname = self.root.getKey(evt)
if self.pages.current_page == self.col:
if keyname == "Up" and self.table.index > 0:
self.table.index -= 1
self.table.rows.scroll_to_item(self.table.index)
elif keyname == "Down" and self.table.index < len(self.players) - 1:
self.table.index += 1
self.table.rows.scroll_to_item(self.table.index)
elif keyname == 'Page down':
self.table.index = min(len(self.players) - 1, self.table.index + self.table.rows.num_rows())
elif keyname == 'Page up':
self.table.index = max(0, self.table.index - self.table.rows.num_rows())
elif keyname == 'Return':
if self.selectedPlayer:
self.editNBTData()
if self.table.rows.cell_to_item_no(0, 0) + self.table.rows.num_rows() -1 > self.table.index or self.table.rows.cell_to_item_no(0, 0) + self.table.rows.num_rows() -1 < self.table.index:
self.table.rows.scroll_to_item(self.table.index)
elif self.pages.current_page == self.nbtpage:
self.nbttree.dispatch_key(name, evt)
def update_player(self, data):
if isinstance(data, tuple):
if data[0] in self.player_UUID['UUID']:
idx = self.player_UUID['UUID'].index(data[0])
self.player_UUID['UUID'][idx] = data[0]
self.player_UUID['Name'][idx] = data[1]
class PlayerPositionTool(EditorTool):
surfaceBuild = True
toolIconName = "player"
tooltipText = "Players"
movingPlayer = None
recordMove = True
def reloadTextures(self):
self.charTex = loadPNGTexture('char.png')
@alertException
def addPlayer(self):
op = PlayerAddOperation(self)
self.editor.addOperation(op)
if op.canUndo:
self.editor.addUnsavedEdit()
@alertException
def removePlayer(self):
player = self.panel.selectedPlayer
if player != "[No players]":
op = PlayerRemoveOperation(self, player)
self.editor.addOperation(op)
if op.canUndo:
self.editor.addUnsavedEdit()
@alertException
def movePlayer(self):
if self.panel.selectedPlayer != "[No players]":
self.movingPlayer = self.panel.selectedPlayer
if self.movingPlayer == "Player (Single Player)":
self.movingPlayer = "Player"
@alertException
def movePlayerToCamera(self):
player = self.panel.selectedPlayer
if player == "Player (Single Player)":
player = "Player"
if player != "[No players]":
pos = self.editor.mainViewport.cameraPosition
y = self.editor.mainViewport.yaw
p = self.editor.mainViewport.pitch
op = PlayerMoveOperation(self, pos, player, (y, p))
self.movingPlayer = None
self.editor.addOperation(op)
if op.canUndo:
self.editor.addUnsavedEdit()
def delete_skin(self, uuid):
del self.playerTexture[uuid]
self.playerTexture[uuid] = self.charTex
@alertException
def reloadSkins(self):
#result = ask("This pulls skins from the online server, so this may take a while", ["Ok", "Cancel"])
#if result == "Ok":
try:
for player in self.editor.level.players:
if player != "Player" and player in self.playerTexture.keys():
del self.playerTexture[player]
# print 6
r = self.playercache.getPlayerSkin(player, force_download=True, instance=self)
if isinstance(r, (str, unicode)):
r = r.join()
self.playerTexture[player] = loadPNGTexture(r)
#self.markerList.call(self._drawToolMarkers)
except:
raise Exception("Could not connect to the skins server, please check your Internet connection and try again.")
def gotoPlayerCamera(self):
player = self.panel.selectedPlayer
if player == "Player (Single Player)":
player = "Player"
try:
pos = self.editor.level.getPlayerPosition(player)
y, p = self.editor.level.getPlayerOrientation(player)
self.editor.gotoDimension(self.editor.level.getPlayerDimension(player))
self.editor.mainViewport.cameraPosition = pos
self.editor.mainViewport.yaw = y
self.editor.mainViewport.pitch = p
self.editor.mainViewport.stopMoving()
self.editor.mainViewport.invalidate()
except pymclevel.PlayerNotFound:
pass
def gotoPlayer(self):
player = self.panel.selectedPlayer
if player == "Player (Single Player)":
player = "Player"
try:
if self.editor.mainViewport.pitch < 0:
self.editor.mainViewport.pitch = -self.editor.mainViewport.pitch
self.editor.mainViewport.cameraVector = self.editor.mainViewport._cameraVector()
cv = self.editor.mainViewport.cameraVector
pos = self.editor.level.getPlayerPosition(player)
pos = map(lambda p, c: p - c * 5, pos, cv)
self.editor.gotoDimension(self.editor.level.getPlayerDimension(player))
self.editor.mainViewport.cameraPosition = pos
self.editor.mainViewport.stopMoving()
except pymclevel.PlayerNotFound:
pass
def __init__(self, *args):
EditorTool.__init__(self, *args)
self.reloadTextures()
self.nonSavedPlayers = []
textureVerticesHead = numpy.array(
(
# Backside of Head
24, 16, # Bottom Left
24, 8, # Top Left
32, 8, # Top Right
32, 16, # Bottom Right
# Front of Head
8, 16,
8, 8,
16, 8,
16, 16,
#
24, 0,
16, 0,
16, 8,
24, 8,
#
16, 0,
8, 0,
8, 8,
16, 8,
#
8, 8,
0, 8,
0, 16,
8, 16,
16, 16,
24, 16,
24, 8,
16, 8,
), dtype='f4')
textureVerticesHat = numpy.array(
(
56, 16,
56, 8,
64, 8,
64, 16,
48, 16,
48, 8,
40, 8,
40, 16,
56, 0,
48, 0,
48, 8,
56, 8,
48, 0,
40, 0,
40, 8,
48, 8,
40, 8,
32, 8,
32, 16,
40, 16,
48, 16,
56, 16,
56, 8,
48, 8,
), dtype='f4')
textureVerticesHead.shape = (24, 2)
textureVerticesHat.shape = (24, 2)
textureVerticesHead *= 4
textureVerticesHead[:, 1] *= 2
textureVerticesHat *= 4
textureVerticesHat[:, 1] *= 2
self.texVerts = (textureVerticesHead, textureVerticesHat)
self.playerPos = {0:{}, -1:{}, 1:{}}
self.playerTexture = {}
self.revPlayerPos = {0:{}, -1:{}, 1:{}}
self.inOtherDimension = {0: [], 1: [], -1: []}
self.playercache = PlayerCache()
self.markerList = DisplayList()
panel = None
def showPanel(self):
if not self.panel:
self.panel = PlayerPositionPanel(self)
self.panel.centery = (self.editor.mainViewport.height - self.editor.toolbar.height) / 2 + self.editor.subwidgets[0].height
self.panel.left = self.editor.left
self.editor.add(self.panel)
def hidePanel(self):
if self.panel and self.panel.parent:
self.panel.parent.remove(self.panel)
self.panel = None
def drawToolReticle(self):
if self.movingPlayer is None:
return
pos, direction = self.editor.blockFaceUnderCursor
dim = self.editor.level.getPlayerDimension(self.movingPlayer)
pos = (pos[0], pos[1] + 2, pos[2])
x, y, z = pos
# x,y,z=map(lambda p,d: p+d, pos, direction)
GL.glEnable(GL.GL_BLEND)
GL.glColor(1.0, 1.0, 1.0, 0.5)
self.drawCharacterHead(x + 0.5, y + 0.75, z + 0.5, self.revPlayerPos[dim][self.movingPlayer], dim)
GL.glDisable(GL.GL_BLEND)
GL.glEnable(GL.GL_DEPTH_TEST)
self.drawCharacterHead(x + 0.5, y + 0.75, z + 0.5, self.revPlayerPos[dim][self.movingPlayer], dim)
drawTerrainCuttingWire(BoundingBox((x, y, z), (1, 1, 1)))
drawTerrainCuttingWire(BoundingBox((x, y - 1, z), (1, 1, 1)))
#drawTerrainCuttingWire( BoundingBox((x,y-2,z), (1,1,1)) )
GL.glDisable(GL.GL_DEPTH_TEST)
markerLevel = None
def drawToolMarkers(self):
if not config.settings.drawPlayerHeads.get():
return
if self.markerLevel != self.editor.level:
self.markerList.invalidate()
self.markerLevel = self.editor.level
self.markerList.call(self._drawToolMarkers)
def _drawToolMarkers(self):
GL.glColor(1.0, 1.0, 1.0, 0.5)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glMatrixMode(GL.GL_MODELVIEW)
for player in self.editor.level.players:
try:
pos = self.editor.level.getPlayerPosition(player)
yaw, pitch = self.editor.level.getPlayerOrientation(player)
dim = self.editor.level.getPlayerDimension(player)
self.inOtherDimension[dim].append(player)
self.playerPos[dim][pos] = player
self.revPlayerPos[dim][player] = pos
if player != "Player" and config.settings.downloadPlayerSkins.get():
# print 7
r = self.playercache.getPlayerSkin(player, force_download=False)
if not isinstance(r, (str, unicode)):
r = r.join()
self.playerTexture[player] = loadPNGTexture(r)
else:
self.playerTexture[player] = self.charTex
if dim != self.editor.level.dimNo:
continue
x, y, z = pos
GL.glPushMatrix()
GL.glTranslate(x, y, z)
GL.glRotate(-yaw, 0, 1, 0)
GL.glRotate(pitch, 1, 0, 0)
GL.glColor(1, 1, 1, 1)
self.drawCharacterHead(0, 0, 0, (x,y,z), self.editor.level.dimNo)
GL.glPopMatrix()
# GL.glEnable(GL.GL_BLEND)
drawTerrainCuttingWire(FloatBox((x - .5, y - .5, z - .5), (1, 1, 1)),
c0=(0.3, 0.9, 0.7, 1.0),
c1=(0, 0, 0, 0),
)
#GL.glDisable(GL.GL_BLEND)
except Exception, e:
print "Exception in editortools.player.PlayerPositionTool._drawToolMarkers:", repr(e)
import traceback
print traceback.format_exc()
continue
GL.glDisable(GL.GL_DEPTH_TEST)
def drawCharacterHead(self, x, y, z, realCoords=None, dim=0):
GL.glEnable(GL.GL_CULL_FACE)
origin = (x - 0.25, y - 0.25, z - 0.25)
size = (0.5, 0.5, 0.5)
box = FloatBox(origin, size)
hat_origin = (x - 0.275, y - 0.275, z - 0.275)
hat_size = (0.55, 0.55, 0.55)
hat_box = FloatBox(hat_origin, hat_size)
if realCoords is not None and self.playerPos[dim][realCoords] != "Player" and config.settings.downloadPlayerSkins.get():
drawCube(box,
texture=self.playerTexture[self.playerPos[dim][realCoords]], textureVertices=self.texVerts[0])
GL.glEnable(GL.GL_BLEND)
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
drawCube(hat_box,
texture=self.playerTexture[self.playerPos[dim][realCoords]], textureVertices=self.texVerts[1])
GL.glDisable(GL.GL_BLEND)
else:
drawCube(box,
texture=self.charTex, textureVertices=self.texVerts[0])
GL.glDisable(GL.GL_CULL_FACE)
#@property
#def statusText(self):
# if not self.panel:
# return ""
# player = self.panel.selectedPlayer
# if player == "Player":
# return "Click to move the player"
#
# return _("Click to move the player \"{0}\"").format(player)
@alertException
def mouseDown(self, evt, pos, direction):
if self.movingPlayer is None:
return
pos = (pos[0] + 0.5, pos[1] + 2.75, pos[2] + 0.5)
op = PlayerMoveOperation(self, pos, self.movingPlayer)
self.movingPlayer = None
if self.recordMove:
self.editor.addOperation(op)
addingMoving = False
else:
self.editor.performWithRetry(op) #Prevent recording of Undo when adding player
self.recordMove = True
addingMoving = True
if op.canUndo and not addingMoving:
self.editor.addUnsavedEdit()
def keyDown(self, evt):
keyname = evt.dict.get('keyname', None) or self.editor.get_root().getKey(evt)
if not self.recordMove:
if not pygame.key.get_focused():
return
if keyname == "Escape":
self.recordMove = True
if self.panel and self.panel.__class__ == PlayerPositionPanel:
self.panel.key_down(evt)
def keyUp(self, evt):
pass
def levelChanged(self):
self.markerList.invalidate()
@alertException
def toolSelected(self):
self.showPanel()
self.movingPlayer = None
@alertException
def toolReselected(self):
if self.panel:
self.gotoPlayer()
class PlayerSpawnPositionOptions(ToolOptions):
def __init__(self, tool):
ToolOptions.__init__(self, name='Panel.PlayerSpawnPositionOptions')
self.tool = tool
self.spawnProtectionCheckBox = CheckBox(ref=AttrRef(tool, "spawnProtection"))
self.spawnProtectionLabel = Label("Spawn Position Safety")
self.spawnProtectionLabel.mouse_down = self.spawnProtectionCheckBox.mouse_down
tooltipText = "Minecraft will randomly move your spawn point if you try to respawn in a column where there are no blocks at Y=63 and Y=64. Only uncheck this box if Minecraft is changed."
self.spawnProtectionLabel.tooltipText = self.spawnProtectionCheckBox.tooltipText = tooltipText
row = Row((self.spawnProtectionCheckBox, self.spawnProtectionLabel))
col = Column((Label("Spawn Point Options"), row, Button("OK", action=self.dismiss)))
self.add(col)
self.shrink_wrap()
class PlayerSpawnPositionTool(PlayerPositionTool):
surfaceBuild = True
toolIconName = "playerspawn"
tooltipText = "Move Spawn Point\nRight-click for options"
def __init__(self, *args):
PlayerPositionTool.__init__(self, *args)
self.optionsPanel = PlayerSpawnPositionOptions(self)
def toolEnabled(self):
return self.editor.level.dimNo == 0
def showPanel(self):
self.panel = Panel(name='Panel.PlayerSpawnPositionTool')
button = Button("Goto Spawn", action=self.gotoSpawn)
self.panel.add(button)
self.panel.shrink_wrap()
self.panel.left = self.editor.left
self.panel.centery = self.editor.centery
self.editor.add(self.panel)
def gotoSpawn(self):
cv = self.editor.mainViewport.cameraVector
pos = self.editor.level.playerSpawnPosition()
pos = map(lambda p, c: p - c * 5, pos, cv)
self.editor.mainViewport.cameraPosition = pos
self.editor.mainViewport.stopMoving()
@property
def statusText(self):
return "Click to set the spawn position."
spawnProtection = config.spawn.spawnProtection.property()
def drawToolReticle(self):
pos, direction = self.editor.blockFaceUnderCursor
x, y, z = map(lambda p, d: p + d, pos, direction)
color = (1.0, 1.0, 1.0, 0.5)
if isinstance(self.editor.level, pymclevel.MCInfdevOldLevel) and self.spawnProtection:
if not positionValid(self.editor.level, (x, y, z)):
color = (1.0, 0.0, 0.0, 0.5)
GL.glColor(*color)
GL.glEnable(GL.GL_BLEND)
self.drawCage(x, y, z)
self.drawCharacterHead(x + 0.5, y + 0.5, z + 0.5)
GL.glDisable(GL.GL_BLEND)
GL.glEnable(GL.GL_DEPTH_TEST)
self.drawCage(x, y, z)
self.drawCharacterHead(x + 0.5, y + 0.5, z + 0.5)
color2 = map(lambda a: a * 0.4, color)
drawTerrainCuttingWire(BoundingBox((x, y, z), (1, 1, 1)), color2, color)
GL.glDisable(GL.GL_DEPTH_TEST)
def _drawToolMarkers(self):
x, y, z = self.editor.level.playerSpawnPosition()
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
GL.glEnable(GL.GL_BLEND)
color = config.selectionColors.black.get() + (0.35,)
GL.glColor(*color)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
GL.glLineWidth(2.0)
drawCube(FloatBox((x, y, z), (1, 1, 1)))
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
drawCube(FloatBox((x, y, z), (1, 1, 1)))
GL.glDisable(GL.GL_BLEND)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glColor(1.0, 1.0, 1.0, 1.0)
self.drawCage(x, y, z)
self.drawCharacterHead(x + 0.5, y + 0.5 + 0.125 * numpy.sin(self.editor.frames * 0.05), z + 0.5)
GL.glDisable(GL.GL_DEPTH_TEST)
def drawCage(self, x, y, z):
cageTexVerts = numpy.array(pymclevel.MCInfdevOldLevel.materials.blockTextures[52, 0])
pixelScale = 0.5 if self.editor.level.materials.name in ("Pocket", "Alpha") else 1.0
texSize = 16 * pixelScale
cageTexVerts = cageTexVerts.astype(float) * pixelScale
cageTexVerts = numpy.array(
[((tx, ty), (tx + texSize, ty), (tx + texSize, ty + texSize), (tx, ty + texSize)) for (tx, ty) in
cageTexVerts], dtype='float32')
GL.glEnable(GL.GL_ALPHA_TEST)
drawCube(BoundingBox((x, y, z), (1, 1, 1)), texture=pymclevel.alphaMaterials.terrainTexture,
textureVertices=cageTexVerts)
GL.glDisable(GL.GL_ALPHA_TEST)
@alertException
def mouseDown(self, evt, pos, direction):
pos = map(lambda p, d: p + d, pos, direction)
op = PlayerSpawnMoveOperation(self, pos)
try:
self.editor.addOperation(op)
if op.canUndo:
self.editor.addUnsavedEdit()
self.markerList.invalidate()
except SpawnPositionInvalid, e:
if "Okay" != ask(str(e), responses=["Okay", "Fix it for me!"]):
level = self.editor.level
status = ""
if not okayAt63(level, pos):
level.setBlockAt(pos[0], 63, pos[2], 1)
status += _("Block added at y=63.\n")
if 59 < pos[1] < 63:
pos[1] = 63
status += _("Spawn point moved upward to y=63.\n")
if not okayAboveSpawn(level, pos):
if pos[1] > 63 or pos[1] < 59:
lpos = (pos[0], pos[1] - 1, pos[2])
if level.blockAt(*pos) == 0 and level.blockAt(*lpos) != 0 and okayAboveSpawn(level, lpos):
pos = lpos
status += _("Spawn point shifted down by one block.\n")
if not okayAboveSpawn(level, pos):
for i in xrange(1, 4):
level.setBlockAt(pos[0], pos[1] + i, pos[2], 0)
status += _("Blocks above spawn point cleared.\n")
self.editor.invalidateChunks([(pos[0] // 16, pos[2] // 16)])
op = PlayerSpawnMoveOperation(self, pos)
try:
self.editor.addOperation(op)
if op.canUndo:
self.editor.addUnsavedEdit()
self.markerList.invalidate()
except SpawnPositionInvalid, e:
alert(str(e))
return
if len(status):
alert(_("Spawn point fixed. Changes: \n\n") + status)
@alertException
def toolReselected(self):
self.gotoSpawn()
|
labgraph/graphs/node_test_harness.py | Yunusbcr/labgraph | 124 | 5289 | <gh_stars>100-1000
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import asyncio
import functools
import inspect
from contextlib import contextmanager
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Generic,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from ..messages.message import Message
from ..util.testing import get_event_loop
from .config import Config
from .method import AsyncPublisher
from .node import Node
from .state import State
from .topic import Topic
N = TypeVar("N", bound=Node) # Node type
T = TypeVar("T", bound=Tuple[Topic, Message]) # Type yielded by async functions
class NodeTestHarness(Generic[N]):
"""
Utility class for testing Labgraph nodes. This allows a user to test some behavior
of a node in an asyncio event loop, with the harness taking care of setting up and
cleaning up the node.
Args:
node_type: The type of node this harness will test.
"""
def __init__(self, node_type: Type[N]) -> None:
self.node_type: Type[N] = node_type
@contextmanager
def get_node(
self, config: Optional[Config] = None, state: Optional[State] = None
) -> Iterator[N]:
"""
Context manager to create, configure and yield a node of specified type.
Node is cleaned up when the context manager exits.
Args:
config: The configuration to set on the node, if provided.
state: The state to set on the Node, if provided.
"""
node = None
try:
node = self.node_type(config=config, state=state)
node.setup()
yield node
finally:
if node is not None:
node.cleanup()
@overload
def run_with_harness(
node_type: Type[N],
fn: Callable[[N], AsyncIterable[T]],
config: Optional[Config],
state: Optional[State],
max_num_results: Optional[int] = None,
) -> List[T]:
...
@overload
def run_with_harness(
node_type: Type[N],
fn: Callable[[N], Awaitable[T]],
config: Optional[Config],
state: Optional[State],
) -> T:
...
def run_with_harness(node_type, fn, config=None, state=None, max_num_results=None):
"""
Runs an async function on a new node of the provided type using `NodeTestHarness`.
Args:
node_type: The type of node to create.
fn:
The async function to run. An instance of a node typed `node_type` will be
provided to the function as an argument.
config: The configuration to set on the node, if provided.
state: The state to set on the node, if provided.
max_num_results:
If `fn` is an async generator, the maximum number of results it will generate.
If this is `None`, then the generator can produce an unbounded number of
results.
"""
# Check whether the max_num_results argument was improperly provided
_check_max_num_results_arg(run_with_harness.__name__, fn, max_num_results)
test_harness = NodeTestHarness(node_type=node_type)
with test_harness.get_node(config=config, state=state) as node:
return run_async(fn, args=[node], max_num_results=max_num_results)
@overload
def run_async(
fn: Callable[..., Awaitable[T]],
args: Optional[Sequence[Any]] = None,
kwargs: Optional[Mapping[str, Any]] = None,
) -> T:
...
@overload
def run_async(
fn: Callable[..., AsyncIterable[T]],
args: Optional[Sequence[Any]] = None,
kwargs: Optional[Mapping[str, Any]] = None,
max_num_results: Optional[int] = None,
) -> List[T]:
...
def run_async(fn, args=None, kwargs=None, max_num_results=None):
"""
Runs an async function to completion. Uses the current thread's event loop. Blocks
until the async function has finished executing. Forwards all arguments after `fn`
to the async function.
Args:
fn: The async function to run.
args: Positional arguments to forward to the function.
kwargs: Keyword arguments to forward to the function.
max_num_results:
If `fn` is an async generator, the maximum number of results it will generate.
If this is `None`, then the generator can produce an unbounded number of
results.
"""
# Check whether the max_num_results argument was improperly provided
_check_max_num_results_arg(run_async.__name__, fn, max_num_results)
# Unwrap functools.partial so we can check whether it is async
if isinstance(fn, functools.partial):
test_fn = fn.func
else:
test_fn = fn
if inspect.isasyncgenfunction(test_fn):
return get_event_loop().run_until_complete(
_async_generator_to_list(
fn=fn,
args=args or [],
kwargs=kwargs or {},
max_num_results=max_num_results,
)
)
elif asyncio.iscoroutinefunction(test_fn):
return get_event_loop().run_until_complete(fn(*(args or []), **(kwargs or {})))
else:
raise TypeError(f"{run_async.__name__}: function '{fn}' is not async")
def _check_max_num_results_arg(
called_fn_name: str,
fn: Union[Callable[..., Awaitable[Any]], Callable[..., AsyncIterable[Any]]],
max_num_results: Optional[int] = None,
) -> None:
if not inspect.isasyncgenfunction(fn) and max_num_results is not None:
raise TypeError(
f"{called_fn_name}: function '{fn}' is not an async generator but "
"max_num_results was provided"
)
async def _async_generator_to_list(
fn: Callable[..., AsyncIterable[T]],
args: Sequence[Any],
kwargs: Mapping[str, Any],
max_num_results: Optional[int] = None,
) -> List[T]:
if max_num_results is not None and max_num_results < 0:
raise ValueError("max_num_results must be non-negative")
result = []
async for retval in fn(*args, **kwargs):
result.append(retval)
if max_num_results is not None and len(result) >= max_num_results:
return result
return result
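# --- Example (editor's sketch) ------------------------------------------
# A minimal, self-contained illustration of `run_async` with a plain
# coroutine and with an async generator. It does not depend on any
# particular Node type; `run_with_harness` behaves the same way, except that
# the node instance is passed to `fn` as its first argument. The helper
# below exists for illustration only and is not called anywhere.
def _example_run_async() -> None:
    async def add(a: int, b: int) -> int:
        return a + b
    async def count(limit: int) -> AsyncIterable[int]:
        for i in range(limit):
            yield i
    # A coroutine returns a single value; an async generator returns a list,
    # truncated here to the first three results.
    assert run_async(add, args=[1, 2]) == 3
    assert run_async(count, args=[10], max_num_results=3) == [0, 1, 2]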
|
extras/usd/examples/usdMakeFileVariantModelAsset/usdMakeFileVariantModelAsset.py | DougRogers-DigitalFish/USD | 3,680 | 5383 | #!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
'''
Creates a top-level, referenceable asset USD file from one or more
'variant' files, each of which can contain arbitrary scene description.
When supplying multiple files, one must also provide the name for a
variantSet that will be constructed to switch between the files.
The asset file will place the variant files behind a "payload", which will
enable consumers to defer loading and processing of the data when composed
onto a UsdStage.
The names of the created variations will be taken directly from the basename
of their corresponding input file.
'''
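# Example invocation (editor's sketch; the asset and file names below are
# hypothetical):
#
#   python usdMakeFileVariantModelAsset.py Chair chairA.usd chairB.usd \
#       --variantSet modelingVariant --kind component
#
# This writes Chair.usd containing a "modelingVariant" variantSet whose
# variants "chairA" and "chairB" each payload the corresponding input file.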
from __future__ import print_function
from pxr import Tf, Kind, Sdf, Usd
# ToDo:
# - handle multiple variantSets
# - layer multiple kinds of files (e.g. shading.usd over geom.usd)
# - allow output filename to be independently specifiable? (Breaks with Pixar
# convention)
# - allow variant names to be specified independently of variant file names
# - Compute and present (per-variant) UsdGeomModelAPI.extentsHint
# - Compute and author UsdModelAPI::SetPayloadAssetDependencies()
def CreateModelStage(assetName,
assetIdentifier=None,
kind=Kind.Tokens.component,
filesToReference=None,
variantSetName=None,
defaultVariantSelection=None):
# Preconditions....
if not Tf.IsValidIdentifier(assetName):
print("assetName '%s' must be a valid identifier. Aborting." %
assetName)
return None
if variantSetName and not Tf.IsValidIdentifier(variantSetName):
print("variantSetName '%s' must be a valid identifier. Aborting." %
variantSetName)
return None
if filesToReference and len(filesToReference) > 1 and not variantSetName:
# For now, we only allow multiple files to reference if we're switching
# them with a variantSet. We can relax this restriction when we can
# make internal payload arcs (bug #119960)
print("Cannot create multiple-file-reference without a variantSet. Aborting")
return None
if not Kind.Registry.IsA(kind, Kind.Tokens.model):
print("kind '%s' is not a valid model kind, which must be one of:" %
kind)
print(Kind.Registry.GetAllKinds())
return None
# Create the root file for the stage, and make it ASCII text.
# We need some nicer sugar for this.
fileName = assetName + ".usd"
rootLayer = Sdf.Layer.CreateNew(fileName, args = {'format':'usda'})
stage = Usd.Stage.Open(rootLayer)
# Name the root prim after the asset. Don't give it a type, since we
# want that to come from referenced files. Make it be the "default prim"
    # so that we can reference the resulting file without specifying a
# prim path
rootPath = Sdf.Path.absoluteRootPath
modelRootPrim = stage.DefinePrim(rootPath.AppendChild(assetName))
stage.SetDefaultPrim(modelRootPrim)
modelAPI = Usd.ModelAPI(modelRootPrim)
modelAPI.SetKind(kind)
# See http://openusd.org/docs/api/class_usd_model_a_p_i.html#details
# for more on assetInfo
modelAPI.SetAssetName(assetName)
modelAPI.SetAssetIdentifier(assetIdentifier or fileName)
# Add a class named after the asset, and make the asset inherit from it.
# This is not necessary for a valid asset, and the class-naming is a Pixar
# convention. But always having a class associated with each asset is
# extremely useful for non-destructively editing many referenced or
# instanced assets of the same type.
classPrim = stage.CreateClassPrim(rootPath.AppendChild("_class_"+assetName))
modelRootPrim.GetInherits().AddInherit(classPrim.GetPath())
if not filesToReference:
# weird edge case... we're done
return stage
elif len(filesToReference) == 1 and not variantSetName:
# The other, more plausible edge case: we're just wrapping
# some other file (e.g. alembic) in order to give it a payload
# and other proper USD trappings - no variants
modelRootPrim.GetPayloads().AddPayload(Sdf.Payload(filesToReference[0]))
return stage
# OK, we're making a variantSet, and we are going to vary the payload
# in each variant
varSet = modelRootPrim.GetVariantSet(variantSetName)
for variantFile in filesToReference:
import os
variantName = os.path.splitext(os.path.basename(variantFile))[0]
# If we didn't specify a default selection, choose the first one
if not defaultVariantSelection:
defaultVariantSelection = variantName
varSet.AddVariant(variantName)
varSet.SetVariantSelection(variantName)
# The context object makes all edits "go inside" the variant we
# just created.
with varSet.GetVariantEditContext():
modelRootPrim.GetPayloads().AddPayload(Sdf.Payload(variantFile))
# Now put the variantSet into the state we want it to be in by default
varSet.SetVariantSelection(defaultVariantSelection)
return stage
if __name__ == "__main__":
import argparse, os, sys
descr = __doc__.strip()
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
description=descr)
parser.add_argument('assetName')
parser.add_argument('variantFiles', nargs='+')
parser.add_argument(
'-k', '--kind', default='component', action='store', metavar='kind',
help="Model kind, one of: component, group, or assembly")
parser.add_argument(
'-v', '--variantSet', default='', action='store', metavar='variantSet',
help="Variantset to create to modulate variantFiles. Can be elided "
"if only one file is supplied")
parser.add_argument(
'-i', '--identifier', default='', action='store', metavar='identifier',
help="The identifier you would expect your Ar asset-resolver plugin "
"to resolve to the (installed) assetName.usd file this script creates. "
" If unspecified, defaults to assetName.usd")
parser.add_argument(
'-d', '--defaultVariantSelection', default='', action='store',
metavar='defaultVariantSelection',
help="This variant will be selected by default when the asset is "
"added to a composition. If unspecified, will be the variant for "
"'variantFile1'")
args = parser.parse_args()
if not args.assetName or args.assetName == '':
parser.error("No assetName specified")
stage = CreateModelStage(args.assetName,
assetIdentifier=args.identifier,
kind=args.kind,
filesToReference=args.variantFiles,
variantSetName=args.variantSet,
defaultVariantSelection=args.defaultVariantSelection)
if stage:
stage.GetRootLayer().Save()
exit(0)
else:
exit(1)
|
src/zvt/recorders/em/meta/em_stockhk_meta_recorder.py | vishalbelsare/zvt | 2,032 | 5389 | # -*- coding: utf-8 -*-
from zvt.contract.api import df_to_db
from zvt.contract.recorder import Recorder
from zvt.domain.meta.stockhk_meta import Stockhk
from zvt.recorders.em import em_api
class EMStockhkRecorder(Recorder):
provider = "em"
data_schema = Stockhk
def run(self):
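        # Editor's note: get_tradable_list(..., hk_south=True) returns the HK
        # listings tradable through the southbound Stock Connect; they are
        # flagged with south=True below, all remaining listings with south=False.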
df_south = em_api.get_tradable_list(entity_type="stockhk", hk_south=True)
df_south = df_south.set_index("code", drop=False)
df_south["south"] = True
df = em_api.get_tradable_list(entity_type="stockhk")
df = df.set_index("code", drop=False)
df_other = df.loc[~df.index.isin(df_south.index)].copy()
df_other["south"] = False
df_to_db(df=df_south, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)
df_to_db(df=df_other, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)
if __name__ == "__main__":
recorder = EMStockhkRecorder()
recorder.run()
# the __all__ is generated
__all__ = ["EMStockhkRecorder"]
|
moderngl_window/resources/data.py | DavideRuzza/moderngl-window | 142 | 5424 | <reponame>DavideRuzza/moderngl-window
"""
Registry general data files
"""
from typing import Any
from moderngl_window.resources.base import BaseRegistry
from moderngl_window.meta import DataDescription
class DataFiles(BaseRegistry):
"""Registry for requested data files"""
settings_attr = "DATA_LOADERS"
def load(self, meta: DataDescription) -> Any:
"""Load data file with the configured loaders.
Args:
meta (:py:class:`~moderngl_window.meta.data.DataDescription`): the resource description
Returns:
Any: The loaded resource
"""
return super().load(meta)
data = DataFiles()
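# --- Example (editor's sketch) -------------------------------------------
# Hypothetical usage of the module-level registry above; the path and the
# "json" kind are illustrative assumptions and depend on the DATA_LOADERS
# configured in the settings. The helper is not called anywhere.
def _example_load_json() -> Any:
    return data.load(DataDescription(path="config/settings.json", kind="json"))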
|
NLP/UNIMO/src/finetune/visual_entailment.py | zhangyimi/Research | 1,319 | 5443 | <gh_stars>1000+
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for visual_entailment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import time
import numpy as np
import paddle.fluid as fluid
from model.unimo_finetune import UNIMOModel
from eval import glue_eval
from collections import OrderedDict
from utils.utils import print_eval_log
def kl_divergence_with_logits(q_logits, p_logits):
"""
symmetric KL-divergence (See SMART, Sec 3.1)
q_logits: logits
p_logits: delta_logits
"""
q = fluid.layers.softmax(input=q_logits)
p = fluid.layers.softmax(input=p_logits)
kl_qp = fluid.layers.reduce_sum(q * (fluid.layers.log(q) - fluid.layers.log(p)), -1)
kl_pq = fluid.layers.reduce_sum(p * (fluid.layers.log(p) - fluid.layers.log(q)), -1)
vat_loss = fluid.layers.mean(x=kl_qp+kl_pq)
return vat_loss
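# Editor's sketch: a NumPy reference of the symmetric KL above, useful as a
# sanity check for the fluid implementation. It is not used anywhere in
# training.
def _kl_divergence_with_logits_np(q_logits, p_logits):
    """Reference symmetric KL: mean over the batch of KL(q||p) + KL(p||q)."""
    def softmax(x):
        e = np.exp(x - np.max(x, axis=-1, keepdims=True))
        return e / np.sum(e, axis=-1, keepdims=True)
    q = softmax(np.asarray(q_logits, dtype=np.float64))
    p = softmax(np.asarray(p_logits, dtype=np.float64))
    kl_qp = np.sum(q * (np.log(q) - np.log(p)), axis=-1)
    kl_pq = np.sum(p * (np.log(p) - np.log(q)), axis=-1)
    return float(np.mean(kl_qp + kl_pq))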
def create_model(args, config, pyreader_name="train_reader", is_train=True):
"""create_model"""
shapes = [[-1, args.max_seq_len, 1], # src_ids
[-1, args.max_seq_len, 1], # pos_ids
[-1, args.max_seq_len, 1], # sent_ids
[-1, args.max_img_len + args.max_seq_len, args.max_img_len + args.max_seq_len], # input_mask
[-1, args.max_img_len, 1], # v_mask
[-1, args.max_seq_len, 1], # t_mask
[-1, args.max_img_len, config["image_embedding_size"]], # image_embedding
[-1, args.max_img_len, 5], # image_loc
[-1, 1] # labels
]
dtypes = ['int64', 'int64', 'int64', 'float32', 'float32', 'float32', 'float32','float32', 'int64']
lod_levels = [0, 0, 0, 0, 0, 0, 0, 0, 0]
pyreader = fluid.layers.py_reader(
capacity=70,
shapes=shapes,
dtypes=dtypes,
lod_levels=lod_levels,
name=pyreader_name,
use_double_buffer=True)
(src_ids, pos_ids, sent_ids, input_mask, v_mask, t_mask, image_embedding, image_loc, labels) \
= fluid.layers.read_file(pyreader)
emb_ids = {"word_embedding": src_ids, "sent_embedding": sent_ids, "pos_embedding": pos_ids}
image_input = {"image_embedding": image_embedding, "loc_embedding": image_loc}
adv_step, adv_lr, norm_type, adv_max_norm, adv_init_mag = \
args.adv_step, args.adv_lr, args.norm_type, args.adv_max_norm, args.adv_init_mag
assert adv_step > 0 and adv_init_mag > 0
def get_loss_and_logits(text_feats, image_feats):
feats = text_feats + image_feats
cls_params_name = ["cls_out_w_0", "cls_out_b_0"]
feats = fluid.layers.fc(
input=feats,
size=2048,
param_attr=fluid.ParamAttr(
name=cls_params_name[0],
initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
bias_attr=fluid.ParamAttr(
name=cls_params_name[1], initializer=fluid.initializer.Constant(0.)))
feats = fluid.layers.dropout(
x=feats,
dropout_prob=0.1,
dropout_implementation="upscale_in_train")
cls_params_name = ["cls_out_w_1", "cls_out_b_1"]
logits = fluid.layers.fc(
input=feats,
size=args.num_labels,
param_attr=fluid.ParamAttr(
name=cls_params_name[0],
initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
bias_attr=fluid.ParamAttr(
name=cls_params_name[1], initializer=fluid.initializer.Constant(0.)))
ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
logits=logits, label=labels, return_softmax=True)
loss = fluid.layers.mean(x=ce_loss) / adv_step
return loss, logits, probs
def init_delta(input, mask, shape, name='text'):
real_seq_len = fluid.layers.shape(input)[1]
fake = fluid.layers.data(name=name+"_fake", shape=shape, dtype='float32')
mask_slice = fluid.layers.slice(mask, axes=[1], starts=[0], ends=fluid.layers.shape(mask)[1])
length = fluid.layers.reduce_sum(mask_slice, dim=1, keep_dim=True) * shape[-1]
# l2 norm
delta = fluid.layers.uniform_random_batch_size_like(mask, shape=fake.shape, min=-1.0, max=1.0)
delta = fluid.layers.slice(delta, axes=[1], starts=[0], ends=real_seq_len)
delta = delta * mask_slice
mag = adv_init_mag / fluid.layers.sqrt(length)
delta = delta * mag
return delta
if is_train:
text_emb_shape = [-1, args.max_seq_len, config['hidden_size']]
text_delta = init_delta(src_ids, t_mask, text_emb_shape, name='text')
image_emb_shape = [-1, args.max_img_len, config['image_embedding_size']]
image_delta = init_delta(image_embedding, v_mask, image_emb_shape, name='img')
else:
text_delta, image_delta = None, None
def pgd_with_l2(loss, delta):
# grad
delta_grad = fluid.backward.gradients(loss, delta)[0]
# l2 norm
delta_norm = fluid.layers.sqrt(fluid.layers.reduce_sum(fluid.layers.pow(fluid.layers.reshape(delta_grad, \
[fluid.layers.shape(delta_grad)[0], -1]), factor=2), dim=1, keep_dim=True))
delta_norm = fluid.layers.clamp(delta_norm, min=float(1e-8))
# pgd
delta = delta + adv_lr * delta_grad / delta_norm
# projection
if adv_max_norm > 0:
exceed_mask = (delta_norm > adv_max_norm).astype('float32')
reweights = (adv_max_norm / delta_norm) * exceed_mask + (1 - exceed_mask)
delta = delta * reweights
delta_grad.stop_gradient=True
return delta
loss = None
for iter in range(adv_step):
vl_pure = UNIMOModel(
emb_ids=emb_ids,
input_mask=input_mask,
config=config,
image_input=image_input,
weight_sharing=args.weight_sharing
)
vl_text = UNIMOModel(
text_adv_delta=text_delta,
emb_ids=emb_ids,
input_mask=input_mask,
config=config,
image_input=image_input,
weight_sharing=args.weight_sharing
)
vl_image = UNIMOModel(
image_adv_delta=image_delta,
emb_ids=emb_ids,
input_mask=input_mask,
config=config,
image_input=image_input,
weight_sharing=args.weight_sharing
)
h_pure_text, h_pure_image = vl_pure.get_pooled_output()
h_text_text, h_text_image = vl_text.get_pooled_output()
h_image_text, h_image_image = vl_image.get_pooled_output()
loss_pure, logit_pure, probs_pure = get_loss_and_logits(h_pure_text, h_pure_image)
loss_text, logit_text, probs_text = get_loss_and_logits(h_text_text, h_text_image)
loss_image, logit_image, probs_image = get_loss_and_logits(h_image_text, h_image_image)
if is_train:
text_delta = pgd_with_l2(loss_text, text_delta)
image_delta = pgd_with_l2(loss_image, image_delta)
kl_adv_text_loss = kl_divergence_with_logits(logit_pure, logit_text)
kl_adv_image_loss = kl_divergence_with_logits(logit_pure, logit_image)
cur_loss = loss_pure + loss_text + loss_image + kl_adv_text_loss + kl_adv_image_loss
loss = cur_loss if loss is None else loss + cur_loss
num_seqs = fluid.layers.create_tensor(dtype='int64')
accuracy = fluid.layers.accuracy(input=probs_pure, label=labels, total=num_seqs)
graph_vars = {
"loss": loss,
"probs": probs_pure,
"accuracy": accuracy,
"labels": labels,
"num_seqs": num_seqs
}
for k, v in graph_vars.items():
v.persistable = False
return pyreader, graph_vars
def evaluate(args, exe, test_pyreader, graph_vars, eval_phase, dev_count=1, gpu_id=0):
"""evaluate"""
all_mat = []
test_pyreader.start()
time_begin = time.time()
fetch_list = [graph_vars["probs"].name, graph_vars["labels"].name]
while True:
try:
np_probs, np_labels = exe.run(fetch_list=fetch_list)
np_preds = np.argmax(np_probs, axis=1).reshape((-1, 1))
np_labels = np_labels.reshape((-1, 1))
mat = np.concatenate([np_preds, np_labels], axis=1)
all_mat.extend(mat.tolist())
except fluid.core.EOFException:
test_pyreader.reset()
break
all_mat = np.array(all_mat)
time_end = time.time()
save_file = "%s/%s.trainers_%d.part_%d.npy" % (args.eval_dir, eval_phase, dev_count, gpu_id)
np.save(save_file, all_mat)
tmp_file = "%s/%s.trainers_%d.part_%d.finish" % (args.eval_dir, eval_phase, dev_count, gpu_id)
tmp_writer = open(tmp_file, "w")
tmp_writer.close()
if gpu_id == 0:
while True:
ret = os.popen('find %s -maxdepth 1 -name "%s.trainers_%d.part_*.finish"' %
(args.eval_dir, eval_phase, dev_count)).readlines()
if len(ret) != dev_count:
time.sleep(1)
continue
else:
break
all_mats = []
save_files = glob.glob("%s/%s.trainers_%d.part_*.npy" % (args.eval_dir, eval_phase, dev_count))
for cur_save_file in save_files:
mat = np.load(cur_save_file).tolist()
all_mats.extend(mat)
all_mats = np.array(all_mats)
cur_time = str(int(time.time()))
os.system("mkdir %s/%s" % (args.eval_dir, cur_time))
os.system("mv %s/%s.trainers_%d.* %s/%s" % (args.eval_dir, eval_phase, dev_count, args.eval_dir, cur_time))
ret = OrderedDict()
ret['phase'] = eval_phase
ret['loss'] = -1
ret['data_num'] = all_mats.shape[0]
ret['used_time'] = round(time_end - time_begin, 4)
metrics = OrderedDict()
metrics["simple_accuracy"] = glue_eval.simple_accuracy
if args.eval_mertrics in metrics:
ret_metric = metrics[args.eval_mertrics](all_mats[:, 0], all_mats[:, 1])
ret.update(ret_metric)
print_eval_log(ret)
else:
raise ValueError('unsupported metric {}'.format(args.eval_mertrics))
return ret
else:
return None
|
pytorch_translate/models/__init__.py | Ayansam1152/translate | 748 | 5451 | <reponame>Ayansam1152/translate
#!/usr/bin/env python3
import importlib
import os
# automatically import any Python files in the models/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
if file.endswith(".py") and not file.startswith("_"):
model_name = file[: file.find(".py")]
importlib.import_module("pytorch_translate.models." + model_name)
|
keras/models.py | kalyc/keras-apache-mxnet | 300 | 5455 | <filename>keras/models.py
"""Model-related utilities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import backend as K
from .utils.generic_utils import has_arg
from .utils.generic_utils import to_list
from .engine.input_layer import Input
from .engine.input_layer import InputLayer
from .engine.training import Model
from .engine.sequential import Sequential
from .engine.saving import save_model
from .engine.saving import load_model
from .engine.saving import model_from_config
from .engine.saving import model_from_yaml
from .engine.saving import model_from_json
from .engine.saving import save_mxnet_model
try:
import h5py
except ImportError:
h5py = None
def _clone_functional_model(model, input_tensors=None):
"""Clone a functional `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
# Arguments
model: Instance of `Model`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
# Returns
An instance of `Model` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
# Raises
ValueError: in case of invalid `model` argument value.
"""
if not isinstance(model, Model):
raise ValueError('Expected `model` argument '
'to be a `Model` instance, got ', model)
if isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a functional `Model` instance, '
'got a `Sequential` instance instead:', model)
layer_map = {} # Cache for created layers.
tensor_map = {} # Map {reference_tensor: (corresponding_tensor, mask)}
if input_tensors is None:
# Create placeholders to build the model on top of.
input_layers = []
input_tensors = []
for layer in model._input_layers:
input_tensor = Input(batch_shape=layer.batch_input_shape,
dtype=layer.dtype,
sparse=layer.sparse,
name=layer.name)
input_tensors.append(input_tensor)
# Cache newly created input layer.
newly_created_input_layer = input_tensor._keras_history[0]
layer_map[layer] = newly_created_input_layer
for _original, _cloned in zip(model._input_layers, input_layers):
layer_map[_original] = _cloned
else:
# Make sure that all input tensors come from a Keras layer.
# If tensor comes from an input layer: cache the input layer.
input_tensors = to_list(input_tensors)
_input_tensors = []
for i, x in enumerate(input_tensors):
if not K.is_keras_tensor(x):
name = model._input_layers[i].name
input_tensor = Input(tensor=x,
name='input_wrapper_for_' + name)
_input_tensors.append(input_tensor)
# Cache newly created input layer.
original_input_layer = x._keras_history[0]
newly_created_input_layer = input_tensor._keras_history[0]
layer_map[original_input_layer] = newly_created_input_layer
else:
_input_tensors.append(x)
input_tensors = _input_tensors
for x, y in zip(model.inputs, input_tensors):
tensor_map[x] = (y, None) # tensor, mask
    # Iterate over every node in the reference model, in depth order.
depth_keys = list(model._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
for depth in depth_keys:
nodes = model._nodes_by_depth[depth]
for node in nodes:
# Recover the corresponding layer.
layer = node.outbound_layer
# Get or create layer.
if layer not in layer_map:
# Clone layer.
new_layer = layer.__class__.from_config(layer.get_config())
layer_map[layer] = new_layer
layer = new_layer
else:
# Reuse previously cloned layer.
layer = layer_map[layer]
# Don't call InputLayer multiple times.
if isinstance(layer, InputLayer):
continue
# Gather inputs to call the new layer.
reference_input_tensors = node.input_tensors
reference_output_tensors = node.output_tensors
# If all previous input tensors are available in tensor_map,
# then call node.inbound_layer on them.
computed_data = [] # List of tuples (input, mask).
for x in reference_input_tensors:
if x in tensor_map:
computed_data.append(tensor_map[x])
if len(computed_data) == len(reference_input_tensors):
# Call layer.
if node.arguments:
kwargs = node.arguments
else:
kwargs = {}
if len(computed_data) == 1:
computed_tensor, computed_mask = computed_data[0]
if has_arg(layer.call, 'mask'):
if 'mask' not in kwargs:
kwargs['mask'] = computed_mask
output_tensors = to_list(
layer(computed_tensor, **kwargs))
output_masks = to_list(
layer.compute_mask(computed_tensor,
computed_mask))
computed_tensors = [computed_tensor]
computed_masks = [computed_mask]
else:
computed_tensors = [x[0] for x in computed_data]
computed_masks = [x[1] for x in computed_data]
if has_arg(layer.call, 'mask'):
if 'mask' not in kwargs:
kwargs['mask'] = computed_masks
output_tensors = to_list(
layer(computed_tensors, **kwargs))
output_masks = to_list(
layer.compute_mask(computed_tensors,
computed_masks))
# Update tensor_map.
for x, y, mask in zip(reference_output_tensors,
output_tensors,
output_masks):
tensor_map[x] = (y, mask)
# Check that we did compute the model outputs,
# then instantiate a new model from inputs and outputs.
output_tensors = []
for x in model.outputs:
assert x in tensor_map, 'Could not compute output ' + str(x)
tensor, _ = tensor_map[x]
output_tensors.append(tensor)
return Model(input_tensors, output_tensors, name=model.name)
def _clone_sequential_model(model, input_tensors=None):
"""Clone a `Sequential` model instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
# Arguments
model: Instance of `Sequential`.
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
# Returns
An instance of `Sequential` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
# Raises
ValueError: in case of invalid `model` argument value.
"""
if not isinstance(model, Sequential):
raise ValueError('Expected `model` argument '
'to be a `Sequential` model instance, '
'but got:', model)
def clone(layer):
return layer.__class__.from_config(layer.get_config())
layers = [clone(layer) for layer in model.layers]
if input_tensors is None:
return Sequential(layers=layers, name=model.name)
else:
if len(to_list(input_tensors)) != 1:
raise ValueError('To clone a `Sequential` model, we expect '
' at most one tensor '
'as part of `input_tensors`.')
x = to_list(input_tensors)[0]
if K.is_keras_tensor(x):
origin_layer = x._keras_history[0]
if isinstance(origin_layer, InputLayer):
return Sequential(layers=[origin_layer] + layers,
name=model.name)
else:
raise ValueError('Cannot clone a `Sequential` model on top '
'of a tensor that comes from a Keras layer '
'other than an `InputLayer`. '
'Use the functional API instead.')
input_tensor = Input(tensor=x,
name='input_wrapper_for_' + str(x.name))
input_layer = input_tensor._keras_history[0]
return Sequential(layers=[input_layer] + layers, name=model.name)
def clone_model(model, input_tensors=None):
"""Clone any `Model` instance.
Model cloning is similar to calling a model on new inputs,
except that it creates new layers (and thus new weights) instead
of sharing the weights of the existing layers.
# Arguments
model: Instance of `Model`
(could be a functional model or a Sequential model).
input_tensors: optional list of input tensors
to build the model upon. If not provided,
placeholders will be created.
# Returns
An instance of `Model` reproducing the behavior
of the original model, on top of new inputs tensors,
using newly instantiated weights.
# Raises
ValueError: in case of invalid `model` argument value.
"""
if isinstance(model, Sequential):
return _clone_sequential_model(model, input_tensors=input_tensors)
else:
return _clone_functional_model(model, input_tensors=input_tensors)
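# --- Example (editor's sketch) -------------------------------------------
# Minimal illustration of `clone_model`: the clone shares the architecture of
# the original but gets freshly initialized weights; copy them over explicitly
# if identical weights are needed. Layer sizes below are arbitrary, and the
# helper is not called anywhere in this module.
def _example_clone_model():
    from .layers import Dense
    original = Sequential([Dense(4, input_shape=(8,), activation='relu'),
                           Dense(1)])
    clone = clone_model(original)
    clone.set_weights(original.get_weights())
    return clone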
|
experimental/tracing/bin/diff_heap_profiler.py | BearerPipelineTest/catapult | 1,894 | 5476 | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import print_function
import argparse
import gzip
import json
import os
import shutil
import six
from six.moves import zip
_OUTPUT_DIR = 'output'
_OUTPUT_GRAPH_DIR = os.path.join(_OUTPUT_DIR, 'graph')
class Process(object):
def __init__(self):
self.pid = None
self.name = None
self.labels = None
self.types = {}
self.strings = {}
self.stackframes = {}
self.allocators = None
self.version = None
class Entry(object):
def __init__(self):
self.count = None
self.size = None
self.type = None
self.stackframe = None
class GraphDump(object):
def __init__(self):
self.pid = None
self.name = None
self.labels = None
self.heap = None
self.root = ''
self.leaks = ''
self.leak_stackframes = 0
self.leak_objects = 0
def OpenTraceFile(file_path, mode):
if file_path.endswith('.gz'):
return gzip.open(file_path, mode + 'b')
return open(file_path, mode + 't')
def FindMemoryDumps(filename):
processes = {}
with OpenTraceFile(filename, 'r') as f:
data = json.loads(f.read().decode('utf-8'))
for event in data['traceEvents']:
pid = event['pid']
if pid not in processes:
processes[pid] = Process()
processes[pid].pid = pid
process = processes[pid]
# Retrieve process informations.
if event['ph'] == 'M':
if event['name'] == 'process_name' and 'name' in event['args']:
process.name = event['args']['name']
if event['name'] == 'process_labels' and 'labels' in event['args']:
process.labels = event['args']['labels']
if event['name'] == 'typeNames':
process.types = {}
for type_id, t in six.iteritems(event['args']['typeNames']):
process.types[int(type_id)] = t
if event['name'] == 'stackFrames':
process.stackframes = {}
for stack_id, s in six.iteritems(event['args']['stackFrames']):
new_stackframe = {}
new_stackframe['name'] = s['name']
if 'parent' in s:
new_stackframe['parent'] = int(s['parent'])
process.stackframes[int(stack_id)] = new_stackframe
# Look for a detailed memory dump event.
if not ((event['name'] == 'periodic_interval' or
event['name'] == 'explicitly_triggered') and
event['args']['dumps']['level_of_detail'] == 'detailed'):
continue
# Check for a memory dump V1.
if u'heaps' in event['args']['dumps']:
# Get the first memory dump.
if not process.allocators:
process.version = 1
process.allocators = event['args']['dumps']['heaps']
# Check for a memory dump V2.
# See format: [chromium] src/base/trace_event/heap_profiler_event_writer.h
if u'heaps_v2' in event['args']['dumps']:
# Memory dump format V2 is dumping information incrementally. Update
# the cumulated indexes.
maps = event['args']['dumps']['heaps_v2']['maps']
for string in maps['strings']:
process.strings[string['id']] = string['string']
for node in maps['nodes']:
node_v1 = {}
node_v1['name'] = process.strings[node['name_sid']]
if 'parent' in node:
node_v1['parent'] = node['parent']
process.stackframes[node['id']] = node_v1
for t in maps['types']:
process.types[t['id']] = process.strings[t['name_sid']]
# Get the first memory dump.
if not process.allocators:
dump = event['args']['dumps']
process.version = 2
process.allocators = dump['heaps_v2']['allocators']
# Remove processes with incomplete memory dump.
  for pid, process in list(processes.items()):
if not (process.allocators and process.stackframes and process.types):
del processes[pid]
return processes
def ResolveMemoryDumpFields(entries, stackframes, types):
def ResolveStackTrace(stack_id, stackframes):
stackframe = stackframes[stack_id]
tail = ()
if 'parent' in stackframe:
tail = ResolveStackTrace(stackframe['parent'], stackframes)
name = stackframe['name'].replace('\r', '').replace('\n', '')
return (name,) + tail
def ResolveType(type_id, types):
return types[type_id]
for entry in entries:
    # Stackframe may be -1 (18446744073709551615L) when no stackframes are
    # available.
if entry.stackframe not in stackframes:
entry.stackframe = []
else:
entry.stackframe = ResolveStackTrace(entry.stackframe, stackframes)
entry.type = ResolveType(entry.type, types)
def IncrementHeapEntry(stack, count, size, typename, root):
if not stack:
root['count'] += count
root['size'] += size
if typename not in root['count_by_type']:
root['count_by_type'][typename] = 0
root['count_by_type'][typename] += count
else:
top = stack[-1]
tail = stack[:-1]
if top not in root['children']:
new_node = {}
new_node['count'] = 0
new_node['size'] = 0
new_node['children'] = {}
new_node['count_by_type'] = {}
root['children'][top] = new_node
IncrementHeapEntry(tail, count, size, typename, root['children'][top])
def CanonicalHeapEntries(root):
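  # Entries arrive with cumulative counts/sizes; subtract the totals attributed
  # to children so each node keeps only its self-allocated objects, both
  # overall and per type, then recurse into the children.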
total_count = 0
total_size = 0
for child in six.itervalues(root['children']):
total_count += child['count']
total_size += child['size']
root['count'] -= total_count
root['size'] -= total_size
for typename in root['count_by_type']:
total_count_for_type = 0
for child in six.itervalues(root['children']):
if typename in child['count_by_type']:
total_count_for_type += child['count_by_type'][typename]
root['count_by_type'][typename] -= total_count_for_type
for child in six.itervalues(root['children']):
CanonicalHeapEntries(child)
def FindLeaks(root, stack, leaks, threshold, size_threshold):
for frame in root['children']:
FindLeaks(root['children'][frame], [frame] + stack, leaks, threshold,
size_threshold)
if root['count'] > threshold and root['size'] > size_threshold:
leaks.append({'count': root['count'],
'size': root['size'],
'count_by_type': root['count_by_type'],
'stackframes': stack})
def DumpTree(root, frame, output, threshold, size_threshold):
output.write('\n{ \"name\": \"%s\",' % frame)
  if root['count'] > threshold and root['size'] > size_threshold:
output.write(' \"size\": \"%s\",' % root['size'])
output.write(' \"count\": \"%s\",' % root['count'])
output.write(' \"children\": [')
is_first = True
for child_frame, child in root['children'].items():
if is_first:
is_first = False
else:
output.write(',')
DumpTree(child, child_frame, output, threshold, size_threshold)
output.write(']')
output.write('}')
def GetEntries(heap, process):
"""
Returns all entries in a heap, after filtering out unknown entries, and doing
some post processing to extract the relevant fields.
"""
if not process:
return []
entries = []
if process.version == 1:
for raw_entry in process.allocators[heap]['entries']:
# Cumulative sizes and types are skipped. see:
# https://chromium.googlesource.com/chromium/src/+/a990af190304be5bf38b120799c594df5a293518/base/trace_event/heap_profiler_heap_dump_writer.cc#294
if 'type' not in raw_entry or not raw_entry['bt']:
continue
entry = Entry()
entry.count = int(raw_entry['count'], 16)
entry.size = int(raw_entry['size'], 16)
entry.type = int(raw_entry['type'])
entry.stackframe = int(raw_entry['bt'])
entries.append(entry)
elif process.version == 2:
raw_entries = list(zip(process.allocators[heap]['counts'],
process.allocators[heap]['sizes'],
process.allocators[heap]['types'],
process.allocators[heap]['nodes']))
for (raw_count, raw_size, raw_type, raw_stackframe) in raw_entries:
entry = Entry()
entry.count = raw_count
entry.size = raw_size
entry.type = raw_type
entry.stackframe = raw_stackframe
entries.append(entry)
# Resolve fields by looking into indexes
ResolveMemoryDumpFields(entries, process.stackframes, process.types)
return entries
def FilterProcesses(processes, filter_by_name, filter_by_labels):
remaining_processes = {}
for pid, process in six.iteritems(processes):
if filter_by_name and process.name != filter_by_name:
continue
if (filter_by_labels and
(not process.labels or filter_by_labels not in process.labels)):
continue
remaining_processes[pid] = process
return remaining_processes
def FindRelevantProcesses(start_trace, end_trace,
filter_by_name,
filter_by_labels,
match_by_labels):
# Retrieve the processes and the associated memory dump.
end_processes = FindMemoryDumps(end_trace)
end_processes = FilterProcesses(end_processes, filter_by_name,
filter_by_labels)
start_processes = None
if start_trace:
start_processes = FindMemoryDumps(start_trace)
start_processes = FilterProcesses(start_processes, filter_by_name,
filter_by_labels)
# Build a sequence of pair of processes to be compared.
processes = []
if not start_processes:
# Only keep end-processes.
for _, end_process in six.iteritems(end_processes):
processes.append((None, end_process))
elif match_by_labels:
# Processes are paired based on name/labels.
for _, end_process in six.iteritems(end_processes):
matching_start_process = None
for _, start_process in six.iteritems(start_processes):
if (start_process.name == end_process.name and
(start_process.name in ['Browser', 'GPU'] or
start_process.labels == end_process.labels)):
matching_start_process = start_process
if matching_start_process:
processes.append((matching_start_process, end_process))
else:
# Processes are paired based on their PID.
relevant_pids = set(end_processes.keys()) & set(start_processes.keys())
for pid in relevant_pids:
start_process = start_processes[pid]
end_process = end_processes[pid]
processes.append((start_process, end_process))
return processes
def BuildGraphDumps(processes, threshold, size_threshold):
"""
  Build graphs for a sequence of pairs of processes.
If start_process is None, counts objects in end_trace.
Otherwise, counts objects present in end_trace, but not in start_process.
"""
graph_dumps = []
for (start_process, end_process) in processes:
pid = end_process.pid
name = end_process.name if end_process.name else ''
labels = end_process.labels if end_process.labels else ''
print('Process[%d] %s: %s' % (pid, name, labels))
for heap in end_process.allocators:
start_entries = GetEntries(heap, start_process)
end_entries = GetEntries(heap, end_process)
graph = GraphDump()
graph.pid = pid
graph.name = name
graph.labels = labels
graph.heap = heap
graph_dumps.append(graph)
# Do the math: diffing start and end memory dumps.
root = {}
root['count'] = 0
root['size'] = 0
root['children'] = {}
root['count_by_type'] = {}
for entry in start_entries:
if entry.type:
IncrementHeapEntry(entry.stackframe, - entry.count, - entry.size,
entry.type, root)
for entry in end_entries:
if entry.type:
IncrementHeapEntry(entry.stackframe, entry.count, entry.size,
entry.type, root)
CanonicalHeapEntries(root)
graph.root = root
# Find leaks
leaks = []
FindLeaks(root, [], leaks, threshold, size_threshold)
leaks.sort(reverse=True, key=lambda k: k['size'])
if leaks:
print(' %s: %d potential leaks found.' % (heap, len(leaks)))
graph.leaks = leaks
graph.leak_stackframes = len(leaks)
for leak in leaks:
graph.leak_objects += leak['count']
return graph_dumps
def WritePotentialLeaks(graph_dumps):
for graph in graph_dumps:
if graph.leaks:
filename = 'process_%d_%s-leaks.json' % (graph.pid, graph.heap)
output_filename = os.path.join(_OUTPUT_DIR, filename)
with open(output_filename, 'w') as output:
json.dump(graph.leaks, output)
def WriteGraphDumps(graph_dumps, threshold, size_threshold):
for graph in graph_dumps:
# Dump the remaining allocated objects tree.
filename = 'process_%d_%s-objects.json' % (graph.pid, graph.heap)
output_filename = os.path.join(_OUTPUT_GRAPH_DIR, filename)
if graph.root:
with open(output_filename, 'w') as output:
DumpTree(graph.root, '.', output, threshold, size_threshold)
graph.root = filename
def WriteIndex(graph_dumps):
output_filename = os.path.join(_OUTPUT_GRAPH_DIR, 'index.json')
with open(output_filename, 'w') as output:
json.dump([
{'pid': graph.pid,
'heap': graph.heap,
'name': graph.name,
'labels': graph.labels,
'objects': graph.root,
'potential leaks': graph.leak_stackframes,
'objects leaked': graph.leak_objects,
}
for graph in graph_dumps], output)
def WriteHTML():
# Copy the HTML page.
source = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'diff_heap_profiler.html')
destination = os.path.join(_OUTPUT_GRAPH_DIR, 'index.html')
shutil.copyfile(source, destination)
# Copy the D3 library file.
source = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.pardir,
os.path.pardir,
os.path.pardir,
'tracing',
'third_party',
'd3',
'd3.min.js')
destination = os.path.join(_OUTPUT_GRAPH_DIR, 'd3.min.js')
shutil.copyfile(source, destination)
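# Example invocation (editor's sketch; trace file names are hypothetical):
#
#   python diff_heap_profiler.py --flame-graph start_trace.json.gz end_trace.json.gz
#
# With two traces, objects present in the end trace but not in the start trace
# are counted; with a single trace, all of its allocations are counted. Results
# are written under ./output (and ./output/graph when --flame-graph is given).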
def Main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--flame-graph',
action='store_true',
help='Output a flame graph based on stackframe allocations')
parser.add_argument(
'--threshold',
type=int,
default=0,
help='Objects threshold for being a potential memory leak')
parser.add_argument(
'--size-threshold',
type=int,
default=0,
help='Size threshold for being a potential memory leak')
parser.add_argument(
'--filter-by-name',
type=str,
help='Only keep processes with name (i.e. Browser, Renderer, ...)')
parser.add_argument(
'--filter-by-labels',
type=str,
help='Only keep processes with matching labels')
parser.add_argument(
'--match-by-labels',
action='store_true',
help='Match processes between runs by labels')
parser.add_argument(
'trace',
nargs='+',
help='Trace files to be processed')
options = parser.parse_args()
if options.threshold == 0 and options.size_threshold == 0:
options.threshold = 1000
if len(options.trace) == 1:
end_trace = options.trace[0]
start_trace = None
else:
start_trace = options.trace[0]
end_trace = options.trace[1]
if not os.path.exists(_OUTPUT_DIR):
os.makedirs(_OUTPUT_DIR)
# Find relevant processes to be processed.
processes = FindRelevantProcesses(start_trace, end_trace,
options.filter_by_name,
options.filter_by_labels,
options.match_by_labels)
graph_dumps = BuildGraphDumps(processes, options.threshold,
options.size_threshold)
WritePotentialLeaks(graph_dumps)
if options.flame_graph:
if not os.path.exists(_OUTPUT_GRAPH_DIR):
os.makedirs(_OUTPUT_GRAPH_DIR)
    WriteGraphDumps(graph_dumps, options.threshold, options.size_threshold)
WriteIndex(graph_dumps)
WriteHTML()
if __name__ == '__main__':
Main()
|
differential_privacy/run_federated.py | HanGuo97/federated | 330 | 5518 | # Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs federated training with differential privacy on various tasks."""
import functools
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
import tensorflow_federated as tff
from utils import task_utils
from utils import training_utils
from utils import utils_impl
from utils.optimizers import optimizer_utils
with utils_impl.record_hparam_flags() as optimizer_flags:
# Defining optimizer flags
optimizer_utils.define_optimizer_flags('client')
optimizer_utils.define_optimizer_flags('server')
with utils_impl.record_hparam_flags() as shared_flags:
# Federated training hyperparameters
flags.DEFINE_integer('client_epochs_per_round', 1,
'Number of epochs in the client to take per round.')
flags.DEFINE_integer('client_batch_size', 20, 'Batch size on the clients.')
flags.DEFINE_integer('clients_per_round', 10,
'How many clients to sample per round.')
flags.DEFINE_integer('client_datasets_random_seed', 1,
'Random seed for client sampling.')
flags.DEFINE_integer(
'max_elements_per_client', None, 'Maximum number of '
'elements for each training client. If set to None, all '
'available examples are used.')
# Training loop configuration
flags.DEFINE_integer('total_rounds', 200, 'Number of total training rounds.')
flags.DEFINE_string(
'experiment_name', None, 'The name of this experiment. Will be append to '
'--root_output_dir to separate experiment results.')
flags.DEFINE_string('root_output_dir', '/tmp/fed_opt/',
'Root directory for writing experiment output.')
flags.DEFINE_integer(
'rounds_per_eval', 1,
'How often to evaluate the global model on the validation dataset.')
flags.DEFINE_integer(
      'num_validation_examples', -1, 'The number of validation '
'examples to use. If set to -1, all available examples '
'are used.')
flags.DEFINE_integer('rounds_per_checkpoint', 50,
'How often to checkpoint the global model.')
with utils_impl.record_hparam_flags() as dp_flags:
# Differential privacy flags
flags.DEFINE_float(
'clip', None, 'Clip value for fixed clipping or initial clip for '
'adaptive clipping. If None, no clipping is used.')
flags.DEFINE_float('noise_multiplier', None,
'Noise multiplier. If None, non-DP aggregator is used.')
flags.DEFINE_float(
'adaptive_clip_learning_rate', None, 'Adaptive clip learning rate. If '
'None, clip adaptation is not used.')
flags.DEFINE_float('target_unclipped_quantile', 0.5,
'Target unclipped quantile.')
flags.DEFINE_boolean('uniform_weighting', False,
'Whether to weigh clients uniformly.')
# Task specification
with utils_impl.record_hparam_flags() as task_flags:
task_utils.define_task_flags()
FLAGS = flags.FLAGS
def _write_hparam_flags():
"""Returns an ordered dictionary of pertinent hyperparameter flags."""
hparam_dict = utils_impl.lookup_flag_values(shared_flags)
# Update with optimizer flags corresponding to the chosen optimizers.
opt_flag_dict = utils_impl.lookup_flag_values(optimizer_flags)
opt_flag_dict = optimizer_utils.remove_unused_flags('client', opt_flag_dict)
opt_flag_dict = optimizer_utils.remove_unused_flags('server', opt_flag_dict)
hparam_dict.update(opt_flag_dict)
# Update with task flags
task_flag_dict = utils_impl.lookup_flag_values(task_flags)
hparam_dict.update(task_flag_dict)
training_utils.write_hparams_to_csv(hparam_dict, FLAGS.root_output_dir,
FLAGS.experiment_name)
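# Example invocation (editor's sketch; --task values come from
# task_utils.define_task_flags and the optimizer flags from
# optimizer_utils.define_optimizer_flags, so any flag names defined there are
# assumptions here):
#
#   python run_federated.py --task=shakespeare_character \
#       --total_rounds=100 --clients_per_round=50 --uniform_weighting \
#       --clip=0.1 --noise_multiplier=0.5 \
#       --experiment_name=dp_shakespeare --root_output_dir=/tmp/fed_opt
#
# Setting --noise_multiplier enables the Gaussian DP aggregator; leaving it
# unset while --clip is set gives clipping-only (non-DP) aggregation.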
def main(argv):
if len(argv) > 1:
raise app.UsageError('Expected no command-line arguments, '
'got: {}'.format(argv))
client_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('client')
server_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('server')
train_client_spec = tff.simulation.baselines.ClientSpec(
num_epochs=FLAGS.client_epochs_per_round,
batch_size=FLAGS.client_batch_size,
max_elements=FLAGS.max_elements_per_client)
task = task_utils.create_task_from_flags(train_client_spec)
logging.info('Trainable weights:')
for weight in task.model_fn().trainable_variables:
logging.info('name: %s shape: %s', weight.name, weight.shape)
if FLAGS.uniform_weighting:
client_weighting = tff.learning.ClientWeighting.UNIFORM
elif FLAGS.task == 'shakespeare_character' or FLAGS.task == 'stackoverflow_word':
def client_weighting(local_outputs):
return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)
else:
client_weighting = None
if FLAGS.noise_multiplier is None:
if FLAGS.uniform_weighting:
aggregation_factory = tff.aggregators.UnweightedMeanFactory()
else:
aggregation_factory = tff.aggregators.MeanFactory()
if FLAGS.clip is not None:
if FLAGS.clip <= 0:
raise ValueError('clip must be positive if clipping is enabled.')
if FLAGS.adaptive_clip_learning_rate is None:
clip = FLAGS.clip
else:
if FLAGS.adaptive_clip_learning_rate <= 0:
raise ValueError('adaptive_clip_learning_rate must be positive if '
'adaptive clipping is enabled.')
clip = tff.aggregators.PrivateQuantileEstimationProcess.no_noise(
initial_estimate=FLAGS.clip,
target_quantile=FLAGS.target_unclipped_quantile,
learning_rate=FLAGS.adaptive_clip_learning_rate)
aggregation_factory = tff.aggregators.clipping_factory(
clip, aggregation_factory)
else:
if not FLAGS.uniform_weighting:
raise ValueError(
'Differential privacy is only implemented for uniform weighting.')
if FLAGS.noise_multiplier <= 0:
raise ValueError('noise_multiplier must be positive if DP is enabled.')
if FLAGS.clip is None or FLAGS.clip <= 0:
raise ValueError('clip must be positive if DP is enabled.')
if FLAGS.adaptive_clip_learning_rate is None:
aggregation_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_fixed(
noise_multiplier=FLAGS.noise_multiplier,
clients_per_round=FLAGS.clients_per_round,
clip=FLAGS.clip)
else:
if FLAGS.adaptive_clip_learning_rate <= 0:
raise ValueError('adaptive_clip_learning_rate must be positive if '
'adaptive clipping is enabled.')
aggregation_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_adaptive(
noise_multiplier=FLAGS.noise_multiplier,
clients_per_round=FLAGS.clients_per_round,
initial_l2_norm_clip=FLAGS.clip,
target_unclipped_quantile=FLAGS.target_unclipped_quantile,
learning_rate=FLAGS.adaptive_clip_learning_rate)
iterative_process = tff.learning.build_federated_averaging_process(
model_fn=task.model_fn,
server_optimizer_fn=server_optimizer_fn,
client_weighting=client_weighting,
client_optimizer_fn=client_optimizer_fn,
model_update_aggregation_factory=aggregation_factory)
train_data = task.datasets.train_data.preprocess(
task.datasets.train_preprocess_fn)
training_process = (
tff.simulation.compose_dataset_computation_with_iterative_process(
train_data.dataset_computation, iterative_process))
training_selection_fn = functools.partial(
tff.simulation.build_uniform_sampling_fn(
train_data.client_ids, random_seed=FLAGS.client_datasets_random_seed),
size=FLAGS.clients_per_round)
test_data = task.datasets.get_centralized_test_data()
validation_data = test_data.take(FLAGS.num_validation_examples)
federated_eval = tff.learning.build_federated_evaluation(task.model_fn)
evaluation_selection_fn = lambda round_num: [validation_data]
def evaluation_fn(state, evaluation_data):
return federated_eval(state.model, evaluation_data)
program_state_manager, metrics_managers = training_utils.create_managers(
FLAGS.root_output_dir, FLAGS.experiment_name)
_write_hparam_flags()
state = tff.simulation.run_training_process(
training_process=training_process,
training_selection_fn=training_selection_fn,
total_rounds=FLAGS.total_rounds,
evaluation_fn=evaluation_fn,
evaluation_selection_fn=evaluation_selection_fn,
rounds_per_evaluation=FLAGS.rounds_per_eval,
program_state_manager=program_state_manager,
rounds_per_saving_program_state=FLAGS.rounds_per_checkpoint,
metrics_managers=metrics_managers)
test_metrics = federated_eval(state.model, [test_data])
for metrics_manager in metrics_managers:
metrics_manager.release(test_metrics, FLAGS.total_rounds + 1)
if __name__ == '__main__':
app.run(main)
|
designate-8.0.0/designate/tests/test_api/test_v2/test_limits.py | scottwedge/OpenStack-Stein | 145 | 5520 | <reponame>scottwedge/OpenStack-Stein
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from designate.tests.test_api.test_v2 import ApiV2TestCase
class ApiV2LimitsTest(ApiV2TestCase):
def test_get_limits(self):
response = self.client.get('/limits/')
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('max_zones', response.json)
self.assertIn('max_zone_records', response.json)
self.assertIn('max_zone_recordsets',
response.json)
self.assertIn('max_recordset_records',
response.json)
self.assertIn('min_ttl', response.json)
self.assertIn('max_zone_name_length',
response.json)
self.assertIn('max_recordset_name_length',
response.json)
self.assertIn('max_page_limit',
response.json)
absolutelimits = response.json
self.assertEqual(cfg.CONF.quota_zones, absolutelimits['max_zones'])
self.assertEqual(cfg.CONF.quota_zone_records,
absolutelimits['max_zone_recordsets'])
self.assertEqual(cfg.CONF['service:central'].min_ttl,
absolutelimits['min_ttl'])
self.assertEqual(cfg.CONF['service:central'].max_zone_name_len,
absolutelimits['max_zone_name_length'])
self.assertEqual(cfg.CONF['service:central'].max_recordset_name_len,
absolutelimits['max_recordset_name_length'])
self.assertEqual(cfg.CONF['service:api'].max_limit_v2,
absolutelimits['max_page_limit'])
|
tests/test_sync_module.py | naveengh6/blinkpy | 272 | 5531 | <reponame>naveengh6/blinkpy
"""Tests camera and system functions."""
import unittest
from unittest import mock
from blinkpy.blinkpy import Blink
from blinkpy.helpers.util import BlinkURLHandler
from blinkpy.sync_module import BlinkSyncModule, BlinkOwl
from blinkpy.camera import BlinkCamera, BlinkCameraMini
@mock.patch("blinkpy.auth.Auth.query")
class TestBlinkSyncModule(unittest.TestCase):
"""Test BlinkSyncModule functions in blinkpy."""
def setUp(self):
"""Set up Blink module."""
self.blink = Blink(motion_interval=0)
self.blink.last_refresh = 0
self.blink.urls = BlinkURLHandler("test")
self.blink.sync["test"] = BlinkSyncModule(self.blink, "test", "1234", [])
self.camera = BlinkCamera(self.blink.sync)
self.mock_start = [
{
"syncmodule": {
"id": 1234,
"network_id": 5678,
"serial": "12345678",
"status": "foobar",
}
},
{"event": True},
{},
{},
None,
{"devicestatus": {}},
]
self.blink.sync["test"].network_info = {"network": {"armed": True}}
def tearDown(self):
"""Clean up after test."""
self.blink = None
self.camera = None
self.mock_start = None
def test_bad_status(self, mock_resp):
"""Check that we mark module unavaiable on bad status."""
self.blink.sync["test"].status = None
self.blink.sync["test"].available = True
self.assertFalse(self.blink.sync["test"].online)
self.assertFalse(self.blink.sync["test"].available)
def test_bad_arm(self, mock_resp):
"""Check that we mark module unavaiable if bad arm status."""
self.blink.sync["test"].network_info = None
self.blink.sync["test"].available = True
self.assertEqual(self.blink.sync["test"].arm, None)
self.assertFalse(self.blink.sync["test"].available)
self.blink.sync["test"].network_info = {}
self.blink.sync["test"].available = True
self.assertEqual(self.blink.sync["test"].arm, None)
self.assertFalse(self.blink.sync["test"].available)
def test_get_events(self, mock_resp):
"""Test get events function."""
mock_resp.return_value = {"event": True}
self.assertEqual(self.blink.sync["test"].get_events(), True)
def test_get_events_fail(self, mock_resp):
"""Test handling of failed get events function."""
mock_resp.return_value = None
self.assertFalse(self.blink.sync["test"].get_events())
mock_resp.return_value = {}
self.assertFalse(self.blink.sync["test"].get_events())
def test_get_camera_info(self, mock_resp):
"""Test get camera info function."""
mock_resp.return_value = {"camera": ["foobar"]}
self.assertEqual(self.blink.sync["test"].get_camera_info("1234"), "foobar")
def test_get_camera_info_fail(self, mock_resp):
"""Test handling of failed get camera info function."""
mock_resp.return_value = None
self.assertEqual(self.blink.sync["test"].get_camera_info("1"), {})
mock_resp.return_value = {}
self.assertEqual(self.blink.sync["test"].get_camera_info("1"), {})
mock_resp.return_value = {"camera": None}
self.assertEqual(self.blink.sync["test"].get_camera_info("1"), {})
def test_get_network_info(self, mock_resp):
"""Test network retrieval."""
mock_resp.return_value = {"network": {"sync_module_error": False}}
self.assertTrue(self.blink.sync["test"].get_network_info())
mock_resp.return_value = {"network": {"sync_module_error": True}}
self.assertFalse(self.blink.sync["test"].get_network_info())
def test_get_network_info_failure(self, mock_resp):
"""Test failed network retrieval."""
mock_resp.return_value = {}
self.blink.sync["test"].available = True
self.assertFalse(self.blink.sync["test"].get_network_info())
self.assertFalse(self.blink.sync["test"].available)
self.blink.sync["test"].available = True
mock_resp.return_value = None
self.assertFalse(self.blink.sync["test"].get_network_info())
self.assertFalse(self.blink.sync["test"].available)
def test_check_new_videos_startup(self, mock_resp):
"""Test that check_new_videos does not block startup."""
sync_module = self.blink.sync["test"]
self.blink.last_refresh = None
self.assertFalse(sync_module.check_new_videos())
def test_check_new_videos(self, mock_resp):
"""Test recent video response."""
mock_resp.return_value = {
"media": [
{
"device_name": "foo",
"media": "/foo/bar.mp4",
"created_at": "1990-01-01T00:00:00+00:00",
}
]
}
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.blink.last_refresh = 0
self.assertEqual(sync_module.motion, {})
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(
sync_module.last_record["foo"],
{"clip": "/foo/bar.mp4", "time": "1990-01-01T00:00:00+00:00"},
)
self.assertEqual(sync_module.motion, {"foo": True})
mock_resp.return_value = {"media": []}
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": False})
self.assertEqual(
sync_module.last_record["foo"],
{"clip": "/foo/bar.mp4", "time": "1990-01-01T00:00:00+00:00"},
)
def test_check_new_videos_old_date(self, mock_resp):
"""Test videos return response with old date."""
mock_resp.return_value = {
"media": [
{
"device_name": "foo",
"media": "/foo/bar.mp4",
"created_at": "1970-01-01T00:00:00+00:00",
}
]
}
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.blink.last_refresh = 1000
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": False})
def test_check_no_motion_if_not_armed(self, mock_resp):
"""Test that motion detection is not set if module unarmed."""
mock_resp.return_value = {
"media": [
{
"device_name": "foo",
"media": "/foo/bar.mp4",
"created_at": "1990-01-01T00:00:00+00:00",
}
]
}
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.blink.last_refresh = 1000
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": True})
sync_module.network_info = {"network": {"armed": False}}
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": False})
def test_check_multiple_videos(self, mock_resp):
"""Test motion found even with multiple videos."""
mock_resp.return_value = {
"media": [
{
"device_name": "foo",
"media": "/foo/bar.mp4",
"created_at": "1970-01-01T00:00:00+00:00",
},
{
"device_name": "foo",
"media": "/bar/foo.mp4",
"created_at": "1990-01-01T00:00:00+00:00",
},
{
"device_name": "foo",
"media": "/foobar.mp4",
"created_at": "1970-01-01T00:00:01+00:00",
},
]
}
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.blink.last_refresh = 1000
self.assertTrue(sync_module.check_new_videos())
self.assertEqual(sync_module.motion, {"foo": True})
expected_result = {
"foo": {"clip": "/bar/foo.mp4", "time": "1990-01-01T00:00:00+00:00"}
}
self.assertEqual(sync_module.last_record, expected_result)
def test_check_new_videos_failed(self, mock_resp):
"""Test method when response is unexpected."""
mock_resp.side_effect = [None, "just a string", {}]
sync_module = self.blink.sync["test"]
sync_module.cameras = {"foo": None}
sync_module.motion["foo"] = True
self.assertFalse(sync_module.check_new_videos())
self.assertFalse(sync_module.motion["foo"])
sync_module.motion["foo"] = True
self.assertFalse(sync_module.check_new_videos())
self.assertFalse(sync_module.motion["foo"])
sync_module.motion["foo"] = True
self.assertFalse(sync_module.check_new_videos())
self.assertFalse(sync_module.motion["foo"])
def test_sync_start(self, mock_resp):
"""Test sync start function."""
mock_resp.side_effect = self.mock_start
self.blink.sync["test"].start()
self.assertEqual(self.blink.sync["test"].name, "test")
self.assertEqual(self.blink.sync["test"].sync_id, 1234)
self.assertEqual(self.blink.sync["test"].network_id, 5678)
self.assertEqual(self.blink.sync["test"].serial, "12345678")
self.assertEqual(self.blink.sync["test"].status, "foobar")
def test_unexpected_summary(self, mock_resp):
"""Test unexpected summary response."""
self.mock_start[0] = None
mock_resp.side_effect = self.mock_start
self.assertFalse(self.blink.sync["test"].start())
def test_summary_with_no_network_id(self, mock_resp):
"""Test handling of bad summary."""
self.mock_start[0]["syncmodule"] = None
mock_resp.side_effect = self.mock_start
self.assertFalse(self.blink.sync["test"].start())
def test_summary_with_only_network_id(self, mock_resp):
"""Test handling of sparse summary."""
self.mock_start[0]["syncmodule"] = {"network_id": 8675309}
mock_resp.side_effect = self.mock_start
self.blink.sync["test"].start()
self.assertEqual(self.blink.sync["test"].network_id, 8675309)
def test_unexpected_camera_info(self, mock_resp):
"""Test unexpected camera info response."""
self.blink.sync["test"].cameras["foo"] = None
self.mock_start[5] = None
mock_resp.side_effect = self.mock_start
self.blink.sync["test"].start()
self.assertEqual(self.blink.sync["test"].cameras, {"foo": None})
def test_missing_camera_info(self, mock_resp):
"""Test missing key from camera info response."""
self.blink.sync["test"].cameras["foo"] = None
self.mock_start[5] = {}
self.blink.sync["test"].start()
self.assertEqual(self.blink.sync["test"].cameras, {"foo": None})
def test_sync_attributes(self, mock_resp):
"""Test sync attributes."""
self.assertEqual(self.blink.sync["test"].attributes["name"], "test")
self.assertEqual(self.blink.sync["test"].attributes["network_id"], "1234")
def test_owl_start(self, mock_resp):
"""Test owl camera instantiation."""
response = {
"name": "foo",
"id": 2,
"serial": "foobar123",
"enabled": True,
"network_id": 1,
"thumbnail": "/foo/bar",
}
self.blink.last_refresh = None
self.blink.homescreen = {"owls": [response]}
owl = BlinkOwl(self.blink, "foo", 1234, response)
self.assertTrue(owl.start())
self.assertTrue("foo" in owl.cameras)
self.assertEqual(owl.cameras["foo"].__class__, BlinkCameraMini)
|
python/flexflow_cffi_build.py | zmxdream/FlexFlow | 455 | 5561 | <filename>python/flexflow_cffi_build.py
#!/usr/bin/env python
# Copyright 2020 Stanford University, Los Alamos National Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import subprocess
def find_flexflow_header(ffhome_dir):
def try_prefix(prefix_dir):
flexflow_ch_path = os.path.join(prefix_dir, 'python', 'flexflow_c.h')
flexflow_cxxh_path = os.path.join(prefix_dir, 'include', 'model.h')
if os.path.exists(flexflow_ch_path) and os.path.exists(flexflow_cxxh_path):
flexflow_cxxh_dir = os.path.join(prefix_dir, 'include')
return flexflow_cxxh_dir, flexflow_ch_path
result = try_prefix(ffhome_dir)
if result:
return result
    raise Exception('Unable to locate the flexflow_c.h and model.h header files')
def build(output_dir, libname, ffhome_dir):
flexflow_cxxh_dir, flexflow_ch_path = find_flexflow_header(ffhome_dir)
header = subprocess.check_output(['gcc', '-I', flexflow_cxxh_dir, '-E', '-P', flexflow_ch_path]).decode('utf-8')
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'flexflow_cffi_header.py.in')) as f:
content = f.read()
content = content.format(header=repr(header), libname=repr(libname))
if output_dir is None:
output_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(output_dir, 'flexflow_cffi_header.py'), 'wb') as f:
f.write(content.encode('utf-8'))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--ffhome-dir', required=True)
parser.add_argument('--libname', required=True)
parser.add_argument('--output-dir', required=False)
args = parser.parse_args()
build(args.output_dir, args.libname, args.ffhome_dir)
|
Python/Tests/TestData/ProjectHomeProjects/Subfolder/ProgramB.py | techkey/PTVS | 404 | 5591 | # ProgramB.py
print('Hello World')
|
ssod/utils/structure_utils.py | huimlight/SoftTeacher | 604 | 5596 | <reponame>huimlight/SoftTeacher<gh_stars>100-1000
import warnings
from collections import Counter
from collections.abc import Mapping, Sequence
from numbers import Number
from typing import Dict, List
import numpy as np
import torch
from mmdet.core.mask.structures import BitmapMasks
from torch.nn import functional as F
_step_counter = Counter()
def list_concat(data_list: List[list]):
if isinstance(data_list[0], torch.Tensor):
return torch.cat(data_list)
else:
endpoint = [d for d in data_list[0]]
for i in range(1, len(data_list)):
endpoint.extend(data_list[i])
return endpoint
def sequence_concat(a, b):
if isinstance(a, Sequence) and isinstance(b, Sequence):
return a + b
else:
return None
def dict_concat(dicts: List[Dict[str, list]]):
return {k: list_concat([d[k] for d in dicts]) for k in dicts[0].keys()}
def dict_fuse(obj_list, reference_obj):
if isinstance(reference_obj, torch.Tensor):
return torch.stack(obj_list)
return obj_list
def dict_select(dict1: Dict[str, list], key: str, value: str):
flag = [v == value for v in dict1[key]]
return {
k: dict_fuse([vv for vv, ff in zip(v, flag) if ff], v) for k, v in dict1.items()
}
def dict_split(dict1, key):
group_names = list(set(dict1[key]))
dict_groups = {k: dict_select(dict1, key, k) for k in group_names}
return dict_groups
def dict_sum(a, b):
if isinstance(a, dict):
assert isinstance(b, dict)
return {k: dict_sum(v, b[k]) for k, v in a.items()}
elif isinstance(a, list):
assert len(a) == len(b)
return [dict_sum(aa, bb) for aa, bb in zip(a, b)]
else:
return a + b
def zero_like(tensor_pack, prefix=""):
if isinstance(tensor_pack, Sequence):
return [zero_like(t) for t in tensor_pack]
elif isinstance(tensor_pack, Mapping):
return {prefix + k: zero_like(v) for k, v in tensor_pack.items()}
elif isinstance(tensor_pack, torch.Tensor):
return tensor_pack.new_zeros(tensor_pack.shape)
elif isinstance(tensor_pack, np.ndarray):
return np.zeros_like(tensor_pack)
else:
warnings.warn("Unexpected data type {}".format(type(tensor_pack)))
return 0
def pad_stack(tensors, shape, pad_value=255):
tensors = torch.stack(
[
F.pad(
tensor,
pad=[0, shape[1] - tensor.shape[1], 0, shape[0] - tensor.shape[0]],
value=pad_value,
)
for tensor in tensors
]
)
return tensors
def result2bbox(result):
num_class = len(result)
bbox = np.concatenate(result)
if bbox.shape[0] == 0:
label = np.zeros(0, dtype=np.uint8)
else:
label = np.concatenate(
[[i] * len(result[i]) for i in range(num_class) if len(result[i]) > 0]
).reshape((-1,))
return bbox, label
def result2mask(result):
num_class = len(result)
mask = [np.stack(result[i]) for i in range(num_class) if len(result[i]) > 0]
if len(mask) > 0:
mask = np.concatenate(mask)
else:
mask = np.zeros((0, 1, 1))
return BitmapMasks(mask, mask.shape[1], mask.shape[2]), None
def sequence_mul(obj, multiplier):
if isinstance(obj, Sequence):
return [o * multiplier for o in obj]
else:
return obj * multiplier
def is_match(word, word_list):
for keyword in word_list:
if keyword in word:
return True
return False
def weighted_loss(loss: dict, weight, ignore_keys=[], warmup=0):
_step_counter["weight"] += 1
lambda_weight = (
lambda x: x * (_step_counter["weight"] - 1) / warmup
if _step_counter["weight"] <= warmup
else x
)
if isinstance(weight, Mapping):
for k, v in weight.items():
for name, loss_item in loss.items():
if (k in name) and ("loss" in name):
loss[name] = sequence_mul(loss[name], lambda_weight(v))
elif isinstance(weight, Number):
for name, loss_item in loss.items():
if "loss" in name:
if not is_match(name, ignore_keys):
loss[name] = sequence_mul(loss[name], lambda_weight(weight))
else:
loss[name] = sequence_mul(loss[name], 0.0)
else:
raise NotImplementedError()
return loss
|
dev/Tools/build/waf-1.7.13/waflib/extras/fc_xlf.py | jeikabu/lumberyard | 1,738 | 5598 | <gh_stars>1000+
#! /usr/bin/env python
# encoding: utf-8
# harald at klimachs.de
import re
from waflib import Utils,Errors
from waflib.Tools import fc,fc_config,fc_scan
from waflib.Configure import conf
from waflib.Tools.compiler_fc import fc_compiler
fc_compiler['aix'].insert(0, 'fc_xlf')
@conf
def find_xlf(conf):
"""Find the xlf program (will look in the environment variable 'FC')"""
fc = conf.find_program(['xlf2003_r', 'xlf2003', 'xlf95_r', 'xlf95', 'xlf90_r', 'xlf90', 'xlf_r', 'xlf'], var='FC')
fc = conf.cmd_to_list(fc)
conf.get_xlf_version(fc)
conf.env.FC_NAME='XLF'
@conf
def xlf_flags(conf):
v = conf.env
v['FCDEFINES_ST'] = '-WF,-D%s'
v['FCFLAGS_fcshlib'] = ['-qpic=small']
v['FCFLAGS_DEBUG'] = ['-qhalt=w']
v['LINKFLAGS_fcshlib'] = ['-Wl,-shared']
@conf
def xlf_modifier_platform(conf):
dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform()
xlf_modifier_func = getattr(conf, 'xlf_modifier_' + dest_os, None)
if xlf_modifier_func:
xlf_modifier_func()
@conf
def get_xlf_version(conf, fc):
"""Get the compiler version"""
cmd = fc + ['-qversion']
try:
out, err = conf.cmd_and_log(cmd, output=0)
except Errors.WafError:
conf.fatal('Could not find xlf %r' % cmd)
for v in (r"IBM XL Fortran.* V(?P<major>\d*)\.(?P<minor>\d*)",):
version_re = re.compile(v, re.I).search
match = version_re(out or err)
if match:
k = match.groupdict()
conf.env['FC_VERSION'] = (k['major'], k['minor'])
break
else:
conf.fatal('Could not determine the XLF version.')
def configure(conf):
conf.find_xlf()
conf.find_ar()
conf.fc_flags()
conf.fc_add_flags()
conf.xlf_flags()
conf.xlf_modifier_platform()
|
Codes/gracekoo/test.py | ghoslation/algorithm | 256 | 5606 | # -*- coding: utf-8 -*-
# @Time: 2020/11/8 23:47
# @Author: GraceKoo
# @File: test.py
# @Desc:
from threading import Thread
import time
def print_numbers():
time.sleep(0.2)
print("子线程结束")
if __name__ == "__main__":
t1 = Thread(target=print_numbers)
t1.setDaemon(True)
t1.start()
# print("主线程结束")
|
layers/gin_layer.py | JakeStevens/benchmarking-gnns | 275 | 5624 | <reponame>JakeStevens/benchmarking-gnns<gh_stars>100-1000
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (<NAME>, <NAME>, <NAME> and <NAME>, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
class GINLayer(nn.Module):
"""
[!] code adapted from dgl implementation of GINConv
Parameters
----------
apply_func : callable activation function/layer or None
If not None, apply this function to the updated node feature,
the :math:`f_\Theta` in the formula.
aggr_type :
Aggregator type to use (``sum``, ``max`` or ``mean``).
out_dim :
        Required for batch norm layer; should match out_dim of apply_func if not None.
dropout :
Required for dropout of output features.
graph_norm :
boolean flag for output features normalization w.r.t. graph sizes.
batch_norm :
boolean flag for batch_norm layer.
residual :
boolean flag for using residual connection.
init_eps : optional
Initial :math:`\epsilon` value, default: ``0``.
learn_eps : bool, optional
If True, :math:`\epsilon` will be a learnable parameter.
"""
def __init__(self, apply_func, aggr_type, dropout, graph_norm, batch_norm, residual=False, init_eps=0, learn_eps=False):
super().__init__()
self.apply_func = apply_func
if aggr_type == 'sum':
self._reducer = fn.sum
elif aggr_type == 'max':
self._reducer = fn.max
elif aggr_type == 'mean':
self._reducer = fn.mean
else:
raise KeyError('Aggregator type {} not recognized.'.format(aggr_type))
self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.residual = residual
self.dropout = dropout
in_dim = apply_func.mlp.input_dim
out_dim = apply_func.mlp.output_dim
if in_dim != out_dim:
self.residual = False
# to specify whether eps is trainable or not.
if learn_eps:
self.eps = torch.nn.Parameter(torch.FloatTensor([init_eps]))
else:
self.register_buffer('eps', torch.FloatTensor([init_eps]))
self.bn_node_h = nn.BatchNorm1d(out_dim)
def forward(self, g, h, snorm_n):
h_in = h # for residual connection
g = g.local_var()
g.ndata['h'] = h
g.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh'))
h = (1 + self.eps) * h + g.ndata['neigh']
if self.apply_func is not None:
h = self.apply_func(h)
if self.graph_norm:
            h = h * snorm_n  # normalize activation w.r.t. graph size
if self.batch_norm:
h = self.bn_node_h(h) # batch normalization
h = F.relu(h) # non-linear activation
if self.residual:
h = h_in + h # residual connection
h = F.dropout(h, self.dropout, training=self.training)
return h
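# Hedged usage sketch (editorial addition, not part of the original layer file):
# builds a single GINLayer on a toy DGL graph. The graph, feature sizes and
# hyper-parameters below are arbitrary illustrative choices.
def _gin_layer_usage_sketch():
    import dgl
    g = dgl.graph(([0, 1, 2], [1, 2, 0]))   # 3-node toy graph
    h = torch.randn(3, 16)                  # node features, in_dim = 16
    snorm_n = torch.ones(3, 1)              # per-node graph-size normalizer
    mlp = MLP(num_layers=2, input_dim=16, hidden_dim=32, output_dim=16)
    layer = GINLayer(ApplyNodeFunc(mlp), aggr_type='sum', dropout=0.0,
                     graph_norm=True, batch_norm=True, residual=True)
    return layer(g, h, snorm_n)             # -> tensor of shape (3, 16)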
class ApplyNodeFunc(nn.Module):
"""
This class is used in class GINNet
Update the node feature hv with MLP
"""
def __init__(self, mlp):
super().__init__()
self.mlp = mlp
def forward(self, h):
h = self.mlp(h)
return h
class MLP(nn.Module):
"""MLP with linear output"""
def __init__(self, num_layers, input_dim, hidden_dim, output_dim):
super().__init__()
self.linear_or_not = True # default is linear model
self.num_layers = num_layers
self.output_dim = output_dim
self.input_dim = input_dim
if num_layers < 1:
raise ValueError("number of layers should be positive!")
elif num_layers == 1:
# Linear model
self.linear = nn.Linear(input_dim, output_dim)
else:
# Multi-layer model
self.linear_or_not = False
self.linears = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
self.linears.append(nn.Linear(input_dim, hidden_dim))
for layer in range(num_layers - 2):
self.linears.append(nn.Linear(hidden_dim, hidden_dim))
self.linears.append(nn.Linear(hidden_dim, output_dim))
for layer in range(num_layers - 1):
self.batch_norms.append(nn.BatchNorm1d((hidden_dim)))
def forward(self, x):
if self.linear_or_not:
# If linear model
return self.linear(x)
else:
# If MLP
h = x
for i in range(self.num_layers - 1):
h = F.relu(self.batch_norms[i](self.linears[i](h)))
            return self.linears[-1](h)
|
lstm-synthetic-wave-anomaly-detect.py | cse-icon-dataAnalytics/lstm-anomaly-detect | 178 | 5631 | <reponame>cse-icon-dataAnalytics/lstm-anomaly-detect
""" Inspired by example from
https://github.com/Vict0rSch/deep_learning/tree/master/keras/recurrent
Uses the TensorFlow backend
The basic idea is to detect anomalies in a time-series.
"""
import matplotlib.pyplot as plt
import numpy as np
import time
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from numpy import arange, sin, pi, random
np.random.seed(1234)
# Global hyper-parameters
sequence_length = 100
random_data_dup = 10 # each sample randomly duplicated between 0 and 9 times, see dropin function
epochs = 1
batch_size = 50
def dropin(X, y):
""" The name suggests the inverse of dropout, i.e. adding more samples. See Data Augmentation section at
http://simaaron.github.io/Estimating-rainfall-from-weather-radar-readings-using-recurrent-neural-networks/
:param X: Each row is a training sequence
    :param y: The target we train on and will later predict
:return: new augmented X, y
"""
print("X shape:", X.shape)
print("y shape:", y.shape)
X_hat = []
y_hat = []
for i in range(0, len(X)):
for j in range(0, np.random.random_integers(0, random_data_dup)):
X_hat.append(X[i, :])
y_hat.append(y[i])
return np.asarray(X_hat), np.asarray(y_hat)
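def _dropin_usage_sketch():
    """ Hedged usage sketch (editorial addition, not part of the original script):
    each row of a toy dataset reappears between 0 and random_data_dup times, so
    X and y stay aligned but the augmented length is random.
    """
    X_toy = np.arange(20, dtype=float).reshape(10, 2)
    y_toy = np.arange(10, dtype=float)
    X_aug, y_aug = dropin(X_toy, y_toy)
    assert len(X_aug) == len(y_aug) <= random_data_dup * len(X_toy)
    return X_aug, y_aug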
def gen_wave():
""" Generate a synthetic wave by adding up a few sine waves and some noise
:return: the final wave
"""
t = np.arange(0.0, 10.0, 0.01)
wave1 = sin(2 * 2 * pi * t)
noise = random.normal(0, 0.1, len(t))
wave1 = wave1 + noise
print("wave1", len(wave1))
wave2 = sin(2 * pi * t)
print("wave2", len(wave2))
t_rider = arange(0.0, 0.5, 0.01)
wave3 = sin(10 * pi * t_rider)
print("wave3", len(wave3))
insert = round(0.8 * len(t))
wave1[insert:insert + 50] = wave1[insert:insert + 50] + wave3
return wave1 + wave2
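def _gen_wave_anomaly_sketch():
    """ Hedged illustration (editorial addition): the injected anomaly occupies the
    50 samples starting at 80% of the series, mirroring the insert index above.
    """
    wave = gen_wave()
    insert = int(round(0.8 * 1000))   # len(t) == 1000 in gen_wave()
    return wave[insert:insert + 50]   # the anomalous window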
def z_norm(result):
result_mean = result.mean()
result_std = result.std()
result -= result_mean
result /= result_std
return result, result_mean
def get_split_prep_data(train_start, train_end,
test_start, test_end):
data = gen_wave()
print("Length of Data", len(data))
# train data
print "Creating train data..."
result = []
for index in range(train_start, train_end - sequence_length):
result.append(data[index: index + sequence_length])
result = np.array(result) # shape (samples, sequence_length)
result, result_mean = z_norm(result)
print "Mean of train data : ", result_mean
print "Train data shape : ", result.shape
train = result[train_start:train_end, :]
np.random.shuffle(train) # shuffles in-place
X_train = train[:, :-1]
y_train = train[:, -1]
X_train, y_train = dropin(X_train, y_train)
# test data
print "Creating test data..."
result = []
for index in range(test_start, test_end - sequence_length):
result.append(data[index: index + sequence_length])
result = np.array(result) # shape (samples, sequence_length)
result, result_mean = z_norm(result)
print "Mean of test data : ", result_mean
print "Test data shape : ", result.shape
X_test = result[:, :-1]
y_test = result[:, -1]
print("Shape X_train", np.shape(X_train))
print("Shape X_test", np.shape(X_test))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
return X_train, y_train, X_test, y_test
def build_model():
model = Sequential()
layers = {'input': 1, 'hidden1': 64, 'hidden2': 256, 'hidden3': 100, 'output': 1}
model.add(LSTM(
input_length=sequence_length - 1,
input_dim=layers['input'],
output_dim=layers['hidden1'],
return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(
layers['hidden2'],
return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(
layers['hidden3'],
return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(
output_dim=layers['output']))
model.add(Activation("linear"))
start = time.time()
model.compile(loss="mse", optimizer="rmsprop")
print "Compilation Time : ", time.time() - start
return model
def run_network(model=None, data=None):
global_start_time = time.time()
if data is None:
        print('Loading data... ')
# train on first 700 samples and test on next 300 samples (has anomaly)
X_train, y_train, X_test, y_test = get_split_prep_data(0, 700, 500, 1000)
else:
X_train, y_train, X_test, y_test = data
    print('\nData Loaded. Compiling...\n')
if model is None:
model = build_model()
try:
print("Training...")
model.fit(
X_train, y_train,
batch_size=batch_size, nb_epoch=epochs, validation_split=0.05)
print("Predicting...")
predicted = model.predict(X_test)
print("Reshaping predicted")
predicted = np.reshape(predicted, (predicted.size,))
except KeyboardInterrupt:
print("prediction exception")
        print('Training duration (s) : ', time.time() - global_start_time)
return model, y_test, 0
try:
plt.figure(1)
plt.subplot(311)
plt.title("Actual Test Signal w/Anomalies")
plt.plot(y_test[:len(y_test)], 'b')
plt.subplot(312)
plt.title("Predicted Signal")
plt.plot(predicted[:len(y_test)], 'g')
plt.subplot(313)
plt.title("Squared Error")
mse = ((y_test - predicted) ** 2)
plt.plot(mse, 'r')
plt.show()
except Exception as e:
print("plotting exception")
        print(str(e))
    print('Training duration (s) : ', time.time() - global_start_time)
return model, y_test, predicted
run_network()
|
wouso/core/security/admin.py | AlexandruGhergut/wouso | 117 | 5671 | from django.contrib import admin
from wouso.core.security.models import Report
admin.site.register(Report)
|
latest/probe.py | Soldie/Nscan-scanner-ip | 574 | 5676 | import time
import Queue
import random
import socket
import struct
import logging
import threading
from convert import *
from protocol import ethernet, ip, tcp, udp
ETH_P_IP = 0x0800 # IP protocol
ETH_P_ALL = 0x0003 # Every packet
NSCRIPT_PATH = 'nscript' # NSCRIPT PATH
PAYLOAD = {
53:('\x5d\x0d\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x06'
'google\x03com\x00\x00\x01\x00\x01'), # 'google.com' DNS Lookup
161:('\x30\x26\x02\x01\x01\x04\x06public\xa1\x19\x02'
'\x04\x56\x9f\x5a\xdd\x02\x01\x00\x02\x01\x00\x30\x0b\x30\x09\x06'
'\x05\x2b\x06\x01\x02\x01\x05\x00'), # SNMP GetNextRequest|public|2c version|1.3.6.1.2.1
123:('\x17\x00\x02\x05'), # NTP systats commands lacks 38 null bytes (just to save bandwidth)
1900:('M-SEARCH * HTTP/1.1\r\nHOST: 192.168.127.12:1900\r\n'
'MAN: "ssdp:discover"\r\nMX: 2\r\nST: ssdp:all\r\n\r\n')
}
class Generator(object):
def __init__(self, size):
self.size = size
self.inc = size/4
if self.inc<1:
self.inc = 1
self.base = -self.inc
self.num = self.base
self.index = 0
def __iter__(self):
return self
def next(self):
if (self.num+self.inc)>=self.size:
self.next_index()
self.next_base()
self.num = self.num + self.inc
return self.num
def next_base(self):
self.base = 0
self.base-= self.index
self.num = self.base
def next_index(self):
self.index+=1
if self.index>=self.inc:
raise StopIteration
def suspend(self):
return self.size, self.inc, self.base, self.num, self.index
def resume(self, size, inc, base, num, index):
self.size = size
self.inc = inc
self.base = base
self.num = num
self.index = index
class ScriptEngine(object):
def __init__(self, imports):
self.imports = imports
self.event = threading.Event()
self.queues = {}
self.thread = []
def Load(self):
for script in self.imports:
q = Queue.Queue()
s = __import__('{}.{}'.format(NSCRIPT_PATH, script),
fromlist=[NSCRIPT_PATH])
t = threading.Thread(target=s.run,
args=(q, self.event))
self.thread.append(t)
t.setDaemon(True)
t.start()
self.queues[script] = q
def Feed(self, host, port):
for scr in self.imports:
for r in self.imports[scr]:
if port in xrange(r[0], r[1]):
self.queues[scr].put((host, port))
break
def Cleanup(self):
while Alive(self.thread):
time.sleep(10)
class nscan(object):
def __init__(self, options):
self.options = options
self.hosts = self.split(options.hosts, options.threads)
self.ports = options.ports
self.srcp = random.randint(1, 65535)#self.PickPort() # source port
self.smac = options.smac
self.dmac = options.dmac
self.ifname = options.ifname
self.siface = options.siface
self.diface = options.diface
self.banner = options.banner
self.count = options.count
self.cooldown = options.cooldown
self.queue = Queue.Queue()
if options.stype.upper()=='U':
self.stype = socket.IPPROTO_UDP
else:
self.stype = socket.IPPROTO_TCP
self.events = {
'send': threading.Event(),
'recv': threading.Event()}
self.threads = {
'send': [],
'recv': None}
def __Transport(self, src, dst=0):
if self.stype==socket.IPPROTO_TCP:
transport = tcp.TCP(src, dst)
transport.seqn = 0xDEADC0DE
else:
transport = udp.UDP(src, dst)
return transport
def __Pack(self, transport, src, dst):
if self.stype==socket.IPPROTO_TCP:
transport.payload = ''
else:
transport.payload = PAYLOAD.get(transport.dstp, '\x00\r\n\r\n')
packed = transport.pack(src, dst)
return packed + transport.payload
def __CookieCheck(self, data):
check = False
dstp = struct.unpack('!H', data[22:24])[0]
if self.stype==socket.IPPROTO_UDP:
if dstp==self.srcp:
check = True
else:
ackn = struct.unpack('!L', data[28:32])[0]
flags = struct.unpack('B', data[33])[0] & 0b010010 # SYN-ACK
if dstp==self.srcp and ackn==0xDEADC0DF and flags==18:
check = True
return check
def init(self):
generators = []
for h in self.hosts:
g = Generator(h[1]-h[0])
generators.append(g)
t = threading.Thread(target=self.send, args=(h, self.srcp, g))
t.setDaemon(True)
self.threads['send'].append(t)
t = threading.Thread(target=self.recv)
t.setDaemon(True)
self.threads['recv'] = t
if 'resume' in dir(self.options):
i = 0
for g in generators:
g.resume(*self.options.indexes[i])
i+=1
return self.threads, self.events, self.queue, generators
def run(self):
self.events['send'].set()
self.events['recv'].set()
for t in self.threads['send']:
t.start()
self.threads['recv'].start()
def send(self, hosts, srcp, gen):
if 'ppp' in self.ifname:
family = socket.AF_INET
proto = socket.IPPROTO_RAW
eth = ''
else:
family = socket.AF_PACKET
proto = ETH_P_IP
eth = ethernet.ETHER(mac2byte(self.smac), mac2byte(self.dmac), ETH_P_IP).pack()
sock = socket.socket(family, socket.SOCK_RAW, proto)
transport = self.__Transport(srcp, 0)
npacket = 0
self.events['send'].wait()
target = hosts[0]
while self.events['send'].isSet():
try:
target = hosts[0] + gen.next()
iph = ip.IP(self.diface, dec2dot(target), self.stype)
except StopIteration:
break
for port_list in self.ports:
for port in range(port_list[0], port_list[1]):
if self.events['send'].isSet():
transport.dstp = port
packet = eth + iph.pack() + self.__Pack(transport, iph.src, iph.dst) #tcph.pack(iph.src, iph.dst)
sock.sendto(packet, (dec2dot(target), 0)) # self.ifname
npacket+=1
if not npacket%self.cooldown[0]:
time.sleep(self.cooldown[1])
else:
break
logging.info('[SEND] Sent: {} packets'.format(npacket))
sock.close()
def recv(self):
sock = socket.socket(socket.AF_INET,
socket.SOCK_RAW,
self.stype)
sock.bind(('', self.srcp))
sock.settimeout(5)
self.events['recv'].wait()
counter = 0
while self.events['recv'].isSet():
try:
data, sa_ll = sock.recvfrom(65535)
if self.__CookieCheck(data):
self.queue.put(Extract(data))
counter += 1
if counter==self.count:
self.events['send'].clear()
break
except socket.timeout:
continue
sock.close()
logging.info('[RECV] Received: {} packets'.format(counter))
def split(self, hosts, n):
'''
Split host range into n parts (multithreaded)
'''
nhosts = hosts[1] - hosts[0] # number of hosts
nparts = nhosts/n + 1
host_parts = []
start = hosts[0]
while True:
if len(host_parts)<n-1:
end = start + nparts
host_parts.append((start, end))
start = end
else:
host_parts.append((start, hosts[1]))
break
return host_parts
def PickPort(self):
while True:
srcp = random.randrange(10000, 65535)
if srcp not in self.sport:
self.sport.append(srcp)
break
return srcp
def Extract(packet):
src = socket.inet_ntoa(packet[12:16])
srcp = struct.unpack('!H', packet[20:22])[0]
return src, srcp
def Alive(thread_list):
''' check if thread is alive '''
alive = False
for t in thread_list:
if t.isAlive():
alive = True
break
return alive
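def _split_usage_sketch():
    ''' Hedged illustration (editorial addition, not part of the original scanner):
    shows how nscan.split partitions a host range across 4 sender threads. The
    bare instance below is a hypothetical shortcut that skips __init__. '''
    scanner = nscan.__new__(nscan)
    return scanner.split((0, 256), 4)  # -> [(0, 65), (65, 130), (130, 195), (195, 256)]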
|
codalab/lib/path_util.py | kl-chou/codalab-worksheets | 236 | 5682 | <filename>codalab/lib/path_util.py
"""
path_util contains helpers for working with local filesystem paths.
There are a few classes of methods provided here:
Functions to normalize paths and check that they are in normal form:
normalize, check_isvalid, check_isdir, check_isfile, path_is_url
Functions to list directories and to deal with subpaths of paths:
safe_join, get_relative_path, ls, recursive_ls
Functions to read files to compute hashes, write results to stdout, etc:
getmtime, get_size, hash_directory, hash_file_contents
Functions that modify that filesystem in controlled ways:
copy, make_directory, set_write_permissions, rename, remove
"""
import errno
import hashlib
import itertools
import os
import shutil
import subprocess
import sys
from typing import Optional
from codalab.common import precondition, UsageError, parse_linked_bundle_url
from codalab.lib import file_util
from codalab.worker.file_util import get_path_size
# Block sizes and canonical strings used when hashing files.
BLOCK_SIZE = 0x40000
FILE_PREFIX = 'file'
LINK_PREFIX = 'link'
def path_error(message, path):
"""
Raised when a user-supplied path causes an exception.
"""
return UsageError(message + ': ' + path)
################################################################################
# Functions to normalize paths and check that they are in normal form.
################################################################################
def normalize(path):
"""
Return the absolute path of the location specified by the given path.
This path is returned in a "canonical form", without ~'s, .'s, ..'s.
"""
if path == '-':
return '/dev/stdin'
elif path_is_url(path):
return path
else:
return os.path.abspath(os.path.expanduser(path))
def check_isvalid(path, fn_name):
"""
Raise a PreconditionViolation if the path is not absolute or normalized.
Raise a UsageError if the file at that path does not exist.
"""
precondition(os.path.isabs(path), '%s got relative path: %s' % (fn_name, path))
# Broken symbolic links are valid paths, so we use lexists instead of exists.
if not os.path.lexists(path):
raise path_error('%s got non-existent path:' % (fn_name,), path)
def check_isdir(path, fn_name):
"""
    Check that the path is valid, then raise UsageError if the path is not a directory.
"""
check_isvalid(path, fn_name)
if not os.path.isdir(path):
raise path_error('%s got non-directory:' % (fn_name,), path)
def check_isfile(path, fn_name):
"""
    Check that the path is valid, then raise UsageError if the path is a directory.
"""
check_isvalid(path, fn_name)
if os.path.isdir(path):
raise path_error('%s got directory:' % (fn_name,), path)
def path_is_url(path):
if isinstance(path, str):
for prefix in ['http', 'https', 'ftp']:
if path.startswith(prefix + '://'):
return True
return False
################################################################################
# Functions to list directories and to deal with subpaths of paths.
################################################################################
def safe_join(*paths):
"""
Join a sequence of paths but filter out any that are empty. Used for targets.
Note that os.path.join has this functionality EXCEPT at the end of the list,
which causes problems when a target subpath is empty.
"""
return os.path.join(*[_f for _f in paths if _f])
def get_relative_path(root, path):
"""
Return the relative path from root to path, which should be nested under root.
"""
precondition(path.startswith(root), '%s is not under %s' % (path, root))
return path[len(root) :]
def ls(path):
"""
Return a (list of directories, list of files) in the given directory.
"""
check_isdir(path, 'ls')
(directories, files) = ([], [])
for file_name in os.listdir(path):
if os.path.isfile(os.path.join(path, file_name)):
files.append(file_name)
else:
directories.append(file_name)
return (directories, files)
def recursive_ls(path):
"""
Return a (list of directories, list of files) in the given directory and
all of its nested subdirectories. All paths returned are absolute.
Symlinks are returned in the list of files, even if they point to directories.
This makes it possible to distinguish between real and symlinked directories
when computing the hash of a directory. This function will NOT descend into
symlinked directories.
"""
check_isdir(path, 'recursive_ls')
(directories, files) = ([], [])
for (root, _, file_names) in os.walk(path):
assert os.path.isabs(root), 'Got relative root in os.walk: %s' % (root,)
directories.append(root)
for file_name in file_names:
files.append(os.path.join(root, file_name))
# os.walk ignores symlinks to directories, but we should count them as files.
# However, we can't used the followlinks parameter, because a) we don't want
# to descend into directories and b) we could end up in an infinite loop if
# we were to pass that flag. Instead, we handle symlinks here:
for subpath in os.listdir(root):
full_subpath = os.path.join(root, subpath)
if os.path.islink(full_subpath) and os.path.isdir(full_subpath):
files.append(full_subpath)
return (directories, files)
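def _ls_usage_sketch():
    """
    Hedged usage sketch (editorial addition, not part of codalab): shows the
    (directories, files) split returned by ls() on a tiny temporary tree.
    """
    import tempfile
    base = normalize(tempfile.mkdtemp())
    os.mkdir(os.path.join(base, 'sub'))
    open(os.path.join(base, 'readme.txt'), 'w').close()
    directories, files = ls(base)
    assert directories == ['sub'] and files == ['readme.txt']
    return recursive_ls(base)  # absolute paths for the whole tree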
################################################################################
# Functions to read files to compute hashes, write results to stdout, etc.
################################################################################
def getmtime(path):
"""
Like os.path.getmtime, but does not follow symlinks.
"""
return os.lstat(path).st_mtime
def get_size(path, dirs_and_files=None):
"""
Get the size (in bytes) of the file or directory at or under the given path.
Does not include symlinked files and directories.
"""
if parse_linked_bundle_url(path).uses_beam:
return get_path_size(path)
if os.path.islink(path) or not os.path.isdir(path):
return os.lstat(path).st_size
dirs_and_files = dirs_and_files or recursive_ls(path)
return sum(os.lstat(path).st_size for path in itertools.chain(*dirs_and_files))
def hash_directory(path, dirs_and_files=None):
"""
Return the hash of the contents of the folder at the given path.
This hash is independent of the path itself - if you were to move the
    directory and call hash_directory again, you would get the same result.
"""
if parse_linked_bundle_url(path).uses_beam:
# On Azure Blob Storage, we just use the directory size for the hashed contents.
return get_size(path)
(directories, files) = dirs_and_files or recursive_ls(path)
# Sort and then hash all directories and then compute a hash of the hashes.
# This two-level hash is necessary so that the overall hash is unambiguous -
# if we updated directory_hash with the directory names themselves, then
# we'd be hashing the concatenation of these names, which could be generated
# in multiple ways.
directory_hash = hashlib.sha1()
for directory in sorted(directories):
relative_path = get_relative_path(path, directory)
directory_hash.update(hashlib.sha1(relative_path.encode()).hexdigest().encode())
# Use a similar two-level hashing scheme for all files, but incorporate a
# hash of both the file name and contents.
file_hash = hashlib.sha1()
for file_name in sorted(files):
relative_path = get_relative_path(path, file_name)
file_hash.update(hashlib.sha1(relative_path.encode()).hexdigest().encode())
file_hash.update(hash_file_contents(file_name).encode())
# Return a hash of the two hashes.
overall_hash = hashlib.sha1(directory_hash.hexdigest().encode())
overall_hash.update(file_hash.hexdigest().encode())
return overall_hash.hexdigest()
def hash_file_contents(path):
"""
Return the hash of the file's contents, read in blocks of size BLOCK_SIZE.
"""
message = 'hash_file called with relative path: %s' % (path,)
precondition(os.path.isabs(path), message)
if os.path.islink(path):
contents_hash = hashlib.sha1(LINK_PREFIX.encode())
contents_hash.update(os.readlink(path).encode())
else:
contents_hash = hashlib.sha1(FILE_PREFIX.encode())
with open(path, 'rb') as file_handle:
while True:
data = file_handle.read(BLOCK_SIZE)
if not data:
break
contents_hash.update(data)
return contents_hash.hexdigest()
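def _hash_directory_sketch():
    """
    Hedged illustration (editorial addition, not part of codalab): the directory
    hash only depends on relative paths and contents, so renaming the directory
    leaves it unchanged. Uses a throwaway temp directory.
    """
    import tempfile
    base = normalize(tempfile.mkdtemp())
    make_directory(os.path.join(base, 'a'))
    with open(os.path.join(base, 'a', 'data.txt'), 'w') as f:
        f.write('hello')
    before = hash_directory(os.path.join(base, 'a'))
    rename(os.path.join(base, 'a'), os.path.join(base, 'b'))
    after = hash_directory(os.path.join(base, 'b'))
    assert before == after
    return before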
################################################################################
# Functions that modify that filesystem in controlled ways.
################################################################################
def copy(source_path: str, dest_path: str, follow_symlinks: Optional[bool] = False):
"""
Copy |source_path| to |dest_path|.
Assume dest_path doesn't exist.
|follow_symlinks|: whether to follow symlinks
Note: this only works in Linux.
"""
if os.path.exists(dest_path):
raise path_error('already exists', dest_path)
if source_path == '/dev/stdin':
with open(dest_path, 'wb') as dest:
file_util.copy(
sys.stdin,
dest,
autoflush=False,
print_status='Copying %s to %s' % (source_path, dest_path),
)
else:
if not follow_symlinks and os.path.islink(source_path):
raise path_error('not following symlinks', source_path)
if not os.path.exists(source_path):
raise path_error('does not exist', source_path)
command = [
'rsync',
'-pr%s' % ('L' if follow_symlinks else 'l'),
source_path
+ ('/' if not os.path.islink(source_path) and os.path.isdir(source_path) else ''),
dest_path,
]
if subprocess.call(command) != 0:
raise path_error('Unable to copy %s to' % source_path, dest_path)
def make_directory(path):
"""
Create the directory at the given path.
"""
try:
os.mkdir(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
check_isdir(path, 'make_directory')
def set_write_permissions(path):
# Recursively give give write permissions to |path|, so that we can operate
# on it.
if not os.path.islink(path): # Don't need write permissions if symlink
subprocess.call(['chmod', '-R', 'u+w', path])
def rename(old_path, new_path):
# Allow write permissions, or else the move will fail.
set_write_permissions(old_path)
subprocess.call(['mv', old_path, new_path])
def remove(path):
"""
Remove the given path, whether it is a directory, file, or link.
"""
if parse_linked_bundle_url(path).uses_beam:
from apache_beam.io.filesystems import FileSystems
        if not FileSystems.exists(path):
            raise path_error('does not exist', path)
        FileSystems.delete([path])
        return
check_isvalid(path, 'remove')
set_write_permissions(path) # Allow permissions
if os.path.islink(path):
os.unlink(path)
elif os.path.isdir(path):
try:
shutil.rmtree(path)
except shutil.Error:
pass
else:
os.remove(path)
if os.path.exists(path):
print('Failed to remove %s' % path)
def soft_link(source, path):
"""
Create a symbolic link to source at path. This is basically the same as doing "ln -s $source $path"
"""
check_isvalid(source, 'soft_link')
os.symlink(source, path)
|
shared/templates/coreos_kernel_option/template.py | deperrone/content | 1,138 | 5686 | <reponame>deperrone/content<filename>shared/templates/coreos_kernel_option/template.py
from ssg.utils import parse_template_boolean_value
def preprocess(data, lang):
data["arg_negate"] = parse_template_boolean_value(data, parameter="arg_negate", default_value=False)
data["arg_is_regex"] = parse_template_boolean_value(data, parameter="arg_is_regex", default_value=False)
return data
|
capirca/lib/ipset.py | google-admin/capirca | 604 | 5691 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Ipset iptables generator. This is a subclass of Iptables generator.
ipset is a system inside the Linux kernel, which can very efficiently store
and match IPv4 and IPv6 addresses. This can be used to dramatically increase
performance of iptables firewall.
"""
import string
from capirca.lib import iptables
from capirca.lib import nacaddr
class Error(Exception):
"""Base error class."""
class Term(iptables.Term):
"""Single Ipset term representation."""
_PLATFORM = 'ipset'
_SET_MAX_LENGTH = 31
_POSTJUMP_FORMAT = None
_PREJUMP_FORMAT = None
_TERM_FORMAT = None
_COMMENT_FORMAT = string.Template(
'-A $filter -m comment --comment "$comment"')
_FILTER_TOP_FORMAT = string.Template('-A $filter')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# This stores tuples of set name and set contents, keyed by direction.
# For example:
# { 'src': ('set_name', [ipaddr object, ipaddr object]),
# 'dst': ('set_name', [ipaddr object, ipaddr object]) }
self.addr_sets = {}
def _CalculateAddresses(self, src_addr_list, src_addr_exclude_list,
dst_addr_list, dst_addr_exclude_list):
"""Calculates source and destination address list for a term.
Since ipset is very efficient at matching large number of
addresses, we never return any exclude addresses. Instead
least positive match is calculated for both source and destination
addresses.
For source and destination address list, three cases are possible.
First case is when there are no addresses. In that case we return
_all_ips.
    Second case is when there is exactly one address. In that case
    we optimize by not generating a set; the address is then the only
    element of the returned list.
    Third case is when there is more than one address. In that case
    we generate a set and also return _all_ips. Note the difference
    from the first case, where no set is actually generated.
Args:
src_addr_list: source address list of the term.
src_addr_exclude_list: source address exclude list of the term.
dst_addr_list: destination address list of the term.
dst_addr_exclude_list: destination address exclude list of the term.
Returns:
tuple containing source address list, source address exclude list,
destination address list, destination address exclude list in
that order.
"""
target_af = self.AF_MAP[self.af]
src_addr_list = self._CalculateAddrList(src_addr_list,
src_addr_exclude_list, target_af,
'src')
dst_addr_list = self._CalculateAddrList(dst_addr_list,
dst_addr_exclude_list, target_af,
'dst')
return (src_addr_list, [], dst_addr_list, [])
def _CalculateAddrList(self, addr_list, addr_exclude_list,
target_af, direction):
"""Calculates and stores address list for target AF and direction.
Args:
addr_list: address list.
addr_exclude_list: address exclude list of the term.
target_af: target address family.
direction: direction in which address list will be used.
Returns:
calculated address list.
"""
if not addr_list:
addr_list = [self._all_ips]
addr_list = [addr for addr in addr_list if addr.version == target_af]
if addr_exclude_list:
addr_exclude_list = [addr_exclude for addr_exclude in addr_exclude_list if
addr_exclude.version == target_af]
addr_list = nacaddr.ExcludeAddrs(addr_list, addr_exclude_list)
if len(addr_list) > 1:
set_name = self._GenerateSetName(self.term.name, direction)
self.addr_sets[direction] = (set_name, addr_list)
addr_list = [self._all_ips]
return addr_list
def _GenerateAddressStatement(self, src_addr, dst_addr):
"""Returns the address section of an individual iptables rule.
See _CalculateAddresses documentation. Three cases are possible here,
and they map directly to cases in _CalculateAddresses.
    First, there can be no addresses for a direction (the value is then _all_ips).
    In that case we return an empty string.
    Second, there can be exactly one address. In that case we return a single
    address match (-s or -d).
    Third, the value can be _all_ips while a set for that particular direction
    is present. In that case we return a set match.
Args:
src_addr: ipaddr address or network object with source
address of the rule.
dst_addr: ipaddr address or network object with destination
address of the rule.
Returns:
tuple containing source and destination address statement, in
that order.
"""
src_addr_stmt = ''
dst_addr_stmt = ''
if src_addr and dst_addr:
if src_addr == self._all_ips:
if 'src' in self.addr_sets:
src_addr_stmt = ('-m set --match-set %s src' %
self.addr_sets['src'][0])
else:
src_addr_stmt = '-s %s/%d' % (src_addr.network_address,
src_addr.prefixlen)
if dst_addr == self._all_ips:
if 'dst' in self.addr_sets:
dst_addr_stmt = ('-m set --match-set %s dst' %
self.addr_sets['dst'][0])
else:
dst_addr_stmt = '-d %s/%d' % (dst_addr.network_address,
dst_addr.prefixlen)
return (src_addr_stmt, dst_addr_stmt)
def _GenerateSetName(self, term_name, suffix):
if self.af == 'inet6':
suffix += '-v6'
if len(term_name) + len(suffix) + 1 > self._SET_MAX_LENGTH:
set_name_max_lenth = self._SET_MAX_LENGTH - len(suffix) - 1
term_name = term_name[:set_name_max_lenth]
return '%s-%s' % (term_name, suffix)
class Ipset(iptables.Iptables):
"""Ipset generator."""
_PLATFORM = 'ipset'
_SET_TYPE = 'hash:net'
SUFFIX = '.ips'
_TERM = Term
_MARKER_BEGIN = '# begin:ipset-rules'
_MARKER_END = '# end:ipset-rules'
_GOOD_OPTIONS = ['nostate', 'abbreviateterms', 'truncateterms', 'noverbose',
'exists']
# TODO(vklimovs): some not trivial processing is happening inside this
# __str__, replace with explicit method
def __str__(self):
# Actual rendering happens in __str__, so it has to be called
# before we do set specific part.
iptables_output = super().__str__()
output = []
output.append(self._MARKER_BEGIN)
for (_, _, _, _, terms) in self.iptables_policies:
for term in terms:
output.extend(self._GenerateSetConfig(term))
output.append(self._MARKER_END)
output.append(iptables_output)
return '\n'.join(output)
def _GenerateSetConfig(self, term):
"""Generates set configuration for supplied term.
Args:
term: input term.
Returns:
string that is configuration of supplied term.
"""
output = []
c_str = 'create'
a_str = 'add'
if 'exists' in self.filter_options:
c_str = c_str + ' -exist'
a_str = a_str + ' -exist'
for direction in sorted(term.addr_sets, reverse=True):
set_name, addr_list = term.addr_sets[direction]
set_hashsize = 1 << len(addr_list).bit_length()
set_maxelem = set_hashsize
output.append('%s %s %s family %s hashsize %i maxelem %i' %
(c_str,
set_name,
self._SET_TYPE,
term.af,
set_hashsize,
set_maxelem))
for address in addr_list:
output.append('%s %s %s' % (a_str, set_name, address))
return output
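def _set_config_sketch():
  """Hedged illustration (editorial addition, not part of capirca): mirrors the
  'create'/'add' lines that Ipset._GenerateSetConfig emits for a hypothetical
  source address set of three IPv4 networks."""
  set_name = 'good-term-src'
  addr_list = ['10.0.0.0/24', '10.0.1.0/24', '10.0.2.0/24']
  set_hashsize = 1 << len(addr_list).bit_length()  # -> 4
  lines = ['create %s hash:net family inet hashsize %i maxelem %i' %
           (set_name, set_hashsize, set_hashsize)]
  lines += ['add %s %s' % (set_name, addr) for addr in addr_list]
  return lines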
|
stp_core/common/logging/handlers.py | andkononykhin/plenum | 148 | 5700 | <reponame>andkononykhin/plenum<filename>stp_core/common/logging/handlers.py
import logging
class CallbackHandler(logging.Handler):
def __init__(self, typestr, default_tags, callback, override_tags):
"""
Initialize the handler.
"""
super().__init__()
self.callback = callback
self.tags = default_tags
self.update_tags(override_tags or {})
self.typestr = typestr
def update_tags(self, override_tags):
self.tags.update(override_tags)
def emit(self, record):
"""
Passes the log record back to the CLI for rendering
"""
should_cb = None
attr_val = None
if hasattr(record, self.typestr):
attr_val = getattr(record, self.typestr)
should_cb = bool(attr_val)
if should_cb is None and record.levelno >= logging.INFO:
should_cb = True
if hasattr(record, 'tags'):
for t in record.tags:
if t in self.tags:
if self.tags[t]:
should_cb = True
continue
else:
should_cb = False
break
if should_cb:
self.callback(record, attr_val)
class CliHandler(CallbackHandler):
def __init__(self, callback, override_tags=None):
default_tags = {
"add_replica": True
}
super().__init__(typestr="cli",
default_tags=default_tags,
callback=callback,
override_tags=override_tags)
class DemoHandler(CallbackHandler):
def __init__(self, callback, override_tags=None):
default_tags = {
"add_replica": True
}
super().__init__(typestr="demo",
default_tags=default_tags,
callback=callback,
override_tags=override_tags)
class TestingHandler(logging.Handler):
def __init__(self, tester):
"""
Initialize the handler.
"""
super().__init__()
self.tester = tester
def emit(self, record):
"""
Captures a record.
"""
self.tester(record)
|
test/tst_vlen.py | timgates42/netcdf4-python | 574 | 5712 | import sys
import unittest
import os
import tempfile
from netCDF4 import Dataset
import numpy as np
from numpy.testing import assert_array_equal
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
VL_NAME = 'vlen_type'
VL_BASETYPE = np.int16
DIM1_NAME = 'lon'
DIM2_NAME = 'lat'
nlons = 5; nlats = 5
VAR1_NAME = 'ragged'
VAR2_NAME = 'strings'
VAR3_NAME = 'strings_alt'
VAR4_NAME = 'string_scalar'
VAR5_NAME = 'vlen_scalar'
data = np.empty(nlats*nlons,object)
datas = np.empty(nlats*nlons,object)
nn = 0
for n in range(nlats*nlons):
nn = nn + 1
data[n] = np.arange(nn,dtype=VL_BASETYPE)
datas[n] = ''.join([chr(i) for i in range(97,97+nn+1)])
data = np.reshape(data,(nlats,nlons))
datas = np.reshape(datas,(nlats,nlons))
class VariablesTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file,'w')
vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
f.createDimension(DIM1_NAME,nlons)
f.createDimension(DIM2_NAME,nlats)
ragged = f.createVariable(VAR1_NAME, vlen_t,\
(DIM2_NAME,DIM1_NAME))
strings = f.createVariable(VAR2_NAME, str,
(DIM2_NAME,DIM1_NAME))
strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
(DIM2_NAME, DIM1_NAME))
string_scalar = f.createVariable(VAR4_NAME,str,())
vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,())
ragged[:] = data
ragged[-1,-1] = data[-1,-1]
strings[:] = datas
strings[-2,-2] = datas[-2,-2]
strings_alt[:] = datas.astype(str)
string_scalar[...] = 'foo' #issue458
vlen_scalar[...] = np.array([1,2,3],np.int16)
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing vlen variables"""
f = Dataset(self.file, 'r')
v = f.variables[VAR1_NAME]
vs = f.variables[VAR2_NAME]
vs_alt = f.variables[VAR3_NAME]
assert list(f.vltypes.keys()) == [VL_NAME]
assert f.vltypes[VL_NAME].dtype == VL_BASETYPE
assert f.variables['string_scalar'][...] == 'foo'
assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16))
data2 = v[:]
data2s = vs[:]
for i in range(nlons):
for j in range(nlats):
assert_array_equal(data2[j,i], data[j,i])
assert datas[j,i] == data2s[j,i]
assert_array_equal(datas, vs_alt[:])
f.close()
class TestInvalidDataType(unittest.TestCase):
def runTest(self):
f = Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC')
f.createDimension('x', 1)
        # using assertRaisesRegexp as a context manager
# only works with python >= 2.7 (issue #497)
#with self.assertRaisesRegexp(ValueError, 'strings are only supported'):
# f.createVariable('foo', str, ('x',))
try:
f.createVariable('foo', str, ('x',))
except ValueError:
pass
f.close()
os.remove(FILE_NAME)
class TestScalarVlenString(unittest.TestCase):
# issue 333
def runTest(self):
f = Dataset(FILE_NAME, 'w', format='NETCDF4')
teststring = f.createVariable('teststring', str)
stringout = "yyyymmdd_hhmmss"
teststring[()] = stringout
f.close()
f = Dataset(FILE_NAME)
assert f.variables['teststring'][:] == stringout
f.close()
os.remove(FILE_NAME)
class TestIntegerIndex(unittest.TestCase):
# issue 526
def runTest(self):
strtest = Dataset(FILE_NAME, 'w', format='NETCDF4')
strtest.createDimension('tenstrings', 10)
strtest.createVariable('tenstrings', str, ['tenstrings'])
strtest['tenstrings'][np.int32(5)] = 'asdf'
strtest['tenstrings'][6.0] = 'asdf'
strtest.close()
f = Dataset(FILE_NAME)
assert f.variables['tenstrings'][np.int32(5)] == 'asdf'
assert f.variables['tenstrings'][6.0] == 'asdf'
f.close()
os.remove(FILE_NAME)
class TestObjectArrayIndexing(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file,'w')
vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
f.createDimension(DIM1_NAME,nlons)
f.createDimension(DIM2_NAME,nlats)
strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
(DIM2_NAME, DIM1_NAME))
strings_alt[:] = datas.astype(str)
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing vlen variables"""
f = Dataset(self.file, 'r')
vs_alt = f.variables[VAR3_NAME]
unicode_strings = vs_alt[:]
fancy_indexed = unicode_strings[0][[1,2,4]]
assert fancy_indexed[0] == 'abc'
assert fancy_indexed[1] == 'abcd'
assert fancy_indexed[2] == 'abcdef'
f.close()
class VlenAppendTestCase(unittest.TestCase):
def setUp(self):
import netCDF4
if netCDF4.__netcdf4libversion__ < "4.4.1":
self.skip = True
try:
self.skipTest("This test requires NetCDF 4.4.1 or later.")
except AttributeError:
# workaround for Python 2.6 (skipTest(reason) is new
# in Python 2.7)
pass
else:
self.skip = False
self.file = FILE_NAME
f = Dataset(self.file, 'w')
vlen_type = f.createVLType(np.float64, 'vltest')
f.createDimension('x', None)
v = f.createVariable('vl', vlen_type, 'x')
w = f.createVariable('vl2', np.float64, 'x')
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing appending to vlen variables (issue #527)."""
# workaround for Python 2.6
if self.skip:
return
f = Dataset(self.file, 'a')
w = f.variables["vl2"]
v = f.variables["vl"]
w[0:3] = np.arange(3, dtype=np.float64)
v[0] # sometimes crashes
v[0].tolist() # sometimes crashes
v[0].size # BOOM!
f.close()
class Vlen_ScaledInts(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
nc = Dataset(self.file, 'w')
vlen_type = nc.createVLType(np.uint8, 'vltest')
nc.createDimension('x', None)
v = nc.createVariable('vl', vlen_type, 'x')
v.scale_factor = 1./254.
v.missing_value=np.array(255,np.uint8)
# random lengths between 1 and 1000
ilen = np.random.randint(1,1000,size=100)
n = 0
for nlen in ilen:
data = np.random.uniform(low=0.0, high=1.0, size=nlen)
v[n] = data
if n==99: self.data = data
n += 1
nc.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing packing float vlens as scaled integers (issue #1003)."""
nc = Dataset(self.file)
data = nc['vl'][-1]
# check max error of compression
err = np.abs(data - self.data)
assert(err.max() < nc['vl'].scale_factor)
# turn off auto-scaling
nc.set_auto_maskandscale(False)
data = nc['vl'][-1]
assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))
nc.close()
if __name__ == '__main__':
unittest.main()
|
sonnet/src/once.py | ScriptBox99/deepmind-sonnet | 10,287 | 5713 | # Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility to run functions and methods once."""
import uuid
from sonnet.src import utils
_ONCE_PROPERTY = "_snt_once"
def _check_no_output(output):
if output is not None:
raise ValueError("@snt.once decorated functions cannot return values")
def once(f):
"""Decorator which ensures a wrapped method is only ever run once.
>>> @snt.once
... def f():
... print('Hello, world!')
>>> f()
Hello, world!
>>> f()
>>> f()
If `f` is a method then it will be evaluated once per instance:
>>> class MyObject:
... @snt.once
... def f(self):
... print('Hello, world!')
>>> o = MyObject()
>>> o.f()
Hello, world!
>>> o.f()
>>> o2 = MyObject()
>>> o2.f()
Hello, world!
>>> o.f()
>>> o2.f()
If an error is raised during execution of `f` it will be raised to the user.
Next time the method is run, it will be treated as not having run before.
Args:
f: A function to wrap which should only be called once.
Returns:
Wrapped version of `f` which will only evaluate `f` the first time it is
called.
"""
# TODO(tomhennigan) Perhaps some more human friendly identifier?
once_id = uuid.uuid4()
@utils.decorator
def wrapper(wrapped, instance, args, kwargs):
"""Decorator which ensures a wrapped method is only ever run once."""
if instance is None:
# NOTE: We can't use the weakset since you can't weakref None.
if not wrapper.seen_none:
_check_no_output(wrapped(*args, **kwargs))
wrapper.seen_none = True
return
# Get or set the `seen` set for this object.
seen = getattr(instance, _ONCE_PROPERTY, None)
if seen is None:
seen = set()
setattr(instance, _ONCE_PROPERTY, seen)
if once_id not in seen:
_check_no_output(wrapped(*args, **kwargs))
seen.add(once_id)
wrapper.seen_none = False
decorated = wrapper(f) # pylint: disable=no-value-for-parameter,assignment-from-none
decorated.__snt_once_wrapped__ = f
return decorated
|
tests/_test_progress_board.py | stjordanis/Hyperactive | 382 | 5716 | <gh_stars>100-1000
import os, glob
import subprocess
from subprocess import DEVNULL, STDOUT
abspath = os.path.abspath(__file__)
dir_ = os.path.dirname(abspath)
files = glob.glob(dir_ + "/_progress_board_tests/_test_progress_board_*.py")
for file_path in files:
file_name = str(file_path.rsplit("/", maxsplit=1)[1])
try:
print("\033[0;33;40m Testing", file_name, end="...\r")
subprocess.check_call(["pytest", file_path], stdout=DEVNULL, stderr=STDOUT)
except subprocess.CalledProcessError:
print("\033[0;31;40m Error in", file_name)
else:
print("\033[0;32;40m", file_name, "is correct")
|
allure/pytest_plugin.py | allure-framework/allure-pytest | 112 | 5726 | import uuid
import pickle
import pytest
import argparse
from collections import namedtuple
from six import text_type
from allure.common import AllureImpl, StepContext
from allure.constants import Status, AttachmentType, Severity, \
FAILED_STATUSES, Label, SKIPPED_STATUSES
from allure.utils import parent_module, parent_down_from_module, labels_of, \
all_of, get_exception_message, now, mangle_testnames
from allure.structure import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel
def pytest_addoption(parser):
parser.getgroup("reporting").addoption('--alluredir',
action="store",
dest="allurereportdir",
metavar="DIR",
default=None,
help="Generate Allure report in the specified directory (may not exist)")
severities = [v for (_, v) in all_of(Severity)]
def label_type(name, legal_values=set()):
"""
argparse-type factory for labelish things.
The processed value is a set of (name, value) tuples.
:param name: of label type (for future TestLabel things)
:param legal_values: a `set` of values that are legal for this label, if there is any limit at all
:raises ArgumentTypeError: if `legal_values` are given and there are values that fall outside of it
"""
def a_label_type(string):
atoms = set(string.split(','))
if legal_values and not atoms < legal_values:
raise argparse.ArgumentTypeError('Illegal {} values: {}, only [{}] are allowed'.format(name, ', '.join(atoms - legal_values), ', '.join(legal_values)))
return set((name, v) for v in atoms)
return a_label_type
parser.getgroup("general").addoption('--allure_severities',
action="store",
dest="allureseverities",
metavar="SEVERITIES_SET",
default={},
type=label_type(name=Label.SEVERITY, legal_values=set(severities)),
help="""Comma-separated list of severity names.
Tests only with these severities will be run.
Possible values are:%s.""" % ', '.join(severities))
parser.getgroup("general").addoption('--allure_features',
action="store",
dest="allurefeatures",
metavar="FEATURES_SET",
default={},
type=label_type(name=Label.FEATURE),
help="""Comma-separated list of feature names.
Run tests that have at least one of the specified feature labels.""")
parser.getgroup("general").addoption('--allure_stories',
action="store",
dest="allurestories",
metavar="STORIES_SET",
default={},
type=label_type(name=Label.STORY),
help="""Comma-separated list of story names.
Run tests that have at least one of the specified story labels.""")
def pytest_configure(config):
reportdir = config.option.allurereportdir
if reportdir: # we actually record something
allure_impl = AllureImpl(reportdir)
testlistener = AllureTestListener(config)
pytest.allure._allurelistener = testlistener
config.pluginmanager.register(testlistener)
if not hasattr(config, 'slaveinput'):
# on xdist-master node do all the important stuff
config.pluginmanager.register(AllureAgregatingListener(allure_impl, config))
config.pluginmanager.register(AllureCollectionListener(allure_impl))
class AllureTestListener(object):
"""
Per-test listener.
Is responsible for recording in-test data and for attaching it to the test report thing.
The per-test reports are handled by `AllureAgregatingListener` at the `pytest_runtest_logreport` hook.
"""
def __init__(self, config):
self.config = config
self.environment = {}
self.test = None
# FIXME: that flag makes us pre-report failures in the makereport hook.
# it is here to cope with xdist's behavior regarding -x.
# see self.pytest_runtest_makereport and AllureAgregatingListener.pytest_sessionfinish
self._magicaldoublereport = hasattr(self.config, 'slaveinput') and self.config.getvalue("maxfail")
@pytest.mark.hookwrapper
def pytest_runtest_protocol(self, item, nextitem):
try:
# for common items
description = item.function.__doc__
except AttributeError:
# for doctests that has no `function` attribute
description = item.reportinfo()[2]
self.test = TestCase(name='.'.join(mangle_testnames([x.name for x in parent_down_from_module(item)])),
description=description,
start=now(),
attachments=[],
labels=labels_of(item),
status=None,
steps=[],
id=str(uuid.uuid4())) # for later resolution in AllureAgregatingListener.pytest_sessionfinish
self.stack = [self.test]
yield
self.test = None
self.stack = []
def attach(self, title, contents, attach_type):
"""
Store attachment object in current state for later actual write in the `AllureAgregatingListener.write_attach`
"""
attach = Attach(source=contents, # we later re-save those, oh my...
title=title,
type=attach_type)
self.stack[-1].attachments.append(attach)
def dynamic_issue(self, *issues):
"""
Attaches ``issues`` to the current active case
"""
if self.test:
self.test.labels.extend([TestLabel(name=Label.ISSUE, value=issue) for issue in issues])
def description(self, description):
"""
Sets description for the test
"""
if self.test:
self.test.description = description
def start_step(self, name):
"""
Starts a new :py:class:`allure.structure.TestStep` with the given ``name``,
pushes it to the ``self.stack`` and returns the step.
"""
step = TestStep(name=name,
title=name,
start=now(),
attachments=[],
steps=[])
self.stack[-1].steps.append(step)
self.stack.append(step)
return step
def stop_step(self):
"""
Stops the step at the top of ``self.stack``
"""
step = self.stack.pop()
step.stop = now()
def _fill_case(self, report, call, pyteststatus, status):
"""
Finalizes the test case with important data
:param report: py.test's `TestReport`
:param call: py.test's `CallInfo`
:param pyteststatus: the failed/xfailed/xpassed thing
:param status: a :py:class:`allure.constants.Status` entry
"""
[self.attach(name, contents, AttachmentType.TEXT) for (name, contents) in dict(report.sections).items()]
self.test.stop = now()
self.test.status = status
if status in FAILED_STATUSES:
self.test.failure = Failure(message=get_exception_message(call.excinfo, pyteststatus, report),
trace=report.longrepr or hasattr(report, 'wasxfail') and report.wasxfail)
elif status in SKIPPED_STATUSES:
skip_message = type(report.longrepr) == tuple and report.longrepr[2] or report.wasxfail
trim_msg_len = 89
short_message = skip_message.split('\n')[0][:trim_msg_len]
# FIXME: see pytest.runner.pytest_runtest_makereport
self.test.failure = Failure(message=(short_message + '...' * (len(skip_message) > trim_msg_len)),
trace=status == Status.PENDING and report.longrepr or short_message != skip_message and skip_message or '')
def report_case(self, item, report):
"""
Adds `self.test` to the `report` in an `AllureAgregatingListener`-understood way
"""
parent = parent_module(item)
# we attach a five-tuple: (test module ID, test module name, test module doc, environment, TestCase)
report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid,
parent.module.__name__,
parent.module.__doc__ or '',
self.environment,
self.test)))
@pytest.mark.hookwrapper
def pytest_runtest_makereport(self, item, call):
"""
Decides when to actually report things.
pytest runs this (naturally) three times -- with report.when being:
setup <--- fixtures are to be initialized in this one
call <--- when this finishes the main code has finished
teardown <--- tears down fixtures (that still possess important info)
`setup` and `teardown` are always called, but `call` is called only if `setup` passes.
See :py:func:`_pytest.runner.runtestprotocol` for proofs / ideas.
The "other side" (AllureAggregatingListener) expects us to send EXACTLY ONE test report (it wont break, but it will duplicate cases in the report -- which is bad.
So we work hard to decide exact moment when we call `_stop_case` to do that. This method may benefit from FSM (we keep track of what has already happened via self.test.status)
Expected behavior is:
FAILED when call fails and others OK
BROKEN when either setup OR teardown are broken (and call may be anything)
PENDING if skipped and xfailed
SKIPPED if skipped and not xfailed
"""
report = (yield).get_result()
status = self.config.hook.pytest_report_teststatus(report=report)
status = status and status[0]
if report.when == 'call':
if report.passed:
self._fill_case(report, call, status, Status.PASSED)
elif report.failed:
self._fill_case(report, call, status, Status.FAILED)
# FIXME: this is here only to work around xdist's stupid -x thing when it exits BEFORE THE TEARDOWN test log. Meh, I should file an issue with xdist
if self._magicaldoublereport:
# to minimize ze impact
self.report_case(item, report)
elif report.skipped:
if hasattr(report, 'wasxfail'):
self._fill_case(report, call, status, Status.PENDING)
else:
self._fill_case(report, call, status, Status.CANCELED)
elif report.when == 'setup': # setup / teardown
if report.failed:
self._fill_case(report, call, status, Status.BROKEN)
elif report.skipped:
if hasattr(report, 'wasxfail'):
self._fill_case(report, call, status, Status.PENDING)
else:
self._fill_case(report, call, status, Status.CANCELED)
elif report.when == 'teardown':
# as teardown is always called for testitem -- report our status here
if not report.passed:
if self.test.status not in FAILED_STATUSES:
# if test was OK but failed at teardown => broken
self._fill_case(report, call, status, Status.BROKEN)
else:
# mark it broken so, well, someone has idea of teardown failure
# still, that's no big deal -- test has already failed
# TODO: think about that once again
self.test.status = Status.BROKEN
# if a test isn't marked as "unreported" or it has failed, add it to the report.
if not item.get_marker("unreported") or self.test.status in FAILED_STATUSES:
self.report_case(item, report)
def pytest_runtest_setup(item):
item_labels = set((l.name, l.value) for l in labels_of(item)) # see label_type
arg_labels = set().union(item.config.option.allurefeatures,
item.config.option.allurestories,
item.config.option.allureseverities)
if arg_labels and not item_labels & arg_labels:
pytest.skip('Not suitable with selected labels: %s.' % ', '.join(text_type(l) for l in sorted(arg_labels)))
class LazyInitStepContext(StepContext):
"""
This is a step context used for decorated steps.
It makes it possible to create step decorators before pytest_configure runs, when no AllureListener has been initiated yet.
"""
def __init__(self, allure_helper, title):
self.allure_helper = allure_helper
self.title = title
self.step = None
@property
def allure(self):
listener = self.allure_helper.get_listener()
# if listener has `stack` we are inside a test
# record steps only in that case
# FIXME: this breaks encapsulation a lot
if hasattr(listener, 'stack'):
return listener
class AllureHelper(object):
"""
This object holds various utility methods used from the ``pytest.allure`` namespace, like ``pytest.allure.attach``
"""
def __init__(self):
self._allurelistener = None # FIXME: this gets injected elsewhere, like in the pytest_configure
def get_listener(self):
return self._allurelistener
def attach(self, name, contents, type=AttachmentType.TEXT): # @ReservedAssignment
"""
Attaches ``contents`` to a current context with given ``name`` and ``type``.
"""
if self._allurelistener:
self._allurelistener.attach(name, contents, type)
def label(self, name, *value):
"""
A decorator factory that returns ``pytest.mark`` for a given label.
"""
allure_label = getattr(pytest.mark, '%s.%s' % (Label.DEFAULT, name))
return allure_label(*value)
def severity(self, severity):
"""
A decorator factory that returns ``pytest.mark`` for a given allure ``severity``.
"""
return self.label(Label.SEVERITY, severity)
def feature(self, *features):
"""
A decorator factory that returns ``pytest.mark`` for the given features.
"""
return self.label(Label.FEATURE, *features)
def story(self, *stories):
"""
A decorator factory that returns ``pytest.mark`` for the given stories.
"""
return self.label(Label.STORY, *stories)
def issue(self, *issues):
"""
A decorator factory that returns ``pytest.mark`` for the given issues.
"""
return self.label(Label.ISSUE, *issues)
def dynamic_issue(self, *issues):
"""
Mark test ``issues`` from inside.
"""
if self._allurelistener:
self._allurelistener.dynamic_issue(*issues)
def description(self, description):
"""
Sets description for the test
"""
if self._allurelistener:
self._allurelistener.description(description)
def testcase(self, *testcases):
"""
A decorator factory that returns ``pytest.mark`` for the given testcases.
"""
return self.label(Label.TESTCASE, *testcases)
def step(self, title):
"""
A contextmanager/decorator for steps.
TODO: when moving to python 3, rework this with ``contextlib.ContextDecorator``.
Usage examples::
import pytest
def test_foo():
with pytest.allure.step('mystep'):
assert False
@pytest.allure.step('make test data')
def make_test_data_bar():
raise ValueError('No data today')
def test_bar():
assert make_test_data_bar()
@pytest.allure.step
def make_test_data_baz():
raise ValueError('No data today')
def test_baz():
assert make_test_data_baz()
@pytest.fixture()
@pytest.allure.step('test fixture')
def steppy_fixture():
return 1
def test_baz(steppy_fixture):
assert steppy_fixture
"""
if callable(title):
return LazyInitStepContext(self, title.__name__)(title)
else:
return LazyInitStepContext(self, title)
def single_step(self, text):
"""
Writes a single line to the report.
"""
if self._allurelistener:
with self.step(text):
pass
def environment(self, **env_dict):
if self._allurelistener:
self._allurelistener.environment.update(env_dict)
@property
def attach_type(self):
return AttachmentType
@property
def severity_level(self):
return Severity
def __getattr__(self, attr):
"""
Provides fancy shortcuts for severity::
# these are the same
pytest.allure.CRITICAL
pytest.allure.severity(pytest.allure.severity_level.CRITICAL)
"""
if attr in dir(Severity) and not attr.startswith('_'):
return self.severity(getattr(Severity, attr))
else:
raise AttributeError
MASTER_HELPER = AllureHelper()
def pytest_namespace():
return {'allure': MASTER_HELPER}
class AllureAgregatingListener(object):
"""
Listens to pytest hooks to generate reports for common tests.
"""
def __init__(self, impl, config):
self.impl = impl
# module's nodeid => TestSuite object
self.suites = {}
def pytest_sessionfinish(self):
"""
We are done and have all the results in `self.suites`
Let's write them down.
But first we kinda-unify the test cases.
We expect cases to come from AllureTestListener -- and they have an .id field to manifest their identity.
Of all the test cases in suite.tests we keep only the LAST one with the same ID -- because logreport can be sent MORE THAN ONE TIME
(namely, if the test fails and then gets broken -- to cope with the xdist's -x behavior we have to have tests even at CALL failures)
TODO: do it in a better, more efficient way
"""
for s in self.suites.values():
if s.tests: # nobody likes empty suites
s.stop = max(case.stop for case in s.tests)
known_ids = set()
refined_tests = []
for t in s.tests[::-1]:
if t.id not in known_ids:
known_ids.add(t.id)
refined_tests.append(t)
s.tests = refined_tests[::-1]
with self.impl._reportfile('%s-testsuite.xml' % uuid.uuid4()) as f:
self.impl._write_xml(f, s)
self.impl.store_environment()
def write_attach(self, attachment):
"""
Writes an attachment object from the `AllureTestListener` to the FS, fixing its fields
:param attachment: a :py:class:`allure.structure.Attach` object
"""
# OMG, that is bad
attachment.source = self.impl._save_attach(attachment.source, attachment.type)
attachment.type = attachment.type.mime_type
def pytest_runtest_logreport(self, report):
if hasattr(report, '_allure_result'):
module_id, module_name, module_doc, environment, testcase = pickle.loads(report._allure_result)
report._allure_result = None # so actual pickled data is garbage-collected, see https://github.com/allure-framework/allure-python/issues/98
self.impl.environment.update(environment)
for a in testcase.iter_attachments():
self.write_attach(a)
self.suites.setdefault(module_id, TestSuite(name=module_name,
description=module_doc,
tests=[],
labels=[],
start=testcase.start, # first case starts the suite!
stop=None)).tests.append(testcase)
CollectFail = namedtuple('CollectFail', 'name status message trace')
class AllureCollectionListener(object):
"""
Listens to pytest collection-related hooks
to generate reports for modules that failed to collect.
"""
def __init__(self, impl):
self.impl = impl
self.fails = []
def pytest_collectreport(self, report):
if not report.passed:
if report.failed:
status = Status.BROKEN
else:
status = Status.CANCELED
self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split("::"))[-1],
status=status,
message=get_exception_message(None, None, report),
trace=report.longrepr))
def pytest_sessionfinish(self):
"""
Creates a testsuite with collection failures if there were any.
"""
if self.fails:
self.impl.start_suite(name='test_collection_phase',
title='Collection phase',
description='This is the tests collection phase. Failures are modules that failed to collect.')
for fail in self.fails:
self.impl.start_case(name=fail.name.split(".")[-1])
self.impl.stop_case(status=fail.status, message=fail.message, trace=fail.trace)
self.impl.stop_suite()
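# Illustrative sketch (editor's addition, not part of the original plugin): it shows
# how the helpers exposed above through the ``pytest.allure`` namespace are typically
# combined in a test. The feature name, attachment contents and the assumption that
# Severity.CRITICAL exists in allure.constants are for demonstration only; the function
# is defined but never called, so importing this module is unaffected.
def _example_allure_helpers_usage():
    @MASTER_HELPER.feature('reporting')
    @MASTER_HELPER.severity(Severity.CRITICAL)
    def test_demo():
        # attach() is a no-op until pytest_configure has registered an AllureTestListener
        MASTER_HELPER.attach('greeting', 'hello world', type=AttachmentType.TEXT)
        with MASTER_HELPER.step('check the obvious'):
            assert 1 + 1 == 2
    return test_demo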
|
talleres_inov_docente/figures/plot_helpers.py | jfcaballero/Tutorial-sobre-scikit-learn-abreviado | 576 | 5734 | from matplotlib.colors import ListedColormap
cm3 = ListedColormap(['#0000aa', '#ff2020', '#50ff50'])
cm2 = ListedColormap(['#0000aa', '#ff2020'])
|
sympy/tensor/tests/test_functions.py | iamabhishek0/sympy | 8,323 | 5743 | <reponame>iamabhishek0/sympy<filename>sympy/tensor/tests/test_functions.py
from sympy.tensor.functions import TensorProduct
from sympy import MatrixSymbol, Matrix, Array
from sympy.abc import x, y, z
from sympy.abc import i, j, k, l
A = MatrixSymbol("A", 3, 3)
B = MatrixSymbol("B", 3, 3)
C = MatrixSymbol("C", 3, 3)
def test_TensorProduct_construction():
assert TensorProduct(3, 4) == 12
assert isinstance(TensorProduct(A, A), TensorProduct)
expr = TensorProduct(TensorProduct(x, y), z)
assert expr == x*y*z
expr = TensorProduct(TensorProduct(A, B), C)
assert expr == TensorProduct(A, B, C)
expr = TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]])
assert expr == Array([
[
[[0, -1], [1, 0]],
[[0, 0], [0, 0]]
],
[
[[0, 0], [0, 0]],
[[0, -1], [1, 0]]
]
])
def test_TensorProduct_shape():
expr = TensorProduct(3, 4, evaluate=False)
assert expr.shape == ()
assert expr.rank() == 0
expr = TensorProduct([1, 2], [x, y], evaluate=False)
assert expr.shape == (2, 2)
assert expr.rank() == 2
expr = TensorProduct(expr, expr, evaluate=False)
assert expr.shape == (2, 2, 2, 2)
assert expr.rank() == 4
expr = TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]], evaluate=False)
assert expr.shape == (2, 2, 2, 2)
assert expr.rank() == 4
def test_TensorProduct_getitem():
expr = TensorProduct(A, B)
assert expr[i, j, k, l] == A[i, j]*B[k, l]
|
mturk/comparison_among_different_models/sample_from_models_for_comparison.py | qiaone/GIF | 322 | 5754 | import sys
sys.path.append('../../')
import constants as cnst
import os
os.environ['PYTHONHASHSEED'] = '2'
import tqdm
from model.stg2_generator import StyledGenerator
import numpy as np
from my_utils.visualize_flame_overlay import OverLayViz
from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp
from my_utils.generate_gif import generate_from_flame_sequence
from my_utils.generic_utils import save_set_of_images
from my_utils import compute_fid
import constants
from dataset_loaders import fast_image_reshape
import torch
from my_utils import generic_utils
from my_utils.eye_centering import position_to_given_location
def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond):
if normal_map_cond and texture_cond:
return torch.cat((textured_rndr, norm_map), dim=1)
elif normal_map_cond:
return norm_map
elif texture_cond:
return textured_rndr
else:
return flm_params
# General settings
save_images = True
code_size = 236
use_inst_norm = True
core_tensor_res = 4
resolution = 256
alpha = 1
step_max = int(np.log2(resolution) - 2)
root_out_dir = f'{cnst.output_root}sample/'
num_smpl_to_eval_on = 1000
use_styled_conv_stylegan2 = True
flength = 5000
cam_t = np.array([0., 0., 0])
camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)
run_ids_1 = [29, ] # with sqrt(2)
# run_ids_1 = [7, 24, 8, 3]
# run_ids_1 = [7, 8, 3]
settings_for_runs = \
{24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},
7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},
3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},}
overlay_visualizer = OverLayViz()
# overlay_visualizer.setup_renderer(mesh_file=None)
flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')
fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()
for i, key in enumerate(fl_param_dict):
flame_param = fl_param_dict[key]
flame_param = np.hstack((flame_param['shape'], flame_param['exp'], flame_param['pose'], flame_param['cam'],
flame_param['tex'], flame_param['lit'].flatten()))
# tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157])
# flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1)
# import ipdb; ipdb.set_trace()
flm_params[i, :] = flame_param.astype('float32')
if i == num_smpl_to_eval_on - 1:
break
batch_size = 64
flame_decoder = overlay_visualizer.deca.flame.eval()
for run_idx in run_ids_1:
# import ipdb; ipdb.set_trace()
generator_1 = torch.nn.DataParallel(
StyledGenerator(embedding_vocab_size=69158,
rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'],
normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],
core_tensor_res=core_tensor_res,
w_truncation_factor=1.0,
apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'],
n_mlp=8)).cuda()
model_idx = settings_for_runs[run_idx]['model_idx']
ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')
generator_1.load_state_dict(ckpt1['generator_running'])
generator_1 = generator_1.eval()
# images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))
pbar.set_description('Generating_images')
flame_mesh_imgs = None
mdl_id = 'mdl2_'
if settings_for_runs[run_idx]['name'] == 'full_model':
mdl_id = 'mdl1_'
for batch_idx in pbar:
flm_batch = flm_params[batch_idx:batch_idx+batch_size, :]
flm_batch = torch.from_numpy(flm_batch).cuda()
flm_batch = position_to_given_location(flame_decoder, flm_batch)
batch_size_true = flm_batch.shape[0]
if settings_for_runs[run_idx]['normal_maps_as_cond'] or \
settings_for_runs[run_idx]['rendered_flame_as_condition']:
cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:]
shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]]
exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]]
pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]]
# import ipdb; ipdb.set_trace()
light_code = \
flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3))
texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:]
norma_map_img, _, _, _, rend_flm = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam)
rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1
norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear')
norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
else:
rend_flm = None
norma_map_img = None
gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'],
settings_for_runs[run_idx]['rendered_flame_as_condition'])
# torch.manual_seed(2)
identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long,
device='cuda')
mdl_1_gen_images = generic_utils.get_images_from_flame_params(
flame_params=gen_1_in.cpu().numpy(), pose=None,
model=generator_1,
step=step_max, alpha=alpha,
input_indices=identity_embeddings.cpu().numpy())
# import ipdb; ipdb.set_trace()
images = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy()
flame_mesh_imgs = torch.clamp(rend_flm, -1, 1).cpu().numpy()
save_path_current_id = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name'])
save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}',
images=(images + 1) / 2, show_prog_bar=True)
# save flame render
save_path_current_id_flm_rndr = os.path.join(root_out_dir, 'inter_model_comparison',
settings_for_runs[run_idx]['name'])
save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}',
images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True)
# save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img + 1) / 2).cpu().numpy())
# save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images + 1) / 2).cpu().numpy())
# save_set_of_images(path=save_path_this_expt, prefix='mdl2_', images=((mdl_2_gen_images + 1) / 2).cpu().numpy()) |
dymos/utils/test/test_hermite.py | kaushikponnapalli/dymos | 104 | 5761 | import unittest
import numpy as np
from numpy.testing import assert_almost_equal
from dymos.utils.hermite import hermite_matrices
class TestHermiteMatrices(unittest.TestCase):
def test_quadratic(self):
# Interpolate with values and rates provided at [-1, 1] in tau space
tau_given = [-1.0, 1.0]
tau_eval = np.linspace(-1, 1, 100)
# In time space use the boundaries [-2, 2]
dt_dtau = 4.0 / 2.0
# Provide values for y = t**2 and its time-derivative
y_given = [4.0, 4.0]
ydot_given = [-4.0, 4.0]
# Get the hermite matrices.
Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval)
# Interpolate y and ydot at tau_eval points in tau space.
y_i = np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given)
ydot_i = (1.0 / dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd, ydot_given)
# Compute our function as a point of comparison.
y_computed = (tau_eval * dt_dtau)**2
ydot_computed = 2.0 * (tau_eval * dt_dtau)
# Check results
assert_almost_equal(y_i, y_computed)
assert_almost_equal(ydot_i, ydot_computed)
def test_cubic(self):
# Interpolate with values and rates provided at [-1, 1] in tau space
tau_given = [-1.0, 0.0, 1.0]
tau_eval = np.linspace(-1, 1, 101)
# In time space use the boundaries [-2, 2]
dt_dtau = 4.0 / 2.0
# Provide values for y = t**3 and its time-derivative
y_given = [-8.0, 0.0, 8.0]
ydot_given = [12.0, 0.0, 12.0]
# Get the hermite matrices.
Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval)
# Interpolate y and ydot at tau_eval points in tau space.
y_i = np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given)
ydot_i = (1.0 / dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd, ydot_given)
# Compute our function as a point of comparison.
y_computed = (tau_eval * dt_dtau)**3
ydot_computed = 3.0 * (tau_eval * dt_dtau)**2
# Check results
assert_almost_equal(y_i, y_computed)
assert_almost_equal(ydot_i, ydot_computed)
if __name__ == '__main__': # pragma: no cover
unittest.main()
|
observations/r/zea_mays.py | hajime9652/observations | 199 | 5780 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def zea_mays(path):
"""Darwin's Heights of Cross- and Self-fertilized Zea May Pairs
Darwin (1876) studied the growth of pairs of zea may (aka corn)
seedlings, one produced by cross-fertilization and the other produced by
self-fertilization, but otherwise grown under identical conditions. His
goal was to demonstrate the greater vigour of the cross-fertilized
plants. The data recorded are the final height (inches, to the nearest
1/8th) of the plants in each pair.
In the *Design of Experiments*, Fisher (1935) used these data to
illustrate a paired t-test (well, a one-sample test on the mean
difference, `cross - self`). Later in the book (section 21), he used
this data to illustrate an early example of a non-parametric permutation
test, treating each paired difference as having (randomly) either a
positive or negative sign.
A data frame with 15 observations on the following 4 variables.
`pair`
pair number, a numeric vector
`pot`
pot, a factor with levels `1` `2` `3` `4`
`cross`
height of cross fertilized plant, a numeric vector
`self`
height of self fertilized plant, a numeric vector
`diff`
`cross - self` for each pair
<NAME>. (1876). *The Effect of Cross- and Self-fertilization in the
Vegetable Kingdom*, 2nd Ed. London: <NAME>.
<NAME>. and <NAME>. (1985) *Data: a collection of problems from
many fields for the student and research worker*. New York: Springer.
Data retrieved from: `https://www.stat.cmu.edu/StatDat/`
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `zea_mays.csv`.
Returns:
Tuple of np.ndarray `x_train` with 15 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'zea_mays.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/HistData/ZeaMays.csv'
maybe_download_and_extract(path, url,
save_file_name='zea_mays.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
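# Illustrative usage sketch (editor's addition, not part of the original module).
# The data directory passed below is an assumption; on first use the CSV is
# downloaded into it. The function is only defined, not called, so importing
# this module stays side-effect free.
def _example_zea_mays_usage(data_dir='~/data'):
  """Loads the Zea mays pairs and prints the shape and column names."""
  x_train, metadata = zea_mays(data_dir)
  # Per the docstring above, x_train should have 15 rows (pairs) and 5 columns.
  print(x_train.shape)
  print(list(metadata['columns']))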
|
recipes/libstudxml/all/conanfile.py | rockandsalt/conan-center-index | 562 | 5787 | from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools
from conans.errors import ConanInvalidConfiguration
import os
import shutil
required_conan_version = ">=1.33.0"
class LibStudXmlConan(ConanFile):
name = "libstudxml"
description = "A streaming XML pull parser and streaming XML serializer implementation for modern, standard C++."
topics = ("xml", "xml-parser", "serialization")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://www.codesynthesis.com/projects/libstudxml/"
license = "MIT"
settings = "os", "compiler", "build_type", "arch"
exports_sources = "patches/*"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
_autotools = None
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
self.requires("expat/2.4.1")
def validate(self):
if self.settings.compiler == "Visual Studio":
if tools.Version(self.settings.compiler.version) < "9":
raise ConanInvalidConfiguration("Visual Studio {} is not supported.".format(self.settings.compiler.version))
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def build_requirements(self):
if self.settings.compiler != "Visual Studio":
self.build_requires("gnu-config/cci.20201022")
self.build_requires("libtool/2.4.6")
if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
self.build_requires("msys2/cci.latest")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_autotools(self):
if not self._autotools:
args = ["--with-external-expat"]
if self.options.shared:
args.extend(["--enable-shared", "--disable-static"])
else:
args.extend(["--disable-shared", "--enable-static"])
self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
self._autotools.configure(configure_dir=self._source_subfolder, args=args)
return self._autotools
def _build_vs(self):
vc_ver = int(tools.Version(self.settings.compiler.version).major)
sln_path = None
def get_sln_path():
return os.path.join(self._source_subfolder, "libstudxml-vc{}.sln".format(vc_ver))
sln_path = get_sln_path()
while not os.path.exists(sln_path):
vc_ver -= 1
sln_path = get_sln_path()
proj_path = os.path.join(self._source_subfolder, "xml", "libstudxml-vc{}.vcxproj".format(vc_ver))
if not self.options.shared:
tools.replace_in_file(proj_path, "DynamicLibrary", "StaticLibrary")
tools.replace_in_file(proj_path, "LIBSTUDXML_DYNAMIC_LIB", "LIBSTUDXML_STATIC_LIB")
msbuild = MSBuild(self)
msbuild.build(sln_path, platforms={"x86": "Win32"})
@property
def _user_info_build(self):
return getattr(self, "user_info_build", self.deps_user_info)
def _build_autotools(self):
shutil.copy(self._user_info_build["gnu-config"].CONFIG_SUB,
os.path.join(self._source_subfolder, "config", "config.sub"))
shutil.copy(self._user_info_build["gnu-config"].CONFIG_GUESS,
os.path.join(self._source_subfolder, "config", "config.guess"))
if self.settings.compiler.get_safe("libcxx") == "libc++":
# libc++ includes a file called 'version', and since libstudxml adds source_subfolder as an
# include dir, libc++ ends up including their 'version' file instead, causing a compile error
tools.remove_files_by_mask(self._source_subfolder, "version")
with tools.chdir(self._source_subfolder):
self.run("{} -fiv".format(tools.get_env("AUTORECONF")), win_bash=tools.os_info.is_windows)
autotools = self._configure_autotools()
autotools.make()
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
if self.settings.compiler == "Visual Studio":
self._build_vs()
else:
self._build_autotools()
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
if self.settings.compiler == "Visual Studio":
self.copy("xml/value-traits", dst="include", src=self._source_subfolder)
self.copy("xml/serializer", dst="include", src=self._source_subfolder)
self.copy("xml/qname", dst="include", src=self._source_subfolder)
self.copy("xml/parser", dst="include", src=self._source_subfolder)
self.copy("xml/forward", dst="include", src=self._source_subfolder)
self.copy("xml/exception", dst="include", src=self._source_subfolder)
self.copy("xml/content", dst="include", src=self._source_subfolder)
self.copy("xml/*.ixx", dst="include", src=self._source_subfolder)
self.copy("xml/*.txx", dst="include", src=self._source_subfolder)
self.copy("xml/*.hxx", dst="include", src=self._source_subfolder)
self.copy("xml/*.h", dst="include", src=self._source_subfolder)
suffix = ""
if self.settings.arch == "x86_64":
suffix = "64"
if self.options.shared:
self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "lib" + suffix))
self.copy("*.dll", dst="bin", src=os.path.join(self._source_subfolder, "bin" + suffix))
else:
self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "bin" + suffix))
else:
autotools = self._configure_autotools()
autotools.install()
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "libstudxml.la")
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "share"))
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.names["pkg_config"] = "libstudxml"
# If built with makefile, static library mechanism is provided by their buildsystem already
if self.settings.compiler == "Visual Studio" and not self.options.shared:
self.cpp_info.defines = ["LIBSTUDXML_STATIC_LIB=1"]
|
plaso/formatters/interface.py | jonathan-greig/plaso | 1,253 | 5813 | <gh_stars>1000+
# -*- coding: utf-8 -*-
"""This file contains the event formatters interface classes.
The l2t_csv and other formats are dependent on a message field,
referred to as description_long and description_short in l2t_csv.
Plaso no longer stores these field explicitly.
A formatter, with a format string definition, is used to convert
the event object values into a formatted string that is similar
to the description_long and description_short field.
"""
import abc
import re
from plaso.formatters import logger
class EventFormatterHelper(object):
"""Base class of helper for formatting event data."""
@abc.abstractmethod
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
class BooleanEventFormatterHelper(EventFormatterHelper):
"""Helper for formatting boolean event data.
Attributes:
input_attribute (str): name of the attribute that contains the boolean
input value.
output_attribute (str): name of the attribute where the boolean output
value should be stored.
value_if_false (str): output value if the boolean input value is False.
value_if_true (str): output value if the boolean input value is True.
"""
def __init__(
self, input_attribute=None, output_attribute=None, value_if_false=None,
value_if_true=None):
"""Initialized a helper for formatting boolean event data.
Args:
input_attribute (Optional[str]): name of the attribute that contains
the boolean input value.
output_attribute (Optional[str]): name of the attribute where the
boolean output value should be stored.
value_if_false (str): output value if the boolean input value is False.
value_if_true (str): output value if the boolean input value is True.
"""
super(BooleanEventFormatterHelper, self).__init__()
self.input_attribute = input_attribute
self.output_attribute = output_attribute
self.value_if_false = value_if_false
self.value_if_true = value_if_true
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
input_value = event_values.get(self.input_attribute, None)
if input_value:
output_value = self.value_if_true
else:
output_value = self.value_if_false
event_values[self.output_attribute] = output_value
class CustomEventFormatterHelper(EventFormatterHelper):
"""Base class for a helper for custom formatting of event data."""
DATA_TYPE = ''
IDENTIFIER = ''
@abc.abstractmethod
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
class EnumerationEventFormatterHelper(EventFormatterHelper):
"""Helper for formatting enumeration event data.
Attributes:
default (str): default value.
input_attribute (str): name of the attribute that contains the enumeration
input value.
output_attribute (str): name of the attribute where the enumeration output
value should be stored.
values (dict[str, str]): mapping of enumeration input and output values.
"""
def __init__(
self, default=None, input_attribute=None, output_attribute=None,
values=None):
"""Initialized a helper for formatting enumeration event data.
Args:
default (Optional[str]): default value.
input_attribute (Optional[str]): name of the attribute that contains
the enumeration input value.
output_attribute (Optional[str]): name of the attribute where the
enumeration output value should be stored.
values (Optional[dict[str, str]]): mapping of enumeration input and
output values.
"""
super(EnumerationEventFormatterHelper, self).__init__()
self.default = default
self.input_attribute = input_attribute
self.output_attribute = output_attribute
self.values = values or {}
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
If the default value is None and there is no corresponding enumeration value
then the original value is used.
Args:
event_values (dict[str, object]): event values.
"""
input_value = event_values.get(self.input_attribute, None)
if input_value is not None:
default_value = self.default
if default_value is None:
default_value = input_value
event_values[self.output_attribute] = self.values.get(
input_value, default_value)
class FlagsEventFormatterHelper(EventFormatterHelper):
"""Helper for formatting flags event data.
Attributes:
input_attribute (str): name of the attribute that contains the flags
input value.
output_attribute (str): name of the attribute where the flags output
value should be stored.
values (dict[str, str]): mapping of flags input and output values.
"""
def __init__(
self, input_attribute=None, output_attribute=None, values=None):
"""Initialized a helper for formatting flags event data.
Args:
input_attribute (Optional[str]): name of the attribute that contains
the flags input value.
output_attribute (Optional[str]): name of the attribute where the
flags output value should be stored.
values (Optional[dict[str, str]]): mapping of flags input and output
values.
"""
super(FlagsEventFormatterHelper, self).__init__()
self.input_attribute = input_attribute
self.output_attribute = output_attribute
self.values = values or {}
def FormatEventValues(self, event_values):
"""Formats event values using the helper.
Args:
event_values (dict[str, object]): event values.
"""
input_value = event_values.get(self.input_attribute, None)
if input_value is None:
return
output_values = []
for flag, mapped_value in self.values.items():
if flag & input_value:
output_values.append(mapped_value)
event_values[self.output_attribute] = ', '.join(output_values)
class EventFormatter(object):
"""Base class to format event values.
Attributes:
custom_helpers (list[str]): identifiers of custom event formatter helpers.
helpers (list[EventFormatterHelper]): event formatter helpers.
"""
# The format string can be defined as:
# {name}, {name:format}, {name!conversion}, {name!conversion:format}
_FORMAT_STRING_ATTRIBUTE_NAME_RE = re.compile(
'{([a-z][a-zA-Z0-9_]*)[!]?[^:}]*[:]?[^}]*}')
def __init__(self, data_type='internal'):
"""Initializes an event formatter.
Args:
data_type (Optional[str]): unique identifier for the event data supported
by the formatter.
"""
super(EventFormatter, self).__init__()
self._data_type = data_type
self._format_string_attribute_names = None
self.custom_helpers = []
self.helpers = []
@property
def data_type(self):
"""str: unique identifier for the event data supported by the formatter."""
return self._data_type.lower()
def _FormatMessage(self, format_string, event_values):
"""Determines the formatted message.
Args:
format_string (str): message format string.
event_values (dict[str, object]): event values.
Returns:
str: formatted message.
"""
try:
message_string = format_string.format(**event_values)
except KeyError as exception:
data_type = event_values.get('data_type', 'N/A')
display_name = event_values.get('display_name', 'N/A')
event_identifier = event_values.get('uuid', 'N/A')
parser_chain = event_values.get('parser', 'N/A')
error_message = (
'unable to format string: "{0:s}" missing required event '
'value: {1!s}').format(format_string, exception)
error_message = (
'Event: {0:s} data type: {1:s} display name: {2:s} '
'parser chain: {3:s} with error: {4:s}').format(
event_identifier, data_type, display_name, parser_chain,
error_message)
logger.error(error_message)
attribute_values = []
for attribute, value in event_values.items():
attribute_values.append('{0:s}: {1!s}'.format(attribute, value))
message_string = ' '.join(attribute_values)
except UnicodeDecodeError as exception:
data_type = event_values.get('data_type', 'N/A')
display_name = event_values.get('display_name', 'N/A')
event_identifier = event_values.get('uuid', 'N/A')
parser_chain = event_values.get('parser', 'N/A')
error_message = 'Unicode decode error: {0!s}'.format(exception)
error_message = (
'Event: {0:s} data type: {1:s} display name: {2:s} '
'parser chain: {3:s} with error: {4:s}').format(
event_identifier, data_type, display_name, parser_chain,
error_message)
logger.error(error_message)
message_string = ''
# Strip carriage return and linefeed from the message strings.
# Using replace function here because it is faster than re.sub() or
# string.strip().
return message_string.replace('\r', '').replace('\n', '')
def FormatEventValues(self, event_values):
"""Formats event values using the helpers.
Args:
event_values (dict[str, object]): event values.
"""
for helper in self.helpers:
helper.FormatEventValues(event_values)
@abc.abstractmethod
def GetFormatStringAttributeNames(self):
"""Retrieves the attribute names in the format string.
Returns:
set(str): attribute names.
"""
# pylint: disable=unused-argument
def AddCustomHelper(
self, identifier, input_attribute=None, output_attribute=None):
"""Adds a custom event formatter helper.
Args:
identifier (str): identifier.
input_attribute (Optional[str]): name of the attribute that contains
the input value.
output_attribute (Optional[str]): name of the attribute where the
output value should be stored.
"""
self.custom_helpers.append(identifier)
def AddHelper(self, helper):
"""Adds an event formatter helper.
Args:
helper (EventFormatterHelper): event formatter helper to add.
"""
self.helpers.append(helper)
@abc.abstractmethod
def GetMessage(self, event_values):
"""Determines the message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: message.
"""
@abc.abstractmethod
def GetMessageShort(self, event_values):
"""Determines the short message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: short message.
"""
class BasicEventFormatter(EventFormatter):
"""Format event values using a message format string.
Attributes:
custom_helpers (list[str]): identifiers of custom event formatter helpers.
helpers (list[EventFormatterHelper]): event formatter helpers.
"""
def __init__(
self, data_type='basic', format_string=None, format_string_short=None):
"""Initializes a basic event formatter.
The syntax of the format strings is similar to that of format() where
the placeholder for a certain event object attribute is defined as
{attribute_name}.
Args:
data_type (Optional[str]): unique identifier for the event data supported
by the formatter.
format_string (Optional[str]): (long) message format string.
format_string_short (Optional[str]): short message format string.
"""
super(BasicEventFormatter, self).__init__(data_type=data_type)
self._format_string_attribute_names = None
self._format_string = format_string
self._format_string_short = format_string_short
def GetFormatStringAttributeNames(self):
"""Retrieves the attribute names in the format string.
Returns:
set(str): attribute names.
"""
if self._format_string_attribute_names is None:
self._format_string_attribute_names = (
self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(
self._format_string))
return set(self._format_string_attribute_names)
def GetMessage(self, event_values):
"""Determines the message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: message.
"""
return self._FormatMessage(self._format_string, event_values)
def GetMessageShort(self, event_values):
"""Determines the short message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: short message.
"""
if self._format_string_short:
format_string = self._format_string_short
else:
format_string = self._format_string
short_message_string = self._FormatMessage(format_string, event_values)
# Truncate the short message string if necessary.
if len(short_message_string) > 80:
short_message_string = '{0:s}...'.format(short_message_string[:77])
return short_message_string
class ConditionalEventFormatter(EventFormatter):
"""Conditionally format event values using format string pieces."""
_DEFAULT_FORMAT_STRING_SEPARATOR = ' '
def __init__(
self, data_type='conditional', format_string_pieces=None,
format_string_separator=None, format_string_short_pieces=None):
"""Initializes a conditional event formatter.
The syntax of the format string pieces is similar to that of the basic event
formatter (BasicEventFormatter). Every format string piece should contain
at most one unique attribute name. Format string pieces without an
attribute name are supported.
Args:
data_type (Optional[str]): unique identifier for the event data supported
by the formatter.
format_string_pieces (Optional[list[str]]): (long) message format string
pieces.
format_string_separator (Optional[str]): string by which separate format
string pieces should be joined.
format_string_short_pieces (Optional[list[str]]): short message format
string pieces.
"""
if format_string_separator is None:
format_string_separator = self._DEFAULT_FORMAT_STRING_SEPARATOR
super(ConditionalEventFormatter, self).__init__(data_type=data_type)
self._format_string_pieces = format_string_pieces or []
self._format_string_pieces_map = []
self._format_string_separator = format_string_separator
self._format_string_short_pieces = format_string_short_pieces or []
self._format_string_short_pieces_map = []
def _CreateFormatStringMap(
self, format_string_pieces, format_string_pieces_map):
"""Creates a format string map.
The format string pieces map is a list containing the attribute name
per format string piece. E.g. ["Description: {description}"] would be
mapped to: [0] = "description". If the string piece does not contain
an attribute name it is treated as text that does not needs formatting.
Args:
format_string_pieces (list[str]): format string pieces.
format_string_pieces_map (list[str]): format string pieces map.
Raises:
RuntimeError: when an invalid format string piece is encountered.
"""
for format_string_piece in format_string_pieces:
attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(
format_string_piece)
if len(set(attribute_names)) > 1:
raise RuntimeError((
'Invalid format string piece: [{0:s}] contains more than 1 '
'attribute name.').format(format_string_piece))
if not attribute_names:
# The text format string piece is stored as an empty map entry to keep
# the index in the map equal to the format string pieces.
attribute_name = ''
else:
attribute_name = attribute_names[0]
format_string_pieces_map.append(attribute_name)
def _CreateFormatStringMaps(self):
"""Creates the format string maps.
Maps are built of the string pieces and their corresponding attribute
name to optimize conditional string formatting.
Raises:
RuntimeError: when an invalid format string piece is encountered.
"""
self._format_string_pieces_map = []
self._CreateFormatStringMap(
self._format_string_pieces, self._format_string_pieces_map)
self._format_string_short_pieces_map = []
self._CreateFormatStringMap(
self._format_string_short_pieces, self._format_string_short_pieces_map)
def _ConditionalFormatMessage(
self, format_string_pieces, format_string_pieces_map, event_values):
"""Determines the conditional formatted message.
Args:
format_string_pieces (dict[str, str]): format string pieces.
format_string_pieces_map (list[int, str]): format string pieces map.
event_values (dict[str, object]): event values.
Returns:
str: conditional formatted message.
Raises:
RuntimeError: when an invalid format string piece is encountered.
"""
string_pieces = []
for map_index, attribute_name in enumerate(format_string_pieces_map):
if not attribute_name or event_values.get(
attribute_name, None) is not None:
string_pieces.append(format_string_pieces[map_index])
format_string = self._format_string_separator.join(string_pieces)
return self._FormatMessage(format_string, event_values)
def GetFormatStringAttributeNames(self):
"""Retrieves the attribute names in the format string.
Returns:
set(str): attribute names.
"""
if self._format_string_attribute_names is None:
self._format_string_attribute_names = []
for format_string_piece in self._format_string_pieces:
attribute_names = self._FORMAT_STRING_ATTRIBUTE_NAME_RE.findall(
format_string_piece)
if attribute_names:
self._format_string_attribute_names.extend(attribute_names)
return set(self._format_string_attribute_names)
def GetMessage(self, event_values):
"""Determines the message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: message.
"""
if not self._format_string_pieces_map:
self._CreateFormatStringMaps()
return self._ConditionalFormatMessage(
self._format_string_pieces, self._format_string_pieces_map,
event_values)
def GetMessageShort(self, event_values):
"""Determines the short message.
Args:
event_values (dict[str, object]): event values.
Returns:
str: short message.
"""
if not self._format_string_pieces_map:
self._CreateFormatStringMaps()
if (self._format_string_short_pieces and
self._format_string_short_pieces != ['']):
format_string_pieces = self._format_string_short_pieces
format_string_pieces_map = self._format_string_short_pieces_map
else:
format_string_pieces = self._format_string_pieces
format_string_pieces_map = self._format_string_pieces_map
short_message_string = self._ConditionalFormatMessage(
format_string_pieces, format_string_pieces_map, event_values)
# Truncate the short message string if necessary.
if len(short_message_string) > 80:
short_message_string = '{0:s}...'.format(short_message_string[:77])
return short_message_string
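# Worked example (hypothetical values; the concrete formatter subclass and its
# format strings are not part of this excerpt): with
#   _format_string_pieces = ['Description: {description}', 'PID: {pid}']
# _CreateFormatStringMaps() builds the map ['description', 'pid'], and
# GetMessage({'description': 'boot', 'pid': None}) keeps only the pieces whose
# attribute is present in the event values, yielding 'Description: boot'.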
|
audiomate/annotations/label_list.py | CostanzoPablo/audiomate | 133 | 5820 | import collections
import copy
import intervaltree
from .label import Label
class LabelList:
"""
Represents a list of labels which describe an utterance.
An utterance can have multiple label-lists.
Args:
idx (str): An unique identifier for the label-list
within a corpus for one utterance.
labels (list): The list containing the
:py:class:`audiomate.annotations.Label`.
Attributes:
utterance (Utterance): The utterance this label-list is belonging to.
label_tree (IntervalTree): The interval-tree storing the labels.
Example:
>>> label_list = LabelList(idx='transcription', labels=[
>>> Label('this', 0, 2),
>>> Label('is', 2, 4),
>>> Label('timmy', 4, 8)
>>> ])
"""
__slots__ = ['idx', 'label_tree', 'utterance']
def __init__(self, idx='default', labels=None):
self.idx = idx
self.utterance = None
self.label_tree = intervaltree.IntervalTree()
if labels is not None:
self.update(labels)
def __eq__(self, other):
data_this = (self.idx, self.label_tree)
data_other = (other.idx, other.label_tree)
return data_this == data_other
def __iter__(self):
for interval in self.label_tree:
yield interval.data
def __len__(self):
return self.label_tree.__len__()
def __copy__(self):
# utterance is ignored intentionally,
# since it is kind of a weak ref
return LabelList(
idx=self.idx,
labels=[iv.data for iv in self.label_tree]
)
def __deepcopy__(self, memo):
# utterance is ignored intentionally,
# since it is kind of a weak ref
return LabelList(
idx=self.idx,
labels=copy.deepcopy([iv.data for iv in self.label_tree], memo)
)
@property
def labels(self):
""" Return list of labels. """
return list(self)
@property
def start(self):
""" Return start of the earliest starting label (lower bound). """
return self.label_tree.begin()
@property
def end(self):
""" Return end of the lastly ending label (upper bound). """
return self.label_tree.end()
@property
def total_length(self):
"""
Return the cumulative length of all labels
(Number of characters).
"""
return sum(label.length for label in self.labels)
#
# Alteration
#
def add(self, label):
"""
Add a label to the end of the list.
Args:
label (Label): The label to add.
"""
label.label_list = self
self.label_tree.addi(label.start, label.end, label)
def addl(self, value, start=0.0, end=float('inf')):
""" Shortcut for ``add(Label(value, start, end))``. """
self.add(Label(value, start=start, end=end))
def update(self, labels):
"""
Add a list of labels to the end of the list.
Args:
labels (list): Labels to add.
"""
ivs = []
for label in labels:
label.label_list = self
ivs.append(intervaltree.Interval(label.start, label.end, label))
self.label_tree.update(ivs)
def apply(self, fn):
"""
Apply the given function `fn` to every label in this label list.
`fn` is a function of one argument that receives the current label
which can then be edited in place.
Args:
fn (func): Function to apply to every label
Example:
>>> ll = LabelList(labels=[
... Label('a_label', 1.0, 2.0),
... Label('another_label', 2.0, 3.0)
... ])
>>> def shift_labels(label):
... label.start += 1.0
... label.end += 1.0
...
>>> ll.apply(shift_labels)
>>> ll.labels
[Label(a_label, 2.0, 3.0), Label(another_label, 3.0, 4.0)]
"""
for label in self.labels:
fn(label)
def merge_overlaps(self, threshold=0.0):
"""
Merge overlapping labels with the same value.
Two labels are considered overlapping,
if ``l2.start - l1.end < threshold``.
Args:
threshold (float): Maximal distance between two labels
to be considered as overlapping.
(default: 0.0)
Example:
>>> ll = LabelList(labels=[
... Label('a_label', 1.0, 2.0),
... Label('a_label', 1.5, 2.7),
... Label('b_label', 1.0, 2.0),
... ])
            >>> ll.merge_overlaps()
>>> ll.labels
[
Label('a_label', 1.0, 2.7),
Label('b_label', 1.0, 2.0),
]
"""
updated_labels = []
all_intervals = self.label_tree.copy()
        # recursively find a group of overlapping labels with the same value
def recursive_overlaps(interval):
range_start = interval.begin - threshold
range_end = interval.end + threshold
direct_overlaps = all_intervals.overlap(range_start, range_end)
all_overlaps = [interval]
all_intervals.discard(interval)
for overlap in direct_overlaps:
if overlap.data.value == interval.data.value:
all_overlaps.extend(recursive_overlaps(overlap))
return all_overlaps
# For every remaining interval
# - Find overlapping intervals recursively
# - Remove them
# - Create a concatenated new label
while not all_intervals.is_empty():
next_interval = list(all_intervals)[0]
overlapping = recursive_overlaps(next_interval)
ov_start = float('inf')
ov_end = 0.0
ov_value = next_interval.data.value
for overlap in overlapping:
ov_start = min(ov_start, overlap.begin)
ov_end = max(ov_end, overlap.end)
all_intervals.discard(overlap)
updated_labels.append(Label(
ov_value,
ov_start,
ov_end
))
# Replace the old labels with the updated ones
self.label_tree.clear()
self.update(updated_labels)
#
# Statistics
#
def label_total_duration(self):
"""
Return for each distinct label value the total duration of
all occurrences.
Returns:
dict: A dictionary containing for every label-value (key)
the total duration in seconds (value).
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3, 5),
>>> Label('b', 5, 8),
>>> Label('a', 8, 10),
>>> Label('b', 10, 14),
>>> Label('a', 15, 18.5)
>>> ])
>>> ll.label_total_duration()
            {'a': 7.5, 'b': 7.0}
"""
durations = collections.defaultdict(float)
for label in self:
durations[label.value] += label.duration
return durations
def label_values(self):
"""
        Return a list of all occurring label values.
Returns:
list: Lexicographically sorted list (str) of label values.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14),
>>> Label('d', 15, 18)
>>> ])
>>> ll.label_values()
['a', 'b', 'c', 'd']
"""
all_labels = {l.value for l in self}
return sorted(all_labels)
def label_count(self):
"""
Return for each label the number of occurrences within the list.
Returns:
dict: A dictionary containing for every label-value (key)
the number of occurrences (value).
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('a', 7.2, 10.5),
>>> Label('b', 10.5, 14),
>>> Label('a', 15, 18)
>>> ])
>>> ll.label_count()
            {'a': 3, 'b': 2}
"""
occurrences = collections.defaultdict(int)
for label in self:
occurrences[label.value] += 1
return occurrences
def all_tokens(self, delimiter=' '):
"""
Return a list of all tokens occurring in the label-list.
Args:
delimiter (str): The delimiter used to split labels into tokens.
See :meth:`audiomate.annotations.Label.tokenized`
Returns:
:class:`set`: A set of distinct tokens.
"""
tokens = set()
for label in self:
tokens = tokens.union(set(label.tokenized(delimiter=delimiter)))
return tokens
#
# Query Label Values
#
def join(self, delimiter=' ', overlap_threshold=0.1):
"""
Return a string with all labels concatenated together.
The order of the labels is defined by the start of the label.
If the overlapping between two labels is greater than
``overlap_threshold``, an Exception is thrown.
Args:
delimiter (str): A string to join two consecutive labels.
overlap_threshold (float): Maximum overlap between two
consecutive labels.
Returns:
str: A string with all labels concatenated together.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('c', start=7.0, end=10.2),
>>> Label('d', start=10.3, end=14.0)
>>> ])
>>> ll.join(' - ')
'a - b - c - d'
"""
sorted_by_start = sorted(self.labels)
concat_values = []
last_label_end = None
for label in sorted_by_start:
if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0):
concat_values.append(label.value)
last_label_end = label.end
else:
raise ValueError('Labels overlap, not able to define the correct order')
return delimiter.join(concat_values)
def tokenized(self, delimiter=' ', overlap_threshold=0.1):
"""
        Return an ordered list of tokens based on all labels.
        Joins all tokens from all labels (``label.tokenized()``).
If the overlapping between two labels is greater than
``overlap_threshold``, an Exception is thrown.
Args:
delimiter (str): The delimiter used to split labels into tokens.
(default: space)
overlap_threshold (float): Maximum overlap between two
consecutive labels.
Returns:
str: A list containing tokens of all labels ordered according
to the label order.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a d q', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('c a', start=7.0, end=10.2),
>>> Label('f g', start=10.3, end=14.0)
>>> ])
>>> ll.tokenized(delimiter=' ', overlap_threshold=0.1)
['a', 'd', 'q', 'b', 'c', 'a', 'f', 'g']
"""
sorted_by_start = sorted(self.labels)
tokens = []
last_label_end = None
for label in sorted_by_start:
if last_label_end is None or (last_label_end - label.start < overlap_threshold and last_label_end > 0):
tokens.extend(label.tokenized(delimiter=delimiter))
last_label_end = label.end
else:
raise ValueError('Labels overlap, not able to define the correct order')
return tokens
#
# Restructuring
#
def separated(self):
"""
Create a separate Label-List for every distinct label-value.
Returns:
dict: A dictionary with distinct label-values as keys. Every value
is a LabelList containing only labels with the same value.
Example:
>>> ll = LabelList(idx='some', labels=[
>>> Label('a', start=0, end=4),
>>> Label('b', start=3.95, end=6.0),
>>> Label('a', start=7.0, end=10.2),
>>> Label('b', start=10.3, end=14.0)
>>> ])
            >>> s = ll.separated()
>>> s['a'].labels
[Label('a', start=0, end=4), Label('a', start=7.0, end=10.2)]
>>> s['b'].labels
[Label('b', start=3.95, end=6.0), Label('b', start=10.3, end=14.0)]
"""
separated_lls = collections.defaultdict(LabelList)
for label in self.labels:
separated_lls[label.value].add(label)
for ll in separated_lls.values():
ll.idx = self.idx
return separated_lls
def labels_in_range(self, start, end, fully_included=False):
"""
        Return a list of labels that are within the given range.
        Labels that only partially overlap the range are also included.
Args:
start(float): Start-time in seconds.
end(float): End-time in seconds.
fully_included(bool): If ``True``, only labels fully included
in the range are returned. Otherwise
also overlapping ones are returned.
(default ``False``)
Returns:
list: List of labels in the range.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14)
>>>])
>>> ll.labels_in_range(6.2, 10.1)
[Label('b', 5.1, 8.9), Label('c', 7.2, 10.5)]
"""
if fully_included:
intervals = self.label_tree.envelop(start, end)
else:
intervals = self.label_tree.overlap(start, end)
return [iv.data for iv in intervals]
def ranges(self, yield_ranges_without_labels=False, include_labels=None):
"""
Generate all ranges of the label-list. A range is defined
as a part of the label-list for which the same labels are defined.
Args:
yield_ranges_without_labels(bool): If True also yields ranges for
which no labels are defined.
include_labels(list): If not empty, only the label values in
the list will be considered.
Returns:
generator: A generator which yields one range
(tuple start/end/list-of-labels) at a time.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 3.2, 4.5),
>>> Label('b', 5.1, 8.9),
>>> Label('c', 7.2, 10.5),
>>> Label('d', 10.5, 14)
>>>])
>>> ranges = ll.ranges()
>>> next(ranges)
            (3.2, 4.5, [<audiomate.annotations.Label at 0x1090527c8>])
>>> next(ranges)
(4.5, 5.1, [])
>>> next(ranges)
            (5.1, 7.2, [<audiomate.annotations.label.Label at 0x1090484c8>])
"""
tree_copy = self.label_tree.copy()
# Remove labels not included
if include_labels is not None:
for iv in list(tree_copy):
if iv.data.value not in include_labels:
tree_copy.remove(iv)
def reduce(x, y):
x.append(y)
return x
# Split labels when overlapping and merge equal ranges to a list of labels
tree_copy.split_overlaps()
tree_copy.merge_equals(data_reducer=reduce, data_initializer=[])
intervals = sorted(tree_copy)
last_end = intervals[0].begin
# yield range by range
for iv in intervals:
# yield an empty range if necessary
if yield_ranges_without_labels and iv.begin > last_end:
yield (last_end, iv.begin, [])
yield (iv.begin, iv.end, iv.data)
last_end = iv.end
def split(self, cutting_points, shift_times=False, overlap=0.0):
"""
Split the label-list into x parts and return them as new label-lists.
x is defined by the number of cutting-points
(``x == len(cutting_points) + 1``).
The result is a list of label-lists corresponding to each part.
Label-list 0 contains labels between ``0`` and ``cutting_points[0]``.
Label-list 1 contains labels between ``cutting_points[0]`` and
``cutting_points[1]``. And so on.
Args:
cutting_points(list): List of floats defining the points in seconds,
                                  where the label-list is split.
shift_times(bool): If True, start and end-time are shifted in
                               the split label-lists. So the start is relative
to the cutting point and not to the beginning
of the original label-list.
overlap(float): Amount of overlap in seconds. This amount is
subtracted from a start-cutting-point, and added
to a end-cutting-point.
Returns:
            list: A list of :class:`audiomate.annotations.LabelList`.
Example:
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>> Label('c', 11, 15),
>>>])
>>>
>>> res = ll.split([4.1, 8.9, 12.0])
>>> len(res)
4
>>> res[0].labels
[Label('a', 0.0, 4.1)]
>>> res[1].labels
[
Label('a', 4.1, 5.0),
Label('b', 5.0, 8.9)
]
>>> res[2].labels
[
Label('b', 8.9, 10.0),
Label('c', 11.0, 12.0)
]
>>> res[3].labels
[Label('c', 12.0, 15.0)]
If ``shift_times = True``, the times are adjusted to be relative
to the cutting-points for every label-list but the first.
>>> ll = LabelList(labels=[
>>> Label('a', 0, 5),
>>> Label('b', 5, 10),
>>>])
>>>
            >>> res = ll.split([4.6], shift_times=True)
            >>> len(res)
            2
>>> res[0].labels
[Label('a', 0.0, 4.6)]
>>> res[1].labels
[
Label('a', 0.0, 0.4),
Label('b', 0.4, 5.4)
]
"""
if len(cutting_points) == 0:
raise ValueError('At least one cutting-point is needed!')
# we have to loop in sorted order
cutting_points = sorted(cutting_points)
splits = []
iv_start = 0.0
for i in range(len(cutting_points) + 1):
if i < len(cutting_points):
iv_end = cutting_points[i]
else:
iv_end = float('inf')
# get all intervals intersecting range
intervals = self.label_tree.overlap(
iv_start - overlap,
iv_end + overlap
)
cp_splits = LabelList(idx=self.idx)
# Extract labels from intervals with updated times
for iv in intervals:
label = copy.deepcopy(iv.data)
label.start = max(0, iv_start - overlap, label.start)
label.end = min(iv_end + overlap, label.end)
if shift_times:
orig_start = max(0, iv_start - overlap)
label.start -= orig_start
label.end -= orig_start
cp_splits.add(label)
splits.append(cp_splits)
iv_start = iv_end
return splits
#
# Convenience Constructors
#
@classmethod
def create_single(cls, value, idx='default'):
"""
Create a label-list with a single label
containing the given value.
"""
return LabelList(idx=idx, labels=[
Label(value=value)
])
@classmethod
def with_label_values(cls, values, idx='default'):
"""
Create a new label-list containing labels with the given values.
All labels will have default start/end values of 0 and ``inf``.
Args:
values(list): List of values(str) that should be created and
appended to the label-list.
idx(str): The idx of the label-list.
Returns:
(LabelList): New label-list.
Example:
>>> ll = LabelList.with_label_values(['a', 'x', 'z'], idx='letters')
>>> ll.idx
'letters'
>>> ll.labels
[
Label('a', 0, inf),
Label('x', 0, inf),
Label('z', 0, inf),
]
"""
ll = LabelList(idx=idx)
for label_value in values:
ll.add(Label(label_value))
return ll
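# Minimal end-to-end sketch (a guarded demo; the label values and times below
# are made-up example data, not part of audiomate):
if __name__ == '__main__':
    demo_ll = LabelList(idx='words', labels=[
        Label('hello', 0.0, 1.2),
        Label('hello', 1.1, 2.0),
        Label('world', 2.5, 3.0),
    ])
    # Overlapping labels with the same value are merged into a single label.
    demo_ll.merge_overlaps()
    # Total duration per value, e.g. {'hello': 2.0, 'world': 0.5}.
    print(demo_ll.label_total_duration())
    # Labels joined in order of their start times: 'hello world'.
    print(demo_ll.join(delimiter=' '))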
|
astropy/io/fits/hdu/streaming.py | jayvdb/astropy | 445 | 5824 | <gh_stars>100-1000
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import os
from .base import _BaseHDU, BITPIX2DTYPE
from .hdulist import HDUList
from .image import PrimaryHDU
from astropy.io.fits.file import _File
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import fileobj_name
class StreamingHDU:
"""
A class that provides the capability to stream data to a FITS file
instead of requiring data to all be written at once.
The following pseudocode illustrates its use::
header = astropy.io.fits.Header()
for all the cards you need in the header:
header[key] = (value, comment)
shdu = astropy.io.fits.StreamingHDU('filename.fits', header)
for each piece of data:
shdu.write(data)
shdu.close()
"""
def __init__(self, name, header):
"""
Construct a `StreamingHDU` object given a file name and a header.
Parameters
----------
name : file path, file object, or file like object
The file to which the header and data will be streamed. If opened,
the file object must be opened in a writeable binary mode such as
'wb' or 'ab+'.
header : `Header` instance
The header object associated with the data to be written
to the file.
Notes
-----
The file will be opened and the header appended to the end of
the file. If the file does not already exist, it will be
created, and if the header represents a Primary header, it
will be written to the beginning of the file. If the file
does not exist and the provided header is not a Primary
header, a default Primary HDU will be inserted at the
beginning of the file and the provided header will be added as
the first extension. If the file does already exist, but the
provided header represents a Primary header, the header will
be modified to an image extension header and appended to the
end of the file.
"""
if isinstance(name, gzip.GzipFile):
raise TypeError('StreamingHDU not supported for GzipFile objects.')
self._header = header.copy()
# handle a file object instead of a file name
filename = fileobj_name(name) or ''
# Check if the file already exists. If it does not, check to see
# if we were provided with a Primary Header. If not we will need
# to prepend a default PrimaryHDU to the file before writing the
# given header.
newfile = False
if filename:
if not os.path.exists(filename) or os.path.getsize(filename) == 0:
newfile = True
elif (hasattr(name, 'len') and name.len == 0):
newfile = True
if newfile:
if 'SIMPLE' not in self._header:
hdulist = HDUList([PrimaryHDU()])
hdulist.writeto(name, 'exception')
else:
# This will not be the first extension in the file so we
# must change the Primary header provided into an image
# extension header.
if 'SIMPLE' in self._header:
self._header.set('XTENSION', 'IMAGE', 'Image extension',
after='SIMPLE')
del self._header['SIMPLE']
if 'PCOUNT' not in self._header:
dim = self._header['NAXIS']
if dim == 0:
dim = ''
else:
dim = str(dim)
self._header.set('PCOUNT', 0, 'number of parameters',
after='NAXIS' + dim)
if 'GCOUNT' not in self._header:
self._header.set('GCOUNT', 1, 'number of groups',
after='PCOUNT')
self._ffo = _File(name, 'append')
# TODO : Fix this once the HDU writing API is cleaned up
tmp_hdu = _BaseHDU()
# Passing self._header as an argument to _BaseHDU() will cause its
# values to be modified in undesired ways...need to have a better way
# of doing this
tmp_hdu._header = self._header
self._header_offset = tmp_hdu._writeheader(self._ffo)[0]
self._data_offset = self._ffo.tell()
self._size = self.size
if self._size != 0:
self.writecomplete = False
else:
self.writecomplete = True
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def write(self, data):
"""
Write the given data to the stream.
Parameters
----------
data : ndarray
Data to stream to the file.
Returns
-------
writecomplete : int
Flag that when `True` indicates that all of the required
data has been written to the stream.
Notes
-----
Only the amount of data specified in the header provided to the class
constructor may be written to the stream. If the provided data would
cause the stream to overflow, an `OSError` exception is
raised and the data is not written. Once sufficient data has been
written to the stream to satisfy the amount specified in the header,
the stream is padded to fill a complete FITS block and no more data
will be accepted. An attempt to write more data after the stream has
been filled will raise an `OSError` exception. If the
dtype of the input data does not match what is expected by the header,
a `TypeError` exception is raised.
"""
size = self._ffo.tell() - self._data_offset
if self.writecomplete or size + data.nbytes > self._size:
raise OSError('Attempt to write more data to the stream than the '
'header specified.')
if BITPIX2DTYPE[self._header['BITPIX']] != data.dtype.name:
raise TypeError('Supplied data does not match the type specified '
'in the header.')
if data.dtype.str[0] != '>':
# byteswap little endian arrays before writing
output = data.byteswap()
else:
output = data
self._ffo.writearray(output)
if self._ffo.tell() - self._data_offset == self._size:
# the stream is full so pad the data to the next FITS block
self._ffo.write(_pad_length(self._size) * '\0')
self.writecomplete = True
self._ffo.flush()
return self.writecomplete
@property
def size(self):
"""
Return the size (in bytes) of the data portion of the HDU.
"""
size = 0
naxis = self._header.get('NAXIS', 0)
if naxis > 0:
simple = self._header.get('SIMPLE', 'F')
random_groups = self._header.get('GROUPS', 'F')
if simple == 'T' and random_groups == 'T':
groups = 1
else:
groups = 0
size = 1
for idx in range(groups, naxis):
size = size * self._header['NAXIS' + str(idx + 1)]
bitpix = self._header['BITPIX']
gcount = self._header.get('GCOUNT', 1)
pcount = self._header.get('PCOUNT', 0)
size = abs(bitpix) * gcount * (pcount + size) // 8
return size
def close(self):
"""
Close the physical FITS file.
"""
self._ffo.close()
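# Minimal usage sketch (a guarded demo; the header keywords, array shape and
# output file name are example values, and numpy must be available):
if __name__ == '__main__':
    import numpy as np
    from astropy.io.fits.header import Header
    demo_header = Header()
    demo_header['SIMPLE'] = True
    demo_header['BITPIX'] = 32
    demo_header['NAXIS'] = 2
    demo_header['NAXIS1'] = 100
    demo_header['NAXIS2'] = 100
    shdu = StreamingHDU('streamed.fits', demo_header)
    for _ in range(10):
        # Ten chunks of ten rows fill the 100x100 int32 image announced above.
        shdu.write(np.zeros((10, 100), dtype='int32'))
    shdu.close()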
|
paasta_tools/async_utils.py | sobolevn/paasta | 1,711 | 5831 | import asyncio
import functools
import time
import weakref
from collections import defaultdict
from typing import AsyncIterable
from typing import Awaitable
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import TypeVar
T = TypeVar("T")
# NOTE: this method is not thread-safe due to lack of locking while checking
# and updating the cache
def async_ttl_cache(
ttl: Optional[float] = 300,
cleanup_self: bool = False,
*,
cache: Optional[Dict] = None,
) -> Callable[
[Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]] # wrapped # inner
]:
async def call_or_get_from_cache(cache, async_func, args_for_key, args, kwargs):
# Please note that anything which is put into `key` will be in the
# cache forever, potentially causing memory leaks. The most common
# case is the `self` arg pointing to a huge object. To mitigate that
        # we're using `args_for_key`, which is supposed to not contain any huge
# objects.
key = functools._make_key(args_for_key, kwargs, typed=False)
try:
future, last_update = cache[key]
if ttl is not None and time.time() - last_update > ttl:
raise KeyError
except KeyError:
future = asyncio.ensure_future(async_func(*args, **kwargs))
# set the timestamp to +infinity so that we always wait on the in-flight request.
cache[key] = (future, float("Inf"))
try:
value = await future
except Exception:
# Only update the cache if it's the same future we awaited and
# it hasn't already been updated by another coroutine
# Note also that we use get() in case the key was deleted from the
# cache by another coroutine
if cache.get(key) == (future, float("Inf")):
del cache[key]
raise
else:
if cache.get(key) == (future, float("Inf")):
cache[key] = (future, time.time())
return value
if cleanup_self:
instance_caches: Dict = cache if cache is not None else defaultdict(dict)
def on_delete(w):
del instance_caches[w]
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(self, *args, **kwargs):
w = weakref.ref(self, on_delete)
self_cache = instance_caches[w]
return await call_or_get_from_cache(
self_cache, wrapped, args, (self,) + args, kwargs
)
return inner
else:
cache2: Dict = cache if cache is not None else {} # Should be Dict[Any, T] but that doesn't work.
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(*args, **kwargs):
return await call_or_get_from_cache(cache2, wrapped, args, args, kwargs)
return inner
return outer
async def aiter_to_list(aiter: AsyncIterable[T],) -> List[T]:
return [x async for x in aiter]
def async_timeout(
seconds: int = 10,
) -> Callable[
[Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]] # wrapped # inner
]:
def outer(wrapped):
@functools.wraps(wrapped)
async def inner(*args, **kwargs):
return await asyncio.wait_for(wrapped(*args, **kwargs), timeout=seconds)
return inner
return outer
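# Minimal usage sketch (a guarded demo; `fetch_config` and its sleep are
# placeholders standing in for a real async call, not part of paasta):
if __name__ == "__main__":
    @async_ttl_cache(ttl=5)
    async def fetch_config(name: str) -> Dict:
        await asyncio.sleep(0.1)  # stand-in for a slow network call
        return {"name": name}
    async def _demo() -> None:
        first = await fetch_config("cluster-a")   # computed and cached
        second = await fetch_config("cluster-a")  # served from the cached future
        assert first is second
    asyncio.run(_demo())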
|
datagen.py | kuangliu/pytorch-ssd | 124 | 5872 | '''Load image/class/box from a annotation file.
The annotation file is organized as:
image_name #obj xmin ymin xmax ymax class_index ..
'''
from __future__ import print_function
import os
import sys
import os.path
import random
import numpy as np
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from encoder import DataEncoder
from PIL import Image, ImageOps
class ListDataset(data.Dataset):
img_size = 300
def __init__(self, root, list_file, train, transform):
'''
Args:
          root: (str) directory to images.
list_file: (str) path to index file.
train: (boolean) train or test.
transform: ([transforms]) image transforms.
'''
self.root = root
self.train = train
self.transform = transform
self.fnames = []
self.boxes = []
self.labels = []
self.data_encoder = DataEncoder()
with open(list_file) as f:
lines = f.readlines()
self.num_samples = len(lines)
for line in lines:
splited = line.strip().split()
self.fnames.append(splited[0])
num_objs = int(splited[1])
box = []
label = []
for i in range(num_objs):
xmin = splited[2+5*i]
ymin = splited[3+5*i]
xmax = splited[4+5*i]
ymax = splited[5+5*i]
c = splited[6+5*i]
box.append([float(xmin),float(ymin),float(xmax),float(ymax)])
label.append(int(c))
self.boxes.append(torch.Tensor(box))
self.labels.append(torch.LongTensor(label))
def __getitem__(self, idx):
'''Load a image, and encode its bbox locations and class labels.
Args:
idx: (int) image index.
Returns:
img: (tensor) image tensor.
loc_target: (tensor) location targets, sized [8732,4].
conf_target: (tensor) label targets, sized [8732,].
'''
# Load image and bbox locations.
fname = self.fnames[idx]
img = Image.open(os.path.join(self.root, fname))
boxes = self.boxes[idx].clone()
labels = self.labels[idx]
# Data augmentation while training.
if self.train:
img, boxes = self.random_flip(img, boxes)
img, boxes, labels = self.random_crop(img, boxes, labels)
        # Scale bbox locations to [0,1].
w,h = img.size
boxes /= torch.Tensor([w,h,w,h]).expand_as(boxes)
img = img.resize((self.img_size,self.img_size))
img = self.transform(img)
# Encode loc & conf targets.
loc_target, conf_target = self.data_encoder.encode(boxes, labels)
return img, loc_target, conf_target
def random_flip(self, img, boxes):
'''Randomly flip the image and adjust the bbox locations.
For bbox (xmin, ymin, xmax, ymax), the flipped bbox is:
(w-xmax, ymin, w-xmin, ymax).
Args:
img: (PIL.Image) image.
boxes: (tensor) bbox locations, sized [#obj, 4].
Returns:
img: (PIL.Image) randomly flipped image.
boxes: (tensor) randomly flipped bbox locations, sized [#obj, 4].
'''
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
w = img.width
xmin = w - boxes[:,2]
xmax = w - boxes[:,0]
boxes[:,0] = xmin
boxes[:,2] = xmax
return img, boxes
def random_crop(self, img, boxes, labels):
'''Randomly crop the image and adjust the bbox locations.
        For more details, see 'Chapter 2.2: Data augmentation' of the paper.
Args:
img: (PIL.Image) image.
boxes: (tensor) bbox locations, sized [#obj, 4].
labels: (tensor) bbox labels, sized [#obj,].
Returns:
img: (PIL.Image) cropped image.
selected_boxes: (tensor) selected bbox locations.
labels: (tensor) selected bbox labels.
'''
imw, imh = img.size
while True:
min_iou = random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9])
if min_iou is None:
return img, boxes, labels
for _ in range(100):
w = random.randrange(int(0.1*imw), imw)
h = random.randrange(int(0.1*imh), imh)
if h > 2*w or w > 2*h:
continue
x = random.randrange(imw - w)
y = random.randrange(imh - h)
roi = torch.Tensor([[x, y, x+w, y+h]])
center = (boxes[:,:2] + boxes[:,2:]) / 2 # [N,2]
roi2 = roi.expand(len(center), 4) # [N,4]
mask = (center > roi2[:,:2]) & (center < roi2[:,2:]) # [N,2]
mask = mask[:,0] & mask[:,1] #[N,]
if not mask.any():
continue
selected_boxes = boxes.index_select(0, mask.nonzero().squeeze(1))
iou = self.data_encoder.iou(selected_boxes, roi)
if iou.min() < min_iou:
continue
img = img.crop((x, y, x+w, y+h))
selected_boxes[:,0].add_(-x).clamp_(min=0, max=w)
selected_boxes[:,1].add_(-y).clamp_(min=0, max=h)
selected_boxes[:,2].add_(-x).clamp_(min=0, max=w)
selected_boxes[:,3].add_(-y).clamp_(min=0, max=h)
return img, selected_boxes, labels[mask]
def __len__(self):
return self.num_samples
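# Usage sketch (paths, batch size and normalization values are illustrative;
# the list file must follow the format described in the module docstring):
#
#   transform = transforms.Compose([
#       transforms.ToTensor(),
#       transforms.Normalize(mean=(0.485, 0.456, 0.406),
#                            std=(0.229, 0.224, 0.225))])
#   dataset = ListDataset(root='./images', list_file='./annotations.txt',
#                         train=True, transform=transform)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
#   images, loc_targets, conf_targets = next(iter(loader))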
|
lingvo/core/builder.py | allenwang28/lingvo | 2,611 | 5873 | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library to build composite layers.
WARNING:
The builder pattern is still experimental and we need to gain experience
on when to use and when not to use.
Please discuss w/ teammates before using it to build complicated
layers.
"""
import functools
from lingvo.core import activations
from lingvo.core import builder_layers
from lingvo.core import hyperparams
from lingvo.core import layers
from lingvo.core import py_utils
from lingvo.core import tshape
class Base:
"""Model builder with commonly used layers.
A method in a builder class constructs a layer param. FProp of a layer
constructed by a builder takes a tuple of tf.Tensor (one or more) and returns
a tuple of tf.Tensor (one or more). Even though certain layers support FProp
argument being None (e.g., Conv2DLayer), builder should not depend on such a
support.
The constructed layer is often a composition of multiple sub-layers connected
in certain patterns. We expect to have a few methods to facilitate building
these patterns. For example, _Seq() helps to build a sequential layer that
calls its sub-layer one after another.
TODO(zhifengc): Adds a more concrete example.
"""
@classmethod
def Params(cls):
"""The params of this layer."""
p = hyperparams.InstantiableParams(cls)
p.Define('deterministic_dropout', False,
'Used deterministic dropout or not.')
p.Define(
'fprop_dtype', None,
'Activations datatype to use. To enable bfloat16 activations for '
'layers built using model builder, set fprop_dtype to '
'tf.bfloat16, which will be propagated to layers that support '
'bfloat16 activations. Default is None, which will use float32 '
'activations.')
# SPMD partition related params.
p.Define(
'device_mesh', None,
'A numpy.ndarray specifying the topology of a device mesh to place the '
'computations onto. If device_mesh is None, it is assumed to be a '
'single device. Here are some examples: '
'np.array([0, 1, 2, 3, 4, 5, 6, 7]) which is a 1d mesh with 8 devices, '
'np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) which is 2d matrix of 8 '
'devices.')
p.Define(
'weight_split_dims_mapping', None,
'Relevant only if device_mesh above is not None. If not None, it '
'specifies how weight of this layer or those of the sublayers should '
'be sharded over device mesh. ')
p.Define(
'activation_split_dims_mapping', None,
'Relevant only if device_mesh above is not None. If not None, it '
'specifies how activation of this layer or those of the sublayers '
'should be sharded over device mesh. ')
return p
@property
def params(self):
"""Returns the params upon which this layer is built."""
return self._params
def __init__(self, params):
# Sub-classes should put some options common to many layers in __init__.
self._params = params.Copy()
######################################################################
# Layers to compose multiple layers.
#
# Sub-classes are discouraged to override these composition method.
######################################################################
def _Rep(self, name, repeat, *subs):
r"""Connects sub-layers sequentially and repeat multiple times.
E.g., _Rep('foo', 2, sa, sb, sc) constructs a layer with 6 layers
sequentially connected: [sa1, sb1, sc1, sa2, sb2, sc2]. sa1 and sa2 have
the same structure as the given sa, but sa1 and sa2 do not share the same
weight.
Args:
name: The layer name.
repeat: Repeat \*subs this many times in the compose layer.
*subs: A list of sub-layers.
Returns:
The param for the composed layer.
"""
iterations = []
for i in range(repeat):
iterations.append(self._Seq('iter_%03d' % i, *[p.Copy() for p in subs]))
return self._Seq(name, *iterations)
def _Seq(self, name, *subs):
"""Connects sub-layers sequentially."""
return builder_layers.SequentialLayer.Params().Set(
name=name, sub=list(subs))
def _Graph(self, name, input_endpoints, output_endpoints,
*signature_sub_param_list):
"""Connects sub-layers into a data flow graph."""
return builder_layers.GraphLayer.Params().Set(
name=name,
input_endpoints=input_endpoints,
output_endpoints=output_endpoints,
sub=list(signature_sub_param_list))
def _Id(self, name):
"""Identity. (t_1, ..., t_n) -> (t1, ..., t_n)."""
return self._Seq(name)
def _Arg(self, name, index):
"""Picks index-th element. (t_1, ..., t_n) -> (t_{index},)."""
return builder_layers.ArgIndexLayer.Params().Set(name=name, idx=[index])
def _Par(self, name, *subs):
"""y = (f1, f2, ..., fn)(x).
    We feed the input tuple to all sub-layers and concatenate their output
tuples into one tuple.
Args:
name: The layer name.
*subs: A list of sub-layers.
Returns:
The param for the composed layer.
"""
def ConcatTuples(tuples):
# tuples is a list of tuples.
return tuple(functools.reduce(lambda x, y: x + list(y), tuples, []))
def ConcatMeta(tuples):
return py_utils.NestedMap(
flops=0,
out_shapes=tuple(
functools.reduce(lambda x, y: x + list(y), tuples, [])))
return builder_layers.ParallelLayer.Params().Set(
name=name, sub=list(subs), merge=ConcatTuples, merge_meta=ConcatMeta)
def _Fn(self, name, fn, fn_out=None, fn_flops=None):
"""y = fn(x).
Applies a fn: tuple(Tensor) -> a single Tensor or tuple(Tensor) to the input
tuple. Typically, fn is a very simple python function. This layer can be
    used for prototyping but we advise implementing the logic as a sub-class of
BaseLayer for all established layers as FnLayer can't be serialized.
Args:
name: The layer name.
fn: A lambda tuple(Tensor) -> tuple(Tensor).
fn_out: A lambda tuple(tshape.Shape) -> output tuple(tshape.Shape)
fn_flops: A lambda tuple(tshape.Shape) -> estimated flops of fn.
If None, we assume flops == sum of elements in the inputs.
Returns:
The param for the composed layer.
"""
def FnMeta(*shapes):
"""A lambda tuple(tshape.Shape) -> NestedMap{flops, out_shapes}."""
if fn_out:
out_shapes = fn_out(*shapes)
if isinstance(out_shapes, tshape.Shape):
out_shapes = (out_shapes,)
else:
out_shapes = shapes
if fn_flops:
flops = fn_flops(*shapes)
else:
flops = sum([s.size for s in shapes])
return py_utils.NestedMap(flops=flops, out_shapes=out_shapes)
return builder_layers.FnLayer.Params().Set(name=name, fn=fn, fn_meta=FnMeta)
def _Save(self, name):
"""Returns a layer from which the activation and gradient can be accessed."""
return layers.FetchLayer.Params().Set(name=name)
def _AddFetches(self, name, body, fetches):
"""Fetches saved activations in the body sub-layer.
E.g.:
_AddFetches('foo', _Seq( 'stack', _Layer('layer1', ...),
_Save('layer1_out', ...), _Layer('layer2', ...), _Save('layer2_out', ...),
_Output('output', ...)), ['layer1_out', 'layer2_out'])
The layer returns the stack's final output together with intermediate
activations from layer1_out and layer2_out.
Args:
name: This layer's name.
body: The sub-layer.
fetches: A list of fetch names inside the sub-layer body.
Returns:
A layer whose outputs correspond to the activations of fetch points
in the sub-layer body. [input1, input2, ..., inputN, fetch1, ..., fetchM].
"""
return builder_layers.BranchLayer.Params().Set(
name=name, body=body, fetches=fetches)
def _Rematerialize(self, name, body):
"""Forces rematerialization on FProp of the body layer."""
return builder_layers.RematerializationLayer.Params().Set(
name=name, body=body)
def _BatchParallel(self, name, sub):
"""Splits the batch and compute the forward pass on multiple devices.
Args:
name: This layer's name.
sub: The sub-layer.
Returns:
A BatchParallel layer which splits the batch and computes the forward pass
on multiple devices.
"""
return builder_layers.BatchParallelLayer.Params().Set(name=name, sub=sub)
def _PrintShape(self, name):
"""Print FProp input shape information."""
return builder_layers.PrintShapeLayer.Params().Set(name=name)
def _CreateNestedMap(self, name, keys):
"""Returns a NestedMap with keys from fprop args."""
return builder_layers.CreateNestedMapLayer.Params().Set(
name=name, keys=keys)
###########################################################################
# Basic nn layers.
#
# The following method returns a layer param, whose FProp takes a single
# Tensor and returns a single Tensor.
#
# These methods are designed to have minimal knobs. Sub-classes which needs to
# be flexible can override these methods with different options. E.g., a
# sub-class builder can override _BN() to tune the decay option.
###########################################################################
def _BN(self, name, dims):
"""Batch norm."""
return layers.BatchNormLayer.Params().Set(name=name, dim=dims, decay=0.99)
def _LN(self, name, dims, use_fused_layernorm=False):
"""Layer norm."""
return layers.LayerNorm.Params().Set(
name=name,
input_dim=dims,
use_fused_layernorm=use_fused_layernorm,
fprop_dtype=self.params.fprop_dtype)
def _Dropout(self, name, keep_prob, noise_shape_broadcast_dims=None):
"""Returns a DropoutLayer Params."""
if self.params.deterministic_dropout:
return layers.DeterministicDropoutLayer.Params().Set(
name=name,
keep_prob=keep_prob,
noise_shape_broadcast_dims=noise_shape_broadcast_dims)
return layers.DropoutLayer.Params().Set(
name=name,
keep_prob=keep_prob,
noise_shape_broadcast_dims=noise_shape_broadcast_dims,
fprop_dtype=self.params.fprop_dtype)
def _Linear(self,
name,
idims,
odims,
device_mesh=None,
weight_split_dims_mapping=None,
qdomain=None):
"""Linear layer. y = matmul([..., idims], [idims, odims])."""
p = builder_layers.LinearLayer.Params()
p.name = name
p.input_dims = idims
p.output_dims = odims
p.fprop_dtype = self.params.fprop_dtype
p.device_mesh = device_mesh
p.weight_split_dims_mapping = weight_split_dims_mapping
p.qdomain.default = qdomain
return p
def _Bias(self, name, dims, device_mesh=None, weight_split_dims_mapping=None):
"""Bias layer. The bias is added to the last dimension of the input."""
return builder_layers.BiasLayer.Params().Set(
name=name,
dims=dims,
fprop_dtype=self.params.fprop_dtype,
device_mesh=device_mesh,
weight_split_dims_mapping=weight_split_dims_mapping)
def _Activation(self, name, fn='RELU'):
"""Activation layer."""
return activations.ActivationLayer.Params().Set(activation=fn, name=name)
def _FC(self, name, idims, odims, act='RELU'):
"""Feed-forward fully connected. y = act(matmul(x, w) + b)."""
# pyformat: disable
return self._Seq(
name,
self._Linear('linear', idims, odims),
self._Bias('bias', odims),
self._Activation('act', fn=act))
def _MLP(self, name, dims, act='RELU'):
"""Multiple layers of feed-forward fully connected.
Args:
name: The layer name.
dims: A list of int. i-th layer has dims[i] as its input dimension, and
dims[i+1] as its output dimensions.
act: The activation function.
Returns:
The param for the composed layer.
"""
l = []
for n, (i, o) in enumerate(zip(dims[:-1], dims[1:])):
l += [self._FC('l%03d' % n, i, o, act)]
return self._Seq(name, *l)
def _Conv2D(self, name, filter_shape, filter_stride):
"""Conv2D layer."""
return layers.Conv2DLayerNoPadding.Params().Set(
name=name, filter_shape=filter_shape, filter_stride=filter_stride,
fprop_dtype=self.params.fprop_dtype)
def _Reshape(self, name, shape):
"""Reshape inputs to the shape provided."""
return builder_layers.ReshapeLayer.Params().Set(name=name,
shape=shape)
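# Minimal usage sketch (a guarded demo; the layer name and dimensions below are
# arbitrary example values):
if __name__ == '__main__':
    demo_builder = Base(Base.Params())
    # Two fully connected layers, 16 -> 32 -> 8, composed sequentially.
    demo_mlp = demo_builder._MLP('mlp', dims=[16, 32, 8], act='RELU')
    # demo_mlp is a layer param; demo_mlp.Instantiate() builds the layer inside
    # a lingvo model as usual.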
|
raiden/tests/integration/long_running/test_stress.py | tirkarthi/raiden | 2,101 | 5875 | <gh_stars>1000+
import time
from http import HTTPStatus
from itertools import count
from typing import Sequence
import gevent
import grequests
import pytest
import structlog
from eth_utils import to_canonical_address
from flask import url_for
from raiden.api.python import RaidenAPI
from raiden.api.rest import APIServer, RestAPI
from raiden.constants import RoutingMode
from raiden.message_handler import MessageHandler
from raiden.network.transport import MatrixTransport
from raiden.raiden_event_handler import RaidenEventHandler
from raiden.raiden_service import RaidenService
from raiden.settings import RestApiConfig
from raiden.tests.integration.api.utils import wait_for_listening_port
from raiden.tests.integration.fixtures.raiden_network import RestartNode
from raiden.tests.utils.detect_failure import raise_on_failure
from raiden.tests.utils.protocol import HoldRaidenEventHandler
from raiden.tests.utils.transfer import (
assert_synced_channel_state,
wait_assert,
watch_for_unlock_failures,
)
from raiden.transfer import views
from raiden.ui.startup import RaidenBundle
from raiden.utils.formatting import to_checksum_address
from raiden.utils.typing import (
Address,
BlockNumber,
Host,
Iterator,
List,
Port,
TokenAddress,
TokenAmount,
TokenNetworkAddress,
Tuple,
)
log = structlog.get_logger(__name__)
def iwait_and_get(items: Sequence[gevent.Greenlet]) -> None:
"""Iteratively wait and get on passed greenlets.
This ensures exceptions in the greenlets are re-raised as soon as possible.
"""
for item in gevent.iwait(items):
item.get()
def _url_for(apiserver: APIServer, endpoint: str, **kwargs) -> str:
# url_for() expects binary address so we have to convert here
for key, val in kwargs.items():
if isinstance(val, str) and val.startswith("0x"):
kwargs[key] = to_canonical_address(val)
with apiserver.flask_app.app_context():
return url_for(f"v1_resources.{endpoint}", **kwargs)
def start_apiserver(raiden_app: RaidenService, rest_api_port_number: Port) -> APIServer:
raiden_api = RaidenAPI(raiden_app)
rest_api = RestAPI(raiden_api)
api_server = APIServer(
rest_api, config=RestApiConfig(host=Host("localhost"), port=rest_api_port_number)
)
# required for url_for
api_server.flask_app.config["SERVER_NAME"] = f"localhost:{rest_api_port_number}"
api_server.start()
wait_for_listening_port(rest_api_port_number)
return api_server
def start_apiserver_for_network(
raiden_network: List[RaidenService], port_generator: Iterator[Port]
) -> List[APIServer]:
return [start_apiserver(app, next(port_generator)) for app in raiden_network]
def restart_app(app: RaidenService, restart_node: RestartNode) -> RaidenService:
new_transport = MatrixTransport(
config=app.config.transport, environment=app.config.environment_type
)
raiden_event_handler = RaidenEventHandler()
hold_handler = HoldRaidenEventHandler(raiden_event_handler)
app = RaidenService(
config=app.config,
rpc_client=app.rpc_client,
proxy_manager=app.proxy_manager,
query_start_block=BlockNumber(0),
raiden_bundle=RaidenBundle(
app.default_registry,
app.default_secret_registry,
),
services_bundle=app.default_services_bundle,
transport=new_transport,
raiden_event_handler=hold_handler,
message_handler=MessageHandler(),
routing_mode=RoutingMode.PRIVATE,
)
restart_node(app)
return app
def restart_network(
raiden_network: List[RaidenService], restart_node: RestartNode
) -> List[RaidenService]:
for app in raiden_network:
app.stop()
wait_network = (gevent.spawn(restart_app, app, restart_node) for app in raiden_network)
gevent.joinall(set(wait_network), raise_error=True)
new_network = [greenlet.get() for greenlet in wait_network]
return new_network
def restart_network_and_apiservers(
raiden_network: List[RaidenService],
restart_node: RestartNode,
api_servers: List[APIServer],
port_generator: Iterator[Port],
) -> Tuple[List[RaidenService], List[APIServer]]:
"""Stop an app and start it back"""
for rest_api in api_servers:
rest_api.stop()
new_network = restart_network(raiden_network, restart_node)
new_servers = start_apiserver_for_network(new_network, port_generator)
return (new_network, new_servers)
def address_from_apiserver(apiserver: APIServer) -> Address:
return apiserver.rest_api.raiden_api.address
def transfer_and_assert(
server_from: APIServer,
server_to: APIServer,
token_address: TokenAddress,
identifier: int,
amount: TokenAmount,
) -> None:
url = _url_for(
server_from,
"token_target_paymentresource",
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(address_from_apiserver(server_to)),
)
json = {"amount": amount, "identifier": identifier}
log.debug("PAYMENT REQUEST", url=url, json=json)
request = grequests.post(url, json=json)
start = time.monotonic()
response = request.send().response
duration = time.monotonic() - start
log.debug("PAYMENT RESPONSE", url=url, json=json, response=response, duration=duration)
assert getattr(request, "exception", None) is None
assert response is not None
assert response.status_code == HTTPStatus.OK, f"Payment failed, reason: {response.content}"
assert response.headers["Content-Type"] == "application/json"
def sequential_transfers(
server_from: APIServer,
server_to: APIServer,
number_of_transfers: int,
token_address: TokenAddress,
identifier_generator: Iterator[int],
) -> None:
for _ in range(number_of_transfers):
transfer_and_assert(
server_from=server_from,
server_to=server_to,
token_address=token_address,
identifier=next(identifier_generator),
amount=TokenAmount(1),
)
def stress_send_serial_transfers(
rest_apis: List[APIServer],
token_address: TokenAddress,
identifier_generator: Iterator[int],
deposit: TokenAmount,
) -> None:
"""Send `deposit` transfers of value `1` one at a time, without changing
the initial capacity.
"""
pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))
# deplete the channels in one direction
for server_from, server_to in pairs:
sequential_transfers(
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
# deplete the channels in the backwards direction
for server_to, server_from in pairs:
sequential_transfers(
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit * 2,
token_address=token_address,
identifier_generator=identifier_generator,
)
    # reset the balances by sending the "extra" deposit forward
for server_from, server_to in pairs:
sequential_transfers(
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
def stress_send_parallel_transfers(
rest_apis: List[APIServer],
token_address: TokenAddress,
identifier_generator: Iterator[int],
deposit: TokenAmount,
) -> None:
"""Send `deposit` transfers in parallel, without changing the initial capacity."""
pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))
# deplete the channels in one direction
iwait_and_get(
[
gevent.spawn(
sequential_transfers,
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
for server_from, server_to in pairs
]
)
# deplete the channels in the backwards direction
iwait_and_get(
[
gevent.spawn(
sequential_transfers,
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit * 2,
token_address=token_address,
identifier_generator=identifier_generator,
)
for server_to, server_from in pairs
]
)
    # reset the balances by sending the "extra" deposit forward
iwait_and_get(
[
gevent.spawn(
sequential_transfers,
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
for server_from, server_to in pairs
]
)
def stress_send_and_receive_parallel_transfers(
rest_apis: List[APIServer],
token_address: TokenAddress,
identifier_generator: Iterator[int],
deposit: TokenAmount,
) -> None:
"""Send transfers of value one in parallel"""
pairs = list(zip(rest_apis, rest_apis[1:] + [rest_apis[0]]))
forward_transfers = [
gevent.spawn(
sequential_transfers,
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
for server_from, server_to in pairs
]
backwards_transfers = [
gevent.spawn(
sequential_transfers,
server_from=server_from,
server_to=server_to,
number_of_transfers=deposit,
token_address=token_address,
identifier_generator=identifier_generator,
)
for server_to, server_from in pairs
]
iwait_and_get(forward_transfers + backwards_transfers)
def assert_channels(
raiden_network: List[RaidenService],
token_network_address: TokenNetworkAddress,
deposit: TokenAmount,
) -> None:
pairs = list(zip(raiden_network, raiden_network[1:] + [raiden_network[0]]))
for first, second in pairs:
wait_assert(
assert_synced_channel_state,
token_network_address,
first,
deposit,
[],
second,
deposit,
[],
)
@pytest.mark.skip(reason="flaky, see https://github.com/raiden-network/raiden/issues/4803")
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [3])
@pytest.mark.parametrize("number_of_tokens", [1])
@pytest.mark.parametrize("channels_per_node", [2])
@pytest.mark.parametrize("deposit", [2])
@pytest.mark.parametrize("reveal_timeout", [15])
@pytest.mark.parametrize("settle_timeout", [120])
def test_stress(
raiden_network: List[RaidenService],
restart_node: RestartNode,
deposit: TokenAmount,
token_addresses: List[TokenAddress],
port_generator: Iterator[Port],
) -> None:
token_address = token_addresses[0]
rest_apis = start_apiserver_for_network(raiden_network, port_generator)
identifier_generator = count(start=1)
token_network_address = views.get_token_network_address_by_token_address(
views.state_from_raiden(raiden_network[0]),
raiden_network[0].default_registry.address,
token_address,
)
assert token_network_address
for _ in range(2):
assert_channels(raiden_network, token_network_address, deposit)
with watch_for_unlock_failures(*raiden_network):
stress_send_serial_transfers(rest_apis, token_address, identifier_generator, deposit)
raiden_network, rest_apis = restart_network_and_apiservers(
raiden_network, restart_node, rest_apis, port_generator
)
assert_channels(raiden_network, token_network_address, deposit)
with watch_for_unlock_failures(*raiden_network):
stress_send_parallel_transfers(rest_apis, token_address, identifier_generator, deposit)
raiden_network, rest_apis = restart_network_and_apiservers(
raiden_network, restart_node, rest_apis, port_generator
)
assert_channels(raiden_network, token_network_address, deposit)
with watch_for_unlock_failures(*raiden_network):
stress_send_and_receive_parallel_transfers(
rest_apis, token_address, identifier_generator, deposit
)
raiden_network, rest_apis = restart_network_and_apiservers(
raiden_network, restart_node, rest_apis, port_generator
)
restart_network(raiden_network, restart_node)
|
igibson/utils/data_utils/ext_object/scripts/step_1_visual_mesh.py | mamadbiabon/iGibson | 360 | 5883 | <reponame>mamadbiabon/iGibson<filename>igibson/utils/data_utils/ext_object/scripts/step_1_visual_mesh.py
import os
import sys
import bpy
script_dir = os.path.dirname(os.path.abspath(__file__))
utils_dir = os.path.join(script_dir, "../../blender_utils")
sys.path.append(utils_dir)
from utils import bake_model, clean_unused, export_ig_object, import_obj_folder
#############################################
# Parse command line arguments
#############################################
def get_arg(argv, flag, default=None):
if flag in argv:
return argv[argv.index(flag) + 1]
return default
should_bake = "--bake" in sys.argv
axis = ["X", "Y", "Z", "-X", "-Y", "-Z"]
import_axis_up = get_arg(sys.argv, "--up", default="Z")
if import_axis_up not in axis:
raise ValueError("Axis up not supported: {} (should be among X,Y,Z,-X,-Y,-Z)".format(import_axis_up))
import_axis_forward = get_arg(sys.argv, "--forward", default="X")
if import_axis_forward not in axis:
raise ValueError("Axis forward not supported: {} (should be among X,Y,Z,-X,-Y,-Z)".format(import_axis_forward))
source_dir = get_arg(sys.argv, "--source_dir")
if source_dir is None:
raise ValueError("Source directory not specified.")
dest_dir = get_arg(sys.argv, "--dest_dir")
if dest_dir is None:
raise ValueError("Destination directory not specified.")
os.makedirs(dest_dir, exist_ok=True)
model_id = os.path.basename(source_dir)
#############################################
# Importing obj files from source dir
#############################################
for on in bpy.context.scene.objects.keys():
obj = bpy.context.scene.objects[on]
bpy.data.objects.remove(obj)
clean_unused()
import_obj_folder(model_id, source_dir, up=import_axis_up, forward=import_axis_forward)
#############################################
# Optional UV Unwrapping
# This is only needed if baking will be performed
#############################################
if should_bake:
uv_unwrapped = True
for o in bpy.context.scene.objects:
if not o.data.uv_layers:
uv_unwrapped = False
if not uv_unwrapped:
bpy.ops.object.mode_set(mode="OBJECT")
vl = bpy.context.view_layer
bpy.ops.object.select_all(action="DESELECT")
for on in bpy.context.scene.objects.keys():
obj = bpy.context.scene.objects[on]
new_uv = bpy.context.scene.objects[on].data.uv_layers.new(name="obj_uv")
vl.objects.active = obj
obj.select_set(True)
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.select_all(action="SELECT")
bpy.ops.uv.smart_project(angle_limit=66, island_margin=0.02)
bpy.context.tool_settings.mesh_select_mode = (False, False, True)
bpy.ops.object.mode_set(mode="OBJECT")
#############################################
# Export models
#############################################
export_ig_object(dest_dir, save_material=not should_bake)
#############################################
# Optional Texture Baking
#############################################
if should_bake:
mat_dir = os.path.join(dest_dir, "material")
os.makedirs(mat_dir, exist_ok=True)
# bpy.ops.wm.open_mainfile(filepath=blend_path)
# import_ig_object(model_root, import_mat=True)
for obj in bpy.context.scene.objects:
obj.select_set(True)
bpy.context.view_layer.objects.active = obj
bpy.ops.object.select_all(action="SELECT")
bpy.ops.object.join()
channels = {
"DIFFUSE": (2048, 32),
"ROUGHNESS": (1024, 16),
"METALLIC": (1024, 16),
"NORMAL": (1024, 16),
}
bake_model(mat_dir, channels, overwrite=True)
bpy.ops.wm.quit_blender()
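# Example invocation (a sketch; the blender binary, paths and object name are
# assumptions and not part of this script):
#
#   blender -b --python step_1_visual_mesh.py -- \
#       --bake --up Z --forward X \
#       --source_dir /path/to/objs/my_object \
#       --dest_dir /path/to/ig_dataset/objects/my_object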
|
mushroom_rl/utils/plots/common_plots.py | PuzeLiu/mushroom-rl | 344 | 5888 | from mushroom_rl.utils.plots import PlotItemBuffer, DataBuffer
from mushroom_rl.utils.plots.plot_item_buffer import PlotItemBufferLimited
class RewardPerStep(PlotItemBuffer):
"""
Class that represents a plot for the reward at every step.
"""
def __init__(self, plot_buffer):
"""
Constructor.
Args:
plot_buffer (DataBuffer): data buffer to be used.
"""
title = "Step_Reward"
curves_params = [dict(data_buffer=plot_buffer)]
super().__init__(title, curves_params)
class RewardPerEpisode(PlotItemBuffer):
"""
Class that represents a plot for the accumulated reward per episode.
"""
def __init__(self, plot_buffer):
"""
Constructor.
Args:
plot_buffer (DataBuffer): data buffer to be used.
"""
title = "Episode_Reward"
curves_params = [dict(data_buffer=plot_buffer)]
super().__init__(title, curves_params)
class Actions(PlotItemBufferLimited):
"""
Class that represents a plot for the actions.
"""
def __init__(self, plot_buffers, maxs=None, mins=None):
"""
Constructor.
Args:
            plot_buffers (list): data buffers to be used;
maxs(list, None): list of max values of each data buffer plotted.
If an element is None, no max line is drawn;
mins(list, None): list of min values of each data buffer plotted.
If an element is None, no min line is drawn.
"""
title = "Actions"
super().__init__(title, plot_buffers, maxs=maxs, mins=mins)
class Observations(PlotItemBufferLimited):
"""
Class that represents a plot for the observations.
"""
def __init__(self, plot_buffers, maxs=None, mins=None, dotted_limits=None):
"""
Constructor.
Args:
            plot_buffers (list): list of data buffers to be used;
maxs(list, None): list of max values of each data buffer plotted.
If an element is None, no max line is drawn;
mins(list, None): list of min values of each data buffer plotted.
If an element is None, no min line is drawn.
dotted_limits (list, None): list of booleans. If True, the
corresponding limit is dotted; otherwise, it is printed as a
solid line.
"""
title = "Observations"
super().__init__(title, plot_buffers, maxs=maxs, mins=mins,
dotted_limits=dotted_limits)
class LenOfEpisodeTraining(PlotItemBuffer):
"""
Class that represents a plot for the length of the episode.
"""
def __init__(self, plot_buffer):
"""
Constructor.
Args:
plot_buffer (DataBuffer): data buffer to be used;
"""
title = "Len of Episode"
plot_params = [dict(data_buffer=plot_buffer)]
super().__init__(title, plot_params)
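# Usage sketch (illustrative only; the exact DataBuffer construction and update
# calls are assumptions -- check mushroom_rl.utils.plots.DataBuffer):
#
#   step_reward_buffer = DataBuffer("reward")
#   step_reward_plot = RewardPerStep(step_reward_buffer)
#   # feed the buffer from the experiment callback, then draw this plot item
#   # inside a plotting window alongside Actions/Observations plots.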
|
src/models/nn/adaptive_softmax.py | dumpmemory/state-spaces | 513 | 5897 | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
class OptionalParameterList(nn.ParameterList):
def extra_repr(self):
child_lines = []
for k, p in self._parameters.items():
if p is not None:
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = 'Parameter containing: [{} of size {}{}]'.format(
torch.typename(p), size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
tmpstr = '\n'.join(child_lines)
return tmpstr
class ProjectedAdaptiveLogSoftmax(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
tie_projs=None, out_layers_weights=None, out_projs=None,
keep_order=False,
bias_scale=0.0,
dropout=0.0,
):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = list(cutoffs) + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
# [21-09-15 AG]: bake the first False into the definition, just as [0] is built into the cutoffs
if tie_projs is None: tie_projs = []
elif isinstance(tie_projs, bool): tie_projs = [tie_projs] * len(cutoffs)
else: tie_projs = list(tie_projs)
tie_projs = [False] + tie_projs
self.tie_projs = tie_projs
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
if not out_layers_weights:
self.out_layers_weights = nn.ParameterList()
else:
self.out_layers_weights = out_layers_weights
self.out_layers_biases = nn.ParameterList()
self.shared_out_projs = out_projs
self.out_projs = OptionalParameterList()
self.dropout = dropout
self.drop = nn.Dropout(dropout)
if div_val == 1:
if d_proj != d_embed:
for i in range(len(self.cutoffs)):
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_embed))
)
else:
# self.out_projs = [None] * len(self.cutoffs)
self.out_projs.append(None)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(n_token))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(n_token, d_embed))
)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_emb_i))
)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(r_idx - l_idx))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i))
)
for bias in self.out_layers_biases:
bound = bias_scale * d_proj ** -.5
nn.init.uniform_(bias, -bound, bound)
self.keep_order = keep_order
def _compute_logit(self, hidden, weight, bias, proj):
if proj is None:
logit = F.linear(hidden, weight, bias=bias)
else:
if self.dropout > 0.0:
logit = hidden @ proj
logit = self.drop(logit)
logit = logit @ weight.t()
else:
logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
if bias is not None:
logit = logit + bias
return logit
def get_out_proj(self, i):
if self.tie_projs[i]:
if len(self.shared_out_projs) == 0:
return None
elif len(self.shared_out_projs) == 1:
return self.shared_out_projs[0]
else:
return self.shared_out_projs[i]
else:
return self.out_projs[i]
def forward(self, hidden, target, keep_order=False, key_padding_mask=None, *args, **kwargs):
# [21-09-15 AG]: TODO may need to handle key_padding_mask
'''
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
'''
hidden = hidden.reshape(-1, hidden.size(-1))
target = target.reshape(-1)
if hidden.size(0) != target.size(0):
print(hidden.shape, target.shape)
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers_weights[0],
self.out_layers_biases[0], self.get_out_proj(0))
nll = -F.log_softmax(logit, dim=-1) \
.gather(1, target.unsqueeze(1)).squeeze(1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers_weights[0][l_idx:r_idx]
bias_i = self.out_layers_biases[0][l_idx:r_idx]
else:
weight_i = self.out_layers_weights[i]
bias_i = self.out_layers_biases[i]
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0)
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero(as_tuple=False).squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
if i == 0:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i)
hidden_i = hidden.index_select(0, indices_i)
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
logprob_i = head_logprob_i[:, -i] \
+ tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
if self.keep_order or keep_order:
nll.index_copy_(0, indices_i, -logprob_i)
else:
nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return nll.mean() # TODO maybe cases for length or padding_mask
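# Shape sketch for the adaptive softmax above (illustrative; mirrors the
# __init__/forward signatures in this file, sizes are arbitrary):
#
#   crit = ProjectedAdaptiveLogSoftmax(
#       n_token=10000, d_embed=512, d_proj=512,
#       cutoffs=[1000, 5000], div_val=2)
#   hidden = torch.randn(8 * 32, 512)             # [len*bsz, d_proj]
#   target = torch.randint(0, 10000, (8 * 32,))   # [len*bsz]
#   loss = crit(hidden, target)                   # scalar mean NLL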
class AdaptiveEmbedding(nn.Module):
""" Copy of transformers.AdaptiveEmbedding that works with fp16 by replacing the index_put_ operation
Initialization has been fixed for the case when d_proj = d_embed
"""
def __init__(self, n_token, d_embed, d_proj, cutoffs : List[int], div_val=1, init_scale=1.0, sample_softmax=False, dropout=0.0):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = list(cutoffs) + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.drop = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
_init_embed(self.emb_layers[-1].weight, d_embed, init_scale)
# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_embed ** -.5)
if d_proj != d_embed: # TODO
# self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
_init_proj(self.emb_projs[-1], d_proj, init_scale)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_emb_i ** -.5)
_init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale)
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
_init_proj(self.emb_projs[-1], d_proj, init_scale)
def forward(self, inp, *args, **kwargs):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
embed = self.drop(embed)
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
param = next(self.parameters())
inp_flat = inp.view(-1)
# Changes
# emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
embeddings = []
indices = torch.zeros_like(inp_flat) # empty should work as long as cutoffs[-1] > max token
_total_tokens = 0
# emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = mask_i.nonzero().squeeze(-1) # shape (_tokens,)
_tokens = indices_i.numel()
if _tokens == 0:
continue
inp_i = inp_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = self.drop(emb_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
# Changes
embeddings.append(emb_i)
indices.index_put_(
(indices_i,),
torch.arange(_tokens, device=inp.device) + _total_tokens
)
_total_tokens += _tokens
# emb_flat.index_copy_(0, indices_i, emb_i)
embeddings = torch.cat(embeddings, dim=0)
emb_flat = embeddings[indices]
embed_shape = inp.size() + (self.d_proj,)
embed = emb_flat.view(embed_shape)
embed.mul_(self.emb_scale)
# embed.div_(self.emb_scale)
return embed
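# Companion sketch for AdaptiveEmbedding (illustrative; matches the __init__
# and forward signatures above, sizes are arbitrary):
#
#   emb = AdaptiveEmbedding(n_token=10000, d_embed=512, d_proj=512,
#                           cutoffs=[1000, 5000], div_val=2)
#   tokens = torch.randint(0, 10000, (32, 8))   # (B, L) token ids
#   out = emb(tokens)                           # (B, L, d_proj), scaled by emb_scale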
def _init_weight(weight, d : int, init_scale : Optional[float], default=None):
assert init_scale or default
if init_scale is None:
std = default
else:
std = init_scale * (d ** -0.5)
nn.init.normal_(weight, mean=0, std=std)
_init_embed = functools.partial(_init_weight, default=0.02)
_init_proj = functools.partial(_init_weight, default=0.01)
### Just for this codebase, we need to squeeze the last dimension because inputs are always given as (B, L, D) instead of (B, L)
import src.models.nn.utils as U
# AdaptiveEmbedding = U.Squeeze(AdaptiveEmbedding)
|
test/jit/test_backend_nnapi.py | Hacky-DH/pytorch | 60,067 | 5949 | import os
import sys
import unittest
import torch
import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
from torch.testing._internal.common_utils import TEST_WITH_ASAN
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
"""
Unit Tests for Nnapi backend with delegate
Inherits most tests from TestNNAPI, which loads Android NNAPI models
without the delegate API.
"""
# First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests.
# Second skip is because ASAN is currently causing an error.
# It is still unclear how to resolve this. T95764916
torch_root = Path(__file__).resolve().parent.parent.parent
lib_path = torch_root / 'build' / 'lib' / 'libnnapi_backend.so'
@unittest.skipIf(not os.path.exists(lib_path),
"Skipping the test as libnnapi_backend.so was not found")
@unittest.skipIf(TEST_WITH_ASAN, "Unresolved bug with ASAN")
class TestNnapiBackend(TestNNAPI):
def setUp(self):
super().setUp()
# Save default dtype
module = torch.nn.PReLU()
self.default_dtype = module.weight.dtype
# Change dtype to float32 (since a different unit test changed dtype to float64,
# which is not supported by the Android NNAPI delegate)
# Float32 should typically be the default in other files.
torch.set_default_dtype(torch.float32)
# Load nnapi delegate library
torch.ops.load_library(str(lib_path))
# Override
def call_lowering_to_nnapi(self, traced_module, args):
compile_spec = {"forward": {"inputs": args}}
return torch._C._jit_to_backend("nnapi", traced_module, compile_spec)
def test_tensor_input(self):
# Lower a simple module
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
traced = torch.jit.trace(module, args)
# Argument input is a single Tensor
self.call_lowering_to_nnapi(traced, args)
# Argument input is a Tensor in a list
self.call_lowering_to_nnapi(traced, [args])
# Test exceptions for incorrect compile specs
    def test_compile_spec_sanity(self):
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
traced = torch.jit.trace(module, args)
errorMsgTail = r"""
method_compile_spec should contain a Tensor or Tensor List which bundles input parameters: shape, dtype, quantization, and dimorder.
For input shapes, use 0 for run/load time flexible input.
method_compile_spec must use the following format:
{"forward": {"inputs": at::Tensor}} OR {"forward": {"inputs": c10::List<at::Tensor>}}"""
# No forward key
compile_spec = {"backward": {"inputs": args}}
with self.assertRaisesRegex(RuntimeError, "method_compile_spec does not contain the \"forward\" key." + errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No dictionary under the forward key
compile_spec = {"forward": 1}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
"under it's \"forward\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No inputs key (in the dictionary under the forward key)
compile_spec = {"forward": {"not inputs": args}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
"under it's \"forward\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No Tensor or TensorList under the inputs key
compile_spec = {"forward": {"inputs": 1}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
compile_spec = {"forward": {"inputs": [1]}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
def tearDown(self):
# Change dtype back to default (Otherwise, other unit tests will complain)
torch.set_default_dtype(self.default_dtype)
|
tests/test_markup.py | samdoran/sphinx | 4,973 | 5984 | <gh_stars>1000+
"""
test_markup
~~~~~~~~~~~
Test various Sphinx-specific markup extensions.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import pytest
from docutils import frontend, nodes, utils
from docutils.parsers.rst import Parser as RstParser
from sphinx import addnodes
from sphinx.builders.html.transforms import KeyboardTransform
from sphinx.builders.latex import LaTeXBuilder
from sphinx.roles import XRefRole
from sphinx.testing.util import Struct, assert_node
from sphinx.transforms import SphinxSmartQuotes
from sphinx.util import docutils, texescape
from sphinx.util.docutils import sphinx_domains
from sphinx.writers.html import HTMLTranslator, HTMLWriter
from sphinx.writers.latex import LaTeXTranslator, LaTeXWriter
@pytest.fixture
def settings(app):
texescape.init() # otherwise done by the latex builder
optparser = frontend.OptionParser(
components=(RstParser, HTMLWriter, LaTeXWriter))
settings = optparser.get_default_values()
settings.smart_quotes = True
settings.env = app.builder.env
settings.env.temp_data['docname'] = 'dummy'
settings.contentsname = 'dummy'
settings.rfc_base_url = 'http://tools.ietf.org/html/'
domain_context = sphinx_domains(settings.env)
domain_context.enable()
yield settings
domain_context.disable()
@pytest.fixture
def new_document(settings):
def create():
document = utils.new_document('test data', settings)
document['file'] = 'dummy'
return document
return create
@pytest.fixture
def inliner(new_document):
document = new_document()
document.reporter.get_source_and_line = lambda line=1: ('dummy.rst', line)
return Struct(document=document, reporter=document.reporter)
@pytest.fixture
def parse(new_document):
def parse_(rst):
document = new_document()
parser = RstParser()
parser.parse(rst, document)
SphinxSmartQuotes(document, startnode=None).apply()
for msg in document.traverse(nodes.system_message):
if msg['level'] == 1:
msg.replace_self([])
return document
return parse_
# since we're not resolving the markup afterwards, these nodes may remain
class ForgivingTranslator:
def visit_pending_xref(self, node):
pass
def depart_pending_xref(self, node):
pass
class ForgivingHTMLTranslator(HTMLTranslator, ForgivingTranslator):
pass
class ForgivingLaTeXTranslator(LaTeXTranslator, ForgivingTranslator):
pass
@pytest.fixture
def verify_re_html(app, parse):
def verify(rst, html_expected):
document = parse(rst)
KeyboardTransform(document).apply()
html_translator = ForgivingHTMLTranslator(document, app.builder)
document.walkabout(html_translator)
html_translated = ''.join(html_translator.fragment).strip()
assert re.match(html_expected, html_translated), 'from ' + rst
return verify
@pytest.fixture
def verify_re_latex(app, parse):
def verify(rst, latex_expected):
document = parse(rst)
app.builder = LaTeXBuilder(app)
app.builder.set_environment(app.env)
app.builder.init()
theme = app.builder.themes.get('manual')
latex_translator = ForgivingLaTeXTranslator(document, app.builder, theme)
latex_translator.first_document = -1 # don't write \begin{document}
document.walkabout(latex_translator)
latex_translated = ''.join(latex_translator.body).strip()
assert re.match(latex_expected, latex_translated), 'from ' + repr(rst)
return verify
@pytest.fixture
def verify_re(verify_re_html, verify_re_latex):
def verify_re_(rst, html_expected, latex_expected):
if html_expected:
verify_re_html(rst, html_expected)
if latex_expected:
verify_re_latex(rst, latex_expected)
return verify_re_
@pytest.fixture
def verify(verify_re_html, verify_re_latex):
def verify_(rst, html_expected, latex_expected):
if html_expected:
verify_re_html(rst, re.escape(html_expected) + '$')
if latex_expected:
verify_re_latex(rst, re.escape(latex_expected) + '$')
return verify_
@pytest.fixture
def get_verifier(verify, verify_re):
v = {
'verify': verify,
'verify_re': verify_re,
}
def get(name):
return v[name]
return get
@pytest.mark.parametrize('type,rst,html_expected,latex_expected', [
(
# pep role
'verify',
':pep:`8`',
('<p><span class="target" id="index-0"></span><a class="pep reference external" '
'href="http://www.python.org/dev/peps/pep-0008"><strong>PEP 8</strong></a></p>'),
('\\sphinxAtStartPar\n'
'\\index{Python Enhancement Proposals@\\spxentry{Python Enhancement Proposals}'
'!PEP 8@\\spxentry{PEP 8}}\\sphinxhref{http://www.python.org/dev/peps/pep-0008}'
'{\\sphinxstylestrong{PEP 8}}')
),
(
# pep role with anchor
'verify',
':pep:`8#id1`',
('<p><span class="target" id="index-0"></span><a class="pep reference external" '
'href="http://www.python.org/dev/peps/pep-0008#id1">'
'<strong>PEP 8#id1</strong></a></p>'),
('\\sphinxAtStartPar\n'
'\\index{Python Enhancement Proposals@\\spxentry{Python Enhancement Proposals}'
'!PEP 8\\#id1@\\spxentry{PEP 8\\#id1}}\\sphinxhref'
'{http://www.python.org/dev/peps/pep-0008\\#id1}'
'{\\sphinxstylestrong{PEP 8\\#id1}}')
),
(
# rfc role
'verify',
':rfc:`2324`',
('<p><span class="target" id="index-0"></span><a class="rfc reference external" '
'href="http://tools.ietf.org/html/rfc2324.html"><strong>RFC 2324</strong></a></p>'),
('\\sphinxAtStartPar\n'
'\\index{RFC@\\spxentry{RFC}!RFC 2324@\\spxentry{RFC 2324}}'
'\\sphinxhref{http://tools.ietf.org/html/rfc2324.html}'
'{\\sphinxstylestrong{RFC 2324}}')
),
(
# rfc role with anchor
'verify',
':rfc:`2324#id1`',
('<p><span class="target" id="index-0"></span><a class="rfc reference external" '
'href="http://tools.ietf.org/html/rfc2324.html#id1">'
'<strong>RFC 2324#id1</strong></a></p>'),
('\\sphinxAtStartPar\n'
'\\index{RFC@\\spxentry{RFC}!RFC 2324\\#id1@\\spxentry{RFC 2324\\#id1}}'
'\\sphinxhref{http://tools.ietf.org/html/rfc2324.html\\#id1}'
'{\\sphinxstylestrong{RFC 2324\\#id1}}')
),
(
# correct interpretation of code with whitespace
'verify_re',
'``code sample``',
('<p><code class="(samp )?docutils literal notranslate"><span class="pre">'
'code</span>   <span class="pre">sample</span></code></p>'),
r'\\sphinxAtStartPar\n\\sphinxcode{\\sphinxupquote{code sample}}',
),
(
# interpolation of arrows in menuselection
'verify',
':menuselection:`a --> b`',
('<p><span class="menuselection">a \N{TRIANGULAR BULLET} b</span></p>'),
'\\sphinxAtStartPar\n\\sphinxmenuselection{a \\(\\rightarrow\\) b}',
),
(
# interpolation of ampersands in menuselection
'verify',
':menuselection:`&Foo -&&- &Bar`',
('<p><span class="menuselection"><span class="accelerator">F</span>oo '
'-&- <span class="accelerator">B</span>ar</span></p>'),
('\\sphinxAtStartPar\n'
r'\sphinxmenuselection{\sphinxaccelerator{F}oo \sphinxhyphen{}'
r'\&\sphinxhyphen{} \sphinxaccelerator{B}ar}'),
),
(
# interpolation of ampersands in guilabel
'verify',
':guilabel:`&Foo -&&- &Bar`',
('<p><span class="guilabel"><span class="accelerator">F</span>oo '
'-&- <span class="accelerator">B</span>ar</span></p>'),
('\\sphinxAtStartPar\n'
r'\sphinxguilabel{\sphinxaccelerator{F}oo \sphinxhyphen{}\&\sphinxhyphen{} \sphinxaccelerator{B}ar}'),
),
(
# no ampersands in guilabel
'verify',
':guilabel:`Foo`',
'<p><span class="guilabel">Foo</span></p>',
'\\sphinxAtStartPar\n\\sphinxguilabel{Foo}',
),
(
# kbd role
'verify',
':kbd:`space`',
'<p><kbd class="kbd docutils literal notranslate">space</kbd></p>',
'\\sphinxAtStartPar\n\\sphinxkeyboard{\\sphinxupquote{space}}',
),
(
# kbd role
'verify',
':kbd:`Control+X`',
('<p><kbd class="kbd compound docutils literal notranslate">'
'<kbd class="kbd docutils literal notranslate">Control</kbd>'
'+'
'<kbd class="kbd docutils literal notranslate">X</kbd>'
'</kbd></p>'),
'\\sphinxAtStartPar\n\\sphinxkeyboard{\\sphinxupquote{Control+X}}',
),
(
# kbd role
'verify',
':kbd:`Alt+^`',
('<p><kbd class="kbd compound docutils literal notranslate">'
'<kbd class="kbd docutils literal notranslate">Alt</kbd>'
'+'
'<kbd class="kbd docutils literal notranslate">^</kbd>'
'</kbd></p>'),
('\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{Alt+\\textasciicircum{}}}'),
),
(
# kbd role
'verify',
':kbd:`M-x M-s`',
('<p><kbd class="kbd compound docutils literal notranslate">'
'<kbd class="kbd docutils literal notranslate">M</kbd>'
'-'
'<kbd class="kbd docutils literal notranslate">x</kbd>'
' '
'<kbd class="kbd docutils literal notranslate">M</kbd>'
'-'
'<kbd class="kbd docutils literal notranslate">s</kbd>'
'</kbd></p>'),
('\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{M\\sphinxhyphen{}x M\\sphinxhyphen{}s}}'),
),
(
# kbd role
'verify',
':kbd:`-`',
'<p><kbd class="kbd docutils literal notranslate">-</kbd></p>',
('\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{\\sphinxhyphen{}}}'),
),
(
# kbd role
'verify',
':kbd:`Caps Lock`',
'<p><kbd class="kbd docutils literal notranslate">Caps Lock</kbd></p>',
('\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{Caps Lock}}'),
),
(
# non-interpolation of dashes in option role
'verify_re',
':option:`--with-option`',
('<p><code( class="xref std std-option docutils literal notranslate")?>'
'<span class="pre">--with-option</span></code></p>$'),
(r'\\sphinxAtStartPar\n'
r'\\sphinxcode{\\sphinxupquote{\\sphinxhyphen{}\\sphinxhyphen{}with\\sphinxhyphen{}option}}$'),
),
(
# verify smarty-pants quotes
'verify',
'"John"',
'<p>“John”</p>',
"\\sphinxAtStartPar\n“John”",
),
(
# ... but not in literal text
'verify',
'``"John"``',
('<p><code class="docutils literal notranslate"><span class="pre">'
'"John"</span></code></p>'),
'\\sphinxAtStartPar\n\\sphinxcode{\\sphinxupquote{"John"}}',
),
(
# verify classes for inline roles
'verify',
':manpage:`mp(1)`',
'<p><em class="manpage">mp(1)</em></p>',
'\\sphinxAtStartPar\n\\sphinxstyleliteralemphasis{\\sphinxupquote{mp(1)}}',
),
(
# correct escaping in normal mode
'verify',
'Γ\\\\∞$',
None,
'\\sphinxAtStartPar\nΓ\\textbackslash{}\\(\\infty\\)\\$',
),
(
# in verbatim code fragments
'verify',
'::\n\n @Γ\\∞${}',
None,
('\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]\n'
'@Γ\\PYGZbs{}\\(\\infty\\)\\PYGZdl{}\\PYGZob{}\\PYGZcb{}\n'
'\\end{sphinxVerbatim}'),
),
(
# in URIs
'verify_re',
'`test <https://www.google.com/~me/>`_',
None,
r'\\sphinxAtStartPar\n\\sphinxhref{https://www.google.com/~me/}{test}.*',
),
(
# description list: simple
'verify',
'term\n description',
'<dl class="docutils">\n<dt>term</dt><dd>description</dd>\n</dl>',
None,
),
(
# description list: with classifiers
'verify',
'term : class1 : class2\n description',
('<dl class="docutils">\n<dt>term<span class="classifier">class1</span>'
'<span class="classifier">class2</span></dt><dd>description</dd>\n</dl>'),
None,
),
(
# glossary (description list): multiple terms
'verify',
'.. glossary::\n\n term1\n term2\n description',
('<dl class="glossary docutils">\n'
'<dt id="term-term1">term1<a class="headerlink" href="#term-term1"'
' title="Permalink to this term">¶</a></dt>'
'<dt id="term-term2">term2<a class="headerlink" href="#term-term2"'
' title="Permalink to this term">¶</a></dt>'
'<dd>description</dd>\n</dl>'),
None,
),
])
def test_inline(get_verifier, type, rst, html_expected, latex_expected):
verifier = get_verifier(type)
verifier(rst, html_expected, latex_expected)
@pytest.mark.parametrize('type,rst,html_expected,latex_expected', [
(
'verify',
r'4 backslashes \\\\',
r'<p>4 backslashes \\</p>',
None,
),
])
@pytest.mark.skipif(docutils.__version_info__ < (0, 16),
reason='docutils-0.16 or above is required')
def test_inline_docutils16(get_verifier, type, rst, html_expected, latex_expected):
verifier = get_verifier(type)
verifier(rst, html_expected, latex_expected)
@pytest.mark.sphinx(confoverrides={'latex_engine': 'xelatex'})
@pytest.mark.parametrize('type,rst,html_expected,latex_expected', [
(
# in verbatim code fragments
'verify',
'::\n\n @Γ\\∞${}',
None,
('\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]\n'
'@Γ\\PYGZbs{}∞\\PYGZdl{}\\PYGZob{}\\PYGZcb{}\n'
'\\end{sphinxVerbatim}'),
),
])
def test_inline_for_unicode_latex_engine(get_verifier, type, rst,
html_expected, latex_expected):
verifier = get_verifier(type)
verifier(rst, html_expected, latex_expected)
def test_samp_role(parse):
# no braces
text = ':samp:`a{b}c`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, ("a",
[nodes.emphasis, "b"],
"c")])
# nested braces
text = ':samp:`a{{b}}c`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, ("a",
[nodes.emphasis, "{b"],
"}c")])
# half-opened braces
text = ':samp:`a{bc`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, "a{bc"])
# escaped braces
text = ':samp:`a\\\\{b}c`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, "a{b}c"])
    # no braces (whitespace is kept as is)
text = ':samp:`code sample`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, "code sample"])
def test_download_role(parse):
# implicit
text = ':download:`sphinx.rst`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, addnodes.download_reference,
nodes.literal, "sphinx.rst"])
assert_node(doctree[0][0], refdoc='dummy', refdomain='', reftype='download',
refexplicit=False, reftarget='sphinx.rst', refwarn=False)
assert_node(doctree[0][0][0], classes=['xref', 'download'])
# explicit
text = ':download:`reftitle <sphinx.rst>`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, addnodes.download_reference,
nodes.literal, "reftitle"])
assert_node(doctree[0][0], refdoc='dummy', refdomain='', reftype='download',
refexplicit=True, reftarget='sphinx.rst', refwarn=False)
assert_node(doctree[0][0][0], classes=['xref', 'download'])
def test_XRefRole(inliner):
role = XRefRole()
# implicit
doctrees, errors = role('ref', 'rawtext', 'text', 5, inliner, {}, [])
assert len(doctrees) == 1
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
refexplicit=False, refwarn=False)
assert errors == []
# explicit
doctrees, errors = role('ref', 'rawtext', 'title <target>', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'title'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='target',
refexplicit=True, refwarn=False)
# bang
doctrees, errors = role('ref', 'rawtext', '!title <target>', 5, inliner, {}, [])
assert_node(doctrees[0], [nodes.literal, 'title <target>'])
# refdomain
doctrees, errors = role('test:doc', 'rawtext', 'text', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text'])
assert_node(doctrees[0], refdoc='dummy', refdomain='test', reftype='doc', reftarget='text',
refexplicit=False, refwarn=False)
# fix_parens
role = XRefRole(fix_parens=True)
doctrees, errors = role('ref', 'rawtext', 'text()', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text()'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
refexplicit=False, refwarn=False)
# lowercase
role = XRefRole(lowercase=True)
doctrees, errors = role('ref', 'rawtext', 'TEXT', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'TEXT'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
refexplicit=False, refwarn=False)
@pytest.mark.sphinx('dummy', testroot='prolog')
def test_rst_prolog(app, status, warning):
app.builder.build_all()
rst = app.env.get_doctree('restructuredtext')
md = app.env.get_doctree('markdown')
# rst_prolog
assert_node(rst[0], nodes.paragraph)
assert_node(rst[0][0], nodes.emphasis)
assert_node(rst[0][0][0], nodes.Text)
assert rst[0][0][0] == 'Hello world'
# rst_epilog
assert_node(rst[-1], nodes.section)
assert_node(rst[-1][-1], nodes.paragraph)
assert_node(rst[-1][-1][0], nodes.emphasis)
assert_node(rst[-1][-1][0][0], nodes.Text)
assert rst[-1][-1][0][0] == 'Good-bye world'
    # rst_prolog & rst_epilog are not applied when the reST parser is excluded
assert not md.rawsource.startswith('*Hello world*.')
assert not md.rawsource.endswith('*Good-bye world*.\n')
@pytest.mark.sphinx('dummy', testroot='keep_warnings')
def test_keep_warnings_is_True(app, status, warning):
app.builder.build_all()
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert len(doctree[0]) == 2
assert_node(doctree[0][1], nodes.system_message)
@pytest.mark.sphinx('dummy', testroot='keep_warnings',
confoverrides={'keep_warnings': False})
def test_keep_warnings_is_False(app, status, warning):
app.builder.build_all()
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert len(doctree[0]) == 1
@pytest.mark.sphinx('dummy', testroot='refonly_bullet_list')
def test_compact_refonly_bullet_list(app, status, warning):
app.builder.build_all()
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert len(doctree[0]) == 5
assert doctree[0][1].astext() == 'List A:'
assert_node(doctree[0][2], nodes.bullet_list)
assert_node(doctree[0][2][0][0], addnodes.compact_paragraph)
assert doctree[0][2][0][0].astext() == 'genindex'
assert doctree[0][3].astext() == 'List B:'
assert_node(doctree[0][4], nodes.bullet_list)
assert_node(doctree[0][4][0][0], nodes.paragraph)
assert doctree[0][4][0][0].astext() == 'Hello'
@pytest.mark.sphinx('dummy', testroot='default_role')
def test_default_role1(app, status, warning):
app.builder.build_all()
# default-role: pep
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert_node(doctree[0][1], nodes.paragraph)
assert_node(doctree[0][1][0], addnodes.index)
assert_node(doctree[0][1][1], nodes.target)
assert_node(doctree[0][1][2], nodes.reference, classes=["pep"])
# no default-role
doctree = app.env.get_doctree('foo')
assert_node(doctree[0], nodes.section)
assert_node(doctree[0][1], nodes.paragraph)
assert_node(doctree[0][1][0], nodes.title_reference)
assert_node(doctree[0][1][1], nodes.Text)
@pytest.mark.sphinx('dummy', testroot='default_role',
confoverrides={'default_role': 'guilabel'})
def test_default_role2(app, status, warning):
app.builder.build_all()
    # the default-role directive is stronger than the configuration
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert_node(doctree[0][1], nodes.paragraph)
assert_node(doctree[0][1][0], addnodes.index)
assert_node(doctree[0][1][1], nodes.target)
assert_node(doctree[0][1][2], nodes.reference, classes=["pep"])
# default_role changes the default behavior
doctree = app.env.get_doctree('foo')
assert_node(doctree[0], nodes.section)
assert_node(doctree[0][1], nodes.paragraph)
assert_node(doctree[0][1][0], nodes.inline, classes=["guilabel"])
assert_node(doctree[0][1][1], nodes.Text)
|
virtual/lib/python3.6/site-packages/sqlalchemy/sql/default_comparator.py | mzazakeith/flask-blog | 207 | 5986 | <reponame>mzazakeith/flask-blog
# sql/default_comparator.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Default implementation of SQL comparison operations.
"""
from .. import exc, util
from . import type_api
from . import operators
from .elements import BindParameter, True_, False_, BinaryExpression, \
Null, _const_expr, _clause_element_as_expr, \
ClauseList, ColumnElement, TextClause, UnaryExpression, \
collate, _is_literal, _literal_as_text, ClauseElement, and_, or_, \
Slice, Visitable, _literal_as_binds, CollectionAggregate
from .selectable import SelectBase, Alias, Selectable, ScalarSelect
def _boolean_compare(expr, op, obj, negate=None, reverse=False,
_python_is_types=(util.NoneType, bool),
result_type = None,
**kwargs):
if result_type is None:
result_type = type_api.BOOLEANTYPE
if isinstance(obj, _python_is_types + (Null, True_, False_)):
# allow x ==/!= True/False to be treated as a literal.
# this comes out to "== / != true/false" or "1/0" if those
# constants aren't supported and works on all platforms
if op in (operators.eq, operators.ne) and \
isinstance(obj, (bool, True_, False_)):
return BinaryExpression(expr,
_literal_as_text(obj),
op,
type_=result_type,
negate=negate, modifiers=kwargs)
elif op in (operators.is_distinct_from, operators.isnot_distinct_from):
return BinaryExpression(expr,
_literal_as_text(obj),
op,
type_=result_type,
negate=negate, modifiers=kwargs)
else:
# all other None/True/False uses IS, IS NOT
if op in (operators.eq, operators.is_):
return BinaryExpression(expr, _const_expr(obj),
operators.is_,
negate=operators.isnot,
type_=result_type
)
elif op in (operators.ne, operators.isnot):
return BinaryExpression(expr, _const_expr(obj),
operators.isnot,
negate=operators.is_,
type_=result_type
)
else:
raise exc.ArgumentError(
"Only '=', '!=', 'is_()', 'isnot()', "
"'is_distinct_from()', 'isnot_distinct_from()' "
"operators can be used with None/True/False")
else:
obj = _check_literal(expr, op, obj)
if reverse:
return BinaryExpression(obj,
expr,
op,
type_=result_type,
negate=negate, modifiers=kwargs)
else:
return BinaryExpression(expr,
obj,
op,
type_=result_type,
negate=negate, modifiers=kwargs)
def _custom_op_operate(expr, op, obj, reverse=False, result_type=None,
**kw):
if result_type is None:
if op.return_type:
result_type = op.return_type
elif op.is_comparison:
result_type = type_api.BOOLEANTYPE
return _binary_operate(
expr, op, obj, reverse=reverse, result_type=result_type, **kw)
def _binary_operate(expr, op, obj, reverse=False, result_type=None,
**kw):
obj = _check_literal(expr, op, obj)
if reverse:
left, right = obj, expr
else:
left, right = expr, obj
if result_type is None:
op, result_type = left.comparator._adapt_expression(
op, right.comparator)
return BinaryExpression(
left, right, op, type_=result_type, modifiers=kw)
def _conjunction_operate(expr, op, other, **kw):
if op is operators.and_:
return and_(expr, other)
elif op is operators.or_:
return or_(expr, other)
else:
raise NotImplementedError()
def _scalar(expr, op, fn, **kw):
return fn(expr)
def _in_impl(expr, op, seq_or_selectable, negate_op, **kw):
seq_or_selectable = _clause_element_as_expr(seq_or_selectable)
if isinstance(seq_or_selectable, ScalarSelect):
return _boolean_compare(expr, op, seq_or_selectable,
negate=negate_op)
elif isinstance(seq_or_selectable, SelectBase):
# TODO: if we ever want to support (x, y, z) IN (select x,
# y, z from table), we would need a multi-column version of
# as_scalar() to produce a multi- column selectable that
# does not export itself as a FROM clause
return _boolean_compare(
expr, op, seq_or_selectable.as_scalar(),
negate=negate_op, **kw)
elif isinstance(seq_or_selectable, (Selectable, TextClause)):
return _boolean_compare(expr, op, seq_or_selectable,
negate=negate_op, **kw)
elif isinstance(seq_or_selectable, ClauseElement):
if isinstance(seq_or_selectable, BindParameter) and \
seq_or_selectable.expanding:
return _boolean_compare(
expr, op,
seq_or_selectable,
negate=negate_op)
else:
raise exc.InvalidRequestError(
'in_() accepts'
' either a list of expressions, '
'a selectable, or an "expanding" bound parameter: %r'
% seq_or_selectable)
# Handle non selectable arguments as sequences
args = []
for o in seq_or_selectable:
if not _is_literal(o):
if not isinstance(o, operators.ColumnOperators):
raise exc.InvalidRequestError(
'in_() accepts'
' either a list of expressions, '
'a selectable, or an "expanding" bound parameter: %r' % o)
elif o is None:
o = Null()
else:
o = expr._bind_param(op, o)
args.append(o)
if len(args) == 0:
op, negate_op = (
operators.empty_in_op,
operators.empty_notin_op) if op is operators.in_op \
else (
operators.empty_notin_op,
operators.empty_in_op)
return _boolean_compare(expr, op,
ClauseList(*args).self_group(against=op),
negate=negate_op)
def _getitem_impl(expr, op, other, **kw):
if isinstance(expr.type, type_api.INDEXABLE):
other = _check_literal(expr, op, other)
return _binary_operate(expr, op, other, **kw)
else:
_unsupported_impl(expr, op, other, **kw)
def _unsupported_impl(expr, op, *arg, **kw):
raise NotImplementedError("Operator '%s' is not supported on "
"this expression" % op.__name__)
def _inv_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__inv__`."""
if hasattr(expr, 'negation_clause'):
return expr.negation_clause
else:
return expr._negate()
def _neg_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__neg__`."""
return UnaryExpression(expr, operator=operators.neg, type_=expr.type)
def _match_impl(expr, op, other, **kw):
"""See :meth:`.ColumnOperators.match`."""
return _boolean_compare(
expr, operators.match_op,
_check_literal(
expr, operators.match_op, other),
result_type=type_api.MATCHTYPE,
negate=operators.notmatch_op
if op is operators.match_op else operators.match_op,
**kw
)
def _distinct_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.distinct`."""
return UnaryExpression(expr, operator=operators.distinct_op,
type_=expr.type)
def _between_impl(expr, op, cleft, cright, **kw):
"""See :meth:`.ColumnOperators.between`."""
return BinaryExpression(
expr,
ClauseList(
_check_literal(expr, operators.and_, cleft),
_check_literal(expr, operators.and_, cright),
operator=operators.and_,
group=False, group_contents=False),
op,
negate=operators.notbetween_op
if op is operators.between_op
else operators.between_op,
modifiers=kw)
def _collate_impl(expr, op, other, **kw):
return collate(expr, other)
# a mapping of operators with the method they use, along with
# their negated operator for comparison operators
operator_lookup = {
"and_": (_conjunction_operate,),
"or_": (_conjunction_operate,),
"inv": (_inv_impl,),
"add": (_binary_operate,),
"mul": (_binary_operate,),
"sub": (_binary_operate,),
"div": (_binary_operate,),
"mod": (_binary_operate,),
"truediv": (_binary_operate,),
"custom_op": (_custom_op_operate,),
"json_path_getitem_op": (_binary_operate, ),
"json_getitem_op": (_binary_operate, ),
"concat_op": (_binary_operate,),
"any_op": (_scalar, CollectionAggregate._create_any),
"all_op": (_scalar, CollectionAggregate._create_all),
"lt": (_boolean_compare, operators.ge),
"le": (_boolean_compare, operators.gt),
"ne": (_boolean_compare, operators.eq),
"gt": (_boolean_compare, operators.le),
"ge": (_boolean_compare, operators.lt),
"eq": (_boolean_compare, operators.ne),
"is_distinct_from": (_boolean_compare, operators.isnot_distinct_from),
"isnot_distinct_from": (_boolean_compare, operators.is_distinct_from),
"like_op": (_boolean_compare, operators.notlike_op),
"ilike_op": (_boolean_compare, operators.notilike_op),
"notlike_op": (_boolean_compare, operators.like_op),
"notilike_op": (_boolean_compare, operators.ilike_op),
"contains_op": (_boolean_compare, operators.notcontains_op),
"startswith_op": (_boolean_compare, operators.notstartswith_op),
"endswith_op": (_boolean_compare, operators.notendswith_op),
"desc_op": (_scalar, UnaryExpression._create_desc),
"asc_op": (_scalar, UnaryExpression._create_asc),
"nullsfirst_op": (_scalar, UnaryExpression._create_nullsfirst),
"nullslast_op": (_scalar, UnaryExpression._create_nullslast),
"in_op": (_in_impl, operators.notin_op),
"notin_op": (_in_impl, operators.in_op),
"is_": (_boolean_compare, operators.is_),
"isnot": (_boolean_compare, operators.isnot),
"collate": (_collate_impl,),
"match_op": (_match_impl,),
"notmatch_op": (_match_impl,),
"distinct_op": (_distinct_impl,),
"between_op": (_between_impl, ),
"notbetween_op": (_between_impl, ),
"neg": (_neg_impl,),
"getitem": (_getitem_impl,),
"lshift": (_unsupported_impl,),
"rshift": (_unsupported_impl,),
"contains": (_unsupported_impl,),
}
def _check_literal(expr, operator, other, bindparam_type=None):
if isinstance(other, (ColumnElement, TextClause)):
if isinstance(other, BindParameter) and \
other.type._isnull:
other = other._clone()
other.type = expr.type
return other
elif hasattr(other, '__clause_element__'):
other = other.__clause_element__()
elif isinstance(other, type_api.TypeEngine.Comparator):
other = other.expr
if isinstance(other, (SelectBase, Alias)):
return other.as_scalar()
elif not isinstance(other, Visitable):
return expr._bind_param(operator, other, type_=bindparam_type)
else:
return other
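# Dispatch sketch (illustrative): a typed column comparator routes Python
# operators through operator_lookup above, e.g.
#
#   from sqlalchemy import column
#   expr = column('x') == 5
#   # -> operator_lookup['eq'] -> _boolean_compare(col, operators.eq, 5,
#   #                                              negate=operators.ne)
#   # producing a BinaryExpression rendered as "x = :x_1" with 5 bound.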
|
vel/notebook/__init__.py | tigerwlin/vel | 273 | 6015 | from .loader import load |
CircuitPython_JEplayer_mp3/repeat.py | gamblor21/Adafruit_Learning_System_Guides | 665 | 6022 | <gh_stars>100-1000
# The MIT License (MIT)
#
# Copyright (c) 2020 <NAME> for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Make a key (button) repeat when held down
"""
import time
class KeyRepeat:
"""Track the state of a button and, while it is held, output a press every
'rate' seconds"""
def __init__(self, getter, rate=0.5):
self.getter = getter
self.rate_ns = round(rate * 1e9)
self.next = -1
@property
def value(self):
"""True when a button is first pressed, or once every 'rate' seconds
thereafter"""
state = self.getter()
if not state:
self.next = -1
return False
now = time.monotonic_ns()
if state and now > self.next:
self.next = now + self.rate_ns
return True
return False
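# Usage sketch (illustrative; the board/pin names are assumptions for a typical
# CircuitPython setup, not part of this module):
#
#   import board, digitalio
#   btn = digitalio.DigitalInOut(board.BUTTON_A)
#   btn.switch_to_input(pull=digitalio.Pull.UP)
#   repeat_up = KeyRepeat(lambda: not btn.value, rate=0.25)
#   while True:
#       if repeat_up.value:   # True on press, then every 0.25 s while held
#           print("scroll up")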
|
tests/test_env.py | Majanao/pytorch-blender | 381 | 6047 | import pytest
from pathlib import Path
from blendtorch import btt
BLENDDIR = Path(__file__).parent/'blender'
class MyEnv(btt.env.OpenAIRemoteEnv):
def __init__(self, background=True, **kwargs):
super().__init__(version='1.0.0')
self.launch(scene=BLENDDIR/'env.blend', script=BLENDDIR /
'env.blend.py', background=background, **kwargs)
    # For Blender 2.9, if we pass scene='', the tests below fail since
    # _env_post_step() is not called. It is currently unclear why this happens.
def _run_remote_env(background):
env = MyEnv(background=background)
obs = env.reset()
assert obs == 0.
obs, reward, done, info = env.step(0.1)
assert obs == pytest.approx(0.1)
assert reward == 0.
assert not done
assert info['count'] == 2 # 1 is already set by reset()
obs, reward, done, info = env.step(0.6)
assert obs == pytest.approx(0.6)
assert reward == 1.
assert not done
assert info['count'] == 3
for _ in range(8):
obs, reward, done, info = env.step(0.6)
assert done
obs = env.reset()
assert obs == 0.
obs, reward, done, info = env.step(0.1)
assert obs == pytest.approx(0.1)
assert reward == 0.
assert not done
assert info['count'] == 2
env.close()
@pytest.mark.background
def test_remote_env():
_run_remote_env(background=True)
def test_remote_env_ui():
_run_remote_env(background=False)
|
pytorch_ares/pytorch_ares/attack_torch/mim.py | thu-ml/realsafe | 107 | 6060 | <gh_stars>100-1000
import imp
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from pytorch_ares.attack_torch.utils import loss_adv
class MIM(object):
    '''Momentum Iterative Method (MI-FGSM) attack'''
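    # Usage sketch (illustrative; `net` is any nn.Module classifier, and the
    # loss name "ce" here is an assumption -- use whatever loss_adv accepts):
    #
    #   attack = MIM(net, epsilon=8/255, p=np.inf, stepsize=2/255, steps=10,
    #                decay_factor=1.0, data_name="cifar10", target=False,
    #                loss="ce", device="cuda")
    #   adv = attack.forward(images, labels, target_labels=None)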
def __init__(self, net, epsilon, p, stepsize, steps, decay_factor, data_name,target, loss, device):
self.epsilon = epsilon
self.p = p
self.net = net
self.decay_factor = decay_factor
self.stepsize = stepsize
self.target = target
self.steps = steps
self.loss = loss
self.data_name = data_name
self.device = device
if self.data_name=="cifar10" and self.target:
            raise AssertionError('cifar10 does not support targeted attack')
def forward(self, image, label, target_labels):
image, label = image.to(self.device), label.to(self.device)
if target_labels is not None:
target_labels = target_labels.to(self.device)
batchsize = image.shape[0]
advimage = image
momentum = torch.zeros_like(image).detach()
        # momentum iterative (MIM) updates to get the adversarial example
for i in range(self.steps):
advimage = advimage.clone().detach().requires_grad_(True) # clone the advimage as the next iteration input
netOut = self.net(advimage)
loss = loss_adv(self.loss, netOut, label, target_labels, self.target, self.device)
grad = torch.autograd.grad(loss, [advimage])[0].detach()
grad_norm = torch.norm(nn.Flatten()(grad), p=1, dim=1)
grad = grad / grad_norm.view([-1]+[1]*(len(grad.shape)-1))
grad = grad + momentum*self.decay_factor
momentum = grad
if self.p==np.inf:
updates = grad.sign()
else:
normVal = torch.norm(grad.view(batchsize, -1), self.p, 1)
updates = grad/normVal.view(batchsize, 1, 1, 1)
updates = updates*self.stepsize
advimage = advimage+updates
# project the disturbed image to feasible set if needed
delta = advimage-image
if self.p==np.inf:
delta = torch.clamp(delta, -self.epsilon, self.epsilon)
else:
normVal = torch.norm(delta.view(batchsize, -1), self.p, 1)
mask = normVal<=self.epsilon
scaling = self.epsilon/normVal
scaling[mask] = 1
delta = delta*scaling.view(batchsize, 1, 1, 1)
advimage = image+delta
advimage = torch.clamp(advimage, 0, 1)#cifar10(-1,1)
return advimage |
ares/defense/randomization.py | KuanKuanQAQ/ares | 206 | 6064 | <reponame>KuanKuanQAQ/ares<gh_stars>100-1000
''' The randomization defense method, which applies random rescaling and padding to the input. '''
import tensorflow as tf
from ares.defense.input_transformation import input_transformation
def randomize(xs, scale_min=0.875, pad_value=0.0):
''' Apply random rescaling and padding to xs.
:param xs: A batch of inputs for some classifier.
:param scale_min: The random rescaling rate would be chosen between ``scale_min`` and 1.0.
:param pad_value: ``constant_values`` parameter for the ``tf.pad`` method.
:return: A new tensor with same shape and dtype as xs.
'''
ratio = tf.random.uniform((), minval=scale_min, maxval=1.0)
height, width = tf.cast(xs.shape[1].value * ratio, tf.int32), tf.cast(xs.shape[2].value * ratio, tf.int32)
xs_rescaled = tf.image.resize(xs, (height, width), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True, preserve_aspect_ratio=False)
height_rem, width_rem = xs.shape[1].value - height, xs.shape[2].value - width
pad_left = tf.random_uniform((), 0, width_rem, dtype=tf.int32)
pad_right = width_rem - pad_left
pad_top = tf.random_uniform((), 0, height_rem, dtype=tf.int32)
pad_bottom = height_rem - pad_top
xs_padded = tf.pad(xs_rescaled, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]],
constant_values=pad_value)
xs_padded.set_shape(xs.shape)
return xs_padded
def randomization(scale_min=0.875, pad_value=0.0):
''' A decorator to apply randomize rescaling and padding to input of the classifier.
:param scale_min: The random rescaling rate would be chosen between ``scale_min`` and 1.0.
:param pad_value: ``constant_values`` parameter for the ``tf.pad`` method.
'''
def args_fn(_):
return (scale_min, pad_value)
def kwargs_fn(_):
return {}
return lambda rs_class: input_transformation(rs_class, randomize, args_fn, kwargs_fn)
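# Usage sketch (illustrative): the decorator wraps a classifier class so that
# its inputs are randomly rescaled and padded before the forward pass. The
# classifier base class named below is an assumption.
#
#   @randomization(scale_min=0.875, pad_value=0.0)
#   class RandomizedResNet(ResNetClassifier):
#       pass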
|
jumpy/jumpy/ndarray.py | rghwer/testdocs | 13,006 | 6072 | ################################################################################
# Copyright (c) 2015-2018 Skymind, Inc.
#
# This program and the accompanying materials are made available under the
# terms of the Apache License, Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
################################################################################
from .java_classes import *
import numpy as np
import ctypes
import warnings
native_ops = NativeOpsHolder.getInstance().getDeviceNativeOps()
# DATA TYPE MANAGEMENT
DOUBLE = DataType.DOUBLE
FLOAT = DataType.FLOAT
HALF = DataType.HALF
LONG = DataType.LONG
INT = DataType.INT
SHORT = DataType.SHORT
UBYTE = DataType.UBYTE
BYTE = DataType.BYTE
BOOL = DataType.BOOL
UTF8 = DataType.UTF8
COMPRESSED = DataType.COMPRESSED
UNKNOWN = DataType.UNKNOWN
SUPPORTED_JAVA_DTYPES = [
DOUBLE,
FLOAT,
HALF,
LONG,
INT,
SHORT,
BOOL
#UTF8
]
SUPPORTED_PYTHON_DTYPES = [
np.float64,
np.float32,
np.float16,
np.int64,
np.int32,
np.int16,
np.bool_
#np.str_
]
_PY2J = {SUPPORTED_PYTHON_DTYPES[i] : SUPPORTED_JAVA_DTYPES[i] for i in range(len(SUPPORTED_JAVA_DTYPES))}
_J2PY = {SUPPORTED_JAVA_DTYPES[i] : SUPPORTED_PYTHON_DTYPES[i] for i in range(len(SUPPORTED_JAVA_DTYPES))}
def _dtype_py2j(dtype):
if isinstance(dtype, str):
dtype = np.dtype(dtype).type
elif isinstance(dtype, np.dtype):
dtype = dtype.type
jtype = _PY2J.get(dtype)
if jtype is None:
raise NotImplementedError("Unsupported type: " + dtype.name)
return jtype
def _dtype_j2py(dtype):
pytype = _J2PY.get(dtype)
if pytype is None:
raise NotImplementedError("Unsupported type: " + (str(dtype)))
return pytype
def set_context_dtype(dtype):
'''
Sets the dtype for nd4j
# Arguments
dtype: 'float' or 'double'
'''
dtype_map = {
'float32': 'float',
'float64': 'double'
}
dtype = dtype_map.get(dtype, dtype)
if dtype not in ['float', 'double']:
raise ValueError("Invalid dtype '{}'. Available dtypes are 'float' and 'double'.".format(dtype))
dtype_ = DataTypeUtil.getDtypeFromContext(dtype)
DataTypeUtil.setDTypeForContext(dtype_)
if get_context_dtype() != dtype:
warnings.warn("Can not set context dtype now. Set it at the beginning of your program.")
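# e.g. (illustrative): call once at program start, before any arrays exist:
#   set_context_dtype('float64')   # accepts 'float64'/'double' or 'float32'/'float'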
def get_context_dtype():
'''
Returns the nd4j dtype
'''
dtype = DataTypeUtil.getDtypeFromContext()
return DataTypeUtil.getDTypeForName(dtype)
_refs = []
def _from_numpy(np_array):
'''
Convert numpy array to nd4j array
'''
pointer_address, _ = np_array.__array_interface__['data']
_refs.append(np_array)
pointer = native_ops.pointerForAddress(pointer_address)
size = np_array.size
pointer.limit(size)
jdtype = _dtype_py2j(np_array.dtype)
'''
mapping = {
DOUBLE: DoublePointer,
FLOAT: FloatPointer,
HALF: HalfPointer,
LONG: LongPointer,
INT: IntPointer,
SHORT: ShortPointer,
BOOL: BoolPointer
}
pc = mapping[jdtype]
#pointer = pc(pointer)
'''
buff = Nd4j.createBuffer(pointer, size, jdtype)
assert buff.address() == pointer_address
_refs.append(buff)
elem_size = buff.getElementSize()
assert elem_size == np_array.dtype.itemsize
strides = np_array.strides
strides = [dim / elem_size for dim in strides]
shape = np_array.shape
nd4j_array = Nd4j.create(buff, shape, strides, 0)
assert buff.address() == nd4j_array.data().address()
return nd4j_array
def _to_numpy(nd4j_array):
'''
Convert nd4j array to numpy array
'''
buff = nd4j_array.data()
address = buff.pointer().address()
dtype = nd4j_array.dataType().toString()
mapping = {
'DOUBLE': ctypes.c_double,
'FLOAT': ctypes.c_float,
'HALF': ctypes.c_short,
'LONG': ctypes.c_long,
'INT': ctypes.c_int,
'SHORT': ctypes.c_short,
'BOOL': ctypes.c_bool
}
Pointer = ctypes.POINTER(mapping[dtype])
pointer = ctypes.cast(address, Pointer)
np_array = np.ctypeslib.as_array(pointer, tuple(nd4j_array.shape()))
return np_array
def _indarray(x):
typ = type(x)
if typ is INDArray:
return x
elif typ is ndarray:
return x.array
elif 'numpy' in str(typ):
return _from_numpy(x)
elif typ in (list, tuple):
return _from_numpy(np.array(x))
elif typ in (int, float):
return Nd4j.scalar(x)
else:
raise Exception('Data type not understood :' + str(typ))
def _nparray(x):
typ = type(x)
if typ is INDArray:
return ndarray(x).numpy()
elif typ is ndarray:
return x.numpy()
elif 'numpy' in str(typ):
return x
elif typ in (list, tuple):
return np.array(x)
elif typ in (int, float):
return np.array(x)
else:
raise Exception('Data type not understood :' + str(typ))
def broadcast_like(y, x):
xs = x.shape()
ys = y.shape()
if xs == ys:
return y
_xs = tuple(xs)
_ys = tuple(ys)
nx = len(xs)
ny = len(ys)
if nx > ny:
diff = nx - ny
ys = ([1] * diff) + ys
y = y.reshape(ys)
ny = nx
elif ny > nx:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
yt = []
rep_y = False
for xd, yd in zip(xs, ys):
if xd == yd:
yt.append(1)
elif xd == 1:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
elif yd == 1:
yt.append(xd)
rep_y = True
else:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
if rep_y:
y = y.repmat(*yt)
return y
def broadcast(x, y):
xs = x.shape()
ys = y.shape()
if xs == ys:
return x, y
_xs = tuple(xs)
_ys = tuple(ys)
nx = len(xs)
ny = len(ys)
if nx > ny:
diff = nx - ny
ys = ([1] * diff) + ys
y = y.reshape(*ys)
ny = nx
elif ny > nx:
diff = ny - nx
xs = ([1] * diff) + xs
x = x.reshape(*xs)
nx = ny
xt = []
yt = []
rep_x = False
rep_y = False
for xd, yd in zip(xs, ys):
if xd == yd:
xt.append(1)
yt.append(1)
elif xd == 1:
xt.append(yd)
yt.append(1)
rep_x = True
elif yd == 1:
xt.append(1)
yt.append(xd)
rep_y = True
else:
raise Exception('Unable to broadcast shapes ' + str(_xs) + ''
' and ' + str(_ys))
if rep_x:
x = Nd4j.tile(x, *xt)
if rep_y:
try:
y = Nd4j.tile(y, *yt)
        except Exception:
            # retry the tile() call once
            y = Nd4j.tile(y, *yt)
return x, y
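# For example, broadcasting a (3,) array against a (2, 3) array pads the smaller shape with
# leading 1s and then materialises the repeats with Nd4j.tile, mirroring numpy's broadcasting
# rules; mismatched dimensions other than 1 raise an exception above.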
class ndarray(object):
def __init__(self, data, dtype=None):
# we ignore dtype for now
typ = type(data)
if 'nd4j' in typ.__name__:
# Note that we don't make a copy here
self.array = data
elif typ is ndarray:
self.array = data.array.dup()
else:
if typ is not np.ndarray:
data = np.array(data)
self.array = _from_numpy(data)
def numpy(self):
try:
return self.np_array
except AttributeError:
self.np_array = _to_numpy(self.array)
return self.np_array
@property
def size(self):
return self.array.length()
@property
def shape(self):
return tuple(self.array.shape())
@shape.setter
def shape(self, value):
arr = self.reshape(value)
self.array = arr.array
@property
def ndim(self):
return len(self.array.shape())
def __getitem__(self, key):
return ndarray(self.numpy()[key])
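        # NOTE: the early return above delegates indexing to numpy, so the nd4j-backed code
        # below it is unreachable; the same pattern (numpy delegation followed by unreachable
        # nd4j code) repeats in __setitem__ and the arithmetic methods further down.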
if type(key) is int:
return ndarray(self.array.get(NDArrayIndex.point(key)))
if type(key) is slice:
start = key.start
stop = key.stop
step = key.step
if start is None:
start = 0
if stop is None:
shape = self.array.shape()
if shape[0] == 1:
stop = shape[1]
else:
stop = shape[0]
if stop - start <= 0:
return None
if step is None or step == 1:
return ndarray(self.array.get(NDArrayIndex.interval(start, stop)))
else:
return ndarray(self.array.get(NDArrayIndex.interval(start, step, stop)))
if type(key) is list:
raise NotImplementedError(
'Sorry, this type of indexing is not supported yet.')
if type(key) is tuple:
key = list(key)
shape = self.array.shape()
ndim = len(shape)
nk = len(key)
key += [slice(None)] * (ndim - nk)
args = []
for i, dim in enumerate(key):
if type(dim) is int:
args.append(NDArrayIndex.point(dim))
elif type(dim) is slice:
if dim == slice(None):
args.append(NDArrayIndex.all())
else:
start = dim.start
stop = dim.stop
step = dim.step
if start is None:
start = 0
if stop is None:
stop = shape[i]
if stop - start <= 0:
return None
if step is None or step == 1:
args.append(NDArrayIndex.interval(start, stop))
else:
args.append(NDArrayIndex.interval(
start, step, stop))
elif type(dim) in (list, tuple):
raise NotImplementedError(
'Sorry, this type of indexing is not supported yet.')
return ndarray(self.array.get(*args))
def __setitem__(self, key, other):
self.numpy()[key] = _nparray(other)
return
other = _indarray(other)
view = self[key]
if view is None:
return
view = view.array
other = broadcast_like(other, view)
view.assign(other)
def __add__(self, other):
return ndarray(self.numpy() + _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.add(y))
def __sub__(self, other):
return ndarray(self.numpy() - _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.sub(y))
def __mul__(self, other):
return ndarray(self.numpy() * _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.mul(y))
def __div__(self, other):
return ndarray(self.numpy() / _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.div(y))
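    # NOTE: __div__/__idiv__ are Python 2 operator hooks; on Python 3 the / operator looks up
    # __truediv__/__itruediv__, so aliases would be needed for division to reach these methods.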
def __pow__(self, other):
return ndarray(self.numpy() ** _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(Transforms.pow(x, y))
def __iadd__(self, other):
self.numpy().__iadd__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.addi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.add(y)
return self
def __isub__(self, other):
self.numpy().__isub__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.subi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.sub(y)
return self
def __imul__(self, other):
self.numpy().__imul__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.muli(other)
else:
x, y = broadcast(self.array, other)
self.array = x.mul(y)
return self
def __idiv__(self, other):
self.numpy().__idiv__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.divi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.div(y)
return self
def __ipow__(self, other):
self.numpy().__ipow__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
            self.array = Transforms.pow(self.array, other)
else:
x, y = broadcast(self.array, other)
self.array = Transforms.pow(x, y)
return self
def __getattr__(self, attr):
import ops
f = getattr(ops, attr)
setattr(ndarray, attr, f)
return getattr(self, attr)
def __int__(self):
if self.array.length() == 1:
return self.array.getInt(0)
raise Exception('Applicable only for scalars')
def __float__(self):
if self.array.length() == 1:
return self.array.getDouble(0)
raise Exception('Applicable only for scalars')
@property
def T(self):
return self.transpose()
def array(*args, **kwargs):
return ndarray(*args, **kwargs)
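# Minimal usage sketch (illustrative addition, not part of the original module). It assumes the
# nd4j JVM backend referenced above is on the classpath and initialised, since the wrapper
# allocates its buffers there; only the API defined in this file is used.
if __name__ == '__main__':
    demo = array(np.arange(6, dtype='float32').reshape(2, 3))  # wrap a numpy array
    other = array([[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])          # lists are converted via numpy
    total = demo + other                                       # elementwise add (see __add__)
    print(total.shape)                                         # -> (2, 3)
    print(total.numpy())                                       # view the result back as numpy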
|
localgraphclustering/algorithms/eig2_nL.py | vishalbelsare/LocalGraphClustering | 106 | 6078 | import numpy as np
import scipy as sp
import scipy.sparse.linalg as splinalg
def eig2_nL(g, tol_eigs = 1.0e-6, normalize:bool = True, dim:int=1):
"""
DESCRIPTION
-----------
Computes the eigenvector that corresponds to the second smallest eigenvalue
of the normalized Laplacian matrix then it uses sweep cut to round the solution.
PARAMETERS (mandatory)
----------------------
g: graph object
PARAMETERS (optional)
---------------------
dim: positive, int
default == 1
The number of eigenvectors or dimensions to compute.
tol_eigs: positive float, double
default == 1.0e-6
Tolerance for computation of the eigenvector that corresponds to
the second smallest eigenvalue of the normalized Laplacian matrix.
normalize: bool,
default == True
True if we should return the eigenvectors of the generalized
eigenvalue problem associated with the normalized Laplacian.
This should be on unless you know what you are doing.
    RETURNS
    -------
    p: Eigenvector or eigenvector matrix that
       corresponds to the second smallest eigenvalue of the
       normalized Laplacian matrix, plus eigenvectors for the next
       larger eigenvalues when dim > 1.
"""
n = g.adjacency_matrix.shape[0]
D_sqrt_neg = sp.sparse.spdiags(g.dn_sqrt.transpose(), 0, n, n)
L = sp.sparse.identity(n) - D_sqrt_neg.dot((g.adjacency_matrix.dot(D_sqrt_neg)))
emb_eig_val, p = splinalg.eigsh(L, which='SM', k=1+dim, tol = tol_eigs)
F = np.real(p[:,1:])
if normalize:
F *= g.dn_sqrt[:,np.newaxis]
return F, emb_eig_val
"""
Random walks and local cuts in graphs, Chung, LAA 2007
We just form the sub-matrix of the Laplacian and use the eigenvector there.
"""
def eig2nL_subgraph(g, ref_nodes, tol_eigs = 1.0e-6, normalize: bool = True):
A_sub = g.adjacency_matrix.tocsr()[ref_nodes, :].tocsc()[:, ref_nodes]
nref = len(ref_nodes)
D_sqrt_neg = sp.sparse.spdiags(g.dn_sqrt[ref_nodes].transpose(), 0, nref, nref)
L_sub = sp.sparse.identity(nref) - D_sqrt_neg.dot((A_sub.dot(D_sqrt_neg)))
emb_eig_val, emb_eig = splinalg.eigsh(L_sub, which='SM', k=1, tol=tol_eigs)
emb_eig *= -1 if max(emb_eig) < 0 else 1
f = emb_eig[:,0]
if normalize:
f *= g.dn_sqrt[ref_nodes]
return ((ref_nodes,f), emb_eig_val)
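# Minimal usage sketch (illustrative addition): eig2_nL only needs an object exposing
# `adjacency_matrix` (a scipy sparse matrix) and `dn_sqrt` (the 1/sqrt(degree) vector), so a
# tiny stand-in graph is enough to exercise it; in the real package a graph object plays this role.
if __name__ == '__main__':
    class _ToyGraph:
        def __init__(self, A):
            self.adjacency_matrix = A
            degrees = np.asarray(A.sum(axis=1)).flatten()
            self.dn_sqrt = 1.0 / np.sqrt(degrees)
    # path graph on 6 nodes (ones on the sub- and super-diagonal)
    A = sp.sparse.diags([1.0, 1.0], [-1, 1], shape=(6, 6), format='csr')
    F, vals = eig2_nL(_ToyGraph(A))
    print(F.shape, vals)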
|
src/toil/batchSystems/abstractBatchSystem.py | Hexotical/toil | 348 | 6096 |
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import logging
import os
import shutil
from abc import ABC, abstractmethod
from argparse import ArgumentParser, _ArgumentGroup
from contextlib import contextmanager
from typing import (Any,
Callable,
ContextManager,
Dict,
Iterator,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
NamedTuple)
from toil.common import Toil, cacheDirName, Config
from toil.deferred import DeferredFunctionManager
from toil.fileStores.abstractFileStore import AbstractFileStore
from toil.job import JobDescription
from toil.resource import Resource
logger = logging.getLogger(__name__)
# Value to use as exitStatus in UpdatedBatchJobInfo.exitStatus when status is not available.
EXIT_STATUS_UNAVAILABLE_VALUE = 255
class BatchJobExitReason(enum.Enum):
FINISHED: int = 1 # Successfully finished.
FAILED: int = 2 # Job finished, but failed.
LOST: int = 3 # Preemptable failure (job's executing host went away).
KILLED: int = 4 # Job killed before finishing.
ERROR: int = 5 # Internal error.
MEMLIMIT: int = 6 # Job hit batch system imposed memory limit
class UpdatedBatchJobInfo(NamedTuple):
jobID: int
exitStatus: int
"""
The exit status (integer value) of the job. 0 implies successful.
EXIT_STATUS_UNAVAILABLE_VALUE is used when the exit status is not available (e.g. job is lost).
"""
exitReason: Optional[BatchJobExitReason]
wallTime: Union[float, int, None]
# Information required for worker cleanup on shutdown of the batch system.
class WorkerCleanupInfo(NamedTuple):
workDir: str
"""workdir path (where the cache would go)"""
workflowID: str
"""used to identify files specific to this workflow"""
cleanWorkDir: str
class AbstractBatchSystem(ABC):
"""
An abstract (as far as Python currently allows) base class to represent the interface the batch
system must provide to Toil.
"""
@classmethod
@abstractmethod
def supportsAutoDeployment(cls) -> bool:
"""
Whether this batch system supports auto-deployment of the user script itself. If it does,
the :meth:`.setUserScript` can be invoked to set the resource object representing the user
script.
        Note to implementors: If your implementation returns True here, it should also override
        :meth:`.setUserScript`.
        """
raise NotImplementedError()
@classmethod
@abstractmethod
def supportsWorkerCleanup(cls) -> bool:
"""
Indicates whether this batch system invokes
:meth:`BatchSystemSupport.workerCleanup` after the last job for a
particular workflow invocation finishes. Note that the term *worker*
refers to an entire node, not just a worker process. A worker process
may run more than one job sequentially, and more than one concurrent
worker process may exist on a worker node, for the same workflow. The
batch system is said to *shut down* after the last worker process
terminates.
"""
raise NotImplementedError()
def setUserScript(self, userScript: Resource) -> None:
"""
Set the user script for this workflow. This method must be called before the first job is
issued to this batch system, and only if :meth:`.supportsAutoDeployment` returns True,
otherwise it will raise an exception.
:param userScript: the resource object representing the user script
or module and the modules it depends on.
"""
raise NotImplementedError()
@abstractmethod
def issueBatchJob(self, jobDesc: JobDescription, job_environment: Optional[Dict[str, str]] = None) -> int:
"""
Issues a job with the specified command to the batch system and returns a unique jobID.
        :param jobDesc: a toil.job.JobDescription
:param job_environment: a collection of job-specific environment variables
to be set on the worker.
:return: a unique jobID that can be used to reference the newly issued job
"""
raise NotImplementedError()
@abstractmethod
def killBatchJobs(self, jobIDs: List[int]) -> None:
"""
Kills the given job IDs. After returning, the killed jobs will not
appear in the results of getRunningBatchJobIDs. The killed job will not
be returned from getUpdatedBatchJob.
:param jobIDs: list of IDs of jobs to kill
"""
raise NotImplementedError()
# FIXME: Return value should be a set (then also fix the tests)
@abstractmethod
def getIssuedBatchJobIDs(self) -> List[int]:
"""
Gets all currently issued jobs
:return: A list of jobs (as jobIDs) currently issued (may be running, or may be
waiting to be run). Despite the result being a list, the ordering should not
be depended upon.
"""
raise NotImplementedError()
@abstractmethod
def getRunningBatchJobIDs(self) -> Dict[int, float]:
"""
Gets a map of jobs as jobIDs that are currently running (not just waiting)
and how long they have been running, in seconds.
:return: dictionary with currently running jobID keys and how many seconds they have
been running as the value
"""
raise NotImplementedError()
@abstractmethod
def getUpdatedBatchJob(self, maxWait: int) -> Optional[UpdatedBatchJobInfo]:
"""
Returns information about job that has updated its status (i.e. ceased
running, either successfully or with an error). Each such job will be
returned exactly once.
Does not return info for jobs killed by killBatchJobs, although they
may cause None to be returned earlier than maxWait.
:param maxWait: the number of seconds to block, waiting for a result
:return: If a result is available, returns UpdatedBatchJobInfo.
Otherwise it returns None. wallTime is the number of seconds (a strictly
positive float) in wall-clock time the job ran for, or None if this
batch system does not support tracking wall time.
"""
raise NotImplementedError()
def getSchedulingStatusMessage(self) -> Optional[str]:
"""
Get a log message fragment for the user about anything that might be
going wrong in the batch system, if available.
If no useful message is available, return None.
This can be used to report what resource is the limiting factor when
scheduling jobs, for example. If the leader thinks the workflow is
stuck, the message can be displayed to the user to help them diagnose
why it might be stuck.
:return: User-directed message about scheduling state.
"""
# Default implementation returns None.
# Override to provide scheduling status information.
return None
@abstractmethod
def shutdown(self) -> None:
"""
Called at the completion of a toil invocation.
Should cleanly terminate all worker threads.
"""
raise NotImplementedError()
def setEnv(self, name: str, value: Optional[str] = None) -> None:
"""
Set an environment variable for the worker process before it is launched. The worker
process will typically inherit the environment of the machine it is running on but this
method makes it possible to override specific variables in that inherited environment
before the worker is launched. Note that this mechanism is different to the one used by
the worker internally to set up the environment of a job. A call to this method affects
all jobs issued after this method returns. Note to implementors: This means that you
would typically need to copy the variables before enqueuing a job.
If no value is provided it will be looked up from the current environment.
"""
raise NotImplementedError()
@classmethod
def add_options(cls, parser: Union[ArgumentParser, _ArgumentGroup]) -> None:
"""
If this batch system provides any command line options, add them to the given parser.
"""
pass
OptionType = TypeVar('OptionType')
@classmethod
def setOptions(cls, setOption: Callable[[str, Optional[Callable[[Any], OptionType]], Optional[Callable[[OptionType], None]], Optional[OptionType], Optional[List[str]]], None]) -> None:
"""
Process command line or configuration options relevant to this batch system.
:param setOption: A function with signature
setOption(option_name, parsing_function=None, check_function=None, default=None, env=None)
returning nothing, used to update run configuration as a side effect.
"""
# TODO: change type to a Protocol to express kwarg names, or else use a
# different interface (generator?)
pass
def getWorkerContexts(self) -> List[ContextManager[Any]]:
"""
Get a list of picklable context manager objects to wrap worker work in,
in order.
Can be used to ask the Toil worker to do things in-process (such as
configuring environment variables, hot-deploying user scripts, or
cleaning up a node) that would otherwise require a wrapping "executor"
process.
"""
return []
class BatchSystemSupport(AbstractBatchSystem):
"""
Partial implementation of AbstractBatchSystem, support methods.
"""
def __init__(self, config: Config, maxCores: float, maxMemory: int, maxDisk: int) -> None:
"""
Initializes initial state of the object
:param toil.common.Config config: object is setup by the toilSetup script and
has configuration parameters for the jobtree. You can add code
to that script to get parameters for your batch system.
:param float maxCores: the maximum number of cores the batch system can
request for any one job
:param int maxMemory: the maximum amount of memory the batch system can
request for any one job, in bytes
:param int maxDisk: the maximum amount of disk space the batch system can
request for any one job, in bytes
"""
super().__init__()
self.config = config
self.maxCores = maxCores
self.maxMemory = maxMemory
self.maxDisk = maxDisk
self.environment: Dict[str, str] = {}
self.workerCleanupInfo = WorkerCleanupInfo(workDir=self.config.workDir,
workflowID=self.config.workflowID,
cleanWorkDir=self.config.cleanWorkDir)
def checkResourceRequest(self, memory: int, cores: float, disk: int, job_name: str = '', detail: str = '') -> None:
"""
Check resource request is not greater than that available or allowed.
:param int memory: amount of memory being requested, in bytes
:param float cores: number of cores being requested
:param int disk: amount of disk space being requested, in bytes
:param str job_name: Name of the job being checked, for generating a useful error report.
:param str detail: Batch-system-specific message to include in the error.
:raise InsufficientSystemResources: raised when a resource is requested in an amount
greater than allowed
"""
batch_system = self.__class__.__name__ or 'this batch system'
for resource, requested, available in [('cores', cores, self.maxCores),
('memory', memory, self.maxMemory),
('disk', disk, self.maxDisk)]:
assert requested is not None
if requested > available:
unit = 'bytes of ' if resource in ('disk', 'memory') else ''
R = f'The job {job_name} is r' if job_name else 'R'
if resource == 'disk':
msg = (f'{R}equesting {requested} {unit}{resource} for temporary space, '
f'more than the maximum of {available} {unit}{resource} of free space on '
f'{self.config.workDir} that {batch_system} was configured with, or enforced '
f'by --max{resource.capitalize()}. Try setting/changing the toil option '
f'"--workDir" or changing the base temporary directory by setting TMPDIR.')
else:
msg = (f'{R}equesting {requested} {unit}{resource}, more than the maximum of '
f'{available} {unit}{resource} that {batch_system} was configured with, '
f'or enforced by --max{resource.capitalize()}.')
if detail:
msg += detail
raise InsufficientSystemResources(msg)
def setEnv(self, name: str, value: Optional[str] = None) -> None:
"""
Set an environment variable for the worker process before it is launched. The worker
process will typically inherit the environment of the machine it is running on but this
method makes it possible to override specific variables in that inherited environment
before the worker is launched. Note that this mechanism is different to the one used by
the worker internally to set up the environment of a job. A call to this method affects
all jobs issued after this method returns. Note to implementors: This means that you
would typically need to copy the variables before enqueuing a job.
If no value is provided it will be looked up from the current environment.
:param str name: the environment variable to be set on the worker.
:param str value: if given, the environment variable given by name will be set to this value.
if None, the variable's current value will be used as the value on the worker
:raise RuntimeError: if value is None and the name cannot be found in the environment
"""
if value is None:
try:
value = os.environ[name]
except KeyError:
raise RuntimeError(f"{name} does not exist in current environment")
self.environment[name] = value
def formatStdOutErrPath(self, toil_job_id: int, cluster_job_id: str, std: str) -> str:
"""
Format path for batch system standard output/error and other files
generated by the batch system itself.
Files will be written to the Toil work directory (which may
be on a shared file system) with names containing both the Toil and
batch system job IDs, for ease of debugging job failures.
:param: int toil_job_id : The unique id that Toil gives a job.
:param: cluster_job_id : What the cluster, for example, GridEngine, uses as its internal job id.
:param: string std : The provenance of the stream (for example: 'err' for 'stderr' or 'out' for 'stdout')
:rtype: string : Formatted filename; however if self.config.noStdOutErr is true,
returns '/dev/null' or equivalent.
"""
if self.config.noStdOutErr:
return os.devnull
fileName: str = f'toil_{self.config.workflowID}.{toil_job_id}.{cluster_job_id}.{std}.log'
workDir: str = Toil.getToilWorkDir(self.config.workDir)
return os.path.join(workDir, fileName)
@staticmethod
def workerCleanup(info: WorkerCleanupInfo) -> None:
"""
Cleans up the worker node on batch system shutdown. Also see :meth:`supportsWorkerCleanup`.
:param WorkerCleanupInfo info: A named tuple consisting of all the relevant information
for cleaning up the worker.
"""
assert isinstance(info, WorkerCleanupInfo)
workflowDir = Toil.getLocalWorkflowDir(info.workflowID, info.workDir)
DeferredFunctionManager.cleanupWorker(workflowDir)
workflowDirContents = os.listdir(workflowDir)
AbstractFileStore.shutdownFileStore(workflowDir, info.workflowID)
if (info.cleanWorkDir == 'always'
or info.cleanWorkDir in ('onSuccess', 'onError')
and workflowDirContents in ([], [cacheDirName(info.workflowID)])):
shutil.rmtree(workflowDir, ignore_errors=True)
class NodeInfo:
"""
The coresUsed attribute is a floating point value between 0 (all cores idle) and 1 (all cores
busy), reflecting the CPU load of the node.
The memoryUsed attribute is a floating point value between 0 (no memory used) and 1 (all memory
used), reflecting the memory pressure on the node.
The coresTotal and memoryTotal attributes are the node's resources, not just the used resources
The requestedCores and requestedMemory attributes are all the resources that Toil Jobs have reserved on the
node, regardless of whether the resources are actually being used by the Jobs.
    The workers attribute is an integer reflecting the number of currently active workers
    on the node.
"""
def __init__(self, coresUsed: float, memoryUsed: float,
coresTotal: float, memoryTotal: int,
requestedCores: float, requestedMemory: int,
workers: int) -> None:
self.coresUsed = coresUsed
self.memoryUsed = memoryUsed
self.coresTotal = coresTotal
self.memoryTotal = memoryTotal
self.requestedCores = requestedCores
self.requestedMemory = requestedMemory
self.workers = workers
class AbstractScalableBatchSystem(AbstractBatchSystem):
"""
A batch system that supports a variable number of worker nodes. Used by :class:`toil.
provisioners.clusterScaler.ClusterScaler` to scale the number of worker nodes in the cluster
up or down depending on overall load.
"""
@abstractmethod
def getNodes(self, preemptable: Optional[bool] = None) -> Dict[str, NodeInfo]:
"""
Returns a dictionary mapping node identifiers of preemptable or non-preemptable nodes to
NodeInfo objects, one for each node.
:param preemptable: If True (False) only (non-)preemptable nodes will be returned.
If None, all nodes will be returned.
"""
raise NotImplementedError()
@abstractmethod
def nodeInUse(self, nodeIP: str) -> bool:
"""
        Can be used to determine if a worker node is running any tasks. If the node doesn't
        exist, this function should simply return False.
        :param nodeIP: The worker node's private IP address
:return: True if the worker node has been issued any tasks, else False
"""
raise NotImplementedError()
# TODO: May be unused!
@abstractmethod
@contextmanager
def nodeFiltering(self, filter: Optional[Callable[[NodeInfo], bool]]) -> Iterator[None]:
"""
Used to prevent races in autoscaling where
1) nodes have reported to the autoscaler as having no jobs
2) scaler decides to terminate these nodes. In parallel the batch system assigns jobs to the same nodes
3) scaler terminates nodes, resulting in job failures for all jobs on that node.
Call this method prior to node termination to ensure that nodes being considered for termination are not
assigned new jobs. Call the method again passing None as the filter to disable the filtering
after node termination is done.
        :param filter: This will be used as a filter on nodes considered when assigning new jobs.
After this context manager exits the filter should be removed
"""
raise NotImplementedError()
@abstractmethod
def ignoreNode(self, nodeAddress: str) -> None:
"""
Stop sending jobs to this node. Used in autoscaling
when the autoscaler is ready to terminate a node, but
jobs are still running. This allows the node to be terminated
after the current jobs have finished.
:param nodeAddress: IP address of node to ignore.
"""
raise NotImplementedError()
@abstractmethod
def unignoreNode(self, nodeAddress: str) -> None:
"""
Stop ignoring this address, presumably after
a node with this address has been terminated. This allows for the
possibility of a new node having the same address as a terminated one.
"""
raise NotImplementedError()
class InsufficientSystemResources(Exception):
pass
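# Illustrative sketch (not part of Toil): roughly the smallest shape a concrete batch system can
# take on top of BatchSystemSupport. The class name and the in-memory bookkeeping are made up for
# the example; a real implementation would hand jobs to an actual scheduler and report their
# completion through getUpdatedBatchJob.
class _ToySingleMachineBatchSystem(BatchSystemSupport):
    @classmethod
    def supportsAutoDeployment(cls) -> bool:
        return False
    @classmethod
    def supportsWorkerCleanup(cls) -> bool:
        return False
    def __init__(self, config: Config, maxCores: float, maxMemory: int, maxDisk: int) -> None:
        super().__init__(config, maxCores, maxMemory, maxDisk)
        self._jobs: Dict[int, JobDescription] = {}
        self._next_id = 0
    def issueBatchJob(self, jobDesc: JobDescription, job_environment: Optional[Dict[str, str]] = None) -> int:
        # jobDesc is assumed to expose the usual memory/cores/disk requirement accessors.
        self.checkResourceRequest(jobDesc.memory, jobDesc.cores, jobDesc.disk, job_name=str(jobDesc))
        job_id = self._next_id
        self._next_id += 1
        self._jobs[job_id] = jobDesc
        return job_id
    def killBatchJobs(self, jobIDs: List[int]) -> None:
        for job_id in jobIDs:
            self._jobs.pop(job_id, None)
    def getIssuedBatchJobIDs(self) -> List[int]:
        return list(self._jobs)
    def getRunningBatchJobIDs(self) -> Dict[int, float]:
        return {}
    def getUpdatedBatchJob(self, maxWait: int) -> Optional[UpdatedBatchJobInfo]:
        return None  # nothing ever finishes in this toy sketch
    def shutdown(self) -> None:
        self._jobs.clear()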
|
robosuite/models/grippers/__init__.py | kyungjaelee/robosuite | 397 | 6120 |
from .gripper_model import GripperModel
from .gripper_factory import gripper_factory
from .gripper_tester import GripperTester
from .panda_gripper import PandaGripper
from .rethink_gripper import RethinkGripper
from .robotiq_85_gripper import Robotiq85Gripper
from .robotiq_three_finger_gripper import RobotiqThreeFingerGripper, RobotiqThreeFingerDexterousGripper
from .jaco_three_finger_gripper import JacoThreeFingerGripper, JacoThreeFingerDexterousGripper
from .robotiq_140_gripper import Robotiq140Gripper
from .wiping_gripper import WipingGripper
from .null_gripper import NullGripper
GRIPPER_MAPPING = {
"RethinkGripper": RethinkGripper,
"PandaGripper": PandaGripper,
"JacoThreeFingerGripper": JacoThreeFingerGripper,
"JacoThreeFingerDexterousGripper": JacoThreeFingerDexterousGripper,
"WipingGripper": WipingGripper,
"Robotiq85Gripper": Robotiq85Gripper,
"Robotiq140Gripper": Robotiq140Gripper,
"RobotiqThreeFingerGripper": RobotiqThreeFingerGripper,
"RobotiqThreeFingerDexterousGripper": RobotiqThreeFingerDexterousGripper,
None: NullGripper,
}
ALL_GRIPPERS = GRIPPER_MAPPING.keys()
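# Illustrative lookup (the constructor arguments are an assumption, not defined in this module):
#
#   gripper_cls = GRIPPER_MAPPING["PandaGripper"]
#   gripper = gripper_cls(idn=0)
#
# The None key maps to NullGripper so robots configured without a gripper still resolve cleanly.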
|
tests/arch/x86/test_x86parser.py | IMULMUL/barf-project | 1,395 | 6128 | # Copyright (c) 2014, Fundacion Dr. <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import unittest
from barf.arch import ARCH_X86_MODE_32
from barf.arch import ARCH_X86_MODE_64
from barf.arch.x86.parser import X86Parser
class X86Parser32BitsTests(unittest.TestCase):
def setUp(self):
self._parser = X86Parser(ARCH_X86_MODE_32)
def test_two_oprnd_reg_reg(self):
asm = self._parser.parse("add eax, ebx")
self.assertEqual(str(asm), "add eax, ebx")
def test_two_oprnd_reg_imm(self):
asm = self._parser.parse("add eax, 0x12345678")
self.assertEqual(str(asm), "add eax, 0x12345678")
def test_two_oprnd_reg_mem(self):
asm = self._parser.parse("add eax, [ebx + edx * 4 + 0x10]")
self.assertEqual(str(asm), "add eax, [ebx+edx*4+0x10]")
def test_two_oprnd_mem_reg(self):
asm = self._parser.parse("add [ebx + edx * 4 + 0x10], eax")
self.assertEqual(str(asm), "add [ebx+edx*4+0x10], eax")
def test_one_oprnd_reg(self):
asm = self._parser.parse("inc eax")
self.assertEqual(str(asm), "inc eax")
def test_one_oprnd_imm(self):
asm = self._parser.parse("jmp 0x12345678")
self.assertEqual(str(asm), "jmp 0x12345678")
def test_one_oprnd_mem(self):
asm = self._parser.parse("inc dword ptr [ebx+edx*4+0x10]")
self.assertEqual(str(asm), "inc dword ptr [ebx+edx*4+0x10]")
def test_zero_oprnd(self):
asm = self._parser.parse("nop")
self.assertEqual(str(asm), "nop")
# Misc
# ======================================================================== #
def test_misc_1(self):
asm = self._parser.parse("mov dword ptr [-0x21524111], ecx")
self.assertEqual(str(asm), "mov dword ptr [-0x21524111], ecx")
self.assertNotEqual(str(asm), "mov dword ptr [0xdeadbeef], ecx")
def test_misc_2(self):
asm = self._parser.parse("fucompi st(1)")
self.assertEqual(str(asm), "fucompi st1")
class X86Parser64BitsTests(unittest.TestCase):
def setUp(self):
self._parser = X86Parser(ARCH_X86_MODE_64)
def test_64_two_oprnd_reg_reg(self):
asm = self._parser.parse("add rax, rbx")
self.assertEqual(str(asm), "add rax, rbx")
def test_64_two_oprnd_reg_reg_2(self):
asm = self._parser.parse("add rax, r8")
self.assertEqual(str(asm), "add rax, r8")
def test_64_two_oprnd_reg_mem(self):
asm = self._parser.parse("add rax, [rbx + r15 * 4 + 0x10]")
self.assertEqual(str(asm), "add rax, [rbx+r15*4+0x10]")
# Misc
# ======================================================================== #
def test_misc_offset_1(self):
asm = self._parser.parse("add byte ptr [rax+0xffffff89], cl")
self.assertEqual(str(asm), "add byte ptr [rax+0xffffff89], cl")
def main():
unittest.main()
if __name__ == '__main__':
main()
|
unittest/scripts/py_devapi/scripts/mysqlx_collection_remove.py | mueller/mysql-shell | 119 | 6140 | # Assumptions: validate_crud_functions available
# Assumes __uripwd is defined as <user>:<pwd>@<host>:<plugin_port>
from __future__ import print_function
from mysqlsh import mysqlx
mySession = mysqlx.get_session(__uripwd)
ensure_schema_does_not_exist(mySession, 'js_shell_test')
schema = mySession.create_schema('js_shell_test')
# Creates a test collection and inserts data into it
collection = schema.create_collection('collection1')
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA01", "name": 'jack', "age": 17, "gender": 'male'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA02", "name": 'adam', "age": 15, "gender": 'male'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA03", "name": 'brian', "age": 14, "gender": 'male'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA04", "name": 'alma', "age": 13, "gender": 'female'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA05", "name": 'carol', "age": 14, "gender": 'female'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA06", "name": 'donna', "age": 16, "gender": 'female'}).execute()
result = collection.add({"_id": "3C514FF38144B714E7119BCF48B4CA07", "name": 'angel', "age": 14, "gender": 'male'}).execute()
# ------------------------------------------------
# collection.remove Unit Testing: Dynamic Behavior
# ------------------------------------------------
#@ CollectionRemove: valid operations after remove
crud = collection.remove('some_condition')
validate_crud_functions(crud, ['sort', 'limit', 'bind', 'execute'])
#@ CollectionRemove: valid operations after sort
crud = crud.sort(['name'])
validate_crud_functions(crud, ['limit', 'bind', 'execute'])
#@ CollectionRemove: valid operations after limit
crud = crud.limit(1)
validate_crud_functions(crud, ['bind', 'execute'])
#@ CollectionRemove: valid operations after bind
crud = collection.remove('name = :data').bind('data', 'donna')
validate_crud_functions(crud, ['bind', 'execute'])
#@ CollectionRemove: valid operations after execute
result = crud.execute()
validate_crud_functions(crud, ['limit', 'bind', 'execute'])
#@ Reusing CRUD with binding
print('Deleted donna:', result.affected_items_count, '\n')
result=crud.bind('data', 'alma').execute()
print('Deleted alma:', result.affected_items_count, '\n')
# ----------------------------------------------
# collection.remove Unit Testing: Error Conditions
# ----------------------------------------------
#@# CollectionRemove: Error conditions on remove
crud = collection.remove()
crud = collection.remove(' ')
crud = collection.remove(5)
crud = collection.remove('test = "2')
#@# CollectionRemove: Error conditions sort
crud = collection.remove('some_condition').sort()
crud = collection.remove('some_condition').sort(5)
crud = collection.remove('some_condition').sort([])
crud = collection.remove('some_condition').sort(['name', 5])
crud = collection.remove('some_condition').sort('name', 5)
#@# CollectionRemove: Error conditions on limit
crud = collection.remove('some_condition').limit()
crud = collection.remove('some_condition').limit('')
#@# CollectionRemove: Error conditions on bind
crud = collection.remove('name = :data and age > :years').bind()
crud = collection.remove('name = :data and age > :years').bind(5, 5)
crud = collection.remove('name = :data and age > :years').bind('another', 5)
#@# CollectionRemove: Error conditions on execute
crud = collection.remove('name = :data and age > :years').execute()
crud = collection.remove('name = :data and age > :years').bind('years', 5).execute()
# ---------------------------------------
# collection.remove Unit Testing: Execution
# ---------------------------------------
#@ CollectionRemove: remove under condition
#! [CollectionRemove: remove under condition]
result = collection.remove('age = 15').execute()
print('Affected Rows:', result.affected_items_count, '\n')
docs = collection.find().execute().fetch_all()
print('Records Left:', len(docs), '\n')
#! [CollectionRemove: remove under condition]
#@ CollectionRemove: remove with binding
#! [CollectionRemove: remove with binding]
result = collection.remove('gender = :heorshe').limit(2).bind('heorshe', 'male').execute()
print('Affected Rows:', result.affected_items_count, '\n')
#! [CollectionRemove: remove with binding]
docs = collection.find().execute().fetch_all()
print('Records Left:', len(docs), '\n')
#@ CollectionRemove: full remove
#! [CollectionRemove: full remove]
result = collection.remove('1').execute()
print('Affected Rows:', result.affected_items_count, '\n')
docs = collection.find().execute().fetch_all()
print('Records Left:', len(docs), '\n')
#! [CollectionRemove: full remove]
# Cleanup
mySession.drop_schema('js_shell_test')
mySession.close()
|
leaf/rbac/model/__init__.py | guiqiqi/leaf | 119 | 6142 | """Users, groups and related authentication database models"""
from .group import Group
from .user import User
from .user import UserIndex
from .auth import Authentication
from .accesspoint import AccessPoint
|
alipay/aop/api/domain/KbAdvertSettleBillResponse.py | snowxmas/alipay-sdk-python-all | 213 | 6146 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KbAdvertSettleBillResponse(object):
def __init__(self):
self._download_url = None
self._paid_date = None
@property
def download_url(self):
return self._download_url
@download_url.setter
def download_url(self, value):
self._download_url = value
@property
def paid_date(self):
return self._paid_date
@paid_date.setter
def paid_date(self, value):
self._paid_date = value
def to_alipay_dict(self):
params = dict()
if self.download_url:
if hasattr(self.download_url, 'to_alipay_dict'):
params['download_url'] = self.download_url.to_alipay_dict()
else:
params['download_url'] = self.download_url
if self.paid_date:
if hasattr(self.paid_date, 'to_alipay_dict'):
params['paid_date'] = self.paid_date.to_alipay_dict()
else:
params['paid_date'] = self.paid_date
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KbAdvertSettleBillResponse()
if 'download_url' in d:
o.download_url = d['download_url']
if 'paid_date' in d:
o.paid_date = d['paid_date']
return o
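if __name__ == '__main__':
    # Illustrative round trip; the field values below are made up.
    resp = KbAdvertSettleBillResponse()
    resp.download_url = 'https://example.com/bill.csv'
    resp.paid_date = '2020-01-01 00:00:00'
    params = resp.to_alipay_dict()
    restored = KbAdvertSettleBillResponse.from_alipay_dict(params)
    print(params, restored.paid_date)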
|
django_loci/tests/base/test_admin.py | yashikajotwani12/django-loci | 205 | 6161 |
import json
import os
import responses
from django.urls import reverse
from .. import TestAdminMixin, TestLociMixin
class BaseTestAdmin(TestAdminMixin, TestLociMixin):
geocode_url = 'https://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer/'
def test_location_list(self):
self._login_as_admin()
self._create_location(name='test-admin-location-1')
url = reverse('{0}_location_changelist'.format(self.url_prefix))
r = self.client.get(url)
self.assertContains(r, 'test-admin-location-1')
def test_floorplan_list(self):
self._login_as_admin()
self._create_floorplan()
self._create_location()
url = reverse('{0}_floorplan_changelist'.format(self.url_prefix))
r = self.client.get(url)
self.assertContains(r, '1st floor')
def test_location_json_view(self):
self._login_as_admin()
loc = self._create_location()
r = self.client.get(reverse('admin:django_loci_location_json', args=[loc.pk]))
expected = {
'name': loc.name,
'address': loc.address,
'type': loc.type,
'is_mobile': loc.is_mobile,
'geometry': json.loads(loc.geometry.json),
}
self.assertDictEqual(r.json(), expected)
def test_location_floorplan_json_view(self):
self._login_as_admin()
fl = self._create_floorplan()
r = self.client.get(
reverse('admin:django_loci_location_floorplans_json', args=[fl.location.pk])
)
expected = {
'choices': [
{
'id': str(fl.pk),
'str': str(fl),
'floor': fl.floor,
'image': fl.image.url,
'image_width': fl.image.width,
'image_height': fl.image.height,
}
]
}
self.assertDictEqual(r.json(), expected)
def test_location_change_image_removed(self):
self._login_as_admin()
loc = self._create_location(name='test-admin-location-1', type='indoor')
fl = self._create_floorplan(location=loc)
# remove floorplan image
os.remove(fl.image.path)
url = reverse('{0}_location_change'.format(self.url_prefix), args=[loc.pk])
r = self.client.get(url)
self.assertContains(r, 'test-admin-location-1')
def test_floorplan_change_image_removed(self):
self._login_as_admin()
loc = self._create_location(name='test-admin-location-1', type='indoor')
fl = self._create_floorplan(location=loc)
# remove floorplan image
os.remove(fl.image.path)
url = reverse('{0}_floorplan_change'.format(self.url_prefix), args=[fl.pk])
r = self.client.get(url)
self.assertContains(r, 'test-admin-location-1')
def test_is_mobile_location_json_view(self):
self._login_as_admin()
loc = self._create_location(is_mobile=True, geometry=None)
response = self.client.get(
reverse('admin:django_loci_location_json', args=[loc.pk])
)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content)
self.assertEqual(content['geometry'], None)
loc1 = self._create_location(
name='location2', address='loc2 add', type='outdoor'
)
response1 = self.client.get(
reverse('admin:django_loci_location_json', args=[loc1.pk])
)
self.assertEqual(response1.status_code, 200)
content1 = json.loads(response1.content)
expected = {
'name': 'location2',
'address': 'loc2 add',
'type': 'outdoor',
'is_mobile': False,
'geometry': {'type': 'Point', 'coordinates': [12.512124, 41.898903]},
}
self.assertEqual(content1, expected)
@responses.activate
def test_geocode(self):
self._login_as_admin()
address = 'Red Square'
url = '{0}?address={1}'.format(
reverse('admin:django_loci_location_geocode_api'), address
)
# Mock HTTP request to the URL to work offline
responses.add(
responses.GET,
f'{self.geocode_url}findAddressCandidates?singleLine=Red+Square&f=json&maxLocations=1',
body=self._load_content('base/static/test-geocode.json'),
content_type='application/json',
)
response = self.client.get(url)
response_lat = round(response.json()['lat'])
response_lng = round(response.json()['lng'])
self.assertEqual(response.status_code, 200)
self.assertEqual(response_lat, 56)
self.assertEqual(response_lng, 38)
def test_geocode_no_address(self):
self._login_as_admin()
url = reverse('admin:django_loci_location_geocode_api')
response = self.client.get(url)
expected = {'error': 'Address parameter not defined'}
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), expected)
@responses.activate
def test_geocode_invalid_address(self):
self._login_as_admin()
invalid_address = 'thisaddressisnotvalid123abc'
url = '{0}?address={1}'.format(
reverse('admin:django_loci_location_geocode_api'), invalid_address
)
responses.add(
responses.GET,
f'{self.geocode_url}findAddressCandidates?singleLine=thisaddressisnotvalid123abc'
'&f=json&maxLocations=1',
body=self._load_content('base/static/test-geocode-invalid-address.json'),
content_type='application/json',
)
response = self.client.get(url)
expected = {'error': 'Not found location with given name'}
self.assertEqual(response.status_code, 404)
self.assertEqual(response.json(), expected)
@responses.activate
def test_reverse_geocode(self):
self._login_as_admin()
lat = 52
lng = 21
url = '{0}?lat={1}&lng={2}'.format(
reverse('admin:django_loci_location_reverse_geocode_api'), lat, lng
)
# Mock HTTP request to the URL to work offline
responses.add(
responses.GET,
f'{self.geocode_url}reverseGeocode?location=21.0%2C52.0&f=json&outSR=4326',
body=self._load_content('base/static/test-reverse-geocode.json'),
content_type='application/json',
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'POL')
@responses.activate
def test_reverse_location_with_no_address(self):
self._login_as_admin()
lat = -30
lng = -30
url = '{0}?lat={1}&lng={2}'.format(
reverse('admin:django_loci_location_reverse_geocode_api'), lat, lng
)
responses.add(
responses.GET,
f'{self.geocode_url}reverseGeocode?location=-30.0%2C-30.0&f=json&outSR=4326',
body=self._load_content(
'base/static/test-reverse-location-with-no-address.json'
),
content_type='application/json',
)
response = self.client.get(url)
response_address = response.json()['address']
self.assertEqual(response.status_code, 404)
self.assertEqual(response_address, '')
def test_reverse_geocode_no_coords(self):
self._login_as_admin()
url = reverse('admin:django_loci_location_reverse_geocode_api')
response = self.client.get(url)
expected = {'error': 'lat or lng parameter not defined'}
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), expected)
|
test/run/t344.py | timmartin/skulpt | 2,671 | 6171 | for ch in "Hello world!":
d = ord(ch)
h = hex(d)
o = oct(d)
b = bin(d)
print ch, d, h, o, b
|
build/lib/jet_django/views/model.py | lukejamison/jet-dasboard | 193 | 6226 | from django.core.exceptions import NON_FIELD_ERRORS
from rest_framework import status, viewsets, serializers
from rest_framework.decorators import list_route
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer
from jet_django.filters.model_aggregate import AggregateFilter
from jet_django.filters.model_group import GroupFilter
from jet_django.pagination import CustomPageNumberPagination
from jet_django.permissions import HasProjectPermissions, ModifyNotInDemo
from jet_django.serializers.reorder import reorder_serializer_factory
class AggregateSerializer(serializers.Serializer):
y_func = serializers.IntegerField()
def __init__(self, *args, **kwargs):
if 'y_func_serializer' in kwargs:
self.fields['y_func'] = kwargs.pop('y_func_serializer')
super().__init__(*args, **kwargs)
class GroupSerializer(serializers.Serializer):
group = serializers.CharField()
y_func = serializers.IntegerField()
def __init__(self, *args, **kwargs):
if 'group_serializer' in kwargs:
self.fields['group'] = kwargs.pop('group_serializer')
if 'y_func_serializer' in kwargs:
self.fields['y_func'] = kwargs.pop('y_func_serializer')
super().__init__(*args, **kwargs)
def model_viewset_factory(build_model, build_filter_class, build_serializer_class, build_detail_serializer_class, build_queryset, build_actions, ordering_field):
ReorderSerializer = reorder_serializer_factory(build_queryset, ordering_field)
class Viewset(viewsets.ModelViewSet):
model = build_model
queryset = build_queryset
pagination_class = CustomPageNumberPagination
filter_class = build_filter_class
authentication_classes = ()
permission_classes = (HasProjectPermissions, ModifyNotInDemo)
def get_serializer_class(self):
if self.action == 'aggregate':
return AggregateSerializer
elif self.action == 'group':
return GroupSerializer
elif self.action == 'retrieve':
return build_detail_serializer_class
else:
return build_serializer_class
@list_route(methods=['get'])
def aggregate(self, request):
queryset = self.filter_queryset(self.get_queryset())
y_func = request.GET['_y_func'].lower()
y_column = request.GET.get('_y_column', 'id')
y_field = self.model._meta.get_field(y_column)
y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field)
y_serializer = y_serializer_class(**y_serializer_kwargs)
queryset = AggregateFilter().filter(queryset, {
'y_func': y_func,
'y_column': y_column
})
serializer = self.get_serializer(
queryset,
y_func_serializer=y_serializer
)
return Response(serializer.data)
@list_route(methods=['get'])
def group(self, request):
queryset = self.filter_queryset(self.get_queryset())
x_column = request.GET['_x_column']
x_lookup_name = request.GET.get('_x_lookup')
y_func = request.GET['_y_func'].lower()
y_column = request.GET.get('_y_column', 'id')
x_field = self.model._meta.get_field(x_column)
x_lookup = x_field.class_lookups.get(x_lookup_name)
y_field = self.model._meta.get_field(y_column)
if x_lookup:
x_field = x_lookup('none').output_field
x_serializer_class, x_serializer_kwargs = ModelSerializer().build_standard_field(x_column, x_field)
x_serializer = x_serializer_class(**x_serializer_kwargs)
y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field)
y_serializer = y_serializer_class(**y_serializer_kwargs)
queryset = GroupFilter().filter(queryset, {
'x_column': x_column,
'x_lookup': x_lookup,
'y_func': y_func,
'y_column': y_column
})
serializer = self.get_serializer(
queryset,
many=True,
group_serializer=x_serializer,
y_func_serializer=y_serializer
)
return Response(serializer.data)
def get_serializer(self, *args, **kwargs):
"""
Return the serializer instance that should be used for validating and
deserializing input, and for serializing output.
"""
serializer_class = self.get_serializer_class()
kwargs['context'] = self.get_serializer_context()
return serializer_class(*args, **kwargs)
@list_route(methods=['post'])
def reorder(self, request):
serializer = ReorderSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
@list_route(methods=['post'])
def reset_order(self, request):
i = 1
for instance in build_queryset:
setattr(instance, ordering_field, i)
instance.save()
i += 1
return Response({})
for action in build_actions:
def route(self, request):
form = action(data=request.data)
if not form.is_valid():
return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
queryset = form.filer_queryset(self.get_queryset())
try:
result = form.save(queryset)
except Exception as e:
return Response({NON_FIELD_ERRORS: str(e)}, status=status.HTTP_400_BAD_REQUEST)
return Response({'action': form._meta.name, 'result': result})
decorator = list_route(methods=['post'])
route = decorator(route)
setattr(Viewset, action._meta.name, route)
return Viewset
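# Sketch of how the factory is meant to be wired up (all names below are hypothetical):
#
#   ArticleViewset = model_viewset_factory(
#       build_model=Article,
#       build_filter_class=ArticleFilter,
#       build_serializer_class=ArticleSerializer,
#       build_detail_serializer_class=ArticleDetailSerializer,
#       build_queryset=Article.objects.all(),
#       build_actions=[],
#       ordering_field='ordering',
#   )
#
# The resulting viewset is then registered on a DRF router like any other ModelViewSet.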
|
pylayers/em/openems/test/Rect_Waveguide.py | usmanwardag/pylayers | 143 | 6254 |
from openems.openems import *
# A simple simulation
#
# FDTD Simulation Setting
#
F = FDTD()
F.add(Exc(typ='Sinus',f0=100000))
F.add(BoundaryCond(['PMC','PMC','PEC','PEC','MUR','MUR']))
#
# CSX (Geometry setting)
#
C = CSX()
# The Box is added as a property
C.add(Excitation('excitation'),p=Box(P1=[-10,-10,0],P2=[10,10,0],Pr=0))
C.add(DumpBox('Et'),p=Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0))
C.add(RectilinearGrid(np.arange(-10,11,1),np.arange(-10,11,1),np.arange(-10,11,1)))
C.add(Polyhedron())
S = OpenEMS(F,C)
S.save(filename='RectWaveguide.xml')
#gnd = Matter('gnd')
#sphere = Matter('sphere')
#patch = Matter('patch')
#substrate = Matter('substrate',typ='Ma',Epsilon="3.38",Kappa="0.00046")
#cdgsht = Matter('copper',typ='Cs',conductivity="56e6",thickness="40e-6")
#b1 = Box(P1=[0,0,0],P2=[100,100,200],Pr=0)
#b2 = Box(P1=[0,0,0],P2=[10,20,30],Pr=10)
#b4 = Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0)
#s1 = Sphere(P=[0,0,0],R=100,Pr=50)
#dump = DumpBox()
#C.add(gnd)
#C.add(patch)
#C.add(substrate)
#C.add(sphere)
#C.add(cdgsht)
#C.add(exc)
#C.add(dump)
#C.set('gnd',b1)
#C.set('gnd',b2)
#C.set('sphere',s1)
#C.set('copper',b1)
#C.set('copper',b2)
#C.set('Et',b4)
#C.save(filename='structure.xml')
##C.AddBox(prop='ConductingSheet',name='copper',P1=[0,-50,200],P2=[1000,50,200],Pri=10)
##C.AddCylinder(prop='Metal',name='cyl0',P1=[0,0,0],P2=[0,0,100],Rad=50,Pri=10)
#
|
nodes/2.x/python/View.ViewTemplate.py | andydandy74/ClockworkForDynamo | 147 | 6264 | import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
def GetViewTemplate(view):
if not view: return None
elif hasattr(view, "ViewTemplateId"):
if view.ViewTemplateId.IntegerValue == -1: return None
else: return view.Document.GetElement(view.ViewTemplateId)
else: return None
views = UnwrapElement(IN[0])
if isinstance(IN[0], list): OUT = [GetViewTemplate(x) for x in views]
else: OUT = GetViewTemplate(views) |
igibson/object_states/aabb.py | mamadbiabon/iGibson | 360 | 6273 |
import numpy as np
from igibson.external.pybullet_tools.utils import aabb_union, get_aabb, get_all_links
from igibson.object_states.object_state_base import CachingEnabledObjectState
class AABB(CachingEnabledObjectState):
def _compute_value(self):
body_id = self.obj.get_body_id()
all_links = get_all_links(body_id)
aabbs = [get_aabb(body_id, link=link) for link in all_links]
aabb_low, aabb_hi = aabb_union(aabbs)
if not hasattr(self.obj, "category") or self.obj.category != "floors" or self.obj.room_floor is None:
return np.array(aabb_low), np.array(aabb_hi)
# TODO: remove after split floors
# room_floor will be set to the correct RoomFloor beforehand
room_instance = self.obj.room_floor.room_instance
# Get the x-y values from the room segmentation map
room_aabb_low, room_aabb_hi = self.obj.room_floor.scene.get_aabb_by_room_instance(room_instance)
if room_aabb_low is None:
return np.array(aabb_low), np.array(aabb_hi)
# Use the z values from pybullet
room_aabb_low[2] = aabb_low[2]
room_aabb_hi[2] = aabb_hi[2]
return np.array(room_aabb_low), np.array(room_aabb_hi)
def _set_value(self, new_value):
raise NotImplementedError("AABB state currently does not support setting.")
# Nothing needs to be done to save/load AABB since it will happen due to pose caching.
def _dump(self):
return None
def load(self, data):
return
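# Typical access pattern (assumed from the surrounding object-state framework, not shown here):
#
#   lower, upper = obj.states[AABB].get_value()
#
# where get_value() would be provided by CachingEnabledObjectState and call _compute_value() above.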
|
Kmeans Cluster/Kmeans_Compare.py | Jojoxiao/Machine-Learning-for-Beginner-by-Python3 | 397 | 6285 | #-*- coding:utf-8 -*-
# &Author AnFany
# Import the two implementations
import Kmeans_AnFany as K_Af # AnFany
import Kmeans_Sklearn as K_Sk # Sklearn
import matplotlib.pyplot as plt
from pylab import mpl  # needed so matplotlib can render the Chinese labels used below
mpl.rcParams['font.sans-serif'] = ['FangSong']  # use the FangSong font for Chinese text
mpl.rcParams['axes.unicode_minus'] = False
import numpy as np
# Generate a dataset with sklearn
from sklearn.datasets import make_blobs
X, Y = make_blobs(n_samples=600, centers=6, n_features=2)
# Scatter plot of the training data
def fig_scatter(exdata, eydata, titl='训练数据散点图', co=['r', 'g', 'k', 'b', 'y', 'm'], marker=['o','^','H','v','d','>']):
typeclass = sorted(list(set(eydata)))
for ii in range(len(typeclass)):
datax = exdata[eydata == typeclass[ii]]
plt.scatter(datax[:, 0], datax[:, -1], c=co[ii], s=50, marker=marker[ii])
plt.title(titl)
#plt.legend(['%d类'%i for i in typeclass], bbox_to_anchor=(1.2, 0.9))
plt.xlabel('特征1')
plt.ylabel('特征2')
# Run the different implementations
# AnFany
kresult = K_Af.op_kmeans(X, countcen=6)
# Sklearn
sk = K_Sk.KMeans(init='k-means++', n_clusters=6, n_init=10)
train = sk.fit(X)
result = sk.predict(X)
skru = K_Sk.trans(result)
# Plot the clusters produced by each algorithm
def sca(Xdata, Center, signdict, co=['r', 'g', 'y', 'b', 'c', 'm'], marker=['o','^','H','s','d','*'], titl = 'AnFany 结果'):
du = 1
for jj in signdict:
xdata = Xdata[signdict[jj]]
        plt.scatter(xdata[:, 0], xdata[:, -1], c=co[jj], s=50, marker=marker[jj], label='%d类' % jj)  # plot the samples of this cluster
for ss in Center:
if du:
            plt.scatter(ss[0], ss[1], c='k', s=100, marker='8', label='类别中心')  # plot the cluster centers (labelled once)
du = 0
else:
            plt.scatter(ss[0], ss[1], c='k', s=100, marker='8')  # plot the cluster centers
plt.legend(bbox_to_anchor=(1.2, 1))
plt.title(titl)
plt.xlabel('特征1')
plt.ylabel('特征2')
# Euclidean distance
def dis(sample, center):
cen = np.array([center])
sample = np.array(sample)
if len(sample) != 0:
usb = np.sum((sample - cen) ** 2, axis=1) ** 0.5
return usb
else:
return 0
# Compute the cost of the final clustering
def Cost(Xdata, typedict):
center = {}
for kk in typedict:
        center[kk] = np.mean(Xdata[typedict[kk]], axis=0)  # centroid (mean) of each cluster
cio = 0
for cc in typedict:
cio += np.sum(dis(Xdata[typedict[cc]], center[cc]))
return cio
# Display the final results
plt.subplot(2, 2, 1)
fig_scatter(X, Y)
plt.subplot(2, 2, 2)
sca(X, kresult[0], kresult[2])
plt.subplot(2, 2, 3)
sca(X, train.cluster_centers_, skru, titl='Sklearn 结果')
plt.subplot(2, 2, 4)
plt.axis('off')
plt.text(0.3, 0.6, 'AnFany 最终的分类成本值为:%.5f'%Cost(X, kresult[2]))
plt.text(0.3, 0.3, 'Sklearn 最终的分类成本值为:%.5f'%Cost(X, skru))
plt.show()
|
sdc/utilities/sdc_typing_utils.py | dlee992/sdc | 540 | 6295 | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| This file contains SDC utility functions related to typing compilation phase
"""
import numpy
import numba
import sdc
from numba import types
from numba.core.errors import TypingError
from numba.np import numpy_support
from sdc.datatypes.indexes import *
from sdc.str_arr_type import string_array_type, StringArrayType
from sdc.datatypes.categorical.types import Categorical
sdc_old_index_types = (types.Array, StringArrayType, )
sdc_pandas_index_types = (
EmptyIndexType,
PositionalIndexType,
RangeIndexType,
Int64IndexType,
MultiIndexType,
) + sdc_old_index_types
sdc_indexes_range_like = (
PositionalIndexType,
RangeIndexType,
)
# TO-DO: support caching of data allocated for range indexes at request for .values
sdc_indexes_wo_values_cache = (
EmptyIndexType,
PositionalIndexType,
RangeIndexType,
)
sdc_pandas_df_column_types = (
types.Array,
StringArrayType,
Categorical,
)
class TypeChecker:
"""
Validate object type and raise TypingError if the type is invalid, e.g.:
Method nsmallest(). The object n
given: bool
expected: int
"""
msg_template = '{} The object {}\n given: {}\n expected: {}'
def __init__(self, func_name):
"""
Parameters
----------
func_name: :obj:`str`
            name of the function where type checking is performed
"""
self.func_name = func_name
def raise_exc(self, data, expected_types, name=''):
"""
Raise exception with unified message
Parameters
----------
data: :obj:`any`
real type of the data
expected_types: :obj:`str`
            expected types, inserted directly into the exception message
name: :obj:`str`
name of the parameter
"""
msg = self.msg_template.format(self.func_name, name, data, expected_types)
raise TypingError(msg)
def check(self, data, accepted_type, name=''):
"""
Check data type belongs to specified type
Parameters
----------
data: :obj:`any`
real type of the data
accepted_type: :obj:`type`
accepted type
name: :obj:`str`
name of the parameter
"""
if not isinstance(data, accepted_type):
self.raise_exc(data, accepted_type.__name__, name=name)
class SDCLimitation(Exception):
"""Exception to be raised in case of SDC limitation"""
pass
def kwsparams2list(params):
    """Convert a parameters dict to a list of strings of the form 'key=value'"""
return ['{}={}'.format(k, v) for k, v in params.items()]
def sigparams2list(param_names, defaults):
"""Creates a list of strings of a format 'key=value' from parameter names and default values"""
return [(f'{param}' if param not in defaults else f'{param}={defaults[param]}') for param in param_names]
def has_literal_value(var, value):
"""Used during typing to check that variable var is a Numba literal value equal to value"""
if not isinstance(var, types.Literal):
return False
if value is None:
return isinstance(var, types.NoneType) or var.literal_value is value
    elif isinstance(value, bool):  # bool values are compared by identity
return var.literal_value is value
else:
return var.literal_value == value
def has_python_value(var, value):
"""Used during typing to check that variable var was resolved as Python type and has specific value"""
if not isinstance(var, type(value)):
return False
    if value is None or isinstance(value, bool):
return var is value
else:
return var == value
def is_default(var, value):
return has_literal_value(var, value) or has_python_value(var, value) or isinstance(var, types.Omitted)
def check_is_numeric_array(type_var):
    """Used during typing to check that type_var is a numeric numpy array"""
return check_is_array_of_dtype(type_var, types.Number)
def check_index_is_numeric(ty_series):
"""Used during typing to check that series has numeric index"""
return isinstance(ty_series.index.dtype, types.Number)
def check_types_comparable(ty_left, ty_right):
"""Used during typing to check that specified types can be compared"""
if hasattr(ty_left, 'dtype'):
ty_left = ty_left.dtype
if hasattr(ty_right, 'dtype'):
ty_right = ty_right.dtype
# add the rest of supported types here
if isinstance(ty_left, types.Number):
return isinstance(ty_right, types.Number)
if isinstance(ty_left, types.UnicodeType):
return isinstance(ty_right, types.UnicodeType)
if isinstance(ty_left, types.Boolean):
return isinstance(ty_right, types.Boolean)
if isinstance(ty_left, (types.Tuple, types.UniTuple)):
# FIXME: just for now to unblock compilation
return ty_left == ty_right
return False
def check_arrays_comparable(ty_left, ty_right):
"""Used during typing to check that underlying arrays of specified types can be compared"""
return ((ty_left == string_array_type and ty_right == string_array_type)
or (check_is_numeric_array(ty_left) and check_is_numeric_array(ty_right)))
def check_is_array_of_dtype(type_var, dtype):
"""Used during typing to check that type_var is a numeric numpy array of specific dtype"""
return isinstance(type_var, types.Array) and isinstance(type_var.dtype, dtype)
def find_common_dtype_from_numpy_dtypes(array_types, scalar_types):
    """Used to find a common numba dtype for sequences of numba dtypes, each representing some numpy dtype"""
np_array_dtypes = [numpy_support.as_dtype(dtype) for dtype in array_types]
np_scalar_dtypes = [numpy_support.as_dtype(dtype) for dtype in scalar_types]
np_common_dtype = numpy.find_common_type(np_array_dtypes, np_scalar_dtypes)
numba_common_dtype = numpy_support.from_dtype(np_common_dtype)
return numba_common_dtype
def find_index_common_dtype(left, right):
"""Used to find common dtype for indexes of two series and verify if index dtypes are equal"""
left_index_dtype = left.dtype
right_index_dtype = right.dtype
index_dtypes_match = left_index_dtype == right_index_dtype
if not index_dtypes_match:
numba_index_common_dtype = find_common_dtype_from_numpy_dtypes(
[left_index_dtype, right_index_dtype], [])
else:
numba_index_common_dtype = left_index_dtype
return index_dtypes_match, numba_index_common_dtype
def gen_impl_generator(codegen, impl_name):
"""Generate generator of an implementation"""
def _df_impl_generator(*args, **kwargs):
func_text, global_vars = codegen(*args, **kwargs)
loc_vars = {}
exec(func_text, global_vars, loc_vars)
_impl = loc_vars[impl_name]
return _impl
return _df_impl_generator
def check_signed_integer(ty):
return isinstance(ty, types.Integer) and ty.signed
def _check_dtype_param_type(dtype):
    """ Returns True if dtype is a valid type for the dtype parameter and False otherwise.
Used in RangeIndex ctor and other methods that take dtype parameter. """
valid_dtype_types = (types.NoneType, types.Omitted, types.UnicodeType, types.NumberClass)
return isinstance(dtype, valid_dtype_types) or dtype is None
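# Hedged usage sketch (not part of the original module): shows how TypeChecker
# is typically driven inside an @overload implementation. The method name
# 'nsmallest' and the parameter name 'n' are illustrative only.
if __name__ == '__main__':
    ty_checker = TypeChecker('Method nsmallest().')
    try:
        ty_checker.check(types.boolean, types.Integer, name='n')
    except TypingError as err:
        print(err)  # reports: given bool, expected Integer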
|
DQM/L1TMonitorClient/python/L1EmulatorErrorFlagClient_cfi.py | ckamtsikis/cmssw | 852 | 6297 | <filename>DQM/L1TMonitorClient/python/L1EmulatorErrorFlagClient_cfi.py<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
l1EmulatorErrorFlagClient = DQMEDHarvester("L1EmulatorErrorFlagClient",
#
# for each L1 system, give:
# - SystemLabel: system label
# - HwValLabel: system label as used in hardware validation package
# (the package producing the ErrorFlag histogram)
# - SystemMask: system mask: if 1, the system is masked in the summary plot
# - SystemFolder: the folder where the ErrorFlag histogram is looked for
#
# the position in the parameter set gives, in reverse order, the position in the reportSummaryMap
# in the emulator column (left column)
L1Systems = cms.VPSet(
cms.PSet(
SystemLabel = cms.string("ECAL"),
HwValLabel = cms.string("ETP"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("HCAL"),
HwValLabel = cms.string("HTP"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("RCT"),
HwValLabel = cms.string("RCT"),
SystemMask = cms.uint32(0),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("Stage1Layer2"),
HwValLabel = cms.string("Stage1Layer2"),
SystemMask = cms.uint32(0),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("DTTF"),
HwValLabel = cms.string("DTF"),
SystemMask = cms.uint32(0),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("DTTPG"),
HwValLabel = cms.string("DTP"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("CSCTF"),
HwValLabel = cms.string("CTF"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("CSCTPG"),
HwValLabel = cms.string("CTP"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("RPC"),
HwValLabel = cms.string("RPC"),
SystemMask = cms.uint32(0),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("GMT"),
HwValLabel = cms.string("GMT"),
SystemMask = cms.uint32(0),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("GT"),
HwValLabel = cms.string("GT"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("L1TEMU/Stage1GTexpert")
)
)
)
|
examples/ingenerator.py | quynhanh-ngx/pytago | 206 | 6315 | def main():
n = 111
gen = (n * 7 for x in range(10))
if 777 in gen:
print("Yes!")
if __name__ == '__main__':
main()
|
azbankgateways/views/__init__.py | lordmahyar/az-iranian-bank-gateways | 196 | 6330 | <reponame>lordmahyar/az-iranian-bank-gateways<gh_stars>100-1000
from .banks import callback_view, go_to_bank_gateway
from .samples import sample_payment_view, sample_result_view
|
src/resources/clients/python_client/visitstate.py | visit-dav/vis | 226 | 6344 | <reponame>visit-dav/vis
import sys
class RPCType(object):
CloseRPC = 0
DetachRPC = 1
AddWindowRPC = 2
DeleteWindowRPC = 3
SetWindowLayoutRPC = 4
SetActiveWindowRPC = 5
ClearWindowRPC = 6
ClearAllWindowsRPC = 7
OpenDatabaseRPC = 8
CloseDatabaseRPC = 9
ActivateDatabaseRPC = 10
CheckForNewStatesRPC = 11
CreateDatabaseCorrelationRPC = 12
AlterDatabaseCorrelationRPC = 13
DeleteDatabaseCorrelationRPC = 14
ReOpenDatabaseRPC = 15
ReplaceDatabaseRPC = 16
OverlayDatabaseRPC = 17
OpenComputeEngineRPC = 18
CloseComputeEngineRPC = 19
AnimationSetNFramesRPC = 20
AnimationPlayRPC = 21
AnimationReversePlayRPC = 22
AnimationStopRPC = 23
TimeSliderNextStateRPC = 24
TimeSliderPreviousStateRPC = 25
SetTimeSliderStateRPC = 26
SetActiveTimeSliderRPC = 27
AddPlotRPC = 28
SetPlotFrameRangeRPC = 29
DeletePlotKeyframeRPC = 30
MovePlotKeyframeRPC = 31
DeleteActivePlotsRPC = 32
HideActivePlotsRPC = 33
DrawPlotsRPC = 34
DisableRedrawRPC = 35
RedrawRPC = 36
SetActivePlotsRPC = 37
ChangeActivePlotsVarRPC = 38
AddOperatorRPC = 39
AddInitializedOperatorRPC = 40
PromoteOperatorRPC = 41
DemoteOperatorRPC = 42
RemoveOperatorRPC = 43
RemoveLastOperatorRPC = 44
RemoveAllOperatorsRPC = 45
SaveWindowRPC = 46
SetDefaultPlotOptionsRPC = 47
SetPlotOptionsRPC = 48
SetDefaultOperatorOptionsRPC = 49
SetOperatorOptionsRPC = 50
WriteConfigFileRPC = 51
ConnectToMetaDataServerRPC = 52
IconifyAllWindowsRPC = 53
DeIconifyAllWindowsRPC = 54
ShowAllWindowsRPC = 55
HideAllWindowsRPC = 56
UpdateColorTableRPC = 57
SetAnnotationAttributesRPC = 58
SetDefaultAnnotationAttributesRPC = 59
ResetAnnotationAttributesRPC = 60
SetKeyframeAttributesRPC = 61
SetPlotSILRestrictionRPC = 62
SetViewAxisArrayRPC = 63
SetViewCurveRPC = 64
SetView2DRPC = 65
SetView3DRPC = 66
ResetPlotOptionsRPC = 67
ResetOperatorOptionsRPC = 68
SetAppearanceRPC = 69
ProcessExpressionsRPC = 70
SetLightListRPC = 71
SetDefaultLightListRPC = 72
ResetLightListRPC = 73
SetAnimationAttributesRPC = 74
SetWindowAreaRPC = 75
PrintWindowRPC = 76
ResetViewRPC = 77
RecenterViewRPC = 78
ToggleAllowPopupRPC = 79
ToggleMaintainViewModeRPC = 80
ToggleBoundingBoxModeRPC = 81
ToggleCameraViewModeRPC = 82
TogglePerspectiveViewRPC = 83
ToggleSpinModeRPC = 84
ToggleLockTimeRPC = 85
ToggleLockToolsRPC = 86
ToggleLockViewModeRPC = 87
ToggleFullFrameRPC = 88
UndoViewRPC = 89
RedoViewRPC = 90
InvertBackgroundRPC = 91
ClearPickPointsRPC = 92
SetWindowModeRPC = 93
EnableToolRPC = 94
SetToolUpdateModeRPC = 95
CopyViewToWindowRPC = 96
CopyLightingToWindowRPC = 97
CopyAnnotationsToWindowRPC = 98
CopyPlotsToWindowRPC = 99
ClearCacheRPC = 100
ClearCacheForAllEnginesRPC = 101
SetViewExtentsTypeRPC = 102
ClearRefLinesRPC = 103
SetRenderingAttributesRPC = 104
QueryRPC = 105
CloneWindowRPC = 106
SetMaterialAttributesRPC = 107
SetDefaultMaterialAttributesRPC = 108
ResetMaterialAttributesRPC = 109
SetPlotDatabaseStateRPC = 110
DeletePlotDatabaseKeyframeRPC = 111
MovePlotDatabaseKeyframeRPC = 112
ClearViewKeyframesRPC = 113
DeleteViewKeyframeRPC = 114
MoveViewKeyframeRPC = 115
SetViewKeyframeRPC = 116
OpenMDServerRPC = 117
EnableToolbarRPC = 118
HideToolbarsRPC = 119
HideToolbarsForAllWindowsRPC = 120
ShowToolbarsRPC = 121
ShowToolbarsForAllWindowsRPC = 122
SetToolbarIconSizeRPC = 123
SaveViewRPC = 124
SetGlobalLineoutAttributesRPC = 125
SetPickAttributesRPC = 126
ExportColorTableRPC = 127
ExportEntireStateRPC = 128
ImportEntireStateRPC = 129
ImportEntireStateWithDifferentSourcesRPC = 130
ResetPickAttributesRPC = 131
AddAnnotationObjectRPC = 132
HideActiveAnnotationObjectsRPC = 133
DeleteActiveAnnotationObjectsRPC = 134
RaiseActiveAnnotationObjectsRPC = 135
LowerActiveAnnotationObjectsRPC = 136
SetAnnotationObjectOptionsRPC = 137
SetDefaultAnnotationObjectListRPC = 138
ResetAnnotationObjectListRPC = 139
ResetPickLetterRPC = 140
SetDefaultPickAttributesRPC = 141
ChooseCenterOfRotationRPC = 142
SetCenterOfRotationRPC = 143
SetQueryOverTimeAttributesRPC = 144
SetDefaultQueryOverTimeAttributesRPC = 145
ResetQueryOverTimeAttributesRPC = 146
ResetLineoutColorRPC = 147
SetInteractorAttributesRPC = 148
SetDefaultInteractorAttributesRPC = 149
ResetInteractorAttributesRPC = 150
GetProcInfoRPC = 151
SendSimulationCommandRPC = 152
UpdateDBPluginInfoRPC = 153
ExportDBRPC = 154
SetTryHarderCyclesTimesRPC = 155
OpenClientRPC = 156
OpenGUIClientRPC = 157
OpenCLIClientRPC = 158
SuppressQueryOutputRPC = 159
SetQueryFloatFormatRPC = 160
SetMeshManagementAttributesRPC = 161
SetDefaultMeshManagementAttributesRPC = 162
ResetMeshManagementAttributesRPC = 163
ResizeWindowRPC = 164
MoveWindowRPC = 165
MoveAndResizeWindowRPC = 166
SetStateLoggingRPC = 167
ConstructDataBinningRPC = 168
RequestMetaDataRPC = 169
SetTreatAllDBsAsTimeVaryingRPC = 170
SetCreateMeshQualityExpressionsRPC = 171
SetCreateTimeDerivativeExpressionsRPC = 172
SetCreateVectorMagnitudeExpressionsRPC = 173
CopyActivePlotsRPC = 174
SetPlotFollowsTimeRPC = 175
TurnOffAllLocksRPC = 176
SetDefaultFileOpenOptionsRPC = 177
SetSuppressMessagesRPC = 178
ApplyNamedSelectionRPC = 179
CreateNamedSelectionRPC = 180
DeleteNamedSelectionRPC = 181
LoadNamedSelectionRPC = 182
SaveNamedSelectionRPC = 183
SetNamedSelectionAutoApplyRPC = 184
UpdateNamedSelectionRPC = 185
InitializeNamedSelectionVariablesRPC = 186
MenuQuitRPC = 187
SetPlotDescriptionRPC = 188
MovePlotOrderTowardFirstRPC = 189
MovePlotOrderTowardLastRPC = 190
SetPlotOrderToFirstRPC = 191
SetPlotOrderToLastRPC = 192
RenamePickLabelRPC = 193
GetQueryParametersRPC = 194
DDTConnectRPC = 195
DDTFocusRPC = 196
ReleaseToDDTRPC = 197
MaxRPC = 198
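if __name__ == '__main__':
    # Hedged sketch (not part of the original file): reverse-map an RPC value
    # back to its symbolic name, which is handy when tracing the viewer protocol.
    value_to_name = {v: k for k, v in vars(RPCType).items()
                     if isinstance(v, int) and not k.startswith('_')}
    print(value_to_name[RPCType.QueryRPC])  # -> 'QueryRPC'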
|
core/src/main/python/akdl/entry/base_entry.py | zhangjun0x01/Alink | 3,301 | 6356 | import abc
from typing import Dict, Callable
import tensorflow as tf
from flink_ml_framework.context import Context
from flink_ml_framework.java_file import *
from ..runner import tf_helper, io_helper
from ..runner.output_writer import DirectOutputWriter
try:
from flink_ml_tensorflow.tensorflow_context import TFContext
except:
from flink_ml_tensorflow2.tensorflow_context import TFContext
# noinspection PyUnresolvedReferences
from tensorflow_io.core.python.ops import core_ops
__all__ = ['TF1_TYPE', 'TF2_TYPE']
TF1_TYPE = 'tf1'
TF2_TYPE = 'tf2'
class BaseEntry(abc.ABC):
def __init__(self, func_name, engine_type):
self.func_name = func_name
self.engine_type = engine_type
@staticmethod
def get_func_by_name(func_name):
"""
Get function by the func name
:param func_name: func name
:return: function
"""
if '.' not in func_name:
if func_name in globals():
return globals()[func_name]
else:
raise RuntimeError('cannot find function[{}]'.format(func_name))
else:
module_name, func_name = func_name.rsplit('.', 1)
import importlib
# load the module, will raise ImportError if module cannot be loaded
m = importlib.import_module(module_name)
# get the class, will raise AttributeError if class cannot be found
c = getattr(m, func_name)
return c
@abc.abstractmethod
def construct_args(self, **kwargs):
pass
def is_batch(self):
return True
def post_process(self, **kwargs):
pass
def entry_func(self, context: Context):
tf_context = TFContext(context)
properties = tf_context.properties
print('properties', properties, flush=True)
# intra_op_parallelism is set by akdl, because there is a bug in TensorFlow 1.x
# See: https://stackoverflow.com/questions/34426268/restricting-number-of-cores-used
intra_op_parallelism = int(properties['ALINK:intra_op_parallelism'])
if self.engine_type == TF1_TYPE:
tf_helper.set_intra_op_parallelism(intra_op_parallelism_threads=intra_op_parallelism)
elif self.engine_type == TF2_TYPE:
tf.config.threading.set_intra_op_parallelism_threads(intra_op_parallelism)
num_workers = int(properties['ALINK:num_workers'])
work_dir = properties['ALINK:work_dir']
cluster, task_type, task_index = tf_context.export_estimator_cluster()
if self.is_batch():
java_queue_file = JavaFile(context.from_java(), context.to_java())
dataset_file = os.path.join(work_dir, 'dataset.tfrecords')
dataset, dataset_length = io_helper.convert_java_queue_file_to_repeatable_dataset(java_queue_file,
dataset_file)
print("number of records: " + str(dataset_length), flush=True)
dataset_fn: Callable[[], tf.data.TFRecordDataset] = lambda: tf.data.TFRecordDataset(dataset_file)
else:
dataset_fn: Callable[[], tf.data.TFRecordDataset] = lambda: tf_context.flink_stream_dataset()
dataset = None
dataset_file = None
dataset_length = None
saved_model_dir = os.path.join(work_dir, 'savedmodel')
user_params: Dict = json.loads(properties['ALINK:user_defined_params'])
for i in range(1, 1024):
key = "ALINK:bc_" + str(i)
if key in properties:
user_params[key] = context.properties[key]
key = "ALINK:model_dir"
if key in properties:
user_params[key] = properties[key]
output_writer = DirectOutputWriter(tf_context.from_java(), tf_context.to_java())
locals_copy = locals().copy()
locals_copy.pop("self")
print("locals_copy = ", locals_copy, flush=True)
args = self.construct_args(**locals_copy)
func = self.get_func_by_name(self.func_name)
func(args)
print("task_type = {}, task_index = {}: done tf_user_main".format(task_type, task_index), flush=True)
local_vars = locals().copy()
local_vars.pop('self')
self.post_process(**local_vars)
print("task_type = {}, task_index = {}: exit".format(task_type, task_index), flush=True)
output_writer.close()
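class _EchoEntry(BaseEntry):
    # Hedged sketch (not part of the original file): the minimal shape of a
    # concrete entry. Real subclasses map the locals captured in entry_func
    # (dataset_fn, user_params, output_writer, ...) into the args object that
    # the user function expects; the key used below is illustrative only.
    def construct_args(self, **kwargs):
        return {'user_params': kwargs.get('user_params')}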
|
simba/run_dash_tkinter.py | justinshenk/simba | 172 | 6364 | # All credit to https://stackoverflow.com/questions/46571448/tkinter-and-a-html-file - thanks DELICA - https://stackoverflow.com/users/7027346/delica
from cefpython3 import cefpython as cef
import ctypes
try:
import tkinter as tk
from tkinter import messagebox
except ImportError:
import Tkinter as tk
import sys
import platform
import logging as _logging
# Fix for PyCharm hints warnings
WindowUtils = cef.WindowUtils()
# Platforms
WINDOWS = (platform.system() == "Windows")
LINUX = (platform.system() == "Linux")
MAC = (platform.system() == "Darwin")
# Globals
logger = _logging.getLogger("tkinter_.py")
url = "localhost:8050/"
class MainFrame(tk.Frame):
def __init__(self, root):
self.closing = False
self.browser = None
# Root
root.geometry("900x640")
tk.Grid.rowconfigure(root, 0, weight=1)
tk.Grid.columnconfigure(root, 0, weight=1)
# MainFrame
tk.Frame.__init__(self, root)
self.master.title('SimBA Dashboard')
self.master.protocol("WM_DELETE_WINDOW", self.on_close)
self.bind("<Configure>", self.on_configure)
self.bind("<FocusIn>", self.on_focus_in)
self.bind("<FocusOut>", self.on_focus_out)
self.focus_set()
# Pack MainFrame
self.pack(fill=tk.BOTH, expand=tk.YES)
def embed_browser(self):
window_info = cef.WindowInfo()
rect = [0, 0, self.winfo_width(), self.winfo_height()]
window_info.SetAsChild(self.get_window_handle(), rect)
self.browser = cef.CreateBrowserSync(window_info,
url=url) #todo
assert self.browser
self.browser.SetClientHandler(LoadHandler(self))
self.browser.SetClientHandler(FocusHandler(self))
self.message_loop_work()
def get_window_handle(self):
if self.winfo_id() > 0:
return self.winfo_id()
else:
raise Exception("Couldn't obtain window handle")
def message_loop_work(self):
cef.MessageLoopWork()
self.after(10, self.message_loop_work)
def on_configure(self, event):
width = event.width
height = event.height
if self.browser:
if WINDOWS:
ctypes.windll.user32.SetWindowPos(
self.browser.GetWindowHandle(), 0,
0, 0, width, height, 0x0002)
elif LINUX:
self.browser.SetBounds(0, 0, width, height)
self.browser.NotifyMoveOrResizeStarted()
if not self.browser:
self.embed_browser()
def on_focus_in(self, _):
logger.debug("BrowserFrame.on_focus_in")
if self.browser:
self.browser.SetFocus(True)
self.focus_set()
def on_focus_out(self, _):
logger.debug("BrowserFrame.on_focus_out")
if self.browser:
self.browser.SetFocus(False)
def on_close(self):
if self.browser:
self.browser.CloseBrowser(True)
self.clear_browser_references()
self.destroy()
self.master.destroy()
def get_browser(self):
if self.browser:
return self.browser
return None
def clear_browser_references(self):
self.browser = None
class LoadHandler(object):
def __init__(self, browser_frame):
self.browser_frame = browser_frame
class FocusHandler(object):
def __init__(self, browser):
self.browser = browser
def OnTakeFocus(self, next_component, **_):
logger.debug("FocusHandler.OnTakeFocus, next={next}"
.format(next=next_component))
def OnSetFocus(self, source, **_):
logger.debug("FocusHandler.OnSetFocus, source={source}"
.format(source=source))
return False
def OnGotFocus(self, **_):
"""Fix CEF focus issues (#255). Call browser frame's focus_set
to get rid of type cursor in url entry widget."""
logger.debug("FocusHandler.OnGotFocus")
self.browser.focus_set()
# if __name__ == '__main__':
logger.setLevel(_logging.INFO)
stream_handler = _logging.StreamHandler()
formatter = _logging.Formatter("[%(filename)s] %(message)s")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.info("CEF Python {ver}".format(ver=cef.__version__))
logger.info("Python {ver} {arch}".format(
ver=platform.python_version(), arch=platform.architecture()[0]))
logger.info("Tk {ver}".format(ver=tk.Tcl().eval('info patchlevel')))
assert cef.__version__ >= "55.3", "CEF Python v55.3+ required to run this"
sys.excepthook = cef.ExceptHook # To shutdown all CEF processes on error
root = tk.Tk()
app = MainFrame(root)
def on_closing():
if messagebox.askokcancel("Quit", "Do you want to quit?"):
root.destroy()
root.protocol("WM_DELETE_WINDOW", on_closing)
# Tk must be initialized before CEF otherwise fatal error (Issue #306)
cef.Initialize()
root.mainloop()
# app.mainloop()
cef.Shutdown()
|
security_monkey/watchers/vpc/vpn.py | boladmin/security_monkey | 4,258 | 6374 | <reponame>boladmin/security_monkey
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.vpc.vpn
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: <NAME> <<EMAIL>> @alex.cline
"""
from cloudaux.aws.ec2 import describe_vpn_connections
from security_monkey.cloudaux_watcher import CloudAuxWatcher
from security_monkey.watcher import ChangeItem
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
class VPN(CloudAuxWatcher):
index = 'vpn'
i_am_singular = 'VPN Connection'
i_am_plural = 'VPN Connections'
def __init__(self, *args, **kwargs):
super(VPN, self).__init__(*args, **kwargs)
self.honor_ephemerals = True
self.ephemeral_paths = [
'VgwTelemetry$*$LastStatusChange',
'VgwTelemetry$*$Status',
'VgwTelemetry$*$StatusMessage',
]
def get_name_from_list_output(self, item):
if item.get("Tags"):
for tag in item["Tags"]:
if tag["Key"] == "Name":
return "{} ({})".format(tag["Value"], item["VpnConnectionId"])
return item["VpnConnectionId"]
def list_method(self, **kwargs):
return describe_vpn_connections(**kwargs)
def get_method(self, item, **kwargs):
# Remove the CustomerGatewayConfiguration -- it's not necessary as all the details are present anyway:
item.pop("CustomerGatewayConfiguration", None)
# Set the ARN:
item["Arn"] = "arn:aws:ec2:{region}:{account}:vpn-connection/{id}".format(region=kwargs["region"],
account=kwargs["account_number"],
id=item["VpnConnectionId"])
# Cast the datetimes to something JSON serializable (ISO 8601 string):
for vgw in item.get("VgwTelemetry", []):
if vgw.get("LastStatusChange"):
vgw["LastStatusChange"] = vgw["LastStatusChange"].strftime(DATETIME_FORMAT)
return item
class VPNItem(ChangeItem):
def __init__(self, region=None, account=None, name=None, arn=None, config=None, source_watcher=None):
super(VPNItem, self).__init__(
index=VPN.index,
region=region,
account=account,
name=name,
arn=arn,
new_config=config if config else {},
source_watcher=source_watcher)
|
_ar/masking_provement.py | TomKingsfordUoA/ResidualMaskingNetwork | 242 | 6388 | import os
import glob
import cv2
import numpy as np
import torch
from torchvision.transforms import transforms
from natsort import natsorted
from models import resmasking_dropout1
from utils.datasets.fer2013dataset import EMOTION_DICT
from barez import show
transform = transforms.Compose(
[
transforms.ToPILImage(),
transforms.ToTensor(),
]
)
def activations_mask(tensor):
tensor = torch.squeeze(tensor, 0)
tensor = torch.mean(tensor, 0)
tensor = tensor.detach().cpu().numpy()
tensor = np.maximum(tensor, 0)
tensor = cv2.resize(tensor, (224, 224))
tensor = tensor - np.min(tensor)
tensor = tensor / np.max(tensor)
heatmap = cv2.applyColorMap(np.uint8(255 * tensor), cv2.COLORMAP_JET)
return heatmap
model = resmasking_dropout1(3, 7)
# state = torch.load('./saved/checkpoints/resmasking_dropout1_rot30_2019Nov17_14.33')
state = torch.load("./saved/checkpoints/Z_resmasking_dropout1_rot30_2019Nov30_13.32")
model.load_state_dict(state["net"])
model.cuda()
model.eval()
for image_path in natsorted(
glob.glob("/home/z/research/bkemo/images/**/*.png", recursive=True)
):
image_name = os.path.basename(image_path)
print(image_name)
# image_path = '/home/z/research/bkemo/images/disgust/0.0_dc10a3_1976_0.png'
image = cv2.imread(image_path)
image = cv2.resize(image, (224, 224))
tensor = transform(image)
tensor = torch.unsqueeze(tensor, 0)
tensor = tensor.cuda()
# output = model(tensor)
x = model.conv1(tensor) # 112
x = model.bn1(x)
x = model.relu(x)
x = model.maxpool(x) # 56
x = model.layer1(x) # 56
m = model.mask1(x)
x = x * (1 + m)
x = model.layer2(x) # 28
m = model.mask2(x)
x = x * (1 + m)
x = model.layer3(x) # 14
heat_1 = activations_mask(x)
m = model.mask3(x)
x = x * (1 + m)
# heat_2 = activations_mask(m)
x = model.layer4(x) # 7
m = model.mask4(x)
x = x * (1 + m)
x = model.avgpool(x)
x = torch.flatten(x, 1)
output = model.fc(x)
# print(np.sum(heat_1 - heat_2))
# show(np.concatenate((image, heat_1, heat_2), axis=1))
cv2.imwrite(
"./masking_provements/{}".format(image_name),
np.concatenate((image, heat_1), axis=1),
)
# np.concatenate((image, heat_1, heat_2), axis=1))
# output = output.cpu().numpy()
# print(EMOTION_DICT[torch.argmax(output, 1).item()])
|
mod/tools/ccmake.py | mattiasljungstrom/fips | 429 | 6394 | <gh_stars>100-1000
"""
wrapper for ccmake command line tool
"""
import subprocess
name = 'ccmake'
platforms = ['linux', 'osx']
optional = True
not_found = "required for 'fips config' functionality"
#-------------------------------------------------------------------------------
def check_exists(fips_dir) :
"""test if ccmake is in the path
:returns: True if ccmake is in the path
"""
try:
out = subprocess.check_output(['ccmake', '--version'])
return True
except (OSError, subprocess.CalledProcessError):
return False
#-------------------------------------------------------------------------------
def run(build_dir) :
"""run ccmake to configure cmake project
:param build_dir: directory where ccmake should run
:returns: True if ccmake returns successful
"""
res = subprocess.call('ccmake .', cwd=build_dir, shell=True)
return res == 0
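# Hedged sketch (not part of the original file): quick manual check; the
# fips_dir argument is ignored by check_exists, so None is fine here.
if __name__ == '__main__':
    print('ccmake found' if check_exists(None) else 'ccmake missing: ' + not_found)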
|
heat/api/openstack/v1/views/stacks_view.py | noironetworks/heat | 265 | 6404 | <reponame>noironetworks/heat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from heat.api.openstack.v1 import util
from heat.api.openstack.v1.views import views_common
from heat.rpc import api as rpc_api
_collection_name = 'stacks'
basic_keys = (
rpc_api.STACK_ID,
rpc_api.STACK_NAME,
rpc_api.STACK_DESCRIPTION,
rpc_api.STACK_STATUS,
rpc_api.STACK_STATUS_DATA,
rpc_api.STACK_CREATION_TIME,
rpc_api.STACK_DELETION_TIME,
rpc_api.STACK_UPDATED_TIME,
rpc_api.STACK_OWNER,
rpc_api.STACK_PARENT,
rpc_api.STACK_USER_PROJECT_ID,
rpc_api.STACK_TAGS,
)
def format_stack(req, stack, keys=None, include_project=False):
def transform(key, value):
if keys and key not in keys:
return
if key == rpc_api.STACK_ID:
yield ('id', value['stack_id'])
yield ('links', [util.make_link(req, value)])
if include_project:
yield ('project', value['tenant'])
elif key == rpc_api.STACK_ACTION:
return
elif (key == rpc_api.STACK_STATUS and
rpc_api.STACK_ACTION in stack):
# To avoid breaking API compatibility, we join RES_ACTION
# and RES_STATUS, so the API format doesn't expose the
# internal split of state into action/status
yield (key, '_'.join((stack[rpc_api.STACK_ACTION], value)))
else:
# TODO(zaneb): ensure parameters can be formatted for XML
# elif key == rpc_api.STACK_PARAMETERS:
# return key, json.dumps(value)
yield (key, value)
return dict(itertools.chain.from_iterable(
transform(k, v) for k, v in stack.items()))
def collection(req, stacks, count=None, include_project=False):
keys = basic_keys
formatted_stacks = [format_stack(req, s, keys, include_project)
for s in stacks]
result = {'stacks': formatted_stacks}
links = views_common.get_collection_links(req, formatted_stacks)
if links:
result['links'] = links
if count is not None:
result['count'] = count
return result
|
desktop/core/ext-py/pyasn1-0.1.8/pyasn1/compat/iterfunc.py | kokosing/hue | 422 | 6410 | <filename>desktop/core/ext-py/pyasn1-0.1.8/pyasn1/compat/iterfunc.py
from sys import version_info
if version_info[0] <= 2 and version_info[1] <= 4:
def all(iterable):
for element in iterable:
if not element:
return False
return True
else:
all = all
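if __name__ == '__main__':
    # Hedged sketch (not part of the original file): the fallback matches the
    # builtin, including the vacuous-truth case for an empty iterable.
    print(all([1, 2, 3]), all([]), all([0, 1]))  # -> True True False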
|
tzwhere/tzwhere.py | tuxiqae/pytzwhere | 115 | 6420 | <reponame>tuxiqae/pytzwhere
#!/usr/bin/env python
'''tzwhere.py - time zone computation from latitude/longitude.
Ordinarily this is loaded as a module and instances of the tzwhere
class are instantiated and queried directly
'''
import collections
try:
import ujson as json # loads 2 seconds faster than normal json
except:
try:
import json
except ImportError:
import simplejson as json
import math
import gzip
import os
import shapely.geometry as geometry
import shapely.prepared as prepared
# We can save about 222MB of RAM by turning our polygon lists into
# numpy arrays rather than tuples, if numpy is installed.
try:
import numpy
WRAP = numpy.asarray
COLLECTION_TYPE = numpy.ndarray
except ImportError:
WRAP = tuple
COLLECTION_TYPE = tuple
# for navigation and pulling values/files
this_dir, this_filename = os.path.split(__file__)
BASE_DIR = os.path.dirname(this_dir)
class tzwhere(object):
SHORTCUT_DEGREES_LATITUDE = 1.0
SHORTCUT_DEGREES_LONGITUDE = 1.0
# By default, use the data file in our package directory
DEFAULT_SHORTCUTS = os.path.join(os.path.dirname(__file__),
'tz_world_shortcuts.json')
DEFAULT_POLYGONS = os.path.join(os.path.dirname(__file__),
'tz_world.json.gz')
def __init__(self, forceTZ=False):
'''
Initializes the tzwhere class.
        @forceTZ: If you want to force the lookup method to return a
        timezone even if the point you are looking up is slightly outside its
        bounds, you need to specify this during initialization already.
'''
featureCollection = read_tzworld(tzwhere.DEFAULT_POLYGONS)
pgen = feature_collection_polygons(featureCollection)
self.timezoneNamesToPolygons = collections.defaultdict(list)
self.unprepTimezoneNamesToPolygons = collections.defaultdict(list)
for tzname, poly in pgen:
self.timezoneNamesToPolygons[tzname].append(poly)
for tzname, polys in self.timezoneNamesToPolygons.items():
self.timezoneNamesToPolygons[tzname] = WRAP(polys)
if forceTZ:
self.unprepTimezoneNamesToPolygons[tzname] = WRAP(polys)
with open(tzwhere.DEFAULT_SHORTCUTS, 'r') as f:
self.timezoneLongitudeShortcuts, self.timezoneLatitudeShortcuts = json.load(f)
self.forceTZ = forceTZ
for tzname in self.timezoneNamesToPolygons:
# Convert things to tuples to save memory
for degree in self.timezoneLatitudeShortcuts:
for tzname in self.timezoneLatitudeShortcuts[degree].keys():
self.timezoneLatitudeShortcuts[degree][tzname] = \
tuple(self.timezoneLatitudeShortcuts[degree][tzname])
for degree in self.timezoneLongitudeShortcuts.keys():
for tzname in self.timezoneLongitudeShortcuts[degree].keys():
self.timezoneLongitudeShortcuts[degree][tzname] = \
tuple(self.timezoneLongitudeShortcuts[degree][tzname])
def tzNameAt(self, latitude, longitude, forceTZ=False):
'''
        Looks up the appropriate timezone for a given latitude and
        longitude.
        @latitude: latitude
        @longitude: longitude
        @forceTZ: If forceTZ is true and no valid timezone can be found, the
        closest timezone is returned instead. Only works if the point falls
        in the same integer-degree cell as the timezone.
'''
if forceTZ:
assert self.forceTZ, 'You need to initialize tzwhere with forceTZ'
latTzOptions = self.timezoneLatitudeShortcuts[str(
(math.floor(latitude / self.SHORTCUT_DEGREES_LATITUDE) *
self.SHORTCUT_DEGREES_LATITUDE)
)]
latSet = set(latTzOptions.keys())
lngTzOptions = self.timezoneLongitudeShortcuts[str(
(math.floor(longitude / self.SHORTCUT_DEGREES_LONGITUDE) *
self.SHORTCUT_DEGREES_LONGITUDE)
)]
lngSet = set(lngTzOptions.keys())
possibleTimezones = lngSet.intersection(latSet)
queryPoint = geometry.Point(longitude, latitude)
if possibleTimezones:
for tzname in possibleTimezones:
if isinstance(self.timezoneNamesToPolygons[tzname], COLLECTION_TYPE):
self.timezoneNamesToPolygons[tzname] = list(
map(lambda p: prepared.prep(
geometry.Polygon(p[0], p[1])
), self.timezoneNamesToPolygons[tzname]))
polyIndices = set(latTzOptions[tzname]).intersection(set(
lngTzOptions[tzname]
))
for polyIndex in polyIndices:
poly = self.timezoneNamesToPolygons[tzname][polyIndex]
if poly.contains_properly(queryPoint):
return tzname
if forceTZ:
return self.__forceTZ__(possibleTimezones, latTzOptions,
lngTzOptions, queryPoint)
def __forceTZ__(self, possibleTimezones, latTzOptions,
lngTzOptions, queryPoint):
distances = []
if possibleTimezones:
if len(possibleTimezones) == 1:
return possibleTimezones.pop()
else:
for tzname in possibleTimezones:
if isinstance(self.unprepTimezoneNamesToPolygons[tzname],
COLLECTION_TYPE):
self.unprepTimezoneNamesToPolygons[tzname] = list(
map(lambda p: p.context if isinstance(p, prepared.PreparedGeometry) else geometry.Polygon(p[0], p[1]),
self.timezoneNamesToPolygons[tzname]))
polyIndices = set(latTzOptions[tzname]).intersection(
set(lngTzOptions[tzname]))
for polyIndex in polyIndices:
poly = self.unprepTimezoneNamesToPolygons[
tzname][polyIndex]
d = poly.distance(queryPoint)
distances.append((d, tzname))
if len(distances) > 0:
return sorted(distances, key=lambda x: x[0])[0][1]
class prepareMap(object):
def __init__(self):
DEFAULT_SHORTCUTS = os.path.join(os.path.dirname(__file__),
'tz_world_shortcuts.json')
DEFAULT_POLYGONS = os.path.join(os.path.dirname(__file__),
'tz_world.json.gz')
featureCollection = read_tzworld(DEFAULT_POLYGONS)
pgen = feature_collection_polygons(featureCollection)
tzNamesToPolygons = collections.defaultdict(list)
for tzname, poly in pgen:
tzNamesToPolygons[tzname].append(poly)
for tzname, polys in tzNamesToPolygons.items():
tzNamesToPolygons[tzname] = \
WRAP(tzNamesToPolygons[tzname])
timezoneLongitudeShortcuts,\
timezoneLatitudeShortcuts = self.construct_shortcuts(
tzNamesToPolygons, tzwhere.SHORTCUT_DEGREES_LONGITUDE,
tzwhere.SHORTCUT_DEGREES_LATITUDE)
with open(DEFAULT_SHORTCUTS, 'w') as f:
json.dump(
(timezoneLongitudeShortcuts, timezoneLatitudeShortcuts), f)
@staticmethod
def construct_shortcuts(timezoneNamesToPolygons,
shortcut_long, shortcut_lat):
''' Construct our shortcuts for looking up polygons. Much faster
than using an r-tree '''
def find_min_max(ls, gridSize):
minLs = (math.floor(min(ls) / gridSize) *
gridSize)
maxLs = (math.floor(max(ls) / gridSize) *
gridSize)
return minLs, maxLs
timezoneLongitudeShortcuts = {}
timezoneLatitudeShortcuts = {}
for tzname in timezoneNamesToPolygons:
tzLngs = []
tzLats = []
for polyIndex, poly in enumerate(timezoneNamesToPolygons[tzname]):
lngs = [x[0] for x in poly[0]]
lats = [x[1] for x in poly[0]]
tzLngs.extend(lngs)
tzLats.extend(lats)
minLng, maxLng = find_min_max(
lngs, shortcut_long)
minLat, maxLat = find_min_max(
lats, shortcut_lat)
degree = minLng
while degree <= maxLng:
if degree not in timezoneLongitudeShortcuts:
timezoneLongitudeShortcuts[degree] =\
collections.defaultdict(list)
timezoneLongitudeShortcuts[degree][tzname].append(polyIndex)
degree = degree + shortcut_long
degree = minLat
while degree <= maxLat:
if degree not in timezoneLatitudeShortcuts:
timezoneLatitudeShortcuts[degree] =\
collections.defaultdict(list)
timezoneLatitudeShortcuts[degree][tzname].append(polyIndex)
degree = degree + shortcut_lat
return timezoneLongitudeShortcuts, timezoneLatitudeShortcuts
def read_tzworld(path):
reader = read_json
return reader(path)
def read_json(path):
with gzip.open(path, "rb") as f:
featureCollection = json.loads(f.read().decode("utf-8"))
return featureCollection
def feature_collection_polygons(featureCollection):
"""Turn a feature collection
into an iterator over polygons.
Given a featureCollection of the kind loaded from the json
input, unpack it to an iterator which produces a series of
(tzname, polygon) pairs, one for every polygon in the
featureCollection. Here tzname is a string and polygon is a
list of floats.
"""
for feature in featureCollection['features']:
tzname = feature['properties']['TZID']
if feature['geometry']['type'] == 'Polygon':
exterior = feature['geometry']['coordinates'][0]
interior = feature['geometry']['coordinates'][1:]
yield (tzname, (exterior, interior))
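def _example_lookup():
    # Hedged usage sketch (not part of the original module): build the index
    # once (parsing tz_world.json.gz is the slow part), then query repeatedly.
    tz = tzwhere()
    return tz.tzNameAt(35.29, -89.66)  # expected: 'America/Chicago'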
if __name__ == "__main__":
prepareMap()
|
examples/scripts/segmentation/nnet3-segmenter.py | mxmpl/pykaldi | 916 | 6481 | #!/usr/bin/env python
from __future__ import print_function
from kaldi.segmentation import NnetSAD, SegmentationProcessor
from kaldi.nnet3 import NnetSimpleComputationOptions
from kaldi.util.table import SequentialMatrixReader
# Construct SAD
model = NnetSAD.read_model("final.raw")
post = NnetSAD.read_average_posteriors("post_output.vec")
transform = NnetSAD.make_sad_transform(post)
graph = NnetSAD.make_sad_graph()
decodable_opts = NnetSimpleComputationOptions()
decodable_opts.extra_left_context = 79
decodable_opts.extra_right_context = 21
decodable_opts.extra_left_context_initial = 0
decodable_opts.extra_right_context_final = 0
decodable_opts.frames_per_chunk = 150
decodable_opts.acoustic_scale = 0.3
sad = NnetSAD(model, transform, graph, decodable_opts=decodable_opts)
seg = SegmentationProcessor(target_labels=[2])
# Define feature pipeline as a Kaldi rspecifier
feats_rspec = "ark:compute-mfcc-feats --config=mfcc.conf scp:wav.scp ark:- |"
# Segment
with SequentialMatrixReader(feats_rspec) as f, open ("segments", "w") as s:
for key, feats in f:
out = sad.segment(feats)
segments, stats = seg.process(out["alignment"])
seg.write(key, segments, s)
print("segments:", segments, flush=True)
print("stats:", stats, flush=True)
print("global stats:", seg.stats, flush=True)
|
ppcls/data/preprocess/__init__.py | zhusonghe/PaddleClas-1 | 3,763 | 6502 | <filename>ppcls/data/preprocess/__init__.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ppcls.data.preprocess.ops.autoaugment import ImageNetPolicy as RawImageNetPolicy
from ppcls.data.preprocess.ops.randaugment import RandAugment as RawRandAugment
from ppcls.data.preprocess.ops.timm_autoaugment import RawTimmAutoAugment
from ppcls.data.preprocess.ops.cutout import Cutout
from ppcls.data.preprocess.ops.hide_and_seek import HideAndSeek
from ppcls.data.preprocess.ops.random_erasing import RandomErasing
from ppcls.data.preprocess.ops.grid import GridMask
from ppcls.data.preprocess.ops.operators import DecodeImage
from ppcls.data.preprocess.ops.operators import ResizeImage
from ppcls.data.preprocess.ops.operators import CropImage
from ppcls.data.preprocess.ops.operators import RandCropImage
from ppcls.data.preprocess.ops.operators import RandFlipImage
from ppcls.data.preprocess.ops.operators import NormalizeImage
from ppcls.data.preprocess.ops.operators import ToCHWImage
from ppcls.data.preprocess.ops.operators import AugMix
from ppcls.data.preprocess.batch_ops.batch_operators import MixupOperator, CutmixOperator, OpSampler, FmixOperator
import numpy as np
from PIL import Image
def transform(data, ops=[]):
""" transform """
for op in ops:
data = op(data)
return data
class AutoAugment(RawImageNetPolicy):
""" ImageNetPolicy wrapper to auto fit different img types """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, img):
if not isinstance(img, Image.Image):
img = np.ascontiguousarray(img)
img = Image.fromarray(img)
img = super().__call__(img)
if isinstance(img, Image.Image):
img = np.asarray(img)
return img
class RandAugment(RawRandAugment):
""" RandAugment wrapper to auto fit different img types """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, img):
if not isinstance(img, Image.Image):
img = np.ascontiguousarray(img)
img = Image.fromarray(img)
img = super().__call__(img)
if isinstance(img, Image.Image):
img = np.asarray(img)
return img
class TimmAutoAugment(RawTimmAutoAugment):
    """ TimmAutoAugment wrapper to auto fit different img types. """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, img):
if not isinstance(img, Image.Image):
img = np.ascontiguousarray(img)
img = Image.fromarray(img)
img = super().__call__(img)
if isinstance(img, Image.Image):
img = np.asarray(img)
return img
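def _example_transform_chain(img):
    # Hedged sketch (not part of the original module): transform() simply
    # threads `data` through each op in order, so any callables compose here.
    # The two lambdas stand in for real preprocessing ops and assume `img`
    # is an HWC numpy array.
    ops = [lambda x: x.astype('float32') / 255.0, lambda x: x.transpose((2, 0, 1))]
    return transform(img, ops)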
|
SearchService/test/unit/test_solr_interface.py | loftwah/appscale | 790 | 6522 | <gh_stars>100-1000
#!/usr/bin/env python
import os
import json
import sys
import unittest
import urllib2
from flexmock import flexmock
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
import solr_interface
import search_exceptions
class FakeSolrDoc():
def __init__(self):
self.fields = []
class FakeDocument():
INDEX_NAME = "indexname"
INDEX_LOCALE = "indexlocale"
def __init__(self):
self.fields = []
self.id = "id"
self.language = "lang"
class FakeSchema():
def __init__(self):
self.fields = []
class FakeIndex():
def __init__(self):
self.name = "name"
self.schema = FakeSchema()
class FakeIndexSpec():
def __init__(self):
pass
def namespace(self):
return 'ns'
def name(self):
return self.name
class FakeUpdate():
def __init__(self, name, field_type):
self.name = name
self.field_type = field_type
class FakeConnection():
def __init__(self, is_good_code):
self.code = 200
if not is_good_code:
self.code = 500
def getcode(self):
return self.code
class TestSolrInterface(unittest.TestCase):
"""
A set of test cases for the solr interface module.
"""
def test_get_index_adapter(self):
appscale_info = flexmock()
appscale_info.should_receive("get_search_location").\
and_return("somelocation")
solr = solr_interface.Solr()
solr = flexmock(solr)
flexmock(solr_interface)
solr_interface.should_receive("get_index_name").and_return("index_ns_name")
flexmock(urllib2)
urllib2.should_receive("urlopen").and_return(FakeConnection(False))
self.assertRaises(search_exceptions.InternalError,
solr._get_index_adapter, "app_id", "ns", "name")
# Test the case of ValueError on a json.load.
urllib2.should_receive("urlopen").and_return(FakeConnection(True))
flexmock(json)
json.should_receive("load").and_raise(ValueError)
self.assertRaises(search_exceptions.InternalError,
solr._get_index_adapter, "app_id", "ns", "name")
# Test a bad status from SOLR.
dictionary = {'responseHeader':{'status': 1}}
json.should_receive("load").and_return(dictionary)
self.assertRaises(search_exceptions.InternalError,
solr._get_index_adapter, "app_id", "ns", "name")
fields = [{'name':"index_ns_name_"}]
dictionary = {'responseHeader':{'status': 0}, "fields": fields}
json.should_receive("load").and_return(dictionary)
index = solr._get_index_adapter("app_id", "ns", "name")
self.assertEquals(index.schema[0]['name'], "index_ns_name_")
def test_update_schema(self):
appscale_info = flexmock()
appscale_info.should_receive("get_search_location").\
and_return("somelocation")
solr = solr_interface.Solr()
flexmock(urllib2)
urllib2.should_receive("urlopen").and_return(FakeConnection(False))
updates = []
self.assertRaises(search_exceptions.InternalError,
solr.update_schema, updates)
updates = [{'name': 'name1', 'type':'type1'}]
flexmock(json)
json.should_receive("load").and_raise(ValueError)
urllib2.should_receive("urlopen").and_return(FakeConnection(True))
self.assertRaises(search_exceptions.InternalError,
solr.update_schema, updates)
dictionary = {"responseHeader":{"status":1}}
json.should_receive("load").and_return(dictionary)
self.assertRaises(search_exceptions.InternalError,
solr.update_schema, updates)
dictionary = {"responseHeader":{"status":0}}
json.should_receive("load").and_return(dictionary)
solr.update_schema(updates)
def test_to_solr_hash_map(self):
appscale_info = flexmock()
appscale_info.should_receive("get_search_location").\
and_return("somelocation")
solr = solr_interface.Solr()
self.assertNotEqual(solr.to_solr_hash_map(FakeIndex(), FakeDocument()), {})
def test_commit_update(self):
appscale_info = flexmock()
appscale_info.should_receive("get_search_location").\
and_return("somelocation")
solr = solr_interface.Solr()
flexmock(json)
json.should_receive("loads").and_return({})
flexmock(urllib2)
urllib2.should_receive("urlopen").and_return(FakeConnection(False))
self.assertRaises(search_exceptions.InternalError, solr.commit_update, {})
json.should_receive("load").and_raise(ValueError)
urllib2.should_receive("urlopen").and_return(FakeConnection(True))
self.assertRaises(search_exceptions.InternalError, solr.commit_update, {})
dictionary = {'responseHeader':{'status': 1}}
json.should_receive("load").and_return(dictionary).once()
self.assertRaises(search_exceptions.InternalError, solr.commit_update, {})
dictionary = {'responseHeader':{'status': 0}}
json.should_receive("load").and_return(dictionary).once()
solr.commit_update({})
def test_update_document(self):
appscale_info = flexmock()
appscale_info.should_receive("get_search_location").\
and_return("somelocation")
solr = solr_interface.Solr()
solr = flexmock(solr)
solr.should_receive("to_solr_doc").and_return(FakeSolrDoc())
solr.should_receive("_get_index_adapter").and_return(FakeIndex())
solr.should_receive("compute_updates").and_return([])
solr.should_receive("to_solr_hash_map").and_return(None)
solr.should_receive("commit_update").and_return(None)
solr.update_document("app_id", None, FakeIndexSpec())
solr.should_receive("compute_updates").and_return([1,2])
solr.should_receive("update_schema").twice()
solr.update_document("app_id", None, FakeIndexSpec())
solr.should_receive("to_solr_hash_map").and_return(None).once()
solr.update_document("app_id", None, FakeIndexSpec())
def test_json_loads_byteified(self):
json_with_unicode = (
'{"key2": [{"\\u2611": 28, "\\u2616": ["\\u263a"]}, "second", "third"], '
'"key1": "value", '
'"\\u2604": {"\\u2708": "\\u2708"}}'
)
parsed_obj = solr_interface.json_loads_byteified(json_with_unicode)
def walk_and_check_type(obj):
if isinstance(obj, dict):
for key, value in obj.iteritems():
self.assertIsInstance(key, str)
walk_and_check_type(value)
elif isinstance(obj, list):
for value in obj:
walk_and_check_type(value)
else:
self.assertIsInstance(obj, (str, int))
walk_and_check_type(parsed_obj)
self.assertEqual(parsed_obj, {
'key1': 'value',
'key2': [
{'\<KEY>': 28, '\xe2\x98\x96': ['\xe2\x98\xba']},
'second',
'third'
],
'\xe2\x98\x84': {'\xe2\x9c\x88': '\xe2\x9c\x88'}
})
|
script.video.F4mProxy/lib/flvlib/constants.py | akuala/REPO.KUALA | 105 | 6534 | <filename>script.video.F4mProxy/lib/flvlib/constants.py
"""
The constants used in FLV files and their meanings.
"""
# Tag type
(TAG_TYPE_AUDIO, TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT) = (8, 9, 18)
# Sound format
(SOUND_FORMAT_PCM_PLATFORM_ENDIAN,
SOUND_FORMAT_ADPCM,
SOUND_FORMAT_MP3,
SOUND_FORMAT_PCM_LITTLE_ENDIAN,
SOUND_FORMAT_NELLYMOSER_16KHZ,
SOUND_FORMAT_NELLYMOSER_8KHZ,
SOUND_FORMAT_NELLYMOSER,
SOUND_FORMAT_G711_A_LAW,
SOUND_FORMAT_G711_MU_LAW) = range(9)
(SOUND_FORMAT_AAC,
SOUND_FORMAT_SPEEX) = range(10, 12)
(SOUND_FORMAT_MP3_8KHZ,
SOUND_FORMAT_DEVICE_SPECIFIC) = range(14, 16)
sound_format_to_string = {
SOUND_FORMAT_PCM_PLATFORM_ENDIAN: "Linear PCM, platform endian",
SOUND_FORMAT_ADPCM: "ADPCM",
SOUND_FORMAT_MP3: "MP3",
SOUND_FORMAT_PCM_LITTLE_ENDIAN: "Linear PCM, little endian",
SOUND_FORMAT_NELLYMOSER_16KHZ: "Nellymoser 16-kHz mono",
SOUND_FORMAT_NELLYMOSER_8KHZ: "Nellymoser 8-kHz mono",
SOUND_FORMAT_NELLYMOSER: "Nellymoser",
SOUND_FORMAT_G711_A_LAW: "G.711 A-law logarithmic PCM",
SOUND_FORMAT_G711_MU_LAW: "G.711 mu-law logarithmic PCM",
SOUND_FORMAT_AAC: "AAC",
SOUND_FORMAT_SPEEX: "Speex",
SOUND_FORMAT_MP3_8KHZ: "MP3 8-kHz",
SOUND_FORMAT_DEVICE_SPECIFIC: "Device-specific sound"
}
# Sound rate
(SOUND_RATE_5_5_KHZ,
SOUND_RATE_11_KHZ,
SOUND_RATE_22_KHZ,
SOUND_RATE_44_KHZ) = range(4)
sound_rate_to_string = {
SOUND_RATE_5_5_KHZ: "5.5-kHz",
SOUND_RATE_11_KHZ: "11-kHz",
SOUND_RATE_22_KHZ: "22-kHz",
SOUND_RATE_44_KHZ: "44-kHz"
}
# Sound size
(SOUND_SIZE_8_BIT, SOUND_SIZE_16_BIT) = range(2)
sound_size_to_string = {
SOUND_SIZE_8_BIT: "snd8Bit",
SOUND_SIZE_16_BIT: "snd16Bit"
}
# Sound type
(SOUND_TYPE_MONO, SOUND_TYPE_STEREO) = range(2)
sound_type_to_string = {
SOUND_TYPE_MONO: "sndMono",
SOUND_TYPE_STEREO: "sndStereo"
}
# AAC packet type
(AAC_PACKET_TYPE_SEQUENCE_HEADER,
AAC_PACKET_TYPE_RAW) = range(2)
aac_packet_type_to_string = {
AAC_PACKET_TYPE_SEQUENCE_HEADER: "sequence header",
AAC_PACKET_TYPE_RAW: "raw"
}
# Codec ID
(CODEC_ID_JPEG,
CODEC_ID_H263,
CODEC_ID_SCREEN_VIDEO,
CODEC_ID_VP6,
CODEC_ID_VP6_WITH_ALPHA,
CODEC_ID_SCREEN_VIDEO_V2,
CODEC_ID_H264) = range(1, 8)
codec_id_to_string = {
CODEC_ID_JPEG: "JPEG",
CODEC_ID_H263: "Sorenson H.263",
CODEC_ID_SCREEN_VIDEO: "Screen video",
CODEC_ID_VP6: "On2 VP6",
CODEC_ID_VP6_WITH_ALPHA: "On2 VP6 with alpha channel",
CODEC_ID_SCREEN_VIDEO_V2: "Screen video version 2",
CODEC_ID_H264: "H.264"
}
# Frame type
(FRAME_TYPE_KEYFRAME,
FRAME_TYPE_INTERFRAME,
FRAME_TYPE_DISPOSABLE_INTERFRAME,
FRAME_TYPE_GENERATED_KEYFRAME,
FRAME_TYPE_INFO_FRAME) = range(1, 6)
frame_type_to_string = {
FRAME_TYPE_KEYFRAME: "keyframe",
FRAME_TYPE_INTERFRAME: "interframe",
FRAME_TYPE_DISPOSABLE_INTERFRAME: "disposable interframe",
FRAME_TYPE_GENERATED_KEYFRAME: "generated keyframe",
FRAME_TYPE_INFO_FRAME: "video info/command frame"
}
# H.264 packet type
(H264_PACKET_TYPE_SEQUENCE_HEADER,
H264_PACKET_TYPE_NALU,
H264_PACKET_TYPE_END_OF_SEQUENCE) = range(3)
h264_packet_type_to_string = {
H264_PACKET_TYPE_SEQUENCE_HEADER: "sequence header",
H264_PACKET_TYPE_NALU: "NAL unit",
H264_PACKET_TYPE_END_OF_SEQUENCE: "sequence end"
}
# Value type
(VALUE_TYPE_NUMBER,
VALUE_TYPE_BOOLEAN,
VALUE_TYPE_STRING,
VALUE_TYPE_OBJECT,
VALUE_TYPE_MOVIECLIP,
VALUE_TYPE_NULL,
VALUE_TYPE_UNDEFINED,
VALUE_TYPE_REFERENCE,
VALUE_TYPE_ECMA_ARRAY) = range(9)
(VALUE_TYPE_STRICT_ARRAY,
VALUE_TYPE_DATE,
VALUE_TYPE_LONGSTRING) = range(10, 13)
value_type_to_string = {
VALUE_TYPE_NUMBER: 'Number',
VALUE_TYPE_BOOLEAN: 'Boolean',
VALUE_TYPE_STRING: 'String',
VALUE_TYPE_OBJECT: 'Object',
VALUE_TYPE_MOVIECLIP: 'MovieClip',
VALUE_TYPE_NULL: 'Null',
VALUE_TYPE_UNDEFINED: 'Undefined',
VALUE_TYPE_REFERENCE: 'Reference',
VALUE_TYPE_ECMA_ARRAY: 'ECMA Array',
VALUE_TYPE_STRICT_ARRAY: 'Strict Array',
VALUE_TYPE_DATE: 'Date',
VALUE_TYPE_LONGSTRING: 'Longstring'
}
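def _describe_tag(sound_format, codec_id):
    # Hedged sketch (not part of the original module): map raw FLV field values
    # to the readable labels defined above, e.g.
    # _describe_tag(SOUND_FORMAT_AAC, CODEC_ID_H264) -> ('AAC', 'H.264').
    return (sound_format_to_string[sound_format], codec_id_to_string[codec_id])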
|
jug/subcommands/demo.py | rdenham/jug | 309 | 6556 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2017, <NAME> <<EMAIL>>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from . import SubCommand
__all__ = ['DemoCommand']
class DemoCommand(SubCommand):
'''Create demo directory.
'''
name = "demo"
def run(self, *args, **kwargs):
import os
from os import path
print('''
Jug will create a directory called 'jug-demo/' with a file called 'primes.py'
inside.
You can test jug by switching to that directory and running the commands:
jug status primes.py
followed by
jug execute primes.py
Upon termination of the process, results will be in a file called 'output.txt'.
PARALLEL USAGE
You can speed up the process by running several 'jug execute' in parallel:
jug execute primes.py &
jug execute primes.py &
jug execute primes.py &
jug execute primes.py &
TROUBLE SHOOTING:
Should you run into issues, you can run the internal tests for jug with
jug test-jug
FURTHER READING
The online documentation contains further reading. You can read the next
tutorial here:
http://jug.readthedocs.io/en/latest/decrypt-example.html
''')
if path.exists('jug-demo'):
print("Jug-demo previously created")
return
os.mkdir('jug-demo')
output = open('jug-demo/primes.py', 'wt')
output.write(r'''
from time import sleep
from jug import TaskGenerator
@TaskGenerator
def is_prime(n):
sleep(1.)
for j in range(2, n - 1):
if (n % j) == 0:
return False
return True
@TaskGenerator
def count_primes(ps):
return sum(ps)
@TaskGenerator
def write_output(n):
output = open('output.txt', 'wt')
output.write("Found {0} primes <= 100.\n".format(n))
output.close()
primes100 = []
for n in range(2, 101):
primes100.append(is_prime(n))
n_primes = count_primes(primes100)
write_output(n_primes)
''')
output.close()
demo = DemoCommand()
|
src/python/nimbusml/internal/entrypoints/trainers_lightgbmbinaryclassifier.py | montehoover/NimbusML | 134 | 6559 | # - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
Trainers.LightGbmBinaryClassifier
"""
import numbers
from ..utils.entrypoints import EntryPoint
from ..utils.utils import try_set, unlist
def trainers_lightgbmbinaryclassifier(
training_data,
predictor_model=None,
number_of_iterations=100,
learning_rate=None,
number_of_leaves=None,
minimum_example_count_per_leaf=None,
feature_column_name='Features',
booster=None,
label_column_name='Label',
example_weight_column_name=None,
row_group_column_name=None,
normalize_features='Auto',
caching='Auto',
unbalanced_sets=False,
weight_of_positive_examples=1.0,
sigmoid=0.5,
evaluation_metric='Logloss',
maximum_bin_count_per_feature=255,
verbose=False,
silent=True,
number_of_threads=None,
early_stopping_round=0,
batch_size=1048576,
use_categorical_split=None,
handle_missing_value=True,
use_zero_as_missing_value=False,
minimum_example_count_per_group=100,
maximum_categorical_split_point_count=32,
categorical_smoothing=10.0,
l2_categorical_regularization=10.0,
seed=None,
parallel_trainer=None,
**params):
"""
**Description**
Train a LightGBM binary classification model.
:param number_of_iterations: Number of iterations. (inputs).
:param training_data: The data to be used for training (inputs).
:param learning_rate: Shrinkage rate for trees, used to prevent
over-fitting. Range: (0,1]. (inputs).
:param number_of_leaves: Maximum leaves for trees. (inputs).
:param minimum_example_count_per_leaf: Minimum number of
instances needed in a child. (inputs).
:param feature_column_name: Column to use for features (inputs).
    :param booster: Which booster to use; can be gbtree, gblinear or
        dart. gbtree and dart use tree-based models, while gblinear
        uses a linear function. (inputs).
:param label_column_name: Column to use for labels (inputs).
:param example_weight_column_name: Column to use for example
weight (inputs).
:param row_group_column_name: Column to use for example groupId
(inputs).
:param normalize_features: Normalize option for the feature
column (inputs).
:param caching: Whether trainer should cache input training data
(inputs).
:param unbalanced_sets: Use for binary classification when
training data is not balanced. (inputs).
:param weight_of_positive_examples: Control the balance of
positive and negative weights, useful for unbalanced classes.
A typical value to consider: sum(negative cases) /
sum(positive cases). (inputs).
:param sigmoid: Parameter for the sigmoid function. (inputs).
:param evaluation_metric: Evaluation metrics. (inputs).
:param maximum_bin_count_per_feature: Maximum number of bucket
bin for features. (inputs).
    :param verbose: Enable verbose output. (inputs).
:param silent: Printing running messages. (inputs).
:param number_of_threads: Number of parallel threads used to run
LightGBM. (inputs).
:param early_stopping_round: Rounds of early stopping, 0 will
disable it. (inputs).
:param batch_size: Number of entries in a batch when loading
data. (inputs).
:param use_categorical_split: Enable categorical split or not.
(inputs).
:param handle_missing_value: Enable special handling of missing
value or not. (inputs).
:param use_zero_as_missing_value: Enable usage of zero (0) as
missing value. (inputs).
:param minimum_example_count_per_group: Minimum number of
instances per categorical group. (inputs).
:param maximum_categorical_split_point_count: Max number of
categorical thresholds. (inputs).
    :param categorical_smoothing: Laplace smoothing term for
        categorical feature splits. Avoids the bias of small
        categories. (inputs).
:param l2_categorical_regularization: L2 Regularization for
categorical split. (inputs).
:param seed: Sets the random seed for LightGBM to use. (inputs).
:param parallel_trainer: Parallel LightGBM Learning Algorithm
(inputs).
:param predictor_model: The trained model (outputs).
"""
entrypoint_name = 'Trainers.LightGbmBinaryClassifier'
inputs = {}
outputs = {}
if number_of_iterations is not None:
inputs['NumberOfIterations'] = try_set(
obj=number_of_iterations,
none_acceptable=True,
is_of_type=numbers.Real)
if training_data is not None:
inputs['TrainingData'] = try_set(
obj=training_data,
none_acceptable=False,
is_of_type=str)
if learning_rate is not None:
inputs['LearningRate'] = try_set(
obj=learning_rate,
none_acceptable=True,
is_of_type=numbers.Real)
if number_of_leaves is not None:
inputs['NumberOfLeaves'] = try_set(
obj=number_of_leaves,
none_acceptable=True,
is_of_type=numbers.Real)
if minimum_example_count_per_leaf is not None:
inputs['MinimumExampleCountPerLeaf'] = try_set(
obj=minimum_example_count_per_leaf,
none_acceptable=True,
is_of_type=numbers.Real)
if feature_column_name is not None:
inputs['FeatureColumnName'] = try_set(
obj=feature_column_name,
none_acceptable=True,
is_of_type=str,
is_column=True)
if booster is not None:
inputs['Booster'] = try_set(
obj=booster,
none_acceptable=True,
is_of_type=dict)
if label_column_name is not None:
inputs['LabelColumnName'] = try_set(
obj=label_column_name,
none_acceptable=True,
is_of_type=str,
is_column=True)
if example_weight_column_name is not None:
inputs['ExampleWeightColumnName'] = try_set(
obj=example_weight_column_name,
none_acceptable=True,
is_of_type=str,
is_column=True)
if row_group_column_name is not None:
inputs['RowGroupColumnName'] = try_set(
obj=row_group_column_name,
none_acceptable=True,
is_of_type=str,
is_column=True)
if normalize_features is not None:
inputs['NormalizeFeatures'] = try_set(
obj=normalize_features,
none_acceptable=True,
is_of_type=str,
values=[
'No',
'Warn',
'Auto',
'Yes'])
if caching is not None:
inputs['Caching'] = try_set(
obj=caching,
none_acceptable=True,
is_of_type=str,
values=[
'Auto',
'Memory',
'None'])
if unbalanced_sets is not None:
inputs['UnbalancedSets'] = try_set(
obj=unbalanced_sets,
none_acceptable=True,
is_of_type=bool)
if weight_of_positive_examples is not None:
inputs['WeightOfPositiveExamples'] = try_set(
obj=weight_of_positive_examples,
none_acceptable=True,
is_of_type=numbers.Real)
if sigmoid is not None:
inputs['Sigmoid'] = try_set(
obj=sigmoid,
none_acceptable=True,
is_of_type=numbers.Real)
if evaluation_metric is not None:
inputs['EvaluationMetric'] = try_set(
obj=evaluation_metric,
none_acceptable=True,
is_of_type=str,
values=[
'None',
'Default',
'Logloss',
'Error',
'AreaUnderCurve'])
if maximum_bin_count_per_feature is not None:
inputs['MaximumBinCountPerFeature'] = try_set(
obj=maximum_bin_count_per_feature,
none_acceptable=True,
is_of_type=numbers.Real)
if verbose is not None:
inputs['Verbose'] = try_set(
obj=verbose,
none_acceptable=True,
is_of_type=bool)
if silent is not None:
inputs['Silent'] = try_set(
obj=silent,
none_acceptable=True,
is_of_type=bool)
if number_of_threads is not None:
inputs['NumberOfThreads'] = try_set(
obj=number_of_threads,
none_acceptable=True,
is_of_type=numbers.Real)
if early_stopping_round is not None:
inputs['EarlyStoppingRound'] = try_set(
obj=early_stopping_round,
none_acceptable=True,
is_of_type=numbers.Real)
if batch_size is not None:
inputs['BatchSize'] = try_set(
obj=batch_size,
none_acceptable=True,
is_of_type=numbers.Real)
if use_categorical_split is not None:
inputs['UseCategoricalSplit'] = try_set(
obj=use_categorical_split, none_acceptable=True, is_of_type=bool)
if handle_missing_value is not None:
inputs['HandleMissingValue'] = try_set(
obj=handle_missing_value,
none_acceptable=True,
is_of_type=bool)
if use_zero_as_missing_value is not None:
inputs['UseZeroAsMissingValue'] = try_set(
obj=use_zero_as_missing_value,
none_acceptable=True,
is_of_type=bool)
if minimum_example_count_per_group is not None:
inputs['MinimumExampleCountPerGroup'] = try_set(
obj=minimum_example_count_per_group,
none_acceptable=True,
is_of_type=numbers.Real,
valid_range={
'Inf': 0,
'Max': 2147483647})
if maximum_categorical_split_point_count is not None:
inputs['MaximumCategoricalSplitPointCount'] = try_set(
obj=maximum_categorical_split_point_count,
none_acceptable=True,
is_of_type=numbers.Real,
valid_range={
'Inf': 0,
'Max': 2147483647})
if categorical_smoothing is not None:
inputs['CategoricalSmoothing'] = try_set(
obj=categorical_smoothing,
none_acceptable=True,
is_of_type=numbers.Real, valid_range={'Min': 0.0})
if l2_categorical_regularization is not None:
inputs['L2CategoricalRegularization'] = try_set(
obj=l2_categorical_regularization,
none_acceptable=True,
is_of_type=numbers.Real, valid_range={'Min': 0.0})
if seed is not None:
inputs['Seed'] = try_set(
obj=seed,
none_acceptable=True,
is_of_type=numbers.Real)
if parallel_trainer is not None:
inputs['ParallelTrainer'] = try_set(
obj=parallel_trainer,
none_acceptable=True,
is_of_type=dict)
if predictor_model is not None:
outputs['PredictorModel'] = try_set(
obj=predictor_model, none_acceptable=False, is_of_type=str)
input_variables = {
x for x in unlist(inputs.values())
if isinstance(x, str) and x.startswith("$")}
output_variables = {
x for x in unlist(outputs.values())
if isinstance(x, str) and x.startswith("$")}
entrypoint = EntryPoint(
name=entrypoint_name, inputs=inputs, outputs=outputs,
input_variables=input_variables,
output_variables=output_variables)
return entrypoint
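

# ---------------------------------------------------------------------------
# Editorial usage sketch (not emitted by entrypoint_compiler.py): a hedged
# illustration of how this builder might be invoked. The "$training_data"
# and "$predictor_model" strings are illustrative placeholder names, not
# values defined by this repository; they only show how the variable
# extraction above picks up "$"-prefixed inputs and outputs. Guarded by
# __main__ so importing the module is unaffected.
if __name__ == '__main__':
    node = trainers_lightgbmbinaryclassifier(
        training_data='$training_data',       # becomes an input variable
        predictor_model='$predictor_model',   # becomes an output variable
        number_of_iterations=200,
        learning_rate=0.1,
        number_of_leaves=31,
        unbalanced_sets=True)
    print(node)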
|